From 7d0bac62a4baa2f0c1d142afccecf2935043ce15 Mon Sep 17 00:00:00 2001 From: Matthew Dillon Date: Sun, 22 Jun 2003 04:30:43 +0000 Subject: [PATCH] thread stage 10: (note stage 9 was the kern/lwkt_rwlock commit). Cleanup thread and process creation functions. Check the spl against ipending in cpu_lwkt_restore (so the idle loop does not lockup the machine). Remove the old VM object kstack allocation and freeing code. Leave newly created processes in a stopped state to fix wakeup/fork_handler races. Normalize the lwkt_init_*() functions. Add a sysctl debug.untimely_switch which will cause the last crit_exit() to yield, which causes a task switch to occur in wakeup() and catches a lot of 4.x-isms that can be found and fixed on UP. --- sys/i386/i386/machdep.c | 25 ++---- sys/i386/i386/pmap.c | 128 ++-------------------------- sys/i386/i386/swtch.s | 10 ++- sys/i386/i386/vm_machdep.c | 7 +- sys/kern/init_main.c | 5 +- sys/kern/kern_fork.c | 55 ++++++++---- sys/kern/kern_kthread.c | 3 +- sys/kern/kern_synch.c | 8 +- sys/kern/lwkt_rwlock.c | 7 +- sys/kern/lwkt_thread.c | 89 +++++++++++++++---- sys/kern/vfs_aio.c | 3 +- sys/netproto/smb/smb_subr.c | 3 +- sys/platform/pc32/i386/machdep.c | 25 ++---- sys/platform/pc32/i386/pmap.c | 128 ++-------------------------- sys/platform/pc32/i386/swtch.s | 10 ++- sys/platform/pc32/i386/vm_machdep.c | 7 +- sys/sys/proc.h | 3 +- sys/sys/thread.h | 6 +- sys/vm/pmap.h | 6 +- sys/vm/vm_glue.c | 6 +- 20 files changed, 191 insertions(+), 343 deletions(-) diff --git a/sys/i386/i386/machdep.c b/sys/i386/i386/machdep.c index 7b5444ddfc..1207add430 100644 --- a/sys/i386/i386/machdep.c +++ b/sys/i386/i386/machdep.c @@ -36,7 +36,7 @@ * * from: @(#)machdep.c 7.4 (Berkeley) 6/3/91 * $FreeBSD: src/sys/i386/i386/machdep.c,v 1.385.2.30 2003/05/31 08:48:05 alc Exp $ - * $DragonFly: src/sys/i386/i386/Attic/machdep.c,v 1.8 2003/06/21 07:54:55 dillon Exp $ + * $DragonFly: src/sys/i386/i386/Attic/machdep.c,v 1.9 2003/06/22 04:30:39 dillon Exp $ */ #include "apm.h" @@ -1866,14 +1866,14 @@ init386(first) * Prevent lowering of the ipl if we call tsleep() early. 
*/ safepri = cpl; + gd = &CPU_prvspace[0].globaldata; - thread0.td_kstack = (void *)proc0paddr; + lwkt_init_thread(&thread0, proc0paddr); + gd->gd_curthread = &thread0; + thread0.td_switch = cpu_heavy_switch; /* YYY eventually LWKT */ proc0.p_addr = (void *)thread0.td_kstack; proc0.p_thread = &thread0; thread0.td_proc = &proc0; - thread0.td_pcb = (struct pcb *) - ((char *)proc0paddr + UPAGES*PAGE_SIZE - sizeof(struct pcb)); - thread0.td_kstack = (char *)proc0paddr; thread0.td_flags = TDF_RUNNING; atdevbase = ISA_HOLE_START + KERNBASE; @@ -1912,7 +1912,6 @@ init386(first) gdt_segs[GPRIV_SEL].ssd_limit = atop(0 - 1); gdt_segs[GPROC0_SEL].ssd_base = (int) &common_tss; #endif - gd = &CPU_prvspace[0].globaldata; gd->gd_prvspace = &CPU_prvspace[0]; /* * Note: on both UP and SMP curthread must be set non-NULL @@ -2073,7 +2072,6 @@ init386(first) thread0.td_pcb->pcb_mpnest = 1; #endif thread0.td_pcb->pcb_ext = 0; - thread0.td_switch = cpu_heavy_switch; /* YYY eventually LWKT */ proc0.p_md.md_regs = &proc0_tf; } @@ -2086,20 +2084,15 @@ void cpu_gdinit(struct globaldata *gd, int cpu) { char *sp; - struct pcb *pcb; - if (cpu == 0) - gd->gd_curthread = &thread0; - else + TAILQ_INIT(&gd->gd_tdfreeq); /* for pmap_{new,dispose}_thread() */ + if (cpu) gd->gd_curthread = &gd->gd_idlethread; sp = gd->gd_prvspace->idlestack; - gd->gd_idlethread.td_kstack = sp; - pcb = (struct pcb *)(sp + sizeof(gd->gd_prvspace->idlestack)) - 1; - gd->gd_idlethread.td_pcb = pcb; - gd->gd_idlethread.td_sp = (char *)pcb - 16 - sizeof(void *); + lwkt_init_thread(&gd->gd_idlethread, sp); gd->gd_idlethread.td_switch = cpu_lwkt_switch; + gd->gd_idlethread.td_sp -= sizeof(void *); *(void **)gd->gd_idlethread.td_sp = cpu_idle_restore; - TAILQ_INIT(&gd->gd_tdfreeq); /* for pmap_{new,dispose}_thread() */ } #if defined(I586_CPU) && !defined(NO_F00F_HACK) diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c index 08b7f01010..dbb4099097 100644 --- a/sys/i386/i386/pmap.c +++ b/sys/i386/i386/pmap.c @@ -40,7 +40,7 @@ * * from: @(#)pmap.c 7.7 (Berkeley) 5/12/91 * $FreeBSD: src/sys/i386/i386/pmap.c,v 1.250.2.18 2002/03/06 22:48:53 silby Exp $ - * $DragonFly: src/sys/i386/i386/Attic/pmap.c,v 1.9 2003/06/21 17:31:08 dillon Exp $ + * $DragonFly: src/sys/i386/i386/Attic/pmap.c,v 1.10 2003/06/22 04:30:39 dillon Exp $ */ /* @@ -843,30 +843,11 @@ retry: /* * Create a new thread and optionally associate it with a (new) process. */ -struct thread * -pmap_new_thread() +void +pmap_init_thread(thread_t td) { - struct thread *td; - - /* HIPRI YYY */ - if (mycpu->gd_tdfreecount > 0) { - --mycpu->gd_tdfreecount; - td = TAILQ_FIRST(&mycpu->gd_tdfreeq); - KASSERT(td != NULL, ("unexpected null cache td")); - TAILQ_REMOVE(&mycpu->gd_tdfreeq, td, td_threadq); - } else { - td = zalloc(thread_zone); - td->td_kstack = - (void *)kmem_alloc(kernel_map, UPAGES * PAGE_SIZE); - lwkt_rwlock_init(&td->td_rwlock); - } - - /* - * Sometimes td_pcb is moved around YYY. Make sure that it is - * properly initialized. - */ td->td_pcb = (struct pcb *)(td->td_kstack + UPAGES * PAGE_SIZE) - 1; - return(td); + td->td_sp = (char *)td->td_pcb - 16; } /* @@ -897,74 +878,13 @@ pmap_dispose_thread(struct thread *td) * This routine directly affects the fork perf for a process. 
*/ void -pmap_new_proc(struct proc *p, struct thread *td) +pmap_init_proc(struct proc *p, struct thread *td) { p->p_addr = (void *)td->td_kstack; p->p_thread = td; td->td_proc = p; td->td_switch = cpu_heavy_switch; bzero(p->p_addr, sizeof(*p->p_addr)); -#if 0 - - int i, updateneeded; - vm_object_t upobj; - vm_page_t m; - struct user *up; - unsigned *ptek, oldpte; - - /* - * allocate object for the upages - */ - if ((upobj = p->p_upages_obj) == NULL) { - upobj = vm_object_allocate( OBJT_DEFAULT, UPAGES); - p->p_upages_obj = upobj; - } - - /* get a kernel virtual address for the UPAGES for this proc */ - if ((up = p->p_addr) == NULL) { - up = (struct user *) kmem_alloc_nofault(kernel_map, - UPAGES * PAGE_SIZE); - if (up == NULL) - panic("pmap_new_proc: u_map allocation failed"); - p->p_addr = up; - } - - ptek = (unsigned *) vtopte((vm_offset_t) up); - - updateneeded = 0; - for(i=0;iwire_count++; - cnt.v_wire_count++; - - oldpte = *(ptek + i); - /* - * Enter the page into the kernel address space. - */ - *(ptek + i) = VM_PAGE_TO_PHYS(m) | PG_RW | PG_V | pgeflag; - if (oldpte) { - if ((oldpte & PG_G) || (cpu_class > CPUCLASS_386)) { - invlpg((vm_offset_t) up + i * PAGE_SIZE); - } else { - updateneeded = 1; - } - } - - vm_page_wakeup(m); - vm_page_flag_clear(m, PG_ZERO); - vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE); - m->valid = VM_PAGE_BITS_ALL; - } - if (updateneeded) - invltlb(); -#endif } /* @@ -984,44 +904,6 @@ pmap_dispose_proc(struct proc *p) } p->p_addr = NULL; return(td); -#if 0 - int i; - vm_object_t upobj; - vm_page_t m; - unsigned *ptek, oldpte; - - upobj = p->p_upages_obj; - - ptek = (unsigned *) vtopte((vm_offset_t) p->p_addr); - for(i=0;i CPUCLASS_386)) - invlpg((vm_offset_t) p->p_addr + i * PAGE_SIZE); - vm_page_unwire(m, 0); - vm_page_free(m); - } -#if defined(I386_CPU) - if (cpu_class <= CPUCLASS_386) - invltlb(); -#endif - - /* - * If the process got swapped out some of its UPAGES might have gotten - * swapped. Just get rid of the object to clean up the swap use - * proactively. NOTE! might block waiting for paging I/O to complete. - */ - if (upobj->type == OBJT_SWAP) { - p->p_upages_obj = NULL; - vm_object_deallocate(upobj); - } -#endif } /* diff --git a/sys/i386/i386/swtch.s b/sys/i386/i386/swtch.s index e9cf0e5a5b..51136ad44d 100644 --- a/sys/i386/i386/swtch.s +++ b/sys/i386/i386/swtch.s @@ -35,7 +35,7 @@ * SUCH DAMAGE. * * $FreeBSD: src/sys/i386/i386/swtch.s,v 1.89.2.10 2003/01/23 03:36:24 ps Exp $ - * $DragonFly: src/sys/i386/i386/Attic/swtch.s,v 1.9 2003/06/21 17:31:08 dillon Exp $ + * $DragonFly: src/sys/i386/i386/Attic/swtch.s,v 1.10 2003/06/22 04:30:39 dillon Exp $ */ #include "npx.h" @@ -250,6 +250,8 @@ ENTRY(cpu_exit_switch) * we restore everything. * * YYY STI/CLI sequencing. + * + * YYY note: spl check is done in mi_switch when it splx()'s. */ ENTRY(cpu_heavy_restore) /* interrupts are disabled */ @@ -581,5 +583,11 @@ ENTRY(cpu_lwkt_restore) popl %ebp movl TD_MACH+MTD_CPL(%eax),%ecx /* YYY temporary */ movl %ecx,_cpl /* YYY temporary */ + andl _ipending,%ecx /* YYY temporary */ + je 1f + pushl %ecx + call splx /* YYY set gd_reqpri instead? 
*/ + addl $4,%esp +1: ret diff --git a/sys/i386/i386/vm_machdep.c b/sys/i386/i386/vm_machdep.c index 8291617ddf..bccdf9a1ac 100644 --- a/sys/i386/i386/vm_machdep.c +++ b/sys/i386/i386/vm_machdep.c @@ -39,7 +39,7 @@ * from: @(#)vm_machdep.c 7.3 (Berkeley) 5/13/91 * Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$ * $FreeBSD: src/sys/i386/i386/vm_machdep.c,v 1.132.2.9 2003/01/25 19:02:23 dillon Exp $ - * $DragonFly: src/sys/i386/i386/Attic/vm_machdep.c,v 1.8 2003/06/21 17:31:08 dillon Exp $ + * $DragonFly: src/sys/i386/i386/Attic/vm_machdep.c,v 1.9 2003/06/22 04:30:39 dillon Exp $ */ #include "npx.h" @@ -148,8 +148,8 @@ cpu_fork(p1, p2, flags) #endif /* Copy p1's pcb. */ - *p2->p_thread->td_pcb = *p1->p_thread->td_pcb; pcb2 = p2->p_thread->td_pcb; + *pcb2 = *p1->p_thread->td_pcb; /* * Create a new fresh stack for the new process. @@ -170,8 +170,7 @@ cpu_fork(p1, p2, flags) * to use the LWKT restore function directly so we can get rid of * all the extra crap we are setting up. */ - p2->p_md.md_regs = (struct trapframe *) - ((char *)p2->p_thread->td_pcb - 16) - 1; + p2->p_md.md_regs = (struct trapframe *)((char *)pcb2 - 16) - 1; bcopy(p1->p_md.md_regs, p2->p_md.md_regs, sizeof(*p2->p_md.md_regs)); /* diff --git a/sys/kern/init_main.c b/sys/kern/init_main.c index 01e6731ef7..535fea02f0 100644 --- a/sys/kern/init_main.c +++ b/sys/kern/init_main.c @@ -40,7 +40,7 @@ * * @(#)init_main.c 8.9 (Berkeley) 1/21/94 * $FreeBSD: src/sys/kern/init_main.c,v 1.134.2.8 2003/06/06 20:21:32 tegge Exp $ - * $DragonFly: src/sys/kern/init_main.c,v 1.8 2003/06/21 07:54:57 dillon Exp $ + * $DragonFly: src/sys/kern/init_main.c,v 1.9 2003/06/22 04:30:42 dillon Exp $ */ #include "opt_init_path.h" @@ -564,7 +564,6 @@ create_init(const void *udata __unused) panic("cannot fork init: %d\n", error); initproc->p_flag |= P_INMEM | P_SYSTEM; cpu_set_fork_handler(initproc, start_init, NULL); - remrunqueue(initproc); splx(s); } SYSINIT(init,SI_SUB_CREATE_INIT, SI_ORDER_FIRST, create_init, NULL) @@ -575,7 +574,7 @@ SYSINIT(init,SI_SUB_CREATE_INIT, SI_ORDER_FIRST, create_init, NULL) static void kick_init(const void *udata __unused) { - setrunqueue(initproc); + start_forked_proc(&proc0, initproc); } SYSINIT(kickinit,SI_SUB_KTHREAD_INIT, SI_ORDER_FIRST, kick_init, NULL) diff --git a/sys/kern/kern_fork.c b/sys/kern/kern_fork.c index 4e419a4cb0..20971fdc10 100644 --- a/sys/kern/kern_fork.c +++ b/sys/kern/kern_fork.c @@ -37,7 +37,7 @@ * * @(#)kern_fork.c 8.6 (Berkeley) 4/8/94 * $FreeBSD: src/sys/kern/kern_fork.c,v 1.72.2.13 2003/06/06 20:21:32 tegge Exp $ - * $DragonFly: src/sys/kern/kern_fork.c,v 1.5 2003/06/18 18:30:08 dillon Exp $ + * $DragonFly: src/sys/kern/kern_fork.c,v 1.6 2003/06/22 04:30:42 dillon Exp $ */ #include "opt_ktrace.h" @@ -100,6 +100,7 @@ fork(p, uap) error = fork1(p, RFFDG | RFPROC, &p2); if (error == 0) { + start_forked_proc(p, p2); p->p_retval[0] = p2->p_pid; p->p_retval[1] = 0; } @@ -117,6 +118,7 @@ vfork(p, uap) error = fork1(p, RFFDG | RFPROC | RFPPWAIT | RFMEM, &p2); if (error == 0) { + start_forked_proc(p, p2); p->p_retval[0] = p2->p_pid; p->p_retval[1] = 0; } @@ -133,6 +135,7 @@ rfork(p, uap) error = fork1(p, uap->flags, &p2); if (error == 0) { + start_forked_proc(p, p2); p->p_retval[0] = p2 ? p2->p_pid : 0; p->p_retval[1] = 0; } @@ -539,29 +542,12 @@ again: */ microtime(&(p2->p_stats->p_start)); p2->p_acflag = AFORK; - (void) splhigh(); - p2->p_stat = SRUN; - setrunqueue(p2); - (void) spl0(); - - /* - * Now can be swapped. 
- */ - PRELE(p1); /* * tell any interested parties about the new process */ KNOTE(&p1->p_klist, NOTE_FORK | p2->p_pid); - /* - * Preserve synchronization semantics of vfork. If waiting for - * child to exec or exit, set P_PPWAIT on child, and sleep on our - * proc (in case of exit). - */ - while (p2->p_flag & P_PPWAIT) - tsleep(p1, PWAIT, "ppwait", 0); - /* * Return child proc pointer to parent. */ @@ -619,3 +605,36 @@ rm_at_fork(function) } return (0); } + +/* + * Add a forked process to the run queue after any remaining setup, such + * as setting the fork handler, has been completed. + */ + +void +start_forked_proc(struct proc *p1, struct proc *p2) +{ + /* + * Move from SIDL to RUN queue + */ + KASSERT(p2->p_stat == SIDL, + ("cannot start forked process, bad status: %p", p2)); + (void) splhigh(); + p2->p_stat = SRUN; + setrunqueue(p2); + (void) spl0(); + + /* + * Now can be swapped. + */ + PRELE(p1); + + /* + * Preserve synchronization semantics of vfork. If waiting for + * child to exec or exit, set P_PPWAIT on child, and sleep on our + * proc (in case of exit). + */ + while (p2->p_flag & P_PPWAIT) + tsleep(p1, PWAIT, "ppwait", 0); +} + diff --git a/sys/kern/kern_kthread.c b/sys/kern/kern_kthread.c index 4972c22823..577b82d7c0 100644 --- a/sys/kern/kern_kthread.c +++ b/sys/kern/kern_kthread.c @@ -24,7 +24,7 @@ * SUCH DAMAGE. * * $FreeBSD: src/sys/kern/kern_kthread.c,v 1.5.2.3 2001/12/25 01:51:14 dillon Exp $ - * $DragonFly: src/sys/kern/kern_kthread.c,v 1.2 2003/06/17 04:28:41 dillon Exp $ + * $DragonFly: src/sys/kern/kern_kthread.c,v 1.3 2003/06/22 04:30:42 dillon Exp $ */ #include @@ -95,6 +95,7 @@ kthread_create(void (*func)(void *), void *arg, /* call the processes' main()... */ cpu_set_fork_handler(p2, func, arg); + start_forked_proc(&proc0, p2); return 0; } diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c index 5a015b86b7..e3eb990435 100644 --- a/sys/kern/kern_synch.c +++ b/sys/kern/kern_synch.c @@ -37,7 +37,7 @@ * * @(#)kern_synch.c 8.9 (Berkeley) 5/19/95 * $FreeBSD: src/sys/kern/kern_synch.c,v 1.87.2.6 2002/10/13 07:29:53 kbyanc Exp $ - * $DragonFly: src/sys/kern/kern_synch.c,v 1.5 2003/06/21 07:54:57 dillon Exp $ + * $DragonFly: src/sys/kern/kern_synch.c,v 1.6 2003/06/22 04:30:42 dillon Exp $ */ #include "opt_ktrace.h" @@ -448,7 +448,7 @@ tsleep(ident, priority, wmesg, timo) return (0); } KASSERT(p != NULL, ("tsleep1")); - KASSERT(ident != NULL && p->p_stat == SRUN, ("tsleep")); + KASSERT(ident != NULL && p->p_stat == SRUN, ("tsleep %p %s %d", ident, wmesg, p->p_stat)); p->p_wchan = ident; p->p_wmesg = wmesg; @@ -552,8 +552,8 @@ xsleep(struct xwait *w, int priority, const char *wmesg, int timo, int *gen) splx(s); return (0); } - KASSERT(p != NULL, ("tsleep1")); - KASSERT(w != NULL && p->p_stat == SRUN, ("tsleep")); + KASSERT(p != NULL, ("xsleep1")); + KASSERT(w != NULL && p->p_stat == SRUN, ("xsleep")); /* * If the generation number does not match we return immediately. diff --git a/sys/kern/lwkt_rwlock.c b/sys/kern/lwkt_rwlock.c index 8476b02443..467a1c4371 100644 --- a/sys/kern/lwkt_rwlock.c +++ b/sys/kern/lwkt_rwlock.c @@ -25,7 +25,7 @@ * * Implements simple shared/exclusive locks using LWKT. * - * $DragonFly: src/sys/kern/Attic/lwkt_rwlock.c,v 1.1 2003/06/21 17:31:19 dillon Exp $ + * $DragonFly: src/sys/kern/Attic/lwkt_rwlock.c,v 1.2 2003/06/22 04:30:42 dillon Exp $ */ #include @@ -35,10 +35,13 @@ #include #include +/* + * NOTE! called from low level boot, we cannot do anything fancy. 
+ */ void lwkt_rwlock_init(lwkt_rwlock_t lock) { - lwkt_wait_init(&lock->rw_wait); + lwkt_init_wait(&lock->rw_wait); } void diff --git a/sys/kern/lwkt_thread.c b/sys/kern/lwkt_thread.c index e5ef33a798..1ef5ce8a3f 100644 --- a/sys/kern/lwkt_thread.c +++ b/sys/kern/lwkt_thread.c @@ -27,7 +27,7 @@ * thread scheduler, which means that generally speaking we only need * to use a critical section to prevent hicups. * - * $DragonFly: src/sys/kern/lwkt_thread.c,v 1.3 2003/06/21 17:31:19 dillon Exp $ + * $DragonFly: src/sys/kern/lwkt_thread.c,v 1.4 2003/06/22 04:30:42 dillon Exp $ */ #include @@ -37,8 +37,24 @@ #include #include #include +#include +#include #include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static int untimely_switch = 0; +SYSCTL_INT(_debug, OID_AUTO, untimely_switch, CTLFLAG_RW, &untimely_switch, 0, ""); + + static __inline void _lwkt_dequeue(thread_t td) @@ -70,6 +86,59 @@ lwkt_gdinit(struct globaldata *gd) TAILQ_INIT(&gd->gd_tdrunq); } +/* + * Initialize a thread wait structure prior to first use. + * + * NOTE! called from low level boot code, we cannot do anything fancy! + */ +void +lwkt_init_wait(lwkt_wait_t w) +{ + TAILQ_INIT(&w->wa_waitq); +} + +/* + * Create a new thread. The thread must be associated with a process context + * or LWKT start address before it can be scheduled. + */ +thread_t +lwkt_alloc_thread(void) +{ + struct thread *td; + void *stack; + + crit_enter(); + if (mycpu->gd_tdfreecount > 0) { + --mycpu->gd_tdfreecount; + td = TAILQ_FIRST(&mycpu->gd_tdfreeq); + KASSERT(td != NULL, ("unexpected null cache td")); + TAILQ_REMOVE(&mycpu->gd_tdfreeq, td, td_threadq); + crit_exit(); + stack = td->td_kstack; + } else { + crit_exit(); + td = zalloc(thread_zone); + stack = (void *)kmem_alloc(kernel_map, UPAGES * PAGE_SIZE); + } + lwkt_init_thread(td, stack); + return(td); +} + +/* + * Initialize a preexisting thread structure. This function is used by + * lwkt_alloc_thread() and also used to initialize the per-cpu idlethread. + * + * NOTE! called from low level boot code, we cannot do anything fancy! + */ +void +lwkt_init_thread(thread_t td, void *stack) +{ + bzero(td, sizeof(struct thread)); + lwkt_rwlock_init(&td->td_rwlock); + td->td_kstack = stack; + pmap_init_thread(td); +} + /* * Switch to the next runnable lwkt. If no LWKTs are runnable then * switch to the idlethread. Switching must occur within a critical @@ -137,12 +206,10 @@ lwkt_yield_quick(void) /* * YYY enabling will cause wakeup() to task-switch, which really * confused the old 4.x code. This is a good way to simulate - * preemption without actually doing preemption, because a lot - * of code (including schedule, deschedule) uses critical sections - * which devolve to here if an interrupt occured. + * preemption and MP without actually doing preemption or MP, because a + * lot of code assumes that wakeup() does not block. */ -#if 0 - if (intr_nesting_level == 0) { + if (untimely_switch && intr_nesting_level == 0) { crit_enter(); /* * YYY temporary hacks until we disassociate the userland scheduler @@ -157,7 +224,6 @@ lwkt_yield_quick(void) } crit_exit_noyield(); } -#endif } /* @@ -315,15 +381,6 @@ lwkt_deschedule(thread_t td) crit_exit(); } -/* - * Initialize a thread wait queue - */ -void -lwkt_wait_init(lwkt_wait_t w) -{ - TAILQ_INIT(&w->wa_waitq); -} - /* * This function deschedules the current thread and blocks on the specified * wait queue. 
We obtain ownership of the wait queue in order to block diff --git a/sys/kern/vfs_aio.c b/sys/kern/vfs_aio.c index 5fb52a857a..643cbc2bdd 100644 --- a/sys/kern/vfs_aio.c +++ b/sys/kern/vfs_aio.c @@ -14,7 +14,7 @@ * of the author. This software is distributed AS-IS. * * $FreeBSD: src/sys/kern/vfs_aio.c,v 1.70.2.28 2003/05/29 06:15:35 alc Exp $ - * $DragonFly: src/sys/kern/vfs_aio.c,v 1.2 2003/06/17 04:28:41 dillon Exp $ + * $DragonFly: src/sys/kern/vfs_aio.c,v 1.3 2003/06/22 04:30:42 dillon Exp $ */ /* @@ -881,6 +881,7 @@ aio_newproc() if (error) return error; cpu_set_fork_handler(np, aio_daemon, curproc); + start_forked_proc(p, np); /* * Wait until daemon is started, but continue on just in case to diff --git a/sys/netproto/smb/smb_subr.c b/sys/netproto/smb/smb_subr.c index b255c6a50f..3e5b18f601 100644 --- a/sys/netproto/smb/smb_subr.c +++ b/sys/netproto/smb/smb_subr.c @@ -30,7 +30,7 @@ * SUCH DAMAGE. * * $FreeBSD: src/sys/netsmb/smb_subr.c,v 1.1.2.2 2001/09/03 08:55:11 bp Exp $ - * $DragonFly: src/sys/netproto/smb/smb_subr.c,v 1.2 2003/06/17 04:28:54 dillon Exp $ + * $DragonFly: src/sys/netproto/smb/smb_subr.c,v 1.3 2003/06/22 04:30:43 dillon Exp $ */ #include #include @@ -418,6 +418,7 @@ kthread_create2(void (*func)(void *), void *arg, /* call the processes' main()... */ cpu_set_fork_handler(p2, func, arg); + start_forked_proc(&proc0, p2); return 0; } diff --git a/sys/platform/pc32/i386/machdep.c b/sys/platform/pc32/i386/machdep.c index bef6545285..3b885b016b 100644 --- a/sys/platform/pc32/i386/machdep.c +++ b/sys/platform/pc32/i386/machdep.c @@ -36,7 +36,7 @@ * * from: @(#)machdep.c 7.4 (Berkeley) 6/3/91 * $FreeBSD: src/sys/i386/i386/machdep.c,v 1.385.2.30 2003/05/31 08:48:05 alc Exp $ - * $DragonFly: src/sys/platform/pc32/i386/machdep.c,v 1.8 2003/06/21 07:54:55 dillon Exp $ + * $DragonFly: src/sys/platform/pc32/i386/machdep.c,v 1.9 2003/06/22 04:30:39 dillon Exp $ */ #include "apm.h" @@ -1866,14 +1866,14 @@ init386(first) * Prevent lowering of the ipl if we call tsleep() early. 
*/ safepri = cpl; + gd = &CPU_prvspace[0].globaldata; - thread0.td_kstack = (void *)proc0paddr; + lwkt_init_thread(&thread0, proc0paddr); + gd->gd_curthread = &thread0; + thread0.td_switch = cpu_heavy_switch; /* YYY eventually LWKT */ proc0.p_addr = (void *)thread0.td_kstack; proc0.p_thread = &thread0; thread0.td_proc = &proc0; - thread0.td_pcb = (struct pcb *) - ((char *)proc0paddr + UPAGES*PAGE_SIZE - sizeof(struct pcb)); - thread0.td_kstack = (char *)proc0paddr; thread0.td_flags = TDF_RUNNING; atdevbase = ISA_HOLE_START + KERNBASE; @@ -1912,7 +1912,6 @@ init386(first) gdt_segs[GPRIV_SEL].ssd_limit = atop(0 - 1); gdt_segs[GPROC0_SEL].ssd_base = (int) &common_tss; #endif - gd = &CPU_prvspace[0].globaldata; gd->gd_prvspace = &CPU_prvspace[0]; /* * Note: on both UP and SMP curthread must be set non-NULL @@ -2073,7 +2072,6 @@ init386(first) thread0.td_pcb->pcb_mpnest = 1; #endif thread0.td_pcb->pcb_ext = 0; - thread0.td_switch = cpu_heavy_switch; /* YYY eventually LWKT */ proc0.p_md.md_regs = &proc0_tf; } @@ -2086,20 +2084,15 @@ void cpu_gdinit(struct globaldata *gd, int cpu) { char *sp; - struct pcb *pcb; - if (cpu == 0) - gd->gd_curthread = &thread0; - else + TAILQ_INIT(&gd->gd_tdfreeq); /* for pmap_{new,dispose}_thread() */ + if (cpu) gd->gd_curthread = &gd->gd_idlethread; sp = gd->gd_prvspace->idlestack; - gd->gd_idlethread.td_kstack = sp; - pcb = (struct pcb *)(sp + sizeof(gd->gd_prvspace->idlestack)) - 1; - gd->gd_idlethread.td_pcb = pcb; - gd->gd_idlethread.td_sp = (char *)pcb - 16 - sizeof(void *); + lwkt_init_thread(&gd->gd_idlethread, sp); gd->gd_idlethread.td_switch = cpu_lwkt_switch; + gd->gd_idlethread.td_sp -= sizeof(void *); *(void **)gd->gd_idlethread.td_sp = cpu_idle_restore; - TAILQ_INIT(&gd->gd_tdfreeq); /* for pmap_{new,dispose}_thread() */ } #if defined(I586_CPU) && !defined(NO_F00F_HACK) diff --git a/sys/platform/pc32/i386/pmap.c b/sys/platform/pc32/i386/pmap.c index e9deafacce..79f378159b 100644 --- a/sys/platform/pc32/i386/pmap.c +++ b/sys/platform/pc32/i386/pmap.c @@ -40,7 +40,7 @@ * * from: @(#)pmap.c 7.7 (Berkeley) 5/12/91 * $FreeBSD: src/sys/i386/i386/pmap.c,v 1.250.2.18 2002/03/06 22:48:53 silby Exp $ - * $DragonFly: src/sys/platform/pc32/i386/pmap.c,v 1.9 2003/06/21 17:31:08 dillon Exp $ + * $DragonFly: src/sys/platform/pc32/i386/pmap.c,v 1.10 2003/06/22 04:30:39 dillon Exp $ */ /* @@ -843,30 +843,11 @@ retry: /* * Create a new thread and optionally associate it with a (new) process. */ -struct thread * -pmap_new_thread() +void +pmap_init_thread(thread_t td) { - struct thread *td; - - /* HIPRI YYY */ - if (mycpu->gd_tdfreecount > 0) { - --mycpu->gd_tdfreecount; - td = TAILQ_FIRST(&mycpu->gd_tdfreeq); - KASSERT(td != NULL, ("unexpected null cache td")); - TAILQ_REMOVE(&mycpu->gd_tdfreeq, td, td_threadq); - } else { - td = zalloc(thread_zone); - td->td_kstack = - (void *)kmem_alloc(kernel_map, UPAGES * PAGE_SIZE); - lwkt_rwlock_init(&td->td_rwlock); - } - - /* - * Sometimes td_pcb is moved around YYY. Make sure that it is - * properly initialized. - */ td->td_pcb = (struct pcb *)(td->td_kstack + UPAGES * PAGE_SIZE) - 1; - return(td); + td->td_sp = (char *)td->td_pcb - 16; } /* @@ -897,74 +878,13 @@ pmap_dispose_thread(struct thread *td) * This routine directly affects the fork perf for a process. 
*/ void -pmap_new_proc(struct proc *p, struct thread *td) +pmap_init_proc(struct proc *p, struct thread *td) { p->p_addr = (void *)td->td_kstack; p->p_thread = td; td->td_proc = p; td->td_switch = cpu_heavy_switch; bzero(p->p_addr, sizeof(*p->p_addr)); -#if 0 - - int i, updateneeded; - vm_object_t upobj; - vm_page_t m; - struct user *up; - unsigned *ptek, oldpte; - - /* - * allocate object for the upages - */ - if ((upobj = p->p_upages_obj) == NULL) { - upobj = vm_object_allocate( OBJT_DEFAULT, UPAGES); - p->p_upages_obj = upobj; - } - - /* get a kernel virtual address for the UPAGES for this proc */ - if ((up = p->p_addr) == NULL) { - up = (struct user *) kmem_alloc_nofault(kernel_map, - UPAGES * PAGE_SIZE); - if (up == NULL) - panic("pmap_new_proc: u_map allocation failed"); - p->p_addr = up; - } - - ptek = (unsigned *) vtopte((vm_offset_t) up); - - updateneeded = 0; - for(i=0;iwire_count++; - cnt.v_wire_count++; - - oldpte = *(ptek + i); - /* - * Enter the page into the kernel address space. - */ - *(ptek + i) = VM_PAGE_TO_PHYS(m) | PG_RW | PG_V | pgeflag; - if (oldpte) { - if ((oldpte & PG_G) || (cpu_class > CPUCLASS_386)) { - invlpg((vm_offset_t) up + i * PAGE_SIZE); - } else { - updateneeded = 1; - } - } - - vm_page_wakeup(m); - vm_page_flag_clear(m, PG_ZERO); - vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE); - m->valid = VM_PAGE_BITS_ALL; - } - if (updateneeded) - invltlb(); -#endif } /* @@ -984,44 +904,6 @@ pmap_dispose_proc(struct proc *p) } p->p_addr = NULL; return(td); -#if 0 - int i; - vm_object_t upobj; - vm_page_t m; - unsigned *ptek, oldpte; - - upobj = p->p_upages_obj; - - ptek = (unsigned *) vtopte((vm_offset_t) p->p_addr); - for(i=0;i CPUCLASS_386)) - invlpg((vm_offset_t) p->p_addr + i * PAGE_SIZE); - vm_page_unwire(m, 0); - vm_page_free(m); - } -#if defined(I386_CPU) - if (cpu_class <= CPUCLASS_386) - invltlb(); -#endif - - /* - * If the process got swapped out some of its UPAGES might have gotten - * swapped. Just get rid of the object to clean up the swap use - * proactively. NOTE! might block waiting for paging I/O to complete. - */ - if (upobj->type == OBJT_SWAP) { - p->p_upages_obj = NULL; - vm_object_deallocate(upobj); - } -#endif } /* diff --git a/sys/platform/pc32/i386/swtch.s b/sys/platform/pc32/i386/swtch.s index ccf1095dae..b9604e497e 100644 --- a/sys/platform/pc32/i386/swtch.s +++ b/sys/platform/pc32/i386/swtch.s @@ -35,7 +35,7 @@ * SUCH DAMAGE. * * $FreeBSD: src/sys/i386/i386/swtch.s,v 1.89.2.10 2003/01/23 03:36:24 ps Exp $ - * $DragonFly: src/sys/platform/pc32/i386/swtch.s,v 1.9 2003/06/21 17:31:08 dillon Exp $ + * $DragonFly: src/sys/platform/pc32/i386/swtch.s,v 1.10 2003/06/22 04:30:39 dillon Exp $ */ #include "npx.h" @@ -250,6 +250,8 @@ ENTRY(cpu_exit_switch) * we restore everything. * * YYY STI/CLI sequencing. + * + * YYY note: spl check is done in mi_switch when it splx()'s. */ ENTRY(cpu_heavy_restore) /* interrupts are disabled */ @@ -581,5 +583,11 @@ ENTRY(cpu_lwkt_restore) popl %ebp movl TD_MACH+MTD_CPL(%eax),%ecx /* YYY temporary */ movl %ecx,_cpl /* YYY temporary */ + andl _ipending,%ecx /* YYY temporary */ + je 1f + pushl %ecx + call splx /* YYY set gd_reqpri instead? 
*/ + addl $4,%esp +1: ret diff --git a/sys/platform/pc32/i386/vm_machdep.c b/sys/platform/pc32/i386/vm_machdep.c index 2448fcbed9..5a244de17f 100644 --- a/sys/platform/pc32/i386/vm_machdep.c +++ b/sys/platform/pc32/i386/vm_machdep.c @@ -39,7 +39,7 @@ * from: @(#)vm_machdep.c 7.3 (Berkeley) 5/13/91 * Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$ * $FreeBSD: src/sys/i386/i386/vm_machdep.c,v 1.132.2.9 2003/01/25 19:02:23 dillon Exp $ - * $DragonFly: src/sys/platform/pc32/i386/vm_machdep.c,v 1.8 2003/06/21 17:31:08 dillon Exp $ + * $DragonFly: src/sys/platform/pc32/i386/vm_machdep.c,v 1.9 2003/06/22 04:30:39 dillon Exp $ */ #include "npx.h" @@ -148,8 +148,8 @@ cpu_fork(p1, p2, flags) #endif /* Copy p1's pcb. */ - *p2->p_thread->td_pcb = *p1->p_thread->td_pcb; pcb2 = p2->p_thread->td_pcb; + *pcb2 = *p1->p_thread->td_pcb; /* * Create a new fresh stack for the new process. @@ -170,8 +170,7 @@ cpu_fork(p1, p2, flags) * to use the LWKT restore function directly so we can get rid of * all the extra crap we are setting up. */ - p2->p_md.md_regs = (struct trapframe *) - ((char *)p2->p_thread->td_pcb - 16) - 1; + p2->p_md.md_regs = (struct trapframe *)((char *)pcb2 - 16) - 1; bcopy(p1->p_md.md_regs, p2->p_md.md_regs, sizeof(*p2->p_md.md_regs)); /* diff --git a/sys/sys/proc.h b/sys/sys/proc.h index 3cb25ae7d3..437a939eb2 100644 --- a/sys/sys/proc.h +++ b/sys/sys/proc.h @@ -37,7 +37,7 @@ * * @(#)proc.h 8.15 (Berkeley) 5/19/95 * $FreeBSD: src/sys/sys/proc.h,v 1.99.2.9 2003/06/06 20:21:32 tegge Exp $ - * $DragonFly: src/sys/sys/proc.h,v 1.10 2003/06/21 17:31:22 dillon Exp $ + * $DragonFly: src/sys/sys/proc.h,v 1.11 2003/06/22 04:30:43 dillon Exp $ */ #ifndef _SYS_PROC_H_ @@ -445,6 +445,7 @@ void exit1 __P((struct proc *, int)) __dead2; void cpu_fork __P((struct proc *, struct proc *, int)); void cpu_set_fork_handler __P((struct proc *, void (*)(void *), void *)); int fork1 __P((struct proc *, int, struct proc **)); +void start_forked_proc __P((struct proc *, struct proc *)); int trace_req __P((struct proc *)); void cpu_wait __P((struct proc *)); int cpu_coredump __P((struct proc *, struct vnode *, struct ucred *)); diff --git a/sys/sys/thread.h b/sys/sys/thread.h index f43096c079..dfb928f88c 100644 --- a/sys/sys/thread.h +++ b/sys/sys/thread.h @@ -4,7 +4,7 @@ * Implements the architecture independant portion of the LWKT * subsystem. * - * $DragonFly: src/sys/sys/thread.h,v 1.5 2003/06/21 17:31:22 dillon Exp $ + * $DragonFly: src/sys/sys/thread.h,v 1.6 2003/06/22 04:30:43 dillon Exp $ */ #ifndef _SYS_THREAD_H_ @@ -178,7 +178,9 @@ struct thread { extern struct vm_zone *thread_zone; -extern void lwkt_wait_init(struct lwkt_wait *w); +extern struct thread *lwkt_alloc_thread(void); +extern void lwkt_init_thread(struct thread *td, void *stack); +extern void lwkt_init_wait(struct lwkt_wait *w); extern void lwkt_gdinit(struct globaldata *gd); extern void lwkt_switch(void); extern void lwkt_preempt(void); diff --git a/sys/vm/pmap.h b/sys/vm/pmap.h index e81c23b35a..789ce967cd 100644 --- a/sys/vm/pmap.h +++ b/sys/vm/pmap.h @@ -62,7 +62,7 @@ * rights to redistribute these changes. 
* * $FreeBSD: src/sys/vm/pmap.h,v 1.33.2.4 2002/03/06 22:44:24 silby Exp $ - * $DragonFly: src/sys/vm/pmap.h,v 1.4 2003/06/19 06:26:10 dillon Exp $ + * $DragonFly: src/sys/vm/pmap.h,v 1.5 2003/06/22 04:30:43 dillon Exp $ */ /* @@ -136,8 +136,8 @@ void pmap_zero_page __P((vm_offset_t)); void pmap_zero_page_area __P((vm_offset_t, int off, int size)); void pmap_prefault __P((pmap_t, vm_offset_t, vm_map_entry_t)); int pmap_mincore __P((pmap_t pmap, vm_offset_t addr)); -void pmap_new_proc __P((struct proc *p, struct thread *td)); -struct thread *pmap_new_thread __P((void)); +void pmap_init_proc __P((struct proc *p, struct thread *td)); +void pmap_init_thread __P((struct thread *td)); void pmap_dispose_thread __P((struct thread *td)); struct thread *pmap_dispose_proc __P((struct proc *p)); void pmap_swapout_proc __P((struct proc *p)); diff --git a/sys/vm/vm_glue.c b/sys/vm/vm_glue.c index 07023f1510..1179a4c673 100644 --- a/sys/vm/vm_glue.c +++ b/sys/vm/vm_glue.c @@ -60,7 +60,7 @@ * rights to redistribute these changes. * * $FreeBSD: src/sys/vm/vm_glue.c,v 1.94.2.4 2003/01/13 22:51:17 dillon Exp $ - * $DragonFly: src/sys/vm/vm_glue.c,v 1.4 2003/06/19 06:26:10 dillon Exp $ + * $DragonFly: src/sys/vm/vm_glue.c,v 1.5 2003/06/22 04:30:43 dillon Exp $ */ #include "opt_vm.h" @@ -239,8 +239,8 @@ vm_fork(p1, p2, flags) shmfork(p1, p2); } - td2 = pmap_new_thread(); - pmap_new_proc(p2, td2); + td2 = lwkt_alloc_thread(); + pmap_init_proc(p2, td2); up = p2->p_addr; -- 2.41.0
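
Thread creation is now layered across the MI and MD code: lwkt_alloc_thread() supplies a struct thread plus kernel stack (reusing the per-cpu gd_tdfreeq cache when possible, otherwise falling back to zalloc() and kmem_alloc()), lwkt_init_thread() wipes the structure and attaches the stack, and the machine-dependent pmap_init_thread() only lays out the pcb and the initial stack pointer. The two initializers, condensed from the lwkt_thread.c and i386 pmap.c hunks above and lightly commented (a restatement of the diff, not a drop-in):

    /* MI half (lwkt_thread.c): also called from early boot, so keep it dumb */
    void
    lwkt_init_thread(thread_t td, void *stack)
    {
            bzero(td, sizeof(struct thread));
            lwkt_rwlock_init(&td->td_rwlock);
            td->td_kstack = stack;
            pmap_init_thread(td);
    }

    /* MD half (i386 pmap.c): pcb at the top of the UPAGES stack, sp just below it */
    void
    pmap_init_thread(thread_t td)
    {
            td->td_pcb = (struct pcb *)(td->td_kstack + UPAGES * PAGE_SIZE) - 1;
            td->td_sp = (char *)td->td_pcb - 16;
    }

init386(), cpu_gdinit() and vm_fork() now all go through this path instead of hand-rolling td_kstack/td_pcb/td_sp, and the #if 0 UPAGES vm_object code in the old pmap_new_proc()/pmap_dispose_proc() is deleted outright; pmap_init_proc() simply ties the proc, thread and cpu_heavy_switch together and bzero()s the upage.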
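
fork1() no longer puts the child on the run queue. It leaves the new process in SIDL, and the caller makes it runnable with the new start_forked_proc() only after any remaining setup, such as cpu_set_fork_handler(), is done; that ordering is what closes the wakeup/fork_handler race noted in the commit message. A sketch of the resulting caller pattern, modeled on the kthread_create() hunk above; example_kproc_spawn() is a made-up name and the RF* flag combination is illustrative rather than taken from the diff:

    int
    example_kproc_spawn(void (*func)(void *), void *arg, struct proc **newpp)
    {
            struct proc *p2;
            int error;

            error = fork1(&proc0, RFMEM | RFFDG | RFPROC, &p2);
            if (error)
                    return (error);

            /* the child is still SIDL, so it cannot run before this point */
            cpu_set_fork_handler(p2, func, arg);

            /* SIDL -> SRUN, setrunqueue(), PRELE(), and the vfork P_PPWAIT wait */
            start_forked_proc(&proc0, p2);

            if (newpp != NULL)
                    *newpp = p2;
            return (0);
    }

fork(), vfork(), rfork(), aio_newproc(), kthread_create() and kthread_create2() all gain the same start_forked_proc() call in the hunks above; create_init() instead defers it to the kick_init() SYSINIT so init stays parked until the kernel is ready for it.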
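
The two swtch.s hunks add the spl-versus-ipending check that the commit message credits with keeping the idle loop from locking up the machine: after cpu_lwkt_restore reinstalls the saved cpl, the added instructions AND that value with ipending and, when the result is non-zero, call splx() on it (the YYY comment suggests setting gd_reqpri may eventually replace this). A literal C rendering of those instructions; the assembly above is the real code, and saved_cpl is only a placeholder for the value loaded from the thread's TD_MACH+MTD_CPL slot:

    cpl = saved_cpl;                    /* movl TD_MACH+MTD_CPL(%eax),%ecx; movl %ecx,_cpl */
    if (cpl & ipending)                 /* andl _ipending,%ecx; je 1f */
            splx(cpl & ipending);       /* pushl %ecx; call splx; addl $4,%esp */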
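
The debug.untimely_switch sysctl re-enables, as an opt-in knob, the path in lwkt_yield_quick() that was previously wrapped in #if 0: with the knob set, and only outside interrupt context, the last crit_exit() is allowed to yield, so a wakeup() can task-switch immediately and flush out 4.x code that wrongly assumes wakeup() never blocks. That gives a cheap way to simulate preemption and MP interleavings on a UP machine. The gate itself, taken from the lwkt_thread.c hunk above with the unchanged yield body elided:

    static int untimely_switch = 0;
    SYSCTL_INT(_debug, OID_AUTO, untimely_switch, CTLFLAG_RW,
        &untimely_switch, 0, "");

    /* in lwkt_yield_quick(): opt-in via the sysctl, and never from an interrupt */
    if (untimely_switch && intr_nesting_level == 0) {
            crit_enter();
            /* ... yield/switch body unchanged; see the lwkt_thread.c hunk above ... */
            crit_exit_noyield();
    }

Enabling it at runtime is just sysctl debug.untimely_switch=1; the compiled-in default of 0 preserves the old behavior.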