From 13d13d890db469ff6082dd0b232f1865419f1441 Mon Sep 17 00:00:00 2001
From: Simon Schubert
Date: Mon, 26 Feb 2007 21:41:08 +0000
Subject: [PATCH] 1:1 Userland threading stage 4.6/4: Factor out a new
 function `lwp_fork()' from fork1().

Rearrange various fork helpers to operate exclusively on procs or lwps.
Collect some MI code from MD sources into lwp_fork().

Reviewed-by: Thomas E. Spanjaard
---
 sys/kern/kern_fork.c                 | 144 ++++++++++++++++++++-------
 sys/kern/lwkt_caps.c                 |   4 +-
 sys/platform/pc32/i386/pmap.c        |  14 +--
 sys/platform/vkernel/platform/pmap.c |  15 +--
 sys/sys/caps.h                       |   4 +-
 sys/vm/pmap.h                        |   4 +-
 sys/vm/vm_extern.h                   |   4 +-
 sys/vm/vm_glue.c                     |  20 +---
 8 files changed, 124 insertions(+), 85 deletions(-)

diff --git a/sys/kern/kern_fork.c b/sys/kern/kern_fork.c
index ed15362bba..8f36107bba 100644
--- a/sys/kern/kern_fork.c
+++ b/sys/kern/kern_fork.c
@@ -37,7 +37,7 @@
  *
  * @(#)kern_fork.c 8.6 (Berkeley) 4/8/94
  * $FreeBSD: src/sys/kern/kern_fork.c,v 1.72.2.14 2003/06/26 04:15:10 silby Exp $
- * $DragonFly: src/sys/kern/kern_fork.c,v 1.64 2007/02/25 23:17:12 corecode Exp $
+ * $DragonFly: src/sys/kern/kern_fork.c,v 1.65 2007/02/26 21:41:08 corecode Exp $
  */

 #include "opt_ktrace.h"
@@ -83,6 +83,8 @@ struct forklist {
 TAILQ_HEAD(forklist_head, forklist);
 static struct forklist_head fork_list = TAILQ_HEAD_INITIALIZER(fork_list);

+static struct lwp *lwp_fork(struct lwp *, struct proc *, int flags);
+
 int forksleep; /* Place for fork1() to sleep on. */

 /* ARGSUSED */
@@ -159,7 +161,6 @@ fork1(struct lwp *lp1, int flags, struct proc **procp)
         struct proc *p1 = lp1->lwp_proc;
         struct proc *p2, *pptr;
         struct pgrp *pgrp;
-        struct lwp *lp2;
         uid_t uid;
         int ok, error;
         static int curfail = 0;
@@ -176,7 +177,14 @@ fork1(struct lwp *lp1, int flags, struct proc **procp)

          */
         if ((flags & RFPROC) == 0) {
-                vm_fork(lp1, 0, flags);
+                /*
+                 * This kind of stunt does not work anymore if
+                 * there are native threads (lwps) running
+                 */
+                if (p1->p_nthreads != 1)
+                        return (EINVAL);
+
+                vm_fork(p1, 0, flags);

                 /*
                  * Close all file descriptors.
@@ -266,7 +274,6 @@ fork1(struct lwp *lp1, int flags, struct proc **procp)

         /* Allocate new proc. */
         p2 = zalloc(proc_zone);
-        lp2 = zalloc(lwp_zone);

         /*
          * Setup linkage for kernel based threading XXX lwp
@@ -286,11 +293,7 @@ fork1(struct lwp *lp1, int flags, struct proc **procp)
         p2->p_emuldata = NULL;

         LIST_INIT(&p2->p_lwps);
-        /* XXX lwp */
-        lp2->lwp_proc = p2;
-        lp2->lwp_tid = 0;
-        LIST_INSERT_HEAD(&p2->p_lwps, lp2, lwp_list);
-        p2->p_nthreads = 1;
+        p2->p_nthreads = 0;
         p2->p_nstopped = 0;
         p2->p_lasttid = 0;

@@ -299,7 +302,6 @@ fork1(struct lwp *lp1, int flags, struct proc **procp)
          * process once it starts getting hooked into the rest of the system.
          */
         p2->p_stat = SIDL;
-        lp2->lwp_stat = LSRUN;  /* XXX use other state? start_forked_proc() handles this*/
         proc_add_allproc(p2);

         /*
@@ -309,14 +311,8 @@ fork1(struct lwp *lp1, int flags, struct proc **procp)
          */
         bzero(&p2->p_startzero,
             (unsigned) ((caddr_t)&p2->p_endzero - (caddr_t)&p2->p_startzero));
-        bzero(&lp2->lwp_startzero,
-            (unsigned) ((caddr_t)&lp2->lwp_endzero -
-                    (caddr_t)&lp2->lwp_startzero));
         bcopy(&p1->p_startcopy, &p2->p_startcopy,
             (unsigned) ((caddr_t)&p2->p_endcopy - (caddr_t)&p2->p_startcopy));
-        bcopy(&lp1->lwp_startcopy, &lp2->lwp_startcopy,
-            (unsigned) ((caddr_t)&lp2->lwp_endcopy -
-                    (caddr_t)&lp2->lwp_startcopy));

         p2->p_aioinfo = NULL;

@@ -327,7 +323,7 @@ fork1(struct lwp *lp1, int flags, struct proc **procp)
          */
         p2->p_flag = 0;
         p2->p_lock = 0;
-        lp2->lwp_lock = 0;
+
         if (p1->p_flag & P_PROFIL)
                 startprofclock(p2);
         p2->p_ucred = crhold(p1->p_ucred);
@@ -338,6 +334,8 @@ fork1(struct lwp *lp1, int flags, struct proc **procp)
         if (p2->p_args)
                 p2->p_args->ar_ref++;

+        p2->p_usched = p1->p_usched;
+
         if (flags & RFSIGSHARE) {
                 p2->p_sigacts = p1->p_sigacts;
                 p2->p_sigacts->ps_refcnt++;
@@ -395,7 +393,6 @@ fork1(struct lwp *lp1, int flags, struct proc **procp)
          * been preserved.
          */
         p2->p_flag |= p1->p_flag & P_SUGID;
-        lp2->lwp_flag |= lp1->lwp_flag & LWP_ALTSTACK;
         if (p1->p_session->s_ttyvp != NULL && p1->p_flag & P_CONTROLT)
                 p2->p_flag |= P_CONTROLT;
         if (flags & RFPPWAIT)
@@ -445,33 +442,23 @@ fork1(struct lwp *lp1, int flags, struct proc **procp)
         }
 #endif

-        /*
-         * Inherit the scheduler and initialize scheduler-related fields.
-         * Set cpbase to the last timeout that occured (not the upcoming
-         * timeout).
-         *
-         * A critical section is required since a timer IPI can update
-         * scheduler specific data.
-         */
-        crit_enter();
-        p2->p_usched = p1->p_usched;
-        lp2->lwp_cpbase = mycpu->gd_schedclock.time -
-                        mycpu->gd_schedclock.periodic;
-        p2->p_usched->heuristic_forking(lp1, lp2);
-        crit_exit();
-
         /*
          * This begins the section where we must prevent the parent
          * from being swapped.
+         *
+         * Gets PRELE'd in the caller in start_forked_proc().
          */
         PHOLD(p1);

+        vm_fork(p1, p2, flags);
+
         /*
-         * Finish creating the child process.  It will return via a different
-         * execution path later.  (ie: directly into user mode)
+         * Create the first lwp associated with the new proc.
+         * It will return via a different execution path later, directly
+         * into userland, after it was put on the runq by
+         * start_forked_proc().
          */
-        vm_fork(lp1, p2, flags);
-        caps_fork(lp1->lwp_thread, lp2->lwp_thread, flags);
+        lwp_fork(lp1, p2, flags);

         if (flags == (RFFDG | RFPROC)) {
                 mycpu->gd_cnt.v_forks++;
@@ -518,6 +505,89 @@ done:
         return (error);
 }

+static struct lwp *
+lwp_fork(struct lwp *origlp, struct proc *destproc, int flags)
+{
+        struct lwp *lp;
+        struct thread *td;
+        lwpid_t tid;
+
+        /*
+         * We need to prevent wrap-around collisions.
+         * Until we have a nice tid allocator, we need to
+         * start searching for free tids once we wrap around.
+         *
+         * XXX give me a nicer allocator
+         */
+        if (destproc->p_lasttid + 1 <= 0) {
+                tid = 0;
+restart:
+                FOREACH_LWP_IN_PROC(lp, destproc) {
+                        if (lp->lwp_tid != tid)
+                                continue;
+                        /* tids match, search next. */
+                        tid++;
+                        /*
+                         * Wait -- the whole tid space is depleted?
+                         * Impossible.
+                         */
+                        if (tid <= 0)
+                                panic("lwp_fork: All tids depleted?!");
+                        goto restart;
+                }
+                /* When we come here, the tid is not occupied */
+        } else {
+                tid = destproc->p_lasttid++;
+        }
+
+        lp = zalloc(lwp_zone);
+        lp->lwp_proc = destproc;
+        lp->lwp_tid = tid;
+        LIST_INSERT_HEAD(&destproc->p_lwps, lp, lwp_list);
+        destproc->p_nthreads++;
+        lp->lwp_stat = LSRUN;
+        bzero(&lp->lwp_startzero,
+            (unsigned) ((caddr_t)&lp->lwp_endzero -
+                    (caddr_t)&lp->lwp_startzero));
+        bcopy(&origlp->lwp_startcopy, &lp->lwp_startcopy,
+            (unsigned) ((caddr_t)&lp->lwp_endcopy -
+                    (caddr_t)&lp->lwp_startcopy));
+        lp->lwp_lock = 0;
+        lp->lwp_flag |= origlp->lwp_flag & LWP_ALTSTACK;
+        /*
+         * Set cpbase to the last timeout that occured (not the upcoming
+         * timeout).
+         *
+         * A critical section is required since a timer IPI can update
+         * scheduler specific data.
+         */
+        crit_enter();
+        lp->lwp_cpbase = mycpu->gd_schedclock.time -
+                        mycpu->gd_schedclock.periodic;
+        destproc->p_usched->heuristic_forking(origlp, lp);
+        crit_exit();
+
+        td = lwkt_alloc_thread(NULL, LWKT_THREAD_STACK, -1, 0);
+        lp->lwp_thread = td;
+        td->td_proc = destproc;
+        td->td_lwp = lp;
+        td->td_switch = cpu_heavy_switch;
+#ifdef SMP
+        KKASSERT(td->td_mpcount == 1);
+#endif
+        lwkt_setpri(td, TDPRI_KERN_USER);
+        lwkt_set_comm(td, "%s", destproc->p_comm);
+
+        /*
+         * cpu_fork will copy and update the pcb, set up the kernel stack,
+         * and make the child ready to run.
+         */
+        cpu_fork(origlp, lp, flags);
+        caps_fork(origlp->lwp_thread, lp->lwp_thread);
+
+        return (lp);
+}
+
 /*
  * The next two functionms are general routines to handle adding/deleting
  * items on the fork callout list.
diff --git a/sys/kern/lwkt_caps.c b/sys/kern/lwkt_caps.c
index 641cd718f9..1384937851 100644
--- a/sys/kern/lwkt_caps.c
+++ b/sys/kern/lwkt_caps.c
@@ -31,7 +31,7 @@
  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- * $DragonFly: src/sys/kern/lwkt_caps.c,v 1.12 2007/02/03 17:05:58 corecode Exp $
+ * $DragonFly: src/sys/kern/lwkt_caps.c,v 1.13 2007/02/26 21:41:08 corecode Exp $
  */

 /*
@@ -508,7 +508,7 @@ caps_free(caps_kinfo_t caps)
  * forked condition and reforge the connection.
  */
 void
-caps_fork(struct thread *td1, struct thread *td2, int flags)
+caps_fork(struct thread *td1, struct thread *td2)
 {
         caps_kinfo_t caps1;
         caps_kinfo_t caps2;
diff --git a/sys/platform/pc32/i386/pmap.c b/sys/platform/pc32/i386/pmap.c
index 830e566b24..f60ac2972c 100644
--- a/sys/platform/pc32/i386/pmap.c
+++ b/sys/platform/pc32/i386/pmap.c
@@ -40,7 +40,7 @@
  *
  * from: @(#)pmap.c 7.7 (Berkeley) 5/12/91
  * $FreeBSD: src/sys/i386/i386/pmap.c,v 1.250.2.18 2002/03/06 22:48:53 silby Exp $
- * $DragonFly: src/sys/platform/pc32/i386/pmap.c,v 1.76 2007/02/25 23:17:13 corecode Exp $
+ * $DragonFly: src/sys/platform/pc32/i386/pmap.c,v 1.77 2007/02/26 21:41:08 corecode Exp $
  */

 /*
@@ -892,21 +892,11 @@ pmap_init_thread(thread_t td)
 }

 /*
- * Create the UPAGES for a new process.
  * This routine directly affects the fork perf for a process.
  */
 void
-pmap_init_proc(struct proc *p, struct thread *td)
+pmap_init_proc(struct proc *p)
 {
-        struct lwp *lp = ONLY_LWP_IN_PROC(p);
-
-        lp->lwp_thread = td;
-        td->td_proc = p;
-        td->td_lwp = lp;
-        td->td_switch = cpu_heavy_switch;
-#ifdef SMP
-        KKASSERT(td->td_mpcount == 1);
-#endif
 }

 /*
diff --git a/sys/platform/vkernel/platform/pmap.c b/sys/platform/vkernel/platform/pmap.c
index 224dce5b5e..7cc100ff4f 100644
--- a/sys/platform/vkernel/platform/pmap.c
+++ b/sys/platform/vkernel/platform/pmap.c
@@ -38,7 +38,7 @@
  *
  * from: @(#)pmap.c 7.7 (Berkeley) 5/12/91
  * $FreeBSD: src/sys/i386/i386/pmap.c,v 1.250.2.18 2002/03/06 22:48:53 silby Exp $
- * $DragonFly: src/sys/platform/vkernel/platform/pmap.c,v 1.19 2007/02/25 23:17:13 corecode Exp $
+ * $DragonFly: src/sys/platform/vkernel/platform/pmap.c,v 1.20 2007/02/26 21:41:08 corecode Exp $
  */
 /*
  * NOTE: PMAP_INVAL_ADD: In pc32 this function is called prior to adjusting
@@ -850,20 +850,11 @@ pmap_init_thread(thread_t td)
 }

 /*
- * Initialize MD portions of a process structure. XXX this aint MD
+ * This routine directly affects the fork perf for a process.
  */
 void
-pmap_init_proc(struct proc *p, struct thread *td)
+pmap_init_proc(struct proc *p)
 {
-        struct lwp *lp = ONLY_LWP_IN_PROC(p);
-
-        lp->lwp_thread = td;
-        td->td_proc = p;
-        td->td_lwp = lp;
-        td->td_switch = cpu_heavy_switch;
-#ifdef SMP
-        KKASSERT(td->td_mpcount == 1);
-#endif
 }

 /*
diff --git a/sys/sys/caps.h b/sys/sys/caps.h
index 43f07a0237..ad681bbde5 100644
--- a/sys/sys/caps.h
+++ b/sys/sys/caps.h
@@ -3,7 +3,7 @@
  *
  * Implements an architecture independant Capability Service API
  *
- * $DragonFly: src/sys/sys/caps.h,v 1.10 2007/02/03 17:05:59 corecode Exp $
+ * $DragonFly: src/sys/sys/caps.h,v 1.11 2007/02/26 21:41:08 corecode Exp $
  */

 #ifndef _SYS_CAPS_H_
@@ -164,7 +164,7 @@ typedef struct caps_kmsg {
  * kernel support
  */
 void caps_exit(struct thread *td);
-void caps_fork(struct thread *td1, struct thread *td2, int flags);
+void caps_fork(struct thread *td1, struct thread *td2);

 #else

diff --git a/sys/vm/pmap.h b/sys/vm/pmap.h
index 79973ca06e..ee6bf5cf82 100644
--- a/sys/vm/pmap.h
+++ b/sys/vm/pmap.h
@@ -62,7 +62,7 @@
  * rights to redistribute these changes.
  *
  * $FreeBSD: src/sys/vm/pmap.h,v 1.33.2.4 2002/03/06 22:44:24 silby Exp $
- * $DragonFly: src/sys/vm/pmap.h,v 1.23 2007/02/24 14:24:06 corecode Exp $
+ * $DragonFly: src/sys/vm/pmap.h,v 1.24 2007/02/26 21:41:08 corecode Exp $
  */

 /*
@@ -165,7 +165,7 @@ void pmap_page_assertzero (vm_paddr_t);
 void pmap_zero_page_area (vm_paddr_t, int off, int size);
 void pmap_prefault (pmap_t, vm_offset_t, vm_map_entry_t);
 int pmap_mincore (pmap_t pmap, vm_offset_t addr);
-void pmap_init_proc (struct proc *p, struct thread *td);
+void pmap_init_proc (struct proc *);
 void pmap_init_thread (struct thread *td);
 void pmap_dispose_proc (struct proc *p);
 void pmap_activate (struct proc *p);
diff --git a/sys/vm/vm_extern.h b/sys/vm/vm_extern.h
index 660712f19e..ffea700f27 100644
--- a/sys/vm/vm_extern.h
+++ b/sys/vm/vm_extern.h
@@ -32,7 +32,7 @@
  *
  * @(#)vm_extern.h 8.2 (Berkeley) 1/12/94
  * $FreeBSD: src/sys/vm/vm_extern.h,v 1.46.2.3 2003/01/13 22:51:17 dillon Exp $
- * $DragonFly: src/sys/vm/vm_extern.h,v 1.24 2007/01/11 20:53:42 dillon Exp $
+ * $DragonFly: src/sys/vm/vm_extern.h,v 1.25 2007/02/26 21:41:08 corecode Exp $
  */

 #ifndef _VM_VM_EXTERN_H_
@@ -94,7 +94,7 @@ vm_page_t vm_fault_page_quick (vm_offset_t, vm_prot_t, int *);
 void vm_fault_copy_entry (vm_map_t, vm_map_t, vm_map_entry_t, vm_map_entry_t);
 void vm_fault_unwire (vm_map_t, vm_map_entry_t);
 int vm_fault_wire (vm_map_t, vm_map_entry_t, boolean_t);
-void vm_fork (struct lwp *, struct proc *, int);
+void vm_fork (struct proc *, struct proc *, int);
 void vm_fault_ratecheck(void);
 void vm_waitproc (struct proc *);
 int vm_mmap (vm_map_t, vm_offset_t *, vm_size_t, vm_prot_t, vm_prot_t, int, void *, vm_ooffset_t);
diff --git a/sys/vm/vm_glue.c b/sys/vm/vm_glue.c
index d0de59ec12..a55a8f4483 100644
--- a/sys/vm/vm_glue.c
+++ b/sys/vm/vm_glue.c
@@ -60,7 +60,7 @@
  * rights to redistribute these changes.
  *
  * $FreeBSD: src/sys/vm/vm_glue.c,v 1.94.2.4 2003/01/13 22:51:17 dillon Exp $
- * $DragonFly: src/sys/vm/vm_glue.c,v 1.52 2007/02/25 23:17:13 corecode Exp $
+ * $DragonFly: src/sys/vm/vm_glue.c,v 1.53 2007/02/26 21:41:08 corecode Exp $
  */

 #include "opt_vm.h"
@@ -224,11 +224,8 @@ vsunlock(caddr_t addr, u_int len)
  * to user mode to avoid stack copying and relocation problems.
  */
 void
-vm_fork(struct lwp *lp1, struct proc *p2, int flags)
+vm_fork(struct proc *p1, struct proc *p2, int flags)
 {
-        struct proc *p1 = lp1->lwp_proc;
-        struct thread *td2;
-
         if ((flags & RFPROC) == 0) {
                 /*
                  * Divorce the memory, if it is shared, essentially
@@ -240,7 +237,7 @@ vm_fork(struct lwp *lp1, struct proc *p2, int flags)
                         vmspace_unshare(p1);
                 }
         }
-        cpu_fork(lp1, NULL, flags);
+        cpu_fork(ONLY_LWP_IN_PROC(p1), NULL, flags);
         return;
 }

@@ -262,16 +259,7 @@ vm_fork(struct lwp *lp1, struct proc *p2, int flags)
                 shmfork(p1, p2);
         }

-        td2 = lwkt_alloc_thread(NULL, LWKT_THREAD_STACK, -1, 0);
-        pmap_init_proc(p2, td2);
-        lwkt_setpri(td2, TDPRI_KERN_USER);
-        lwkt_set_comm(td2, "%s", p1->p_comm);
-
-        /*
-         * cpu_fork will copy and update the pcb, set up the kernel stack,
-         * and make the child ready to run.
-         */
-        cpu_fork(lp1, td2->td_lwp, flags);
+        pmap_init_proc(p2);
 }

 /*
-- 
2.41.0
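To see the new shape of the fork path in one place: after this patch, fork1() copies
proc-level state, vm_fork() operates on the two procs only, and lwp_fork() creates the
child's first lwp and its LWKT thread. The minimal userland sketch below mirrors that
split; it is not DragonFly code, and every mock_* type and function is an invented
stand-in with heavily simplified fields.

/* fork_flow_sketch.c: illustrative mock of the post-patch division of labor. */
#include <stdio.h>
#include <stdlib.h>

struct mock_proc;

struct mock_lwp {
        int lwp_tid;
        struct mock_proc *lwp_proc;
};

struct mock_proc {
        int p_pid;
        int p_nthreads;
        int p_lasttid;
        struct mock_lwp *p_firstlwp;    /* single-lwp simplification of p_lwps */
};

/* Stand-in for vm_fork(): after the patch it sees procs only, no lwp. */
static void
mock_vm_fork(struct mock_proc *p1, struct mock_proc *p2)
{
        printf("vm_fork: copy vmspace of pid %d into pid %d\n",
            p1->p_pid, p2->p_pid);
}

/* Stand-in for lwp_fork(): creates the first lwp of the new proc. */
static struct mock_lwp *
mock_lwp_fork(struct mock_lwp *origlp, struct mock_proc *destproc)
{
        struct mock_lwp *lp = calloc(1, sizeof(*lp));

        if (lp == NULL)
                abort();
        lp->lwp_proc = destproc;
        lp->lwp_tid = destproc->p_lasttid++;
        destproc->p_firstlwp = lp;
        destproc->p_nthreads++;
        printf("lwp_fork: tid %d created for pid %d (forked from tid %d)\n",
            lp->lwp_tid, destproc->p_pid, origlp->lwp_tid);
        return (lp);
}

/* Stand-in for the tail of fork1() after this patch. */
static void
mock_fork1(struct mock_lwp *lp1, struct mock_proc *p2)
{
        p2->p_nthreads = 0;              /* the child starts with no lwps now */
        mock_vm_fork(lp1->lwp_proc, p2); /* proc-level work */
        mock_lwp_fork(lp1, p2);          /* lwp-level work; the real lwp_fork
                                          * also wires up the LWKT thread */
}

int
main(void)
{
        struct mock_lwp lp1 = { 0, NULL };
        struct mock_proc p1 = { 100, 1, 1, &lp1 };
        struct mock_proc p2 = { 101, 0, 0, NULL };

        lp1.lwp_proc = &p1;
        mock_fork1(&lp1, &p2);
        return (0);
}

The payoff of the split is visible in the pmap changes above: once vm_fork() no longer
touches threads, pmap_init_proc() loses its thread argument and shrinks to an empty
proc-only stub on both the pc32 and vkernel platforms.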
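The tid allocation in lwp_fork() is the only subtle new logic: tids are normally handed
out by post-incrementing p_lasttid, and once the counter would go negative the code falls
back to a linear search, restarted on every collision, for the lowest free tid. The
standalone approximation below shows the same strategy; the int array stands in for the
proc's lwp list, all names are invented, and this is a sketch rather than the kernel code.

/* tid_alloc_sketch.c: approximation of the wrap-around handling in lwp_fork(). */
#include <stdio.h>

/* Return a tid not present in used[0..nused-1].  Normally just hand out the
 * counter and bump it; once the counter would wrap negative, scan from 0 for
 * the lowest free value, restarting the scan on every collision (this mirrors
 * the `goto restart' in the patch).
 */
static int
alloc_tid(int *lasttid, const int *used, int nused)
{
        int tid, i;

        if (*lasttid + 1 <= 0) {
                tid = 0;
                for (i = 0; i < nused; i++) {
                        if (used[i] != tid)
                                continue;
                        /* tid taken: try the next one and rescan from the start */
                        tid++;
                        i = -1;
                }
                return (tid);
        }
        return ((*lasttid)++);
}

int
main(void)
{
        int used[] = { 0, 1, 3 };       /* tids already held by existing lwps */
        int fresh = 0;                  /* a brand-new proc starts at p_lasttid 0 */
        int wrapped = -1;               /* counter has already gone negative */

        /* Normal path: prints 0 and leaves the counter at 1. */
        printf("first tid for a new proc: %d\n", alloc_tid(&fresh, used, 0));
        /* Wrapped path: prints 2, the lowest value not in `used'. */
        printf("tid after wrap-around: %d\n", alloc_tid(&wrapped, used, 3));
        return (0);
}

As the patch's own "XXX give me a nicer allocator" comment concedes, the
restart-on-collision scan is quadratic in the number of lwps and is intended only as a
stopgap until a proper tid allocator exists.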