X-Git-Url: https://gitweb.dragonflybsd.org/dragonfly.git/blobdiff_plain/ee295cbe38bc13d3645c4f0688a642e69a30cfbc..4643740aa6f3eac7d3dfab05967ec55b5d8ba984:/sys/kern/kern_fork.c

diff --git a/sys/kern/kern_fork.c b/sys/kern/kern_fork.c
index 086c2cdd1b..f5c8f2820b 100644
--- a/sys/kern/kern_fork.c
+++ b/sys/kern/kern_fork.c
@@ -198,6 +198,8 @@ sys_lwp_create(struct lwp_create_args *uap)
 	plimit_lwp_fork(p);	/* force exclusive access */
 	lp = lwp_fork(curthread->td_lwp, p, RFPROC);
 	error = cpu_prepare_lwp(lp, &params);
+	if (error)
+		goto fail;
 	if (params.tid1 != NULL &&
 	    (error = copyout(&lp->lwp_tid, params.tid1, sizeof(lp->lwp_tid))))
 		goto fail;
@@ -221,9 +223,12 @@ fail:
 	lwp_rb_tree_RB_REMOVE(&p->p_lwp_tree, lp);
 	--p->p_nthreads;
 	/* lwp_dispose expects an exited lwp, and a held proc */
-	lp->lwp_flag |= LWP_WEXIT;
+	atomic_set_int(&lp->lwp_mpflags, LWP_MP_WEXIT);
 	lp->lwp_thread->td_flags |= TDF_EXITING;
+	lwkt_remove_tdallq(lp->lwp_thread);
 	PHOLD(p);
+	biosched_done(lp->lwp_thread);
+	dsched_exit_thread(lp->lwp_thread);
 	lwp_dispose(lp);
 	lwkt_reltoken(&p->p_token);
 fail2:
@@ -332,11 +337,12 @@ fork1(struct lwp *lp1, int flags, struct proc **procp)
 		error = EAGAIN;
 		goto done;
 	}
+
 	/*
 	 * Increment the nprocs resource before blocking can occur.  There
 	 * are hard-limits as to the number of processes that can run.
 	 */
-	nprocs++;
+	atomic_add_int(&nprocs, 1);
 
 	/*
 	 * Increment the count of procs running with this uid. Don't allow
@@ -348,7 +354,7 @@ fork1(struct lwp *lp1, int flags, struct proc **procp)
 		/*
 		 * Back out the process count
 		 */
-		nprocs--;
+		atomic_add_int(&nprocs, -1);
 		if (ppsratecheck(&lastfail, &curfail, 1))
 			kprintf("maxproc limit exceeded by uid %d, please "
 			       "see tuning(7) and login.conf(5).\n", uid);
@@ -373,7 +379,7 @@ fork1(struct lwp *lp1, int flags, struct proc **procp)
 
 	RB_INIT(&p2->p_lwp_tree);
 	spin_init(&p2->p_spin);
-	lwkt_token_init(&p2->p_token, "iproc");
+	lwkt_token_init(&p2->p_token, "proc");
 	p2->p_lasttid = -1;	/* first tid will be 0 */
 
 	/*
@@ -399,12 +405,12 @@ fork1(struct lwp *lp1, int flags, struct proc **procp)
 	 * other consumers to gain temporary references to p2
 	 * (p2->p_lock can change).
 	 */
-	if (p1->p_flag & P_PROFIL)
+	if (p1->p_flags & P_PROFIL)
 		startprofclock(p2);
 	p2->p_ucred = crhold(lp1->lwp_thread->td_ucred);
 
 	if (jailed(p2->p_ucred))
-		p2->p_flag |= P_JAILED;
+		p2->p_flags |= P_JAILED;
 
 	if (p2->p_args)
 		refcount_acquire(&p2->p_args->ar_ref);
@@ -480,11 +486,11 @@ fork1(struct lwp *lp1, int flags, struct proc **procp)
 	 * Preserve some more flags in subprocess.  P_PROFIL has already
 	 * been preserved.
 	 */
-	p2->p_flag |= p1->p_flag & P_SUGID;
-	if (p1->p_session->s_ttyvp != NULL && p1->p_flag & P_CONTROLT)
-		p2->p_flag |= P_CONTROLT;
+	p2->p_flags |= p1->p_flags & P_SUGID;
+	if (p1->p_session->s_ttyvp != NULL && p1->p_flags & P_CONTROLT)
+		p2->p_flags |= P_CONTROLT;
 	if (flags & RFPPWAIT)
-		p2->p_flag |= P_PPWAIT;
+		p2->p_flags |= P_PPWAIT;
 
 	/*
 	 * Inherit the virtual kernel structure (allows a virtual kernel
@@ -522,7 +528,7 @@ fork1(struct lwp *lp1, int flags, struct proc **procp)
 		lwkt_reltoken(&pptr->p_token);
 
 	varsymset_init(&p2->p_varsymset, &p1->p_varsymset);
-	callout_init(&p2->p_ithandle);
+	callout_init_mp(&p2->p_ithandle);
 
 #ifdef KTRACE
 	/*
@@ -606,6 +612,7 @@ done:
 static struct lwp *
 lwp_fork(struct lwp *origlp, struct proc *destproc, int flags)
 {
+	globaldata_t gd = mycpu;
 	struct lwp *lp;
 	struct thread *td;
 
@@ -617,7 +624,7 @@ lwp_fork(struct lwp *origlp, struct proc *destproc, int flags)
 	bcopy(&origlp->lwp_startcopy, &lp->lwp_startcopy,
 	    (unsigned) ((caddr_t)&lp->lwp_endcopy -
 			(caddr_t)&lp->lwp_startcopy));
-	lp->lwp_flag |= origlp->lwp_flag & LWP_ALTSTACK;
+	lp->lwp_flags |= origlp->lwp_flags & LWP_ALTSTACK;
 	/*
 	 * Set cpbase to the last timeout that occured (not the upcoming
 	 * timeout).
@@ -626,13 +633,18 @@ lwp_fork(struct lwp *origlp, struct proc *destproc, int flags)
 	 * scheduler specific data.
 	 */
 	crit_enter();
-	lp->lwp_cpbase = mycpu->gd_schedclock.time -
-			 mycpu->gd_schedclock.periodic;
+	lp->lwp_cpbase = gd->gd_schedclock.time - gd->gd_schedclock.periodic;
 	destproc->p_usched->heuristic_forking(origlp, lp);
 	crit_exit();
 	lp->lwp_cpumask &= usched_mastermask;
+	lwkt_token_init(&lp->lwp_token, "lwp_token");
+	spin_init(&lp->lwp_spin);
 
-	td = lwkt_alloc_thread(NULL, LWKT_THREAD_STACK, -1, 0);
+	/*
+	 * Assign the thread to the current cpu to begin with so we
+	 * can manipulate it.
+	 */
+	td = lwkt_alloc_thread(NULL, LWKT_THREAD_STACK, gd->gd_cpuid, 0);
 	lp->lwp_thread = td;
 	td->td_proc = destproc;
 	td->td_lwp = lp;
@@ -660,7 +672,6 @@ lwp_fork(struct lwp *origlp, struct proc *destproc, int flags)
 
 	destproc->p_lasttid = lp->lwp_tid;
 	destproc->p_nthreads++;
-
 	return (lp);
 }
@@ -745,8 +756,12 @@ start_forked_proc(struct lwp *lp1, struct proc *p2)
 	/*
 	 * Preserve synchronization semantics of vfork.  If waiting for
 	 * child to exec or exit, set P_PPWAIT on child, and sleep on our
-	 * proc (in case of exit).
+	 * proc (in case of exec or exit).
+	 *
+	 * We must hold our p_token to interlock the flag/tsleep
 	 */
-	while (p2->p_flag & P_PPWAIT)
+	lwkt_gettoken(&p2->p_token);
+	while (p2->p_flags & P_PPWAIT)
 		tsleep(lp1->lwp_proc, 0, "ppwait", 0);
+	lwkt_reltoken(&p2->p_token);
 }
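The last hunk is the subtle one: testing P_PPWAIT and calling tsleep() without holding p2->p_token leaves a window in which the child can clear the flag and issue its wakeup between the parent's flag test and its sleep, so the parent can sleep forever (a lost-wakeup race). Taking the token around the test/sleep loop closes that window. As a rough userspace sketch of the same interlock pattern, a mutex plus condition variable stands in for the token plus tsleep/wakeup; the names below (ppwait_flag, wait_for_child, child_done) are illustrative, not kernel API:

	#include <pthread.h>
	#include <stdbool.h>

	static pthread_mutex_t token  = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t  wakeup = PTHREAD_COND_INITIALIZER;
	static bool ppwait_flag = true;

	/*
	 * Parent side: mirrors lwkt_gettoken(); while (P_PPWAIT) tsleep();
	 * lwkt_reltoken().  The flag is only ever tested with the lock held,
	 * and pthread_cond_wait() drops and reacquires it atomically, so no
	 * wakeup can slip between the test and the sleep.
	 */
	static void
	wait_for_child(void)
	{
		pthread_mutex_lock(&token);
		while (ppwait_flag)
			pthread_cond_wait(&wakeup, &token);
		pthread_mutex_unlock(&token);
	}

	/*
	 * Child side: clear the flag and wake the parent under the same
	 * lock, as exec/exit does when it clears P_PPWAIT and wakes the
	 * parent.
	 */
	static void
	child_done(void)
	{
		pthread_mutex_lock(&token);
		ppwait_flag = false;
		pthread_cond_broadcast(&wakeup);
		pthread_mutex_unlock(&token);
	}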