/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_exit.c	8.7 (Berkeley) 2/12/94
 * $FreeBSD: src/sys/kern/kern_exit.c,v 1.92.2.11 2003/01/13 22:51:16 dillon Exp $
 */
#include "opt_compat.h"
#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/ktrace.h>
#include <sys/pioctl.h>
#include <sys/tty.h>
#include <sys/wait.h>
#include <sys/vnode.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/taskqueue.h>
#include <sys/ptrace.h>
#include <sys/acct.h>		/* for acct_process() function prototype */
#include <sys/filedesc.h>
#include <sys/shm.h>
#include <sys/sem.h>
#include <sys/jail.h>
#include <sys/kern_syscall.h>
#include <sys/unistd.h>
#include <sys/eventhandler.h>
#include <sys/dsched.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>
#include <sys/user.h>

#include <sys/refcount.h>
#include <sys/thread2.h>
#include <sys/sysref2.h>
#include <sys/mplock2.h>
static void reaplwps(void *context, int dummy);
static void reaplwp(struct lwp *lp);
static void killlwps(struct lwp *lp);

static MALLOC_DEFINE(M_ATEXIT, "atexit", "atexit callback");
static MALLOC_DEFINE(M_ZOMBIE, "zombie", "zombie proc status");

static struct lwkt_token deadlwp_token = LWKT_TOKEN_INITIALIZER(deadlwp_token);
/*
 * callout list for things to do at exit time
 */
struct exitlist {
	exitlist_fn function;
	TAILQ_ENTRY(exitlist) next;
};

TAILQ_HEAD(exit_list_head, exitlist);
static struct exit_list_head exit_list = TAILQ_HEAD_INITIALIZER(exit_list);

/*
 * LWP reaper data
 */
struct task *deadlwp_task[MAXCPU];
struct lwplist deadlwp_list[MAXCPU];
/*
 * exit --
 *	Death of process.
 *
 * SYS_EXIT_ARGS(int rval)
 */
int
sys_exit(struct exit_args *uap)
{
	exit1(W_EXITCODE(uap->rval, 0));
	/* NOTREACHED */
}
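/*
 * Illustrative note (not part of the original file): W_EXITCODE(rval, 0)
 * packs the user-supplied exit code so that a later waiter sees
 * WEXITSTATUS(status) == rval and WTERMSIG(status) == 0 (no signal).
 */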
/*
 * Extended exit --
 *	Death of a lwp or process with optional bells and whistles.
 */
int
sys_extexit(struct extexit_args *uap)
{
	struct proc *p = curproc;
	int action, who;
	int error;

	action = EXTEXIT_ACTION(uap->how);
	who = EXTEXIT_WHO(uap->how);

	/* Check parameters before we might perform some action */
	switch (who) {
	case EXTEXIT_PROC:
	case EXTEXIT_LWP:
		break;
	default:
		return (EINVAL);
	}

	switch (action) {
	case EXTEXIT_SIMPLE:
		break;
	case EXTEXIT_SETINT:
		error = copyout(&uap->status, uap->addr, sizeof(uap->status));
		if (error)
			return (error);
		break;
	default:
		return (EINVAL);
	}

	lwkt_gettoken(&p->p_token);

	switch (who) {
	case EXTEXIT_LWP:
		/*
		 * Be sure only to perform a simple lwp exit if there is at
		 * least one more lwp in the proc, which will call exit1()
		 * later, otherwise the proc will be an UNDEAD and not even a
		 * SZOMB!
		 */
		if (p->p_nthreads > 1) {
			lwp_exit(0);	/* called w/ p_token held */
			/* NOT REACHED */
		}
		/* else last lwp in proc:  do the real thing */
		/* FALLTHROUGH */
	default:	/* to help gcc */
	case EXTEXIT_PROC:
		lwkt_reltoken(&p->p_token);
		exit1(W_EXITCODE(uap->status, 0));
		/* NOTREACHED */
	}

	/* NOTREACHED */
	lwkt_reltoken(&p->p_token);	/* safety */
	return (EINVAL);
}
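/*
 * Illustrative userland sketch (not part of the original file; the exact
 * flag composition is an assumption based on the EXTEXIT_ACTION()/
 * EXTEXIT_WHO() decoding above): a threading library would typically
 * reach the handler above with something like
 *
 *	extexit(EXTEXIT_LWP | EXTEXIT_SETINT, value, &status_slot);
 *
 * which copies 'value' out to 'status_slot' and then exits just the
 * calling lwp, falling through to a full process exit when it is the
 * last lwp in the process.
 */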
/*
 * Kill all lwps associated with the current process except the
 * current lwp.   Return an error if we race another thread trying to
 * do the same thing and lose the race.
 *
 * If forexec is non-zero the current thread and process flags are
 * cleaned up so they can be reused.
 *
 * Caller must hold curproc->p_token
 */
static int
killalllwps(int forexec)
{
	struct lwp *lp = curthread->td_lwp;
	struct proc *p = lp->lwp_proc;

	/*
	 * Interlock against P_WEXIT.  Only one of the process's threads
	 * is allowed to do the master exit.
	 */
	if (p->p_flags & P_WEXIT)
		return (EALREADY);
	p->p_flags |= P_WEXIT;

	/*
	 * Interlock with LWP_MP_WEXIT and kill any remaining LWPs
	 */
	atomic_set_int(&lp->lwp_mpflags, LWP_MP_WEXIT);
	if (p->p_nthreads > 1)
		killlwps(lp);

	/*
	 * If doing this for an exec, clean up the remaining thread
	 * (us) for continuing operation after all the other threads
	 * have been killed.
	 */
	if (forexec) {
		atomic_clear_int(&lp->lwp_mpflags, LWP_MP_WEXIT);
		p->p_flags &= ~P_WEXIT;
	}
	return(0);
}
/*
 * Kill all LWPs except the current one.  Do not try to signal
 * LWPs which have exited on their own or have already been
 * signaled.
 */
static void
killlwps(struct lwp *lp)
{
	struct proc *p = lp->lwp_proc;
	struct lwp *tlp;

	/*
	 * Kill the remaining LWPs.  We must send the signal before setting
	 * LWP_MP_WEXIT.  The setting of WEXIT is optional but helps reduce
	 * races.  tlp must be held across the call as it might block and
	 * allow the target lwp to rip itself out from under our loop.
	 */
	FOREACH_LWP_IN_PROC(tlp, p) {
		LWPHOLD(tlp);
		lwkt_gettoken(&tlp->lwp_token);
		if ((tlp->lwp_mpflags & LWP_MP_WEXIT) == 0) {
			lwpsignal(p, tlp, SIGKILL);
			atomic_set_int(&tlp->lwp_mpflags, LWP_MP_WEXIT);
		}
		lwkt_reltoken(&tlp->lwp_token);
		LWPRELE(tlp);
	}

	/*
	 * Wait for everything to clear out.
	 */
	while (p->p_nthreads > 1) {
		tsleep(&p->p_nthreads, 0, "killlwps", 0);
	}
}
/*
 * Exit: deallocate address space and other resources, change proc state
 * to zombie, and unlink proc from allproc and parent's lists.  Save exit
 * status and rusage for wait().  Check for child processes and orphan them.
 */
void
exit1(int rv)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct lwp *lp = td->td_lwp;
	struct proc *q, *nq;
	struct vmspace *vm;
	struct vnode *vtmp;
	struct exitlist *ep;
	int error;

	lwkt_gettoken(&p->p_token);
	if (p->p_pid == 1) {
		kprintf("init died (signal %d, exit %d)\n",
		    WTERMSIG(rv), WEXITSTATUS(rv));
		panic("Going nowhere without my init!");
	}
	varsymset_clean(&p->p_varsymset);
	lockuninit(&p->p_varsymset.vx_lock);
	/*
	 * Kill all lwps associated with the current process, return an
	 * error if we race another thread trying to do the same thing
	 * and lose the race.
	 */
	error = killalllwps(0);
	if (error) {
		lwp_exit(0);
		/* NOT REACHED */
	}
	/* are we a task leader? */
	if (p == p->p_leader) {
		struct kill_args killArgs;

		killArgs.signum = SIGKILL;
		q = p->p_peers;
		while (q) {
			killArgs.pid = q->p_pid;
			/*
			 * The interface for kill is better
			 * than the internal signal
			 */
			sys_kill(&killArgs);
			nq = q;
			q = q->p_peers;
		}
		while (p->p_peers)
			tsleep((caddr_t)p, 0, "exit1", 0);
	}
	STOPEVENT(p, S_EXIT, rv);
	p->p_flags |= P_POSTEXIT;	/* stop procfs stepping */

	/*
	 * Check if any loadable modules need anything done at process exit.
	 * e.g. SYSV IPC stuff
	 * XXX what if one of these generates an error?
	 */
	EVENTHANDLER_INVOKE(process_exit, p);

	/*
	 * XXX: imho, the eventhandler stuff is much cleaner than this.
	 *	Maybe we should move everything to use eventhandler.
	 */
	TAILQ_FOREACH(ep, &exit_list, next)
		(*ep->function)(td);

	if (p->p_flags & P_PROFIL)
		stopprofclock(p);
	SIGEMPTYSET(p->p_siglist);
	SIGEMPTYSET(lp->lwp_siglist);
	if (timevalisset(&p->p_realtimer.it_value))
		callout_stop_sync(&p->p_ithandle);
	/*
	 * Reset any sigio structures pointing to us as a result of
	 * F_SETOWN with our pid.
	 */
	funsetownlst(&p->p_sigiolst);

	/*
	 * Close open files and release open-file table.
	 * This may block!
	 */
	fdfree(p, NULL);
	if (p->p_leader->p_peers) {
		q = p->p_leader;
		while (q->p_peers != p)
			q = q->p_peers;
		q->p_peers = p->p_peers;
		wakeup((caddr_t)p->p_leader);
	}
	/*
	 * XXX Shutdown SYSV semaphores
	 */
	semexit(p);

	KKASSERT(p->p_numposixlocks == 0);
	/* The next two chunks should probably be moved to vmspace_exit. */
	vm = p->p_vmspace;

	/*
	 * Clean up data related to virtual kernel operation.  Clean up
	 * any vkernel context related to the current lwp now so we can
	 * destroy p_vkernel.
	 */
	if (p->p_vkernel) {
		vkernel_lwp_exit(lp);
		vkernel_exit(p);
	}

	/*
	 * Release user portion of address space.
	 * This releases references to vnodes,
	 * which could cause I/O if the file has been unlinked.
	 * Need to do this early enough that we can still sleep.
	 * Can't free the entire vmspace as the kernel stack
	 * may be mapped within that space also.
	 *
	 * Processes sharing the same vmspace may exit in one order, and
	 * get cleaned up by vmspace_exit() in a different order.  The
	 * last exiting process to reach this point releases as much of
	 * the environment as it can, and the last process cleaned up
	 * by vmspace_exit() (which decrements exitingcnt) cleans up the
	 * remainder.
	 */
	vmspace_exitbump(vm);
	sysref_put(&vm->vm_sysref);
	if (SESS_LEADER(p)) {
		struct session *sp = p->p_session;

		if (sp->s_ttyvp) {
			/*
			 * We are the controlling process.  Signal the
			 * foreground process group, drain the controlling
			 * terminal, and revoke access to the controlling
			 * terminal.
			 *
			 * NOTE: while waiting for the process group to exit
			 * it is possible that one of the processes in the
			 * group will revoke the tty, so the ttyclosesession()
			 * function will re-check sp->s_ttyvp.
			 */
			if (sp->s_ttyp && (sp->s_ttyp->t_session == sp)) {
				if (sp->s_ttyp->t_pgrp)
					pgsignal(sp->s_ttyp->t_pgrp, SIGHUP, 1);
				ttyclosesession(sp, 1); /* also revoke */
			}

			/*
			 * Release the tty.  If someone has it open via
			 * /dev/tty then close it (since they no longer can
			 * once we've NULL'd it out).
			 */
			ttyclosesession(sp, 0);

			/*
			 * s_ttyp is not zero'd; we use this to indicate
			 * that the session once had a controlling terminal.
			 * (for logging and informational purposes)
			 */
		}
		sp->s_leader = NULL;
	}

	fixjobc(p, p->p_pgrp, 0);
	(void)acct_process(p);

#ifdef KTRACE
	/*
	 * release trace file
	 */
	if (p->p_tracenode)
		ktrdestroy(&p->p_tracenode);
	p->p_traceflag = 0;
#endif
	/*
	 * Release reference to text vnode
	 */
	if ((vtmp = p->p_textvp) != NULL) {
		p->p_textvp = NULL;
		vrele(vtmp);
	}

	/* Release namecache handle to text file */
	if (p->p_textnch.ncp)
		cache_drop(&p->p_textnch);
	/*
	 * We have to handle PPWAIT here or proc_move_allproc_zombie()
	 * will block on the PHOLD() the parent is doing.
	 */
	if (p->p_flags & P_PPWAIT) {
		p->p_flags &= ~P_PPWAIT;
		wakeup(p->p_pptr);
	}

	/*
	 * Move the process to the zombie list.  This will block
	 * until the process p_lock count reaches 0.  The process will
	 * not be reaped until TDF_EXITING is set by cpu_thread_exit(),
	 * which is called from cpu_proc_exit().
	 */
	proc_move_allproc_zombie(p);
	/*
	 * Reparent all of this process's children to the init process.
	 * We must hold initproc->p_token in order to mess with
	 * initproc->p_children.  We already hold p->p_token (to remove
	 * the children from our list).
	 */
	q = LIST_FIRST(&p->p_children);
	if (q) {
		lwkt_gettoken(&initproc->p_token);
		while ((q = LIST_FIRST(&p->p_children)) != NULL) {
			PHOLD(q);
			lwkt_gettoken(&q->p_token);
			if (q != LIST_FIRST(&p->p_children)) {
				lwkt_reltoken(&q->p_token);
				PRELE(q);
				continue;
			}
			LIST_REMOVE(q, p_sibling);
			LIST_INSERT_HEAD(&initproc->p_children, q, p_sibling);
			q->p_pptr = initproc;
			q->p_sigparent = SIGCHLD;

			/*
			 * Traced processes are killed
			 * since their existence means someone is screwing up.
			 */
			if (q->p_flags & P_TRACED) {
				q->p_flags &= ~P_TRACED;
				ksignal(q, SIGKILL);
			}
			lwkt_reltoken(&q->p_token);
			PRELE(q);
		}
		lwkt_reltoken(&initproc->p_token);
		wakeup(initproc);
	}
	/*
	 * Save exit status and final rusage info, adding in child rusage
	 * info and self times.
	 */
	calcru_proc(p, &p->p_ru);
	ruadd(&p->p_ru, &p->p_cru);

	/*
	 * notify interested parties of our demise.
	 */
	KNOTE(&p->p_klist, NOTE_EXIT);
	/*
	 * Notify parent that we're gone.  If parent has the PS_NOCLDWAIT
	 * flag set, or if the handler is set to SIG_IGN, notify process 1
	 * instead (and hope it will handle this situation).
	 */
	if (p->p_pptr->p_sigacts->ps_flag & (PS_NOCLDWAIT | PS_CLDSIGIGN)) {
		proc_reparent(p, initproc);
	}

	/* lwkt_gettoken(&proc_token); */
	q = p->p_pptr;
	PHOLD(q);
	if (p->p_sigparent && q != initproc) {
		ksignal(q, p->p_sigparent);
	} else {
		ksignal(q, SIGCHLD);
	}

	p->p_flags &= ~P_TRACED;
	wakeup(p->p_pptr);
	PRELE(q);
	/* lwkt_reltoken(&proc_token); */
	/* NOTE: p->p_pptr can get ripped out */
	/*
	 * cpu_exit is responsible for clearing curproc, since
	 * it is heavily integrated with the thread/switching sequence.
	 *
	 * Other substructures are freed from wait().
	 */

	/*
	 * Release the current user process designation on the process so
	 * the userland scheduler can work in someone else.
	 */
	p->p_usched->release_curproc(lp);

	/*
	 * Finally, call machine-dependent code to release as many of the
	 * lwp's resources as we can and halt execution of this thread.
	 */
	lwp_exit(1);
}
/*
 * Eventually called by every exiting LWP
 *
 * p->p_token must be held.  mplock may be held and will be released.
 */
void
lwp_exit(int masterexit)
{
	struct thread *td = curthread;
	struct lwp *lp = td->td_lwp;
	struct proc *p = lp->lwp_proc;
	int dowake = 0;

	/*
	 * lwp_exit() may be called without setting LWP_MP_WEXIT, so
	 * make sure it is set here.
	 */
	ASSERT_LWKT_TOKEN_HELD(&p->p_token);
	atomic_set_int(&lp->lwp_mpflags, LWP_MP_WEXIT);
	/*
	 * Clean up any virtualization
	 */
	if (lp->lwp_vkernel)
		vkernel_lwp_exit(lp);

	/*
	 * Clean up select/poll support
	 */
	kqueue_terminate(&lp->lwp_kqueue);

	/*
	 * Clean up any syscall-cached ucred
	 */
	if (td->td_ucred) {
		crfree(td->td_ucred);
		td->td_ucred = NULL;
	}

	/*
	 * Nobody actually wakes us when the lock
	 * count reaches zero, so just wait one tick.
	 */
	while (lp->lwp_lock > 0)
		tsleep(lp, 0, "lwpexit", 1);

	/* Hand down resource usage to our proc */
	ruadd(&p->p_ru, &lp->lwp_ru);
	/*
	 * If we don't hold the process until the LWP is reaped wait*()
	 * may try to dispose of its vmspace before all the LWPs have
	 * actually terminated.
	 */
	PHOLD(p);

	/*
	 * Do any remaining work that might block on us.  We should be
	 * coded such that further blocking is ok after decrementing
	 * p_nthreads but don't take the chance.
	 */
	dsched_exit_thread(td);
	biosched_done(curthread);
	/*
	 * We have to use the reaper for all the LWPs except the one doing
	 * the master exit.  The LWP doing the master exit can just be
	 * left on p_lwps and the process reaper will deal with it
	 * synchronously, which is much faster.
	 *
	 * Wakeup anyone waiting on p_nthreads to drop to 1 or 0.
	 *
	 * The process is left held until the reaper calls lwp_dispose() on
	 * the lp (after calling lwp_wait()).
	 */
	if (masterexit == 0) {
		lwp_rb_tree_RB_REMOVE(&p->p_lwp_tree, lp);
		--p->p_nthreads;
		if (p->p_nthreads <= 1)
			dowake = 1;
		lwkt_gettoken(&deadlwp_token);
		LIST_INSERT_HEAD(&deadlwp_list[mycpuid], lp, u.lwp_reap_entry);
		taskqueue_enqueue(taskqueue_thread[mycpuid],
				  deadlwp_task[mycpuid]);
		lwkt_reltoken(&deadlwp_token);
	} else {
		--p->p_nthreads;
		if (p->p_nthreads <= 1)
			dowake = 1;
	}

	/*
	 * Release p_token.  Issue the wakeup() on p_nthreads if necessary,
	 * as late as possible to give us a chance to actually deschedule and
	 * switch away before another cpu core hits reaplwp().
	 */
	lwkt_reltoken(&p->p_token);
	if (dowake)
		wakeup(&p->p_nthreads);

	/*
	 * Tell the userland scheduler that we are going away
	 */
	p->p_usched->heuristic_exiting(lp, p);

	cpu_lwp_exit();
}
/*
 * Wait until a lwp is completely dead.  The final interlock in this drama
 * is when TDF_EXITING is set in cpu_thread_exit() just before the final
 * switch-out.
 *
 * At the point TDF_EXITING is set a complete exit is accomplished when
 * TDF_RUNNING and TDF_PREEMPT_LOCK are both clear.  td_mpflags has two
 * post-switch interlock flags that can be used to wait for the TDF_
 * flags to clear.
 *
 * Returns non-zero on success, and zero if the caller needs to retry
 * the lwp_wait().
 */
static int
lwp_wait(struct lwp *lp)
{
	struct thread *td = lp->lwp_thread;
	int mpflags;

	KKASSERT(lwkt_preempted_proc() != lp);

	/*
	 * This bit of code uses the thread destruction interlock
	 * managed by lwkt_switch_return() to wait for the lwp's
	 * thread to completely disengage.
	 *
	 * It is possible for us to race another cpu core so we
	 * have to do this correctly.
	 */
	for (;;) {
		mpflags = td->td_mpflags;
		cpu_ccfence();
		if (mpflags & TDF_MP_EXITSIG)
			break;
		tsleep_interlock(td, 0);
		if (atomic_cmpset_int(&td->td_mpflags, mpflags,
				      mpflags | TDF_MP_EXITWAIT)) {
			tsleep(td, PINTERLOCKED, "lwpxt", 0);
		}
	}

	/*
	 * We've already waited for the core exit but there can still
	 * be other refs from e.g. process scans and such.
	 */
	if (lp->lwp_lock > 0) {
		tsleep(lp, 0, "lwpwait1", 1);
		return(0);
	}
	if (td->td_refs) {
		tsleep(td, 0, "lwpwait2", 1);
		return(0);
	}

	/*
	 * Now that we have the thread destruction interlock these flags
	 * really should already be cleaned up, keep a check for safety.
	 *
	 * We can't rip its stack out from under it until TDF_EXITING is
	 * set and both TDF_RUNNING and TDF_PREEMPT_LOCK are clear.
	 * TDF_PREEMPT_LOCK must be checked because TDF_RUNNING
	 * will be cleared temporarily if a thread gets preempted.
	 */
	while ((td->td_flags & (TDF_RUNNING |
				TDF_PREEMPT_LOCK |
				TDF_EXITING)) != TDF_EXITING) {
		tsleep(lp, 0, "lwpwait3", 1);
		return (0);
	}

	KASSERT((td->td_flags & (TDF_RUNQ|TDF_TSLEEPQ)) == 0,
		("lwp_wait: td %p (%s) still on run or sleep queue",
		 td, td->td_comm));
	return (1);
}
/*
 * Release the resources associated with a lwp.
 * The lwp must be completely dead.
 */
void
lwp_dispose(struct lwp *lp)
{
	struct thread *td = lp->lwp_thread;

	KKASSERT(lwkt_preempted_proc() != lp);
	KKASSERT(td->td_refs == 0);
	KKASSERT((td->td_flags & (TDF_RUNNING |
				  TDF_PREEMPT_LOCK |
				  TDF_EXITING)) == TDF_EXITING);

	PRELE(lp->lwp_proc);
	lp->lwp_proc = NULL;
	td->td_proc = NULL;
	td->td_lwp = NULL;
	lp->lwp_thread = NULL;
	lwkt_free_thread(td);

	kfree(lp, M_LWP);
}
int
sys_wait4(struct wait_args *uap)
{
	struct rusage rusage;
	int error, status;

	error = kern_wait(uap->pid, (uap->status ? &status : NULL),
			  uap->options, (uap->rusage ? &rusage : NULL),
			  &uap->sysmsg_result);

	if (error == 0 && uap->status)
		error = copyout(&status, uap->status, sizeof(*uap->status));
	if (error == 0 && uap->rusage)
		error = copyout(&rusage, uap->rusage, sizeof(*uap->rusage));
	return (error);
}
/*
 * wait_args(int pid, int *status, int options, struct rusage *rusage)
 */
int
kern_wait(pid_t pid, int *status, int options, struct rusage *rusage, int *res)
{
	struct thread *td = curthread;
	struct lwp *lp;
	struct proc *q = td->td_proc;
	struct proc *p, *t;
	struct pargs *pa;
	struct sigacts *ps;
	int nfound, error;

	if (pid == 0)
		pid = -q->p_pgid;
	if (options &~ (WUNTRACED|WNOHANG|WCONTINUED|WLINUXCLONE))
		return (EINVAL);

	lwkt_gettoken(&q->p_token);
loop:
	/*
	 * All sorts of things can change due to blocking so we have to loop
	 * all the way back up here.
	 *
	 * The problem is that if a process group is stopped and the parent
	 * is doing a wait*(..., WUNTRACED, ...), it will see the STOP
	 * of the child and then stop itself when it tries to return from the
	 * system call.  When the process group is resumed the parent will
	 * then get the STOP status even though the child has now resumed
	 * (a followup wait*() will get the CONT status).
	 *
	 * Previously the CONT would overwrite the STOP because the tstop
	 * was handled within tsleep(), and the parent would only see
	 * the CONT when both are stopped and continued together.  This little
	 * two-line hack restores this effect.
	 */
	while (q->p_stat == SSTOP)
		tstop();

	nfound = 0;
	/*
	 * Loop on children.
	 *
	 * NOTE: We don't want to break q's p_token in the loop for the
	 *	 case where no children are found or we risk breaking the
	 *	 interlock between child and parent.
	 */
	LIST_FOREACH(p, &q->p_children, p_sibling) {
		if (pid != WAIT_ANY &&
		    p->p_pid != pid && p->p_pgid != -pid) {
			continue;
		}

		/*
		 * This special case handles a kthread spawned by linux_clone
		 * (see linux_misc.c).  The linux_wait4 and linux_waitpid
		 * functions need to be able to distinguish between waiting
		 * on a process and waiting on a thread.  It is a thread if
		 * p_sigparent is not SIGCHLD, and the WLINUXCLONE option
		 * signifies we want to wait for threads and not processes.
		 */
		if ((p->p_sigparent != SIGCHLD) ^
		    ((options & WLINUXCLONE) != 0)) {
			continue;
		}

		nfound++;
		if (p->p_stat == SZOMB) {
			/*
			 * We may go into SZOMB with threads still present.
			 * We must wait for them to exit before we can reap
			 * the master thread, otherwise we may race reaping
			 * non-master threads.
			 *
			 * Only this routine can remove a process from
			 * the zombie list and destroy it, use PACQUIREZOMB()
			 * to serialize us and loop if it blocks (interlocked
			 * by the parent's q->p_token).
			 *
			 * WARNING! (p) can be invalid when PHOLDZOMB(p)
			 *	    returns non-zero.  Be sure not to
			 *	    use (p) in that case.
			 */
			if (PHOLDZOMB(p))
				goto loop;
			lwkt_gettoken(&p->p_token);
			if (p->p_pptr != q) {
				lwkt_reltoken(&p->p_token);
				PRELE(p);
				goto loop;
			}
			while (p->p_nthreads > 0) {
				tsleep(&p->p_nthreads, 0, "lwpzomb", hz);
			}

			/*
			 * Reap any LWPs left in p->p_lwps.  This is usually
			 * just the last LWP.  This must be done before
			 * we loop on p_lock since the lwps hold a ref on
			 * it as a vmspace interlock.
			 *
			 * Once that is accomplished p_nthreads had better
			 * be zero.
			 */
			while ((lp = RB_ROOT(&p->p_lwp_tree)) != NULL) {
				lwp_rb_tree_RB_REMOVE(&p->p_lwp_tree, lp);
				reaplwp(lp);
			}
			KKASSERT(p->p_nthreads == 0);
			/*
			 * Don't do anything really bad until all references
			 * to the process go away.  This may include other
			 * LWPs which are still in the process of being
			 * reaped.  We can't just pull the rug out from under
			 * them because they may still be using the VM space.
			 *
			 * Certain kernel facilities such as /proc will also
			 * put a hold on the process for short periods of
			 * time.
			 */
			PSTALL(p, "reap3", 0);

			/* Take care of our return values. */
			*res = p->p_pid;

			if (status)
				*status = p->p_xstat;
			if (rusage)
				*rusage = p->p_ru;

			/*
			 * If we got the child via a ptrace 'attach',
			 * we need to give it back to the old parent.
			 */
			if (p->p_oppid && (t = pfind(p->p_oppid)) != NULL) {
				p->p_oppid = 0;
				proc_reparent(p, t);
				ksignal(t, SIGCHLD);
				wakeup((caddr_t)t);
				error = 0;
				PRELE(t);
				lwkt_reltoken(&p->p_token);
				PRELE(p);
				goto done;
			}

			/*
			 * Unlink the proc from its process group so that
			 * the following operations won't lead to an
			 * inconsistent state for processes running down
			 * the zombie list.
			 */
			proc_remove_zombie(p);
			lwkt_reltoken(&p->p_token);
			leavepgrp(p);

			ruadd(&q->p_cru, &p->p_ru);

			/*
			 * Decrement the count of procs running with this uid.
			 */
			chgproccnt(p->p_ucred->cr_ruidinfo, -1, 0);
			/*
			 * Free up credentials.
			 */
			crfree(p->p_ucred);
			p->p_ucred = NULL;

			/*
			 * Remove unused arguments
			 */
			pa = p->p_args;
			p->p_args = NULL;
			if (pa && refcount_release(&pa->ar_ref)) {
				kfree(pa, M_PARGS);
				pa = NULL;
			}

			ps = p->p_sigacts;
			p->p_sigacts = NULL;
			if (ps && refcount_release(&ps->ps_refcnt)) {
				kfree(ps, M_SUBPROC);
				ps = NULL;
			}

			/*
			 * Our exitingcount was incremented when the process
			 * became a zombie, now that the process has been
			 * removed from (almost) all lists we should be able
			 * to safely destroy its vmspace.  Wait for any current
			 * holders to go away (so the vmspace remains stable),
			 * then scrap it.
			 */
			PSTALL(p, "reap4", 0);
			vmspace_exitfree(p);
			PSTALL(p, "reap5", 0);

			/*
			 * NOTE: We have to officially release ZOMB in order
			 *	 to ensure that a racing thread in kern_wait()
			 *	 which blocked on ZOMB is woken up.
			 */
			PRELEZOMB(p);
			kfree(p, M_PROC);
			atomic_add_int(&nprocs, -1);
			error = 0;
			goto done;
		}
		if (p->p_stat == SSTOP && (p->p_flags & P_WAITED) == 0 &&
		    ((p->p_flags & P_TRACED) || (options & WUNTRACED))) {
			PHOLD(p);
			lwkt_gettoken(&p->p_token);
			if (p->p_pptr != q) {
				lwkt_reltoken(&p->p_token);
				PRELE(p);
				goto loop;
			}
			if (p->p_stat != SSTOP ||
			    (p->p_flags & P_WAITED) != 0 ||
			    ((p->p_flags & P_TRACED) == 0 &&
			     (options & WUNTRACED) == 0)) {
				lwkt_reltoken(&p->p_token);
				PRELE(p);
				goto loop;
			}

			p->p_flags |= P_WAITED;

			*res = p->p_pid;
			if (status)
				*status = W_STOPCODE(p->p_xstat);
			/* Zero rusage so we get something consistent. */
			if (rusage)
				bzero(rusage, sizeof(*rusage));
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			error = 0;
			goto done;
		}
		if ((options & WCONTINUED) && (p->p_flags & P_CONTINUED)) {
			PHOLD(p);
			lwkt_gettoken(&p->p_token);
			if (p->p_pptr != q) {
				lwkt_reltoken(&p->p_token);
				PRELE(p);
				goto loop;
			}
			if ((p->p_flags & P_CONTINUED) == 0) {
				lwkt_reltoken(&p->p_token);
				PRELE(p);
				goto loop;
			}

			*res = p->p_pid;
			p->p_flags &= ~P_CONTINUED;

			if (status)
				*status = SIGCONT;
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			error = 0;
			goto done;
		}
	}
	if (nfound == 0) {
		error = ECHILD;
		goto done;
	}
	if (options & WNOHANG) {
		*res = 0;
		error = 0;
		goto done;
	}

	/*
	 * Wait for signal - interlocked using q->p_token.
	 */
	error = tsleep(q, PCATCH, "wait", 0);
	if (error == 0)
		goto loop;
done:
	lwkt_reltoken(&q->p_token);
	return (error);
}
/*
 * Change child's parent process to parent.
 *
 * p_children/p_sibling requires the parent's token, and
 * changing pptr requires the child's token, so we have to
 * get three tokens to do this operation.  We also need to
 * hold pointers that might get ripped out from under us to
 * preserve structural integrity.
 *
 * It is possible to race another reparent or disconnect or other
 * similar operation.  We must retry when this situation occurs.
 * Once we successfully reparent the process we no longer care
 * about any races.
 */
void
proc_reparent(struct proc *child, struct proc *parent)
{
	struct proc *opp;

	PHOLD(parent);
	while ((opp = child->p_pptr) != parent) {
		PHOLD(opp);
		lwkt_gettoken(&opp->p_token);
		lwkt_gettoken(&child->p_token);
		lwkt_gettoken(&parent->p_token);
		if (child->p_pptr != opp) {
			lwkt_reltoken(&parent->p_token);
			lwkt_reltoken(&child->p_token);
			lwkt_reltoken(&opp->p_token);
			PRELE(opp);
			continue;
		}
		LIST_REMOVE(child, p_sibling);
		LIST_INSERT_HEAD(&parent->p_children, child, p_sibling);
		child->p_pptr = parent;
		lwkt_reltoken(&parent->p_token);
		lwkt_reltoken(&child->p_token);
		lwkt_reltoken(&opp->p_token);
		if (LIST_EMPTY(&opp->p_children))
			wakeup(opp);
		PRELE(opp);
		break;
	}
	PRELE(parent);
}
/*
 * The next two functions are to handle adding/deleting items on the
 * exit callout list.
 *
 * at_exit():
 * Take the arguments given and put them onto the exit callout list,
 * however first make sure that it's not already there.
 * Returns 0 on success.
 */
int
at_exit(exitlist_fn function)
{
	struct exitlist *ep;

#ifdef INVARIANTS
	/* Be noisy if the programmer has lost track of things */
	if (rm_at_exit(function))
		kprintf("WARNING: exit callout entry (%p) already present\n",
			function);
#endif
	ep = kmalloc(sizeof(*ep), M_ATEXIT, M_NOWAIT);
	if (ep == NULL)
		return (ENOMEM);
	ep->function = function;
	TAILQ_INSERT_TAIL(&exit_list, ep, next);
	return (0);
}
/*
 * Scan the exit callout list for the given item and remove it.
 * Returns the number of items removed (0 or 1).
 */
int
rm_at_exit(exitlist_fn function)
{
	struct exitlist *ep;

	TAILQ_FOREACH(ep, &exit_list, next) {
		if (ep->function == function) {
			TAILQ_REMOVE(&exit_list, ep, next);
			kfree(ep, M_ATEXIT);
			return (1);
		}
	}
	return (0);
}
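/*
 * Illustrative sketch (not part of the original file): how a kernel
 * module might use at_exit()/rm_at_exit() above.  The callback argument
 * is assumed from the "(*ep->function)(td)" invocation in exit1(); the
 * "mymod_*" names are hypothetical.
 */
#if 0
static void
mymod_exit_hook(struct thread *td)
{
	/* td is the exiting thread; release per-process module state here */
}

static int
mymod_init(void)
{
	/* at_exit() returns 0 on success, ENOMEM on allocation failure */
	return (at_exit(mymod_exit_hook));
}

static void
mymod_uninit(void)
{
	/* rm_at_exit() returns 1 if the entry was removed, 0 if absent */
	rm_at_exit(mymod_exit_hook);
}
#endif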
/*
 * LWP reaper related code.
 */
static void
reaplwps(void *context, int dummy)
{
	struct lwplist *lwplist = context;
	struct lwp *lp;

	lwkt_gettoken(&deadlwp_token);
	while ((lp = LIST_FIRST(lwplist))) {
		LIST_REMOVE(lp, u.lwp_reap_entry);
		reaplwp(lp);
	}
	lwkt_reltoken(&deadlwp_token);
}

static void
reaplwp(struct lwp *lp)
{
	while (lwp_wait(lp) == 0)
		tsleep(lp, 0, "lwpreap", 1);
	lwp_dispose(lp);
}

static void
deadlwp_init(void)
{
	int cpu;

	for (cpu = 0; cpu < ncpus; cpu++) {
		LIST_INIT(&deadlwp_list[cpu]);
		deadlwp_task[cpu] = kmalloc(sizeof(*deadlwp_task[cpu]),
					    M_DEVBUF, M_WAITOK);
		TASK_INIT(deadlwp_task[cpu], 0, reaplwps, &deadlwp_list[cpu]);
	}
}

SYSINIT(deadlwpinit, SI_SUB_CONFIGURE, SI_ORDER_ANY, deadlwp_init, NULL);