/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)kern_exit.c	8.7 (Berkeley) 2/12/94
 * $FreeBSD: src/sys/kern/kern_exit.c,v 1.92.2.11 2003/01/13 22:51:16 dillon Exp $
 * $DragonFly: src/sys/kern/kern_exit.c,v 1.76 2007/02/24 14:25:06 corecode Exp $
 */
#include "opt_compat.h"
#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/ktrace.h>
#include <sys/pioctl.h>
#include <sys/vnode.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/taskqueue.h>
#include <sys/ptrace.h>
#include <sys/acct.h>		/* for acct_process() function prototype */
#include <sys/filedesc.h>
#include <sys/kern_syscall.h>
#include <sys/upcall.h>

#include <vm/vm_param.h>
#include <vm/vm_map.h>
#include <vm/vm_zone.h>
#include <vm/vm_extern.h>

#include <sys/thread2.h>
static MALLOC_DEFINE(M_ATEXIT, "atexit", "atexit callback");
static MALLOC_DEFINE(M_ZOMBIE, "zombie", "zombie proc status");
/*
 * callout list for things to do at exit time
 */
struct exitlist {
	exitlist_fn function;
	TAILQ_ENTRY(exitlist) next;
};

TAILQ_HEAD(exit_list_head, exitlist);
static struct exit_list_head exit_list = TAILQ_HEAD_INITIALIZER(exit_list);
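
/*
 * Usage sketch (illustrative note, not part of the original source): a
 * kernel subsystem that needs per-process cleanup at exit time registers
 * a callback of type exitlist_fn with at_exit() below and removes it
 * again with rm_at_exit().  The hook name here is hypothetical:
 *
 *	at_exit(my_exit_hook);		-- returns 0 on success
 *	...
 *	rm_at_exit(my_exit_hook);	-- returns number of entries removed
 *
 * exit1() walks exit_list and invokes every registered function for the
 * exiting process.
 */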
struct task *deadlwp_task[MAXCPU];
struct lwplist deadlwp_list[MAXCPU];
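
/*
 * Overview of the dead-lwp reaping path (descriptive note, inferred from
 * the code below): lwp_exit() moves the exiting lwp onto the per-cpu
 * deadlwp_list and enqueues the matching deadlwp_task on that cpu's
 * taskqueue.  The task handler, reaplwps(), polls lwp_wait() until the
 * lwp's thread has completely switched away and then calls lwp_dispose()
 * to free it.  deadlwp_init() sets up the per-cpu lists and tasks at boot
 * via SYSINIT.
 */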
/*
 * SYS_EXIT_ARGS(int rval)
 */
int
sys_exit(struct exit_args *uap)
{
	exit1(W_EXITCODE(uap->rval, 0));
	/* NOTREACHED */
}
static void
killlwps(struct lwp *lp)
{
	struct proc *p = lp->lwp_proc;
	struct lwp *tlp;

	FOREACH_LWP_IN_PROC(tlp, p) {
		if (tlp == lp)
			continue;	/* don't kill the current lwp */
		tlp->lwp_flag |= LWP_WEXIT;
	}

	while (p->p_nthreads > 1) {
		kprintf("killlwps: waiting for %d lwps of pid %d to die\n",
			p->p_nthreads - 1, p->p_pid);
		tsleep(&p->p_nthreads, 0, "killlwps", hz);
	}
}
/*
 * Exit: deallocate address space and other resources, change proc state
 * to zombie, and unlink proc from allproc and parent's lists.  Save exit
 * status and rusage for wait().  Check for child processes and orphan them.
 */
void
exit1(int rv)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct lwp *lp = td->td_lwp;
	struct proc *q, *nq;
	struct vmspace *vm;
	struct vnode *vp, *vtmp;
	struct exitlist *ep;

	if (p->p_pid == 1) {
		kprintf("init died (signal %d, exit %d)\n",
		    WTERMSIG(rv), WEXITSTATUS(rv));
		panic("Going nowhere without my init!");
	}
	/*
	 * Kill all other threads if there are any.
	 *
	 * XXX TGEN Need to protect against multiple lwps of the same proc
	 *	entering this function?
	 */
	if (p->p_nthreads > 1)
		killlwps(lp);

	caps_exit(lp->lwp_thread);
	/* are we a task leader? */
	if (p == p->p_leader) {
		struct kill_args killArgs;

		killArgs.signum = SIGKILL;
		q = p->p_peers;
		while (q) {
			killArgs.pid = q->p_pid;
			/*
			 * The interface for kill is better
			 * than the internal signal
			 */
			q = q->p_peers;
		}
		while (p->p_peers)
			tsleep((caddr_t)p, 0, "exit1", 0);
	}
	STOPEVENT(p, S_EXIT, rv);
	wakeup(&p->p_stype);	/* Wakeup anyone in procfs' PIOCWAIT */
	/*
	 * Check if any loadable modules need anything done at process exit.
	 * e.g. SYSV IPC stuff
	 * XXX what if one of these generates an error?
	 */
	TAILQ_FOREACH(ep, &exit_list, next)
		(*ep->function)(td);

	if (p->p_flag & P_PROFIL)
		stopprofclock(p);
	/*
	 * If parent is waiting for us to exit or exec,
	 * P_PPWAIT is set; we will wakeup the parent below.
	 */
	p->p_flag &= ~(P_TRACED | P_PPWAIT);
	p->p_flag |= P_WEXIT;
	SIGEMPTYSET(p->p_siglist);
	SIGEMPTYSET(lp->lwp_siglist);
	if (timevalisset(&p->p_realtimer.it_value))
		callout_stop(&p->p_ithandle);

	/*
	 * Reset any sigio structures pointing to us as a result of
	 * F_SETOWN with our pid.
	 */
	funsetownlst(&p->p_sigiolst);
	/*
	 * Close open files and release open-file table.
	 */
	fdfree(p);

	if (p->p_leader->p_peers) {
		q = p->p_leader;
		while (q->p_peers != p)
			q = q->p_peers;
		q->p_peers = p->p_peers;
		wakeup((caddr_t)p->p_leader);
	}

	/*
	 * XXX Shutdown SYSV semaphores
	 */
	semexit(p);

	KKASSERT(p->p_numposixlocks == 0);
	/* The next two chunks should probably be moved to vmspace_exit. */
	vm = p->p_vmspace;

	/*
	 * Release upcalls associated with this process
	 */

	/* clean up data related to virtual kernel operation */

	/*
	 * Release user portion of address space.
	 * This releases references to vnodes,
	 * which could cause I/O if the file has been unlinked.
	 * Need to do this early enough that we can still sleep.
	 * Can't free the entire vmspace as the kernel stack
	 * may be mapped within that space also.
	 *
	 * Processes sharing the same vmspace may exit in one order, and
	 * get cleaned up by vmspace_exit() in a different order.  The
	 * last exiting process to reach this point releases as much of
	 * the environment as it can, and the last process cleaned up
	 * by vmspace_exit() (which decrements exitingcnt) cleans up the
	 * remainder.
	 */
	++vm->vm_exitingcnt;
	if (--vm->vm_refcnt == 0) {
		pmap_remove_pages(vmspace_pmap(vm), VM_MIN_USER_ADDRESS,
		    VM_MAX_USER_ADDRESS);
		vm_map_remove(&vm->vm_map, VM_MIN_USER_ADDRESS,
		    VM_MAX_USER_ADDRESS);
	}
	if (SESS_LEADER(p)) {
		struct session *sp = p->p_session;

		/*
		 * We are the controlling process.  Signal the
		 * foreground process group, drain the controlling
		 * terminal, and revoke access to the controlling
		 * terminal.
		 *
		 * NOTE: while waiting for the process group to exit
		 * it is possible that one of the processes in the
		 * group will revoke the tty, so we have to recheck.
		 */
		if (sp->s_ttyp && (sp->s_ttyp->t_session == sp)) {
			if (sp->s_ttyp->t_pgrp)
				pgsignal(sp->s_ttyp->t_pgrp, SIGHUP, 1);
			(void) ttywait(sp->s_ttyp);
			/*
			 * The tty could have been revoked
			 * if we blocked.
			 */
			if ((vp = sp->s_ttyvp) != NULL) {
				ttyclosesession(sp, 0);
				VOP_REVOKE(vp, REVOKEALL);
				vrele(vp);	/* s_ttyvp ref */
			}
		}
		/*
		 * Release the tty.  If someone has it open via
		 * /dev/tty then close it (since they no longer can
		 * once we've NULL'd it out).
		 */
		ttyclosesession(sp, 1);
		/*
		 * s_ttyp is not zero'd; we use this to indicate
		 * that the session once had a controlling terminal.
		 * (for logging and informational purposes)
		 */
	}
	fixjobc(p, p->p_pgrp, 0);
	(void)acct_process(p);

	ktrdestroy(&p->p_tracenode);

	/*
	 * Release reference to text vnode
	 */
	if ((vtmp = p->p_textvp) != NULL) {
		p->p_textvp = NULL;
		vrele(vtmp);
	}
	/*
	 * Move the process to the zombie list.  This will block
	 * until the process p_lock count reaches 0.  The process will
	 * not be reaped until TDF_EXITING is set by cpu_thread_exit(),
	 * which is called from cpu_proc_exit().
	 */
	proc_move_allproc_zombie(p);

	q = LIST_FIRST(&p->p_children);
	if (q)		/* only need this if any child is S_ZOMB */
		wakeup((caddr_t) initproc);
	for (; q != 0; q = nq) {
		nq = LIST_NEXT(q, p_sibling);
		LIST_REMOVE(q, p_sibling);
		LIST_INSERT_HEAD(&initproc->p_children, q, p_sibling);
		q->p_pptr = initproc;
		q->p_sigparent = SIGCHLD;
		/*
		 * Traced processes are killed
		 * since their existence means someone is screwing up.
		 */
		if (q->p_flag & P_TRACED) {
			q->p_flag &= ~P_TRACED;
			ksignal(q, SIGKILL);
		}
	}
	/*
	 * Save exit status and final rusage info, adding in child rusage
	 * info and self times.
	 */
	p->p_xstat = rv;
	calcru_proc(p, &p->p_ru);
	ruadd(&p->p_ru, &p->p_cru);

	/*
	 * notify interested parties of our demise.
	 */
	KNOTE(&p->p_klist, NOTE_EXIT);
	/*
	 * Notify parent that we're gone.  If parent has the PS_NOCLDWAIT
	 * flag set, notify process 1 instead (and hope it will handle
	 * this situation).
	 */
	if (p->p_pptr->p_procsig->ps_flag & PS_NOCLDWAIT) {
		struct proc *pp = p->p_pptr;
		proc_reparent(p, initproc);
		/*
		 * If this was the last child of our parent, notify
		 * parent, so in case he was wait(2)ing, he will
		 * continue.
		 */
		if (LIST_EMPTY(&pp->p_children))
			wakeup((caddr_t)pp);
	}

	if (p->p_sigparent && p->p_pptr != initproc) {
		ksignal(p->p_pptr, p->p_sigparent);
	} else {
		ksignal(p->p_pptr, SIGCHLD);
	}

	wakeup((caddr_t)p->p_pptr);
	/*
	 * cpu_exit is responsible for clearing curproc, since
	 * it is heavily integrated with the thread/switching sequence.
	 *
	 * Other substructures are freed from wait().
	 */
	plimit_free(&p->p_limit);

	/*
	 * Release the current user process designation on the process so
	 * the userland scheduler can work in someone else.
	 */
	p->p_usched->release_curproc(lp);

	/*
	 * Finally, call machine-dependent code to release the remaining
	 * resources including address space, the kernel stack and pcb.
	 * The address space is released by "vmspace_free(p->p_vmspace)";
	 * This is machine-dependent, as we may have to change stacks
	 * or ensure that the current one isn't reallocated before we
	 * finish.  cpu_exit will end with a call to cpu_switch(), finishing
	 * our execution (pun intended).
	 */
	struct lwp *lp = curthread->td_lwp;
	struct proc *p = lp->lwp_proc;

	/*
	 * Nobody actually wakes us when the lock
	 * count reaches zero, so just wait one tick.
	 */
	while (lp->lwp_lock > 0)
		tsleep(lp, 0, "lwpexit", 1);

	/* Hand down resource usage to our proc */
	ruadd(&p->p_ru, &lp->lwp_ru);

	--p->p_nthreads;
	LIST_REMOVE(lp, lwp_list);
	wakeup(&p->p_nthreads);
	LIST_INSERT_HEAD(&deadlwp_list[mycpuid], lp, lwp_list);
	taskqueue_enqueue(taskqueue_thread[mycpuid], deadlwp_task[mycpuid]);
/*
 * Wait until a lwp is completely dead.
 *
 * If the thread is still executing, which can't be waited upon,
 * return failure.  The caller is responsible of waiting a little
 * bit and checking again.
 *
 * Suggested use:
 *	while (!lwp_wait(lp))
 *		tsleep(lp, 0, "lwpwait", 1);
 */
static int
lwp_wait(struct lwp *lp)
{
	struct thread *td = lp->lwp_thread;

	KKASSERT(lwkt_preempted_proc() != lp);

	while (lp->lwp_lock > 0)
		tsleep(lp, 0, "lwpwait1", 1);

	/*
	 * The lwp's thread may still be in the middle
	 * of switching away, we can't rip its stack out from
	 * under it until TDF_EXITING is set and both
	 * TDF_RUNNING and TDF_PREEMPT_LOCK are clear.
	 * TDF_PREEMPT_LOCK must be checked because TDF_RUNNING
	 * will be cleared temporarily if a thread gets
	 * preempted.
	 *
	 * YYY no wakeup occurs, so we simply return failure
	 * and let the caller deal with sleeping and calling
	 * us again.
	 */
	if ((td->td_flags & (TDF_RUNNING|TDF_PREEMPT_LOCK|TDF_EXITING)) !=
	    TDF_EXITING)
		return (0);

	return (1);
}
/*
 * Release the resources associated with a lwp.
 * The lwp must be completely dead.
 */
static void
lwp_dispose(struct lwp *lp)
{
	struct thread *td = lp->lwp_thread;

	KKASSERT(lwkt_preempted_proc() != lp);
	KKASSERT(td->td_refs == 0);
	KKASSERT((td->td_flags & (TDF_RUNNING|TDF_PREEMPT_LOCK|TDF_EXITING)) ==
		 TDF_EXITING);

	lp->lwp_thread = NULL;
	lwkt_free_thread(td);
int
sys_wait4(struct wait_args *uap)
{
	struct rusage rusage;
	int error, status;

	error = kern_wait(uap->pid, uap->status ? &status : NULL,
	    uap->options, uap->rusage ? &rusage : NULL, &uap->sysmsg_fds[0]);

	if (error == 0 && uap->status)
		error = copyout(&status, uap->status, sizeof(*uap->status));
	if (error == 0 && uap->rusage)
		error = copyout(&rusage, uap->rusage, sizeof(*uap->rusage));
	return (error);
}
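
/*
 * Note on the status encoding used by these syscalls (descriptive note,
 * not from the original source): exit1() packs the exit value with
 * W_EXITCODE(rval, 0), and kern_wait() below hands back either p_xstat
 * directly or W_STOPCODE(p_xstat) for stopped children; userland decodes
 * the word with WEXITSTATUS(), WTERMSIG() and WSTOPSIG().
 */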
/*
 * wait_args(int pid, int *status, int options, struct rusage *rusage)
 */
int
kern_wait(pid_t pid, int *status, int options, struct rusage *rusage, int *res)
{
	struct thread *td = curthread;
	struct proc *q = td->td_proc;
	struct proc *p, *t;
	int error;

	if (options &~ (WUNTRACED|WNOHANG|WLINUXCLONE))
		return (EINVAL);

	/*
	 * Hack for backwards compatibility with badly written user code.
	 * Or perhaps we have to do this anyway, it is unclear. XXX
	 *
	 * The problem is that if a process group is stopped and the parent
	 * is doing a wait*(..., WUNTRACED, ...), it will see the STOP
	 * of the child and then stop itself when it tries to return from the
	 * system call.  When the process group is resumed the parent will
	 * then get the STOP status even though the child has now resumed
	 * (a followup wait*() will get the CONT status).
	 *
	 * Previously the CONT would overwrite the STOP because the tstop
	 * was handled within tsleep(), and the parent would only see
	 * the CONT when both are stopped and continued together.  This little
	 * two-line hack restores this effect.
	 */
	while (q->p_stat == SSTOP)
		tstop();
	LIST_FOREACH(p, &q->p_children, p_sibling) {
		if (pid != WAIT_ANY &&
		    p->p_pid != pid && p->p_pgid != -pid)
			continue;

		/*
		 * This special case handles a kthread spawned by linux_clone
		 * (see linux_misc.c).  The linux_wait4 and linux_waitpid
		 * functions need to be able to distinguish between waiting
		 * on a process and waiting on a thread.  It is a thread if
		 * p_sigparent is not SIGCHLD, and the WLINUXCLONE option
		 * signifies we want to wait for threads and not processes.
		 */
		if ((p->p_sigparent != SIGCHLD) ^
		    ((options & WLINUXCLONE) != 0)) {
			continue;
		}
		if (p->p_stat == SZOMB) {
			/*
			 * Other kernel threads may be in the middle of
			 * accessing the proc.  For example, kern/kern_proc.c
			 * could be blocked writing proc data to a sysctl.
			 * At the moment, if this occurs, we are not woken
			 * up and rely on a one-second retry.
			 */
			while (p->p_lock)
				tsleep(p, 0, "reap3", hz);

			/* scheduling hook for heuristic */
			/* XXX no lwp available, we need a different heuristic */
			p->p_usched->heuristic_exiting(td->td_lwp, deadlp);
			/* Take care of our return values. */
			*res = p->p_pid;
			if (status)
				*status = p->p_xstat;
			/*
			 * If we got the child via a ptrace 'attach',
			 * we need to give it back to the old parent.
			 */
			if (p->p_oppid && (t = pfind(p->p_oppid))) {
				p->p_oppid = 0;
				proc_reparent(p, t);
				ksignal(t, SIGCHLD);
				wakeup((caddr_t)t);
				return (0);
			}
			ruadd(&q->p_cru, &p->p_ru);

			/*
			 * Decrement the count of procs running with this uid.
			 */
			chgproccnt(p->p_ucred->cr_ruidinfo, -1, 0);

			/*
			 * Free up credentials.
			 */
			crfree(p->p_ucred);
			p->p_ucred = NULL;

			/*
			 * Remove unused arguments
			 */
			if (p->p_args && --p->p_args->ar_ref == 0)
				FREE(p->p_args, M_PARGS);

			/*
			 * Finally finished with old proc entry.
			 * Unlink it from its process group and free it.
			 */
			proc_remove_zombie(p);
			leavepgrp(p);

			if (--p->p_procsig->ps_refcnt == 0) {
				if (p->p_sigacts != &p->p_addr->u_sigacts)
					FREE(p->p_sigacts, M_SUBPROC);
				FREE(p->p_procsig, M_SUBPROC);
				p->p_procsig = NULL;
			}

			return (0);
		}
		if (p->p_stat == SSTOP && (p->p_flag & P_WAITED) == 0 &&
		    (p->p_flag & P_TRACED || options & WUNTRACED)) {
			p->p_flag |= P_WAITED;
			*res = p->p_pid;
			if (status)
				*status = W_STOPCODE(p->p_xstat);
			/* Zero rusage so we get something consistent. */
			if (rusage)
				bzero(rusage, sizeof(*rusage));
			return (0);
		}
	}
	if (options & WNOHANG) {
		*res = 0;
		return (0);
	}

	error = tsleep((caddr_t)q, PCATCH, "wait", 0);
/*
 * make process 'parent' the new parent of process 'child'.
 */
void
proc_reparent(struct proc *child, struct proc *parent)
{
	if (child->p_pptr == parent)
		return;

	LIST_REMOVE(child, p_sibling);
	LIST_INSERT_HEAD(&parent->p_children, child, p_sibling);
	child->p_pptr = parent;
}
/*
 * The next two functions are to handle adding/deleting items on the
 * exit callout list.
 *
 * Take the arguments given and put them onto the exit callout list,
 * However first make sure that it's not already there.
 * returns 0 on success.
 */
int
at_exit(exitlist_fn function)
{
	struct exitlist *ep;

	/* Be noisy if the programmer has lost track of things */
	if (rm_at_exit(function))
		kprintf("WARNING: exit callout entry (%p) already present\n",
		    function);

	ep = kmalloc(sizeof(*ep), M_ATEXIT, M_NOWAIT);
	if (ep == NULL)
		return (ENOMEM);

	ep->function = function;
	TAILQ_INSERT_TAIL(&exit_list, ep, next);
	return (0);
}
/*
 * Scan the exit callout list for the given item and remove it.
 * Returns the number of items removed (0 or 1)
 */
int
rm_at_exit(exitlist_fn function)
{
	struct exitlist *ep;

	TAILQ_FOREACH(ep, &exit_list, next) {
		if (ep->function == function) {
			TAILQ_REMOVE(&exit_list, ep, next);
			kfree(ep, M_ATEXIT);
			return (1);
		}
	}
	return (0);
}
	struct proc *p = curproc;
	struct sigacts *pss;

	if (p->p_procsig->ps_refcnt == 1 &&
	    p->p_sigacts != &p->p_addr->u_sigacts) {
		pss = p->p_sigacts;
		p->p_addr->u_sigacts = *pss;
		p->p_sigacts = &p->p_addr->u_sigacts;
		FREE(pss, M_SUBPROC);
	}
/*
 * LWP reaper related code.
 */
static void
reaplwps(void *context, int dummy)
{
	struct lwplist *lwplist = context;
	struct lwp *lp;

	while ((lp = LIST_FIRST(lwplist))) {
		while (!lwp_wait(lp))
			tsleep(lp, 0, "lwpreap", 1);
		LIST_REMOVE(lp, lwp_list);
		lwp_dispose(lp);
	}
}
static void
deadlwp_init(void)
{
	int cpu;

	for (cpu = 0; cpu < ncpus; cpu++) {
		LIST_INIT(&deadlwp_list[cpu]);
		deadlwp_task[cpu] = kmalloc(sizeof(*deadlwp_task[cpu]),
					    M_DEVBUF, M_WAITOK);
		TASK_INIT(deadlwp_task[cpu], 0, reaplwps, &deadlwp_list[cpu]);
	}
}

SYSINIT(deadlwpinit, SI_SUB_CONFIGURE, SI_ORDER_ANY, deadlwp_init, NULL);