/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)kern_exit.c	8.7 (Berkeley) 2/12/94
 * $FreeBSD: src/sys/kern/kern_exit.c,v 1.92.2.11 2003/01/13 22:51:16 dillon Exp $
 */

#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysmsg.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/ktrace.h>
#include <sys/pioctl.h>
#include <sys/tty.h>
#include <sys/wait.h>
#include <sys/vnode.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/taskqueue.h>
#include <sys/ptrace.h>
#include <sys/acct.h>		/* for acct_process() function prototype */
#include <sys/filedesc.h>
#include <sys/shm.h>
#include <sys/sem.h>
#include <sys/jail.h>
#include <sys/kern_syscall.h>
#include <sys/unistd.h>
#include <sys/eventhandler.h>
#include <sys/dsched.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>

#include <sys/refcount.h>
#include <sys/spinlock2.h>

static void reaplwps(void *context, int dummy);
static void reaplwp(struct lwp *lp);
static void killlwps(struct lwp *lp);

static MALLOC_DEFINE(M_ATEXIT, "atexit", "atexit callback");

/*
 * callout list for things to do at exit time
 */
struct exitlist {
	exitlist_fn function;
	TAILQ_ENTRY(exitlist) next;
};

TAILQ_HEAD(exit_list_head, exitlist);
static struct exit_list_head exit_list = TAILQ_HEAD_INITIALIZER(exit_list);

/*
 * LWP reaper data
 */
static struct task *deadlwp_task[MAXCPU];
static struct lwplist deadlwp_list[MAXCPU];
static struct lwkt_token deadlwp_token[MAXCPU];

void (*linux_task_drop_callback)(thread_t td);
void (*linux_proc_drop_callback)(struct proc *p);

/*
 * exit --
 *	Death of process.
 *
 * SYS_EXIT_ARGS(int rval)
 */
int
sys_exit(struct sysmsg *sysmsg, const struct exit_args *uap)
{
	exit1(W_EXITCODE(uap->rval, 0));
	/* NOTREACHED */
}
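
/*
 * Illustrative note (added commentary, not from the original source):
 * exit1() takes a wait(2)-style status word built with W_EXITCODE().
 * A normal exit and a signal death would be encoded as, for example:
 *
 *	exit1(W_EXITCODE(2, 0));	WIFEXITED, WEXITSTATUS == 2
 *	exit1(W_EXITCODE(0, SIGTERM));	WIFSIGNALED, WTERMSIG == SIGTERM
 *
 * The encoded value is saved in p_xstat and decomposed later by the
 * wait*() path in kern_wait().
 */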

/*
 * Extended exit --
 *	Death of a lwp or process with optional bells and whistles.
 */
int
sys_extexit(struct sysmsg *sysmsg, const struct extexit_args *uap)
{
	struct proc *p = curproc;
	int action, who;
	int error;

	action = EXTEXIT_ACTION(uap->how);
	who = EXTEXIT_WHO(uap->how);

	/* Check parameters before we might perform some action */
	switch (who) {
	case EXTEXIT_PROC:
	case EXTEXIT_LWP:
		break;
	default:
		return (EINVAL);
	}

	switch (action) {
	case EXTEXIT_SIMPLE:
		break;
	case EXTEXIT_SETINT:
		error = copyout(&uap->status, uap->addr, sizeof(uap->status));
		if (error)
			return (error);
		break;
	default:
		return (EINVAL);
	}

	lwkt_gettoken(&p->p_token);

	switch (who) {
	case EXTEXIT_LWP:
		/*
		 * Be sure only to perform a simple lwp exit if there is at
		 * least one more lwp in the proc, which will call exit1()
		 * later, otherwise the proc will be an UNDEAD and not even a
		 * SZOMB!
		 */
		if (p->p_nthreads > 1) {
			lwp_exit(0, NULL);	/* called w/ p_token held */
			/* NOT REACHED */
		}
		/* else last lwp in proc:  do the real thing */
		/* FALLTHROUGH */
	default:	/* to help gcc */
	case EXTEXIT_PROC:
		lwkt_reltoken(&p->p_token);
		exit1(W_EXITCODE(uap->status, 0));
		/* NOTREACHED */
	}

	/* NOTREACHED */
	lwkt_reltoken(&p->p_token);	/* safety */
}
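
/*
 * Illustrative note (assumed userland usage, not from the original
 * source): the 'how' argument of extexit(2) packs an action and a
 * target scope, recovered above with EXTEXIT_ACTION()/EXTEXIT_WHO().
 * A thread exiting by itself while publishing an integer for a joiner
 * might, under that assumption, look like:
 *
 *	extexit(EXTEXIT_LWP | EXTEXIT_SETINT, value, &joiner_slot);
 *
 * With EXTEXIT_SETINT the status is copied out to 'addr' before any
 * exit action is taken, which is why the copyout happens first above.
 */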

/*
 * Kill all lwps associated with the current process except the
 * current lwp.  Return an error if we race another thread trying to
 * do the same thing and lose the race.
 *
 * If forexec is non-zero the current thread and process flags are
 * cleaned up so they can be reused.
 */
int
killalllwps(int forexec)
{
	struct lwp *lp = curthread->td_lwp;
	struct proc *p = lp->lwp_proc;
	int fakestop;

	/*
	 * Interlock against P_WEXIT.  Only one of the process's threads
	 * is allowed to do the master exit.
	 */
	lwkt_gettoken(&p->p_token);
	if (p->p_flags & P_WEXIT) {
		lwkt_reltoken(&p->p_token);
		return (EALREADY);
	}
	p->p_flags |= P_WEXIT;
	lwkt_gettoken(&lp->lwp_token);

	/*
	 * Set temporary stopped state in case we are racing a coredump.
	 * Otherwise the coredump may hang forever.
	 */
	if (lp->lwp_mpflags & LWP_MP_WSTOP) {
		fakestop = 0;
	} else {
		atomic_set_int(&lp->lwp_mpflags, LWP_MP_WSTOP);
		++p->p_nstopped;
		fakestop = 1;
		wakeup(&p->p_nstopped);
	}

	/*
	 * Interlock with LWP_MP_WEXIT and kill any remaining LWPs
	 */
	atomic_set_int(&lp->lwp_mpflags, LWP_MP_WEXIT);
	if (p->p_nthreads > 1)
		killlwps(lp);

	/*
	 * Undo temporary stopped state
	 */
	if (fakestop && (lp->lwp_mpflags & LWP_MP_WSTOP)) {
		atomic_clear_int(&lp->lwp_mpflags, LWP_MP_WSTOP);
		--p->p_nstopped;
	}

	/*
	 * If doing this for an exec, clean up the remaining thread
	 * (us) for continuing operation after all the other threads
	 * have been killed.
	 */
	if (forexec) {
		atomic_clear_int(&lp->lwp_mpflags, LWP_MP_WEXIT);
		p->p_flags &= ~P_WEXIT;
	}
	lwkt_reltoken(&lp->lwp_token);
	lwkt_reltoken(&p->p_token);

	return(0);
}
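
/*
 * Orientation sketch (added commentary; the exec-side caller lives in
 * the exec code, not in this file):
 *
 *	killalllwps(0);		exit1() path - every lwp dies
 *	killalllwps(1);		exec path - siblings die, caller survives
 *
 * In both cases an EALREADY return means another thread won the race
 * to become the master exit, and the caller simply exits itself via
 * lwp_exit() instead.
 */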

/*
 * Kill all LWPs except the current one.  Do not try to signal
 * LWPs which have exited on their own or have already been
 * signaled.
 */
static void
killlwps(struct lwp *lp)
{
	struct proc *p = lp->lwp_proc;
	struct lwp *tlp;

	/*
	 * Kill the remaining LWPs.  We must send the signal before setting
	 * LWP_MP_WEXIT.  The setting of WEXIT is optional but helps reduce
	 * races.  tlp must be held across the call as it might block and
	 * allow the target lwp to rip itself out from under our loop.
	 */
	FOREACH_LWP_IN_PROC(tlp, p) {
		LWPHOLD(tlp);
		lwkt_gettoken(&tlp->lwp_token);
		if ((tlp->lwp_mpflags & LWP_MP_WEXIT) == 0) {
			atomic_set_int(&tlp->lwp_mpflags, LWP_MP_WEXIT);
			lwpsignal(p, tlp, SIGKILL);
		}
		lwkt_reltoken(&tlp->lwp_token);
		LWPRELE(tlp);
	}

	/*
	 * Wait for everything to clear out.  Also make sure any tstop()s
	 * are signalled (we are holding p_token for the interlock).
	 */
	wakeup(p);
	while (p->p_nthreads > 1)
		tsleep(&p->p_nthreads, 0, "killlwps", 0);
}

/*
 * Exit: deallocate address space and other resources, change proc state
 * to zombie, and unlink proc from allproc and parent's lists.  Save exit
 * status and rusage for wait().  Check for child processes and orphan them.
 */
void
exit1(int rv)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct lwp *lp = td->td_lwp;
	struct proc *q;
	struct proc *pp;
	struct proc *reproc;
	struct sysreaper *reap;
	struct vmspace *vm;
	struct vnode *vtmp;
	struct exitlist *ep;
	int error;

	lwkt_gettoken(&p->p_token);

	if (p->p_pid == 1) {
		kprintf("init died (signal %d, exit %d)\n",
			WTERMSIG(rv), WEXITSTATUS(rv));
		panic("Going nowhere without my init!");
	}
	varsymset_clean(&p->p_varsymset);
	lockuninit(&p->p_varsymset.vx_lock);

	/*
	 * Kill all lwps associated with the current process, return an
	 * error if we race another thread trying to do the same thing
	 * and lose the race.
	 */
	error = killalllwps(0);
	if (error) {
		lwp_exit(0, NULL);
		/* NOT REACHED */
	}

	/* are we a task leader? */
	if (p == p->p_leader) {
		struct sysmsg sysmsg;

		sysmsg.extargs.kill.signum = SIGKILL;
		q = p->p_peers;
		while (q) {
			sysmsg.extargs.kill.pid = q->p_pid;
			/*
			 * The interface for kill is better
			 * than the internal signal
			 */
			sys_kill(&sysmsg, &sysmsg.extargs.kill);
			q = q->p_peers;
		}
		while (p->p_peers)
			tsleep((caddr_t)p, 0, "exit1", 0);
	}

#ifdef PGINPROF
	vmsizmon();
#endif
	STOPEVENT(p, S_EXIT, rv);
	p->p_flags |= P_POSTEXIT;	/* stop procfs stepping */

	/*
	 * Check if any loadable modules need anything done at process exit.
	 * e.g. SYSV IPC stuff
	 * XXX what if one of these generates an error?
	 */
	p->p_xstat = rv;

	/*
	 * XXX: imho, the eventhandler stuff is much cleaner than this.
	 *	Maybe we should move everything to use eventhandler.
	 */
	TAILQ_FOREACH(ep, &exit_list, next)
		(*ep->function)(td);

	if (p->p_flags & P_PROFIL)
		stopprofclock(p);

	SIGEMPTYSET(p->p_siglist);
	SIGEMPTYSET(lp->lwp_siglist);
	if (timevalisset(&p->p_realtimer.it_value))
		callout_terminate(&p->p_ithandle);

	/*
	 * Reset any sigio structures pointing to us as a result of
	 * F_SETOWN with our pid.
	 */
	funsetownlst(&p->p_sigiolst);

	/*
	 * Close open files and release open-file table.
	 * This may block!
	 */
	fdfree(p, NULL);

	if (p->p_leader->p_peers) {
		q = p->p_leader;
		while (q->p_peers != p)
			q = q->p_peers;
		q->p_peers = p->p_peers;
		wakeup((caddr_t)p->p_leader);
	}

	/*
	 * XXX Shutdown SYSV semaphores
	 */
	semexit(p);

	/* The next two chunks should probably be moved to vmspace_exit. */
	vm = p->p_vmspace;

	/*
	 * Clean up data related to virtual kernel operation.  Clean up
	 * any vkernel context related to the current lwp now so we can
	 * destroy p_vkernel.
	 */
	if (p->p_vkernel) {
		vkernel_lwp_exit(lp);
		vkernel_exit(p);
	}

	/*
	 * Release the user portion of address space.  The exitbump prevents
	 * the vmspace from being completely eradicated (using holdcnt).
	 * This releases references to vnodes, which could cause I/O if the
	 * file has been unlinked.  We need to do this early enough that
	 * we can still sleep.
	 *
	 * We can't free the entire vmspace as the kernel stack may be mapped
	 * within that space also.
	 *
	 * Processes sharing the same vmspace may exit in one order, and
	 * get cleaned up by vmspace_exit() in a different order.  The
	 * last exiting process to reach this point releases as much of
	 * the environment as it can, and the last process cleaned up
	 * by vmspace_exit() (which decrements exitingcnt) cleans up the
	 * remainder.
	 *
	 * NOTE: Releasing p_token around this call is helpful if the
	 *	 vmspace had a huge RSS.  Otherwise some other process
	 *	 trying to do an allproc or other scan (like 'ps') may
	 *	 stall for a long time.
	 */
	lwkt_reltoken(&p->p_token);
	vmspace_relexit(vm);
	lwkt_gettoken(&p->p_token);

	if (SESS_LEADER(p)) {
		struct session *sp = p->p_session;

		if (sp->s_ttyvp) {
			/*
			 * We are the controlling process.  Signal the
			 * foreground process group, drain the controlling
			 * terminal, and revoke access to the controlling
			 * terminal.
			 *
			 * NOTE: While waiting for the process group to exit
			 *	 it is possible that one of the processes in
			 *	 the group will revoke the tty, so the
			 *	 ttyclosesession() function will re-check
			 *	 sp->s_ttyvp.
			 *
			 * NOTE: Force a timeout of one second when draining
			 *	 the controlling terminal.  PCATCH won't work
			 *	 in exit1().
			 */
			if (sp->s_ttyp && (sp->s_ttyp->t_session == sp)) {
				if (sp->s_ttyp->t_pgrp)
					pgsignal(sp->s_ttyp->t_pgrp, SIGHUP, 1);
				sp->s_ttyp->t_timeout = hz;
				ttywait(sp->s_ttyp);
				ttyclosesession(sp, 1); /* also revoke */
			}

			/*
			 * Release the tty.  If someone has it open via
			 * /dev/tty then close it (since they no longer can
			 * once we've NULL'd it out).
			 */
			ttyclosesession(sp, 0);

			/*
			 * s_ttyp is not zero'd; we use this to indicate
			 * that the session once had a controlling terminal.
			 * (for logging and informational purposes)
			 */
		}
		sp->s_leader = NULL;
	}
	fixjobc(p, p->p_pgrp, 0);
	(void)acct_process(p);
#ifdef KTRACE
	/*
	 * release trace file
	 */
	if (p->p_tracenode)
		ktrdestroy(&p->p_tracenode);
	p->p_traceflag = 0;
#endif
	/*
	 * Release reference to text vnode
	 */
	if ((vtmp = p->p_textvp) != NULL) {
		p->p_textvp = NULL;
		vrele(vtmp);
	}

	/* Release namecache handle to text file */
	if (p->p_textnch.ncp)
		cache_drop(&p->p_textnch);

	/*
	 * We have to handle PPWAIT here or proc_move_allproc_zombie()
	 * will block on the PHOLD() the parent is doing.
	 *
	 * We are using the flag as an interlock so an atomic op is
	 * necessary to synchronize with the parent's cpu.
	 */
	if (p->p_flags & P_PPWAIT) {
		if (p->p_pptr && p->p_pptr->p_upmap)
			atomic_add_int(&p->p_pptr->p_upmap->invfork, -1);
		atomic_clear_int(&p->p_flags, P_PPWAIT);
		wakeup(p->p_pptr);
	}

	/*
	 * Move the process to the zombie list.  This will block
	 * until the process p_lock count reaches 0.  The process will
	 * not be reaped until TDF_EXITING is set by cpu_thread_exit(),
	 * which is called from cpu_proc_exit().
	 *
	 * Interlock against waiters using p_waitgen.  We increment
	 * p_waitgen after completing the move of our process to the
	 * zombie list.
	 *
	 * WARNING: pp becomes stale when we block, clear it now as a
	 *	    reminder.
	 */
	proc_move_allproc_zombie(p);
	pp = p->p_pptr;
	atomic_add_long(&pp->p_waitgen, 1);
	pp = NULL;

	/*
	 * release controlled reaper for exit if we own it and return the
	 * remaining reaper (the one for us), which we will drop after we
	 * are done.
	 */
	reap = reaper_exit(p);

	/*
	 * Reparent all of this process's children to the init process or
	 * to the designated reaper.  We must hold the reaper's p_token in
	 * order to safely mess with p_children.
	 *
	 * Issue the p_deathsig signal to children that request it.
	 *
	 * We already hold p->p_token (to remove the children from our list).
	 */
	reproc = NULL;
	q = LIST_FIRST(&p->p_children);
	if (q) {
		reproc = reaper_get(reap);
		lwkt_gettoken(&reproc->p_token);
		while ((q = LIST_FIRST(&p->p_children)) != NULL) {
			PHOLD(q);
			lwkt_gettoken(&q->p_token);
			if (q != LIST_FIRST(&p->p_children)) {
				lwkt_reltoken(&q->p_token);
				PRELE(q);
				continue;
			}
			LIST_REMOVE(q, p_sibling);
			LIST_INSERT_HEAD(&reproc->p_children, q, p_sibling);
			q->p_pptr = reproc;
			q->p_ppid = reproc->p_pid;
			q->p_sigparent = SIGCHLD;

			/*
			 * Traced processes are killed
			 * since their existence means someone is screwing up.
			 */
			if (q->p_flags & P_TRACED) {
				q->p_flags &= ~P_TRACED;
				ksignal(q, SIGKILL);
			}

			/*
			 * Issue p_deathsig to children that request it
			 */
			if (q->p_deathsig)
				ksignal(q, q->p_deathsig);
			lwkt_reltoken(&q->p_token);
			PRELE(q);
		}
		lwkt_reltoken(&reproc->p_token);
		wakeup(reproc);
	}

	/*
	 * Save exit status and final rusage info.  We no longer add
	 * child rusage info into self times; wait4() and kern_wait()
	 * handle it in order to properly support wait6().
	 */
	calcru_proc(p, &p->p_ru);
	/*ruadd(&p->p_ru, &p->p_cru); REMOVED */

	/*
	 * notify interested parties of our demise.
	 */
	KNOTE(&p->p_klist, NOTE_EXIT);

	/*
	 * Notify parent that we're gone.  If parent has the PS_NOCLDWAIT
	 * flag set, or if the handler is set to SIG_IGN, notify the reaper
	 * instead (it will handle this situation).
	 *
	 * NOTE: The reaper can still be the parent process.
	 *
	 * (must reload pp)
	 */
	if (p->p_pptr->p_sigacts->ps_flag & (PS_NOCLDWAIT | PS_CLDSIGIGN)) {
		if (reproc == NULL)
			reproc = reaper_get(reap);
		proc_reparent(p, reproc);
	}
	if (reproc)
		PRELE(reproc);
	if (reap)
		reaper_drop(reap);

	/*
	 * Signal (possibly new) parent.
	 */
	pp = p->p_pptr;
	PHOLD(pp);
	if (p->p_sigparent && pp != initproc) {
		int sig = p->p_sigparent;

		if (sig != SIGUSR1 && sig != SIGCHLD)
			sig = SIGCHLD;
		ksignal(pp, sig);
	} else {
		ksignal(pp, SIGCHLD);
	}
	p->p_flags &= ~P_TRACED;
	PRELE(pp);

	/*
	 * cpu_exit is responsible for clearing curproc, since
	 * it is heavily integrated with the thread/switching sequence.
	 *
	 * Other substructures are freed from wait().
	 */
	if (p->p_limit) {
		struct plimit *rlimit;

		rlimit = p->p_limit;
		p->p_limit = NULL;
		plimit_free(rlimit);
	}

	/*
	 * Finally, call machine-dependent code to release as many of the
	 * lwp's resources as we can and halt execution of this thread.
	 *
	 * pp is a wild pointer now but still the correct wakeup() target.
	 * lwp_exit() only uses it to send the wakeup() signal to the likely
	 * parent.  Any reparenting race that occurs will get a signal
	 * automatically and not be an issue.
	 */
	lwp_exit(1, pp);
}

/*
 * Eventually called by every exiting LWP
 *
 * p->p_token must be held.  mplock may be held and will be released.
 */
void
lwp_exit(int masterexit, void *waddr)
{
	struct thread *td = curthread;
	struct lwp *lp = td->td_lwp;
	struct proc *p = lp->lwp_proc;
	int dowake = 0;

	/*
	 * Release the current user process designation on the process so
	 * the userland scheduler can work in someone else.
	 */
	p->p_usched->release_curproc(lp);

	/*
	 * Destroy the per-thread shared page and remove from any pmaps
	 * it resides in.
	 */
	lwp_userunmap(lp);

	/*
	 * lwp_exit() may be called without setting LWP_MP_WEXIT, so
	 * make sure it is set here.
	 */
	ASSERT_LWKT_TOKEN_HELD(&p->p_token);
	atomic_set_int(&lp->lwp_mpflags, LWP_MP_WEXIT);

	/*
	 * Clean up any virtualization
	 */
	if (lp->lwp_vkernel)
		vkernel_lwp_exit(lp);

	/*
	 * Clean up select/poll support
	 */
	kqueue_terminate(&lp->lwp_kqueue);

	if (td->td_linux_task)
		linux_task_drop_callback(td);
	if (masterexit && p->p_linux_mm)
		linux_proc_drop_callback(p);

	/*
	 * Clean up any syscall-cached ucred or rlimit.
	 */
	if (td->td_ucred) {
		crfree(td->td_ucred);
		td->td_ucred = NULL;
	}
	if (td->td_limit) {
		struct plimit *rlimit;

		rlimit = td->td_limit;
		td->td_limit = NULL;
		plimit_free(rlimit);
	}

	/*
	 * Cleanup any cached descriptors for this thread
	 */
	if (p->p_fd)
		fexitcache(td);

	/*
	 * Nobody actually wakes us when the lock
	 * count reaches zero, so just wait one tick.
	 */
	while (lp->lwp_lock > 0)
		tsleep(lp, 0, "lwpexit", 1);

	/* Hand down resource usage to our proc */
	ruadd(&p->p_ru, &lp->lwp_ru);

	/*
	 * If we don't hold the process until the LWP is reaped wait*()
	 * may try to dispose of its vmspace before all the LWPs have
	 * actually terminated.
	 */
	PHOLD(p);

	/*
	 * Do any remaining work that might block on us.  We should be
	 * coded such that further blocking is ok after decrementing
	 * p_nthreads but don't take the chance.
	 */
	dsched_exit_thread(td);
	biosched_done(curthread);

	/*
	 * We have to use the reaper for all the LWPs except the one doing
	 * the master exit.  The LWP doing the master exit can just be
	 * left on p_lwps and the process reaper will deal with it
	 * synchronously, which is much faster.
	 *
	 * Wakeup anyone waiting on p_nthreads to drop to 1 or 0.
	 *
	 * The process is left held until the reaper calls lwp_dispose() on
	 * the lp (after calling lwp_wait()).
	 */
	if (masterexit == 0) {
		int cpu = mycpuid;

		lwp_rb_tree_RB_REMOVE(&p->p_lwp_tree, lp);
		--p->p_nthreads;
		if ((p->p_flags & P_MAYBETHREADED) && p->p_nthreads <= 1)
			dowake = 1;
		lwkt_gettoken(&deadlwp_token[cpu]);
		LIST_INSERT_HEAD(&deadlwp_list[cpu], lp, u.lwp_reap_entry);
		taskqueue_enqueue(taskqueue_thread[cpu], deadlwp_task[cpu]);
		lwkt_reltoken(&deadlwp_token[cpu]);
	} else {
		--p->p_nthreads;
		if ((p->p_flags & P_MAYBETHREADED) && p->p_nthreads <= 1)
			dowake = 1;
	}

	/*
	 * We no longer need p_token.
	 *
	 * Tell the userland scheduler that we are going away
	 */
	lwkt_reltoken(&p->p_token);
	p->p_usched->heuristic_exiting(lp, p);

	/*
	 * Issue late wakeups after releasing our token to give us a chance
	 * to deschedule and switch away before another cpu in a wait*()
	 * reaps us.  This is done as late as possible to reduce contention.
	 */
	if (dowake)
		wakeup(&p->p_nthreads);
	if (waddr)
		wakeup(waddr);

	cpu_lwp_exit();
}

/*
 * Wait until a lwp is completely dead.  The final interlock in this drama
 * is when TDF_EXITING is set in cpu_thread_exit() just before the final
 * switchout.
 *
 * At the point TDF_EXITING is set a complete exit is accomplished when
 * TDF_RUNNING and TDF_PREEMPT_LOCK are both clear.  td_mpflags has two
 * post-switch interlock flags that can be used to wait for the TDF_
 * flags to clear.
 *
 * Returns non-zero on success, and zero if the caller needs to retry
 * the lwp_wait().
 */
static int
lwp_wait(struct lwp *lp)
{
	struct thread *td = lp->lwp_thread;
	u_int mpflags;

	KKASSERT(lwkt_preempted_proc() != lp);

	/*
	 * This bit of code uses the thread destruction interlock
	 * managed by lwkt_switch_return() to wait for the lwp's
	 * thread to completely disengage.
	 *
	 * It is possible for us to race another cpu core so we
	 * have to do this correctly.
	 */
	for (;;) {
		mpflags = td->td_mpflags;
		cpu_ccfence();
		if (mpflags & TDF_MP_EXITSIG)
			break;
		tsleep_interlock(td, 0);
		if (atomic_cmpset_int(&td->td_mpflags, mpflags,
				      mpflags | TDF_MP_EXITWAIT)) {
			tsleep(td, PINTERLOCKED, "lwpxt", 0);
		}
	}

	/*
	 * We've already waited for the core exit but there can still
	 * be other refs from e.g. process scans and such.
	 */
	if (lp->lwp_lock > 0) {
		tsleep(lp, 0, "lwpwait1", 1);
		return(0);
	}
	if (td->td_refs) {
		tsleep(td, 0, "lwpwait2", 1);
		return(0);
	}

	/*
	 * Now that we have the thread destruction interlock these flags
	 * really should already be cleaned up, keep a check for safety.
	 *
	 * We can't rip its stack out from under it until TDF_EXITING is
	 * set and both TDF_RUNNING and TDF_PREEMPT_LOCK are clear.
	 * TDF_PREEMPT_LOCK must be checked because TDF_RUNNING
	 * will be cleared temporarily if a thread gets preempted.
	 */
	while ((td->td_flags & (TDF_RUNNING |
				TDF_RUNQ |
				TDF_PREEMPT_LOCK |
				TDF_EXITING)) != TDF_EXITING) {
		tsleep(lp, 0, "lwpwait3", 1);
		return (0);
	}

	KASSERT((td->td_flags & (TDF_RUNQ|TDF_TSLEEPQ)) == 0,
		("lwp_wait: td %p (%s) still on run or sleep queue",
		 td, td->td_comm));
	return (1);
}

/*
 * Release the resources associated with a lwp.
 * The lwp must be completely dead.
 */
void
lwp_dispose(struct lwp *lp)
{
	struct thread *td = lp->lwp_thread;

	KKASSERT(lwkt_preempted_proc() != lp);
	KKASSERT(lp->lwp_lock == 0);
	KKASSERT(td->td_refs == 0);
	KKASSERT((td->td_flags & (TDF_RUNNING |
				  TDF_RUNQ |
				  TDF_PREEMPT_LOCK |
				  TDF_EXITING)) == TDF_EXITING);

	PRELE(lp->lwp_proc);
	lp->lwp_proc = NULL;
	if (td != NULL) {
		td->td_proc = NULL;
		td->td_lwp = NULL;
		lp->lwp_thread = NULL;
		lwkt_free_thread(td);
	}
	kfree(lp, M_LWP);
}

int
sys_wait4(struct sysmsg *sysmsg, const struct wait_args *uap)
{
	struct __wrusage wrusage;
	int error;
	int status;
	int options;
	id_t id;
	idtype_t idtype;

	options = uap->options | WEXITED | WTRAPPED;
	id = uap->pid;

	if (id == WAIT_ANY) {
		idtype = P_ALL;
	} else if (id == WAIT_MYPGRP) {
		idtype = P_PGID;
		id = curproc->p_pgid;
	} else if (id < 0) {
		idtype = P_PGID;
		id = -id;
	} else {
		idtype = P_PID;
	}

	error = kern_wait(idtype, id, &status, options, &wrusage,
			  NULL, &sysmsg->sysmsg_result);

	if (error == 0 && uap->status)
		error = copyout(&status, uap->status, sizeof(*uap->status));
	if (error == 0 && uap->rusage) {
		ruadd(&wrusage.wru_self, &wrusage.wru_children);
		error = copyout(&wrusage.wru_self, uap->rusage,
				sizeof(*uap->rusage));
	}
	return (error);
}

int
sys_wait6(struct sysmsg *sysmsg, const struct wait6_args *uap)
{
	struct __wrusage wrusage;
	siginfo_t info;
	siginfo_t *infop;
	int error;
	int status;
	int options;
	id_t id;
	idtype_t idtype;

	/*
	 * NOTE: wait6() requires WEXITED and WTRAPPED to be specified if
	 *	 desired.
	 */
	options = uap->options;
	idtype = uap->idtype;
	id = uap->id;
	infop = uap->info ? &info : NULL;

	switch (idtype) {
	case P_PID:
	case P_PGID:
		if (id == WAIT_MYPGRP) {
			idtype = P_PGID;
			id = curproc->p_pgid;
		}
		break;
	default:
		/* let kern_wait deal with the remainder */
		break;
	}

	error = kern_wait(idtype, id, &status, options,
			  &wrusage, infop, &sysmsg->sysmsg_result);

	if (error == 0 && uap->status)
		error = copyout(&status, uap->status, sizeof(*uap->status));
	if (error == 0 && uap->wrusage)
		error = copyout(&wrusage, uap->wrusage, sizeof(*uap->wrusage));
	if (error == 0 && uap->info)
		error = copyout(&info, uap->info, sizeof(*uap->info));
	return (error);
}
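
/*
 * Illustrative mapping (informal note, not from the original source):
 * the classic wait4(pid, &st, 0, &ru) is roughly
 *
 *	wait6(P_PID, pid, &st, WEXITED | WTRAPPED, &wru, NULL);
 *
 * with the caller folding wru.wru_self and wru.wru_children back into
 * a single struct rusage, just as sys_wait4() does above with ruadd().
 */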

/*
 * kernel wait*() system call support
 */
int
kern_wait(idtype_t idtype, id_t id, int *status, int options,
	  struct __wrusage *wrusage, siginfo_t *info, int *res)
{
	struct thread *td = curthread;
	struct lwp *lp;
	struct proc *q = td->td_proc;
	struct proc *p, *t;
	struct ucred *cr;
	struct pargs *pa;
	struct sigacts *ps;
	int nfound, error;
	long waitgen;

	/*
	 * Must not have extraneous options.  Must have at least one
	 * matchable option.
	 */
	if (options &~ (WUNTRACED|WNOHANG|WCONTINUED|WLINUXCLONE|WSTOPPED|
			WEXITED|WTRAPPED|WNOWAIT)) {
		return (EINVAL);
	}
	if ((options & (WEXITED | WUNTRACED | WCONTINUED | WTRAPPED)) == 0) {
		return (EINVAL);
	}

	/*
	 * Protect the q->p_children list
	 */
	lwkt_gettoken(&q->p_token);
loop:
	/*
	 * All sorts of things can change due to blocking so we have to loop
	 * all the way back up here.
	 *
	 * The problem is that if a process group is stopped and the parent
	 * is doing a wait*(..., WUNTRACED, ...), it will see the STOP
	 * of the child and then stop itself when it tries to return from the
	 * system call.  When the process group is resumed the parent will
	 * then get the STOP status even though the child has now resumed
	 * (a followup wait*() will get the CONT status).
	 *
	 * Previously the CONT would overwrite the STOP because the tstop
	 * was handled within tsleep(), and the parent would only see
	 * the CONT when both are stopped and continued together.  This little
	 * two-line hack restores this effect.
	 *
	 * No locks are held so we can safely block the process here.
	 */
	if (STOPLWP(q, td->td_lwp))
		tstop();

	nfound = 0;

	/*
	 * Loop on children.
	 *
	 * NOTE: We don't want to break q's p_token in the loop for the
	 *	 case where no children are found or we risk breaking the
	 *	 interlock between child and parent.
	 */
	waitgen = atomic_fetchadd_long(&q->p_waitgen, 0x80000000);
	LIST_FOREACH(p, &q->p_children, p_sibling) {
		/*
		 * Skip children that another thread is already
		 * uninterruptibly reaping.
		 */
		if (PWAITRES_PENDING(p))
			continue;

		/*
		 * Filter, (p) will be held on fall-through.  Try to optimize
		 * this to avoid the atomic op until we are pretty sure we
		 * want this process.
		 */
		switch (idtype) {
		case P_ALL:
			PHOLD(p);
			break;
		case P_PID:
			if (p->p_pid != (pid_t)id)
				continue;
			PHOLD(p);
			break;
		case P_PGID:
			if (p->p_pgid != (pid_t)id)
				continue;
			PHOLD(p);
			break;
		case P_SID:
			PHOLD(p);
			if (p->p_session && p->p_session->s_sid != (pid_t)id) {
				PRELE(p);
				continue;
			}
			break;
		case P_UID:
			PHOLD(p);
			if (p->p_ucred->cr_uid != (uid_t)id) {
				PRELE(p);
				continue;
			}
			break;
		case P_GID:
			PHOLD(p);
			if (p->p_ucred->cr_gid != (gid_t)id) {
				PRELE(p);
				continue;
			}
			break;
		case P_JAILID:
			PHOLD(p);
			if (p->p_ucred->cr_prison &&
			    p->p_ucred->cr_prison->pr_id != (int)id) {
				PRELE(p);
				continue;
			}
			break;
		default:
			/* unsupported filter */
			continue;
		}
		/* (p) is held at this point */

		/*
		 * This special case handles a kthread spawned by linux_clone
		 * (see linux_misc.c).  The linux_wait4 and linux_waitpid
		 * functions need to be able to distinguish between waiting
		 * on a process and waiting on a thread.  It is a thread if
		 * p_sigparent is not SIGCHLD, and the WLINUXCLONE option
		 * signifies we want to wait for threads and not processes.
		 */
		if ((p->p_sigparent != SIGCHLD) ^
		    ((options & WLINUXCLONE) != 0)) {
			PRELE(p);
			continue;
		}

		nfound++;
		if (p->p_stat == SZOMB && (options & WEXITED)) {
			/*
			 * We may go into SZOMB with threads still present.
			 * We must wait for them to exit before we can reap
			 * the master thread, otherwise we may race reaping
			 * non-master threads.
			 *
			 * Only this routine can remove a process from
			 * the zombie list and destroy it.
			 *
			 * This function will fail after sleeping if another
			 * thread owns the zombie lock.  This function will
			 * fail immediately or after sleeping if another
			 * thread owns or obtains ownership of the reap via
			 * WAITRES.
			 */
			if (PHOLDZOMB(p)) {
				PRELE(p);
				goto loop;
			}
			lwkt_gettoken(&p->p_token);
			if (p->p_pptr != q) {
				lwkt_reltoken(&p->p_token);
				PRELE(p);
				PRELEZOMB(p);
				goto loop;
			}

			/*
			 * We are the reaper, from this point on the reap
			 * cannot be aborted.
			 */
			PWAITRES_SET(p);
			while (p->p_nthreads > 0) {
				tsleep(&p->p_nthreads, 0, "lwpzomb", hz);
			}

			/*
			 * Reap any LWPs left in p->p_lwps.  This is usually
			 * just the last LWP.  This must be done before
			 * we loop on p_lock since the lwps hold a ref on
			 * it as a vmspace interlock.
			 *
			 * Once that is accomplished p_nthreads had better
			 * be zero.
			 */
			while ((lp = RB_ROOT(&p->p_lwp_tree)) != NULL) {
				/*
				 * Make sure no one is using this lwp, before
				 * it is removed from the tree.  If we didn't
				 * wait it here, lwp tree iteration with
				 * blocking operation would be broken.
				 */
				while (lp->lwp_lock > 0)
					tsleep(lp, 0, "zomblwp", 1);
				lwp_rb_tree_RB_REMOVE(&p->p_lwp_tree, lp);
				reaplwp(lp);
			}
			KKASSERT(p->p_nthreads == 0);

			/*
			 * Don't do anything really bad until all references
			 * to the process go away.  This may include other
			 * LWPs which are still in the process of being
			 * reaped.  We can't just pull the rug out from under
			 * them because they may still be using the VM space.
			 *
			 * Certain kernel facilities such as /proc will also
			 * put a hold on the process for short periods of
			 * time.
			 */
			PRELE(p);		/* from top of loop */
			PSTALL(p, "reap3", 1);	/* 1 ref (for PZOMBHOLD) */

			/* Take care of our return values. */
			*res = p->p_pid;

			*status = p->p_xstat;
			wrusage->wru_self = p->p_ru;
			wrusage->wru_children = p->p_cru;

			if (info) {
				bzero(info, sizeof(*info));
				info->si_errno = 0;
				info->si_signo = SIGCHLD;
				if (WIFEXITED(p->p_xstat)) {
					info->si_code = CLD_EXITED;
					info->si_status =
						WEXITSTATUS(p->p_xstat);
				} else {
					info->si_code = CLD_KILLED;
					info->si_status = WTERMSIG(p->p_xstat);
				}
				info->si_pid = p->p_pid;
				info->si_uid = p->p_ucred->cr_uid;
			}

			/*
			 * WNOWAIT shortcuts to done here, leaving the
			 * child on the zombie list.
			 */
			if (options & WNOWAIT) {
				lwkt_reltoken(&p->p_token);
				PRELEZOMB(p);
				error = 0;
				goto done;
			}

			/*
			 * If we got the child via a ptrace 'attach',
			 * we need to give it back to the old parent.
			 */
			if (p->p_oppid && (t = pfind(p->p_oppid)) != NULL) {
				p->p_oppid = 0;
				proc_reparent(p, t);
				ksignal(t, SIGCHLD);
				wakeup((caddr_t)t);
				PRELE(t);
				lwkt_reltoken(&p->p_token);
				PRELEZOMB(p);
				error = 0;
				goto done;
			}

			/*
			 * Unlink the proc from its process group so that
			 * the following operations won't lead to an
			 * inconsistent state for processes running down
			 * the zombie list.
			 */
			proc_remove_zombie(p);
			proc_userunmap(p);
			lwkt_reltoken(&p->p_token);
			leavepgrp(p);

			p->p_xstat = 0;
			ruadd(&q->p_cru, &p->p_ru);
			ruadd(&q->p_cru, &p->p_cru);

			/*
			 * Decrement the count of procs running with this uid.
			 */
			chgproccnt(p->p_ucred->cr_ruidinfo, -1, 0);

			/*
			 * Free up credentials.  p_spin is required to
			 * avoid races against allproc scans.
			 */
			spin_lock(&p->p_spin);
			cr = p->p_ucred;
			p->p_ucred = NULL;
			spin_unlock(&p->p_spin);
			crfree(cr);

			/*
			 * Remove unused arguments
			 */
			pa = p->p_args;
			p->p_args = NULL;
			if (pa && refcount_release(&pa->ar_ref)) {
				kfree(pa, M_PARGS);
				pa = NULL;
			}

			ps = p->p_sigacts;
			p->p_sigacts = NULL;
			if (ps && refcount_release(&ps->ps_refcnt)) {
				kfree(ps, M_SUBPROC);
				ps = NULL;
			}

			/*
			 * Our exitingcount was incremented when the process
			 * became a zombie, now that the process has been
			 * removed from (almost) all lists we should be able
			 * to safely destroy its vmspace.  Wait for any
			 * current holders to go away (so the vmspace remains
			 * stable), then scrap it.
			 *
			 * NOTE: Releasing the parent process (q) p_token
			 *	 across the vmspace_exitfree() call is
			 *	 important here to reduce stalls on
			 *	 interactions with (q) (such as
			 *	 fork/exec/wait or 'ps').
			 */
			PSTALL(p, "reap4", 1);
			lwkt_reltoken(&q->p_token);
			vmspace_exitfree(p);
			lwkt_gettoken(&q->p_token);
			PSTALL(p, "reap5", 1);

			/*
			 * NOTE: We have to officially release ZOMB in order
			 *	 to ensure that a racing thread in kern_wait()
			 *	 which blocked on ZOMB is woken up.
			 */
			PRELEZOMB(p);
			kfree(p->p_uidpcpu, M_SUBPROC);
			kfree(p, M_PROC);
			atomic_add_int(&nprocs, -1);
			error = 0;
			goto done;
		}

		/*
		 * Process has not yet exited
		 */
		if ((p->p_stat == SSTOP || p->p_stat == SCORE) &&
		    (p->p_flags & P_WAITED) == 0 &&
		    (((p->p_flags & P_TRACED) && (options & WTRAPPED)) ||
		     (options & WSTOPPED))) {
			lwkt_gettoken(&p->p_token);
			if (p->p_pptr != q) {
				lwkt_reltoken(&p->p_token);
				PRELE(p);
				goto loop;
			}
			if ((p->p_stat != SSTOP && p->p_stat != SCORE) ||
			    (p->p_flags & P_WAITED) != 0 ||
			    ((p->p_flags & P_TRACED) == 0 &&
			     (options & WUNTRACED) == 0)) {
				lwkt_reltoken(&p->p_token);
				PRELE(p);
				goto loop;
			}

			/*
			 * Don't set P_WAITED if WNOWAIT specified, leaving
			 * the process in a waitable state.
			 */
			if ((options & WNOWAIT) == 0)
				p->p_flags |= P_WAITED;

			*res = p->p_pid;
			*status = W_STOPCODE(p->p_xstat);
			/* Zero rusage so we get something consistent. */
			bzero(wrusage, sizeof(*wrusage));
			error = 0;
			if (info) {
				bzero(info, sizeof(*info));
				if (p->p_flags & P_TRACED)
					info->si_code = CLD_TRAPPED;
				else
					info->si_code = CLD_STOPPED;
				info->si_status = WSTOPSIG(p->p_xstat);
			}
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			goto done;
		}
		if ((options & WCONTINUED) && (p->p_flags & P_CONTINUED)) {
			lwkt_gettoken(&p->p_token);
			if (p->p_pptr != q) {
				lwkt_reltoken(&p->p_token);
				PRELE(p);
				goto loop;
			}
			if ((p->p_flags & P_CONTINUED) == 0) {
				lwkt_reltoken(&p->p_token);
				PRELE(p);
				goto loop;
			}

			*res = p->p_pid;

			/*
			 * Don't clear P_CONTINUED if WNOWAIT specified,
			 * leaving the process in a waitable state.
			 */
			if ((options & WNOWAIT) == 0)
				p->p_flags &= ~P_CONTINUED;

			*status = SIGCONT;
			error = 0;
			if (info) {
				bzero(info, sizeof(*info));
				info->si_code = CLD_CONTINUED;
				info->si_status = WSTOPSIG(p->p_xstat);
			}
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			goto done;
		}
		PRELE(p);
	}
	if (nfound == 0) {
		error = ECHILD;
		goto done;
	}
	if (options & WNOHANG) {
		*res = 0;
		error = 0;
		goto done;
	}

	/*
	 * Wait for signal - interlocked using q->p_waitgen.
	 */
	error = 0;
	while ((waitgen & 0x7FFFFFFF) == (q->p_waitgen & 0x7FFFFFFF)) {
		tsleep_interlock(q, PCATCH);
		waitgen = atomic_fetchadd_long(&q->p_waitgen, 0x80000000);
		if ((waitgen & 0x7FFFFFFF) == (q->p_waitgen & 0x7FFFFFFF)) {
			error = tsleep(q, PCATCH | PINTERLOCKED, "wait", 0);
			break;
		}
	}
	if (error) {
done:
		lwkt_reltoken(&q->p_token);
		return (error);
	}
	goto loop;
}
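
/*
 * Note on the p_waitgen interlock in kern_wait() (descriptive sketch
 * inferred from the code above, added for clarity): exiting children
 * bump the low bits of the parent's p_waitgen after posting their
 * state change, while kern_wait() samples the generation before
 * scanning p_children.  If the low 31 bits still match after the scan,
 * nothing changed and it is safe to block; a bump that slips in
 * between tsleep_interlock() and tsleep() is caught by the
 * PINTERLOCKED protocol rather than being lost.
 */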

/*
 * Change child's parent process to parent.
 *
 * p_children/p_sibling requires the parent's token, and
 * changing pptr requires the child's token, so we have to
 * get three tokens to do this operation.  We also need to
 * hold pointers that might get ripped out from under us to
 * preserve structural integrity.
 *
 * It is possible to race another reparent or disconnect or other
 * similar operation.  We must retry when this situation occurs.
 * Once we successfully reparent the process we no longer care
 * about any races.
 */
void
proc_reparent(struct proc *child, struct proc *parent)
{
	struct proc *opp;

	PHOLD(parent);
	while ((opp = child->p_pptr) != parent) {
		PHOLD(opp);
		lwkt_gettoken(&opp->p_token);
		lwkt_gettoken(&child->p_token);
		lwkt_gettoken(&parent->p_token);
		if (child->p_pptr != opp) {
			lwkt_reltoken(&parent->p_token);
			lwkt_reltoken(&child->p_token);
			lwkt_reltoken(&opp->p_token);
			PRELE(opp);
			continue;
		}
		LIST_REMOVE(child, p_sibling);
		LIST_INSERT_HEAD(&parent->p_children, child, p_sibling);
		child->p_pptr = parent;
		child->p_ppid = parent->p_pid;
		lwkt_reltoken(&parent->p_token);
		lwkt_reltoken(&child->p_token);
		lwkt_reltoken(&opp->p_token);
		if (LIST_EMPTY(&opp->p_children))
			wakeup(opp);
		PRELE(opp);
		break;
	}
	PRELE(parent);
}

/*
 * The next two functions are to handle adding/deleting items on the
 * exit callout list.
 *
 * at_exit():
 *	Take the function given and put it onto the exit callout list,
 *	but first make sure that it's not already there.
 *	Returns 0 on success.
 */
int
at_exit(exitlist_fn function)
{
	struct exitlist *ep;

#ifdef INVARIANTS
	/* Be noisy if the programmer has lost track of things */
	if (rm_at_exit(function))
		kprintf("WARNING: exit callout entry (%p) already present\n",
			function);
#endif
	ep = kmalloc(sizeof(*ep), M_ATEXIT, M_NOWAIT);
	if (ep == NULL)
		return (ENOMEM);
	ep->function = function;
	TAILQ_INSERT_TAIL(&exit_list, ep, next);
	return (0);
}

/*
 * Scan the exit callout list for the given item and remove it.
 * Returns the number of items removed (0 or 1)
 */
int
rm_at_exit(exitlist_fn function)
{
	struct exitlist *ep;

	TAILQ_FOREACH(ep, &exit_list, next) {
		if (ep->function == function) {
			TAILQ_REMOVE(&exit_list, ep, next);
			kfree(ep, M_ATEXIT);
			return(1);
		}
	}
	return (0);
}
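
/*
 * Illustrative usage sketch (hypothetical module code, not from the
 * original source): a component needing per-process cleanup at exit
 * can register a callback at load time and remove it at unload:
 *
 *	static void mymod_exit_cb(struct thread *td) { ... }
 *
 *	at_exit(mymod_exit_cb);		module load
 *	rm_at_exit(mymod_exit_cb);	module unload
 *
 * The callback is invoked from exit1() with the exiting thread as its
 * argument, via the exit_list TAILQ above.
 */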

/*
 * LWP reaper related code.
 */
static void
reaplwps(void *context, int dummy)
{
	struct lwplist *lwplist = context;
	struct lwp *lp;
	int cpu = mycpuid;

	lwkt_gettoken(&deadlwp_token[cpu]);
	while ((lp = LIST_FIRST(lwplist))) {
		LIST_REMOVE(lp, u.lwp_reap_entry);
		reaplwp(lp);
	}
	lwkt_reltoken(&deadlwp_token[cpu]);
}

static void
reaplwp(struct lwp *lp)
{
	while (lwp_wait(lp) == 0)
		;
	lwp_dispose(lp);
}

static void
deadlwp_init(void)
{
	int cpu;

	for (cpu = 0; cpu < ncpus; cpu++) {
		lwkt_token_init(&deadlwp_token[cpu], "deadlwpl");
		LIST_INIT(&deadlwp_list[cpu]);
		deadlwp_task[cpu] = kmalloc(sizeof(*deadlwp_task[cpu]),
					    M_DEVBUF, M_WAITOK);
		TASK_INIT(deadlwp_task[cpu], 0, reaplwps, &deadlwp_list[cpu]);
	}
}

SYSINIT(deadlwpinit, SI_SUB_CONFIGURE, SI_ORDER_ANY, deadlwp_init, NULL);