kernel - Add per-process token, adjust signal code to use it.
[dragonfly.git] / sys / kern / kern_exit.c

/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_exit.c	8.7 (Berkeley) 2/12/94
 * $FreeBSD: src/sys/kern/kern_exit.c,v 1.92.2.11 2003/01/13 22:51:16 dillon Exp $
 * $DragonFly: src/sys/kern/kern_exit.c,v 1.91 2008/05/18 20:02:02 nth Exp $
 */

#include "opt_compat.h"
#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/ktrace.h>
#include <sys/pioctl.h>
#include <sys/tty.h>
#include <sys/wait.h>
#include <sys/vnode.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/taskqueue.h>
#include <sys/ptrace.h>
#include <sys/acct.h>		/* for acct_process() function prototype */
#include <sys/filedesc.h>
#include <sys/shm.h>
#include <sys/sem.h>
#include <sys/aio.h>
#include <sys/jail.h>
#include <sys/kern_syscall.h>
#include <sys/upcall.h>
#include <sys/caps.h>
#include <sys/unistd.h>
#include <sys/eventhandler.h>
#include <sys/dsched.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>
#include <sys/user.h>

#include <sys/thread2.h>
#include <sys/sysref2.h>
#include <sys/mplock2.h>

static void reaplwps(void *context, int dummy);
static void reaplwp(struct lwp *lp);
static void killlwps(struct lwp *lp);

static MALLOC_DEFINE(M_ATEXIT, "atexit", "atexit callback");
static MALLOC_DEFINE(M_ZOMBIE, "zombie", "zombie proc status");

/*
 * callout list for things to do at exit time
 */
struct exitlist {
	exitlist_fn function;
	TAILQ_ENTRY(exitlist) next;
};

TAILQ_HEAD(exit_list_head, exitlist);
static struct exit_list_head exit_list = TAILQ_HEAD_INITIALIZER(exit_list);

/*
 * LWP reaper data
 */
struct task *deadlwp_task[MAXCPU];
struct lwplist deadlwp_list[MAXCPU];

/*
 * exit --
 *	Death of process.
 *
 *	SYS_EXIT_ARGS(int rval)
 *
 * MPALMOSTSAFE
 */
int
sys_exit(struct exit_args *uap)
{
	exit1(W_EXITCODE(uap->rval, 0));
	/* NOTREACHED */
}
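
/*
 * A note on the status encoding used above: W_EXITCODE(ret, sig) packs the
 * return value and the terminating signal into a single wait status word
 * (conventionally (ret << 8) | sig), so a plain exit(rval) arrives in
 * exit1() as W_EXITCODE(rval, 0), and WTERMSIG()/WEXITSTATUS(), as used in
 * exit1() below, recover the two halves again.
 */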

/*
 * Extended exit --
 *	Death of a lwp or process with optional bells and whistles.
 *
 * MPALMOSTSAFE
 */
int
sys_extexit(struct extexit_args *uap)
{
	int action, who;
	int error;

	action = EXTEXIT_ACTION(uap->how);
	who = EXTEXIT_WHO(uap->how);

	/* Check parameters before we might perform some action */
	switch (who) {
	case EXTEXIT_PROC:
	case EXTEXIT_LWP:
		break;
	default:
		return (EINVAL);
	}

	switch (action) {
	case EXTEXIT_SIMPLE:
		break;
	case EXTEXIT_SETINT:
		error = copyout(&uap->status, uap->addr, sizeof(uap->status));
		if (error)
			return (error);
		break;
	default:
		return (EINVAL);
	}

	get_mplock();

	switch (who) {
	case EXTEXIT_LWP:
		/*
		 * Be sure only to perform a simple lwp exit if there is at
		 * least one more lwp in the proc, which will call exit1()
		 * later, otherwise the proc will be an UNDEAD and not even a
		 * SZOMB!
		 */
		if (curproc->p_nthreads > 1) {
			lwp_exit(0);
			/* NOT REACHED */
		}
		/* else last lwp in proc:  do the real thing */
		/* FALLTHROUGH */
	default:	/* to help gcc */
	case EXTEXIT_PROC:
		exit1(W_EXITCODE(uap->status, 0));
		/* NOTREACHED */
	}

	/* NOTREACHED */
	rel_mplock();	/* safety */
}

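/*
 * Illustrative userland usage (a sketch only; see extexit(2) and
 * <sys/unistd.h> for the authoritative interface and flag encoding).
 * A thread that wants to terminate just itself and publish that fact
 * through a user-visible variable might do something like:
 *
 *	static int lwp_done;			(hypothetical flag)
 *
 *	extexit(EXTEXIT_LWP | EXTEXIT_SETINT, 1, &lwp_done);
 *
 * The EXTEXIT_SETINT case above copies the status out to the supplied
 * address during parameter checking, and the EXTEXIT_LWP case then calls
 * lwp_exit() as long as other lwps remain in the process.
 */
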
/*
 * Kill all lwps associated with the current process except the
 * current lwp.   Return an error if we race another thread trying to
 * do the same thing and lose the race.
 *
 * If forexec is non-zero the current thread and process flags are
 * cleaned up so they can be reused.
 */
int
killalllwps(int forexec)
{
	struct lwp *lp = curthread->td_lwp;
	struct proc *p = lp->lwp_proc;

	/*
	 * Interlock against P_WEXIT.  Only one of the process's threads
	 * is allowed to do the master exit.
	 */
	if (p->p_flag & P_WEXIT)
		return (EALREADY);
	p->p_flag |= P_WEXIT;

	/*
	 * Interlock with LWP_WEXIT and kill any remaining LWPs
	 */
	lp->lwp_flag |= LWP_WEXIT;
	if (p->p_nthreads > 1)
		killlwps(lp);

	/*
	 * If doing this for an exec, clean up the remaining thread
	 * (us) for continuing operation after all the other threads
	 * have been killed.
	 */
	if (forexec) {
		lp->lwp_flag &= ~LWP_WEXIT;
		p->p_flag &= ~P_WEXIT;
	}
	return(0);
}

/*
 * Kill all LWPs except the current one.  Do not try to signal
 * LWPs which have exited on their own or have already been
 * signaled.
 */
static void
killlwps(struct lwp *lp)
{
	struct proc *p = lp->lwp_proc;
	struct lwp *tlp;

	/*
	 * Kill the remaining LWPs.  We must send the signal before setting
	 * LWP_WEXIT.  The setting of WEXIT is optional but helps reduce
	 * races.  tlp must be held across the call as it might block and
	 * allow the target lwp to rip itself out from under our loop.
	 */
	FOREACH_LWP_IN_PROC(tlp, p) {
		LWPHOLD(tlp);
		if ((tlp->lwp_flag & LWP_WEXIT) == 0) {
			lwpsignal(p, tlp, SIGKILL);
			tlp->lwp_flag |= LWP_WEXIT;
		}
		LWPRELE(tlp);
	}

	/*
	 * Wait for everything to clear out.
	 */
	while (p->p_nthreads > 1) {
		tsleep(&p->p_nthreads, 0, "killlwps", 0);
	}
}

/*
 * Exit: deallocate address space and other resources, change proc state
 * to zombie, and unlink proc from allproc and parent's lists.  Save exit
 * status and rusage for wait().  Check for child processes and orphan them.
 */
void
exit1(int rv)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct lwp *lp = td->td_lwp;
	struct proc *q, *nq;
	struct vmspace *vm;
	struct vnode *vtmp;
	struct exitlist *ep;
	int error;

	if (p->p_pid == 1) {
		kprintf("init died (signal %d, exit %d)\n",
		    WTERMSIG(rv), WEXITSTATUS(rv));
		panic("Going nowhere without my init!");
	}

	get_mplock();

	varsymset_clean(&p->p_varsymset);
	lockuninit(&p->p_varsymset.vx_lock);
	/*
	 * Kill all lwps associated with the current process, return an
	 * error if we race another thread trying to do the same thing
	 * and lose the race.
	 */
	error = killalllwps(0);
	if (error) {
		lwp_exit(0);
		/* NOT REACHED */
	}

	caps_exit(lp->lwp_thread);
	aio_proc_rundown(p);

	/* are we a task leader? */
	if (p == p->p_leader) {
		struct kill_args killArgs;
		killArgs.signum = SIGKILL;
		q = p->p_peers;
		while(q) {
			killArgs.pid = q->p_pid;
			/*
			 * The interface for kill is better
			 * than the internal signal
			 */
			sys_kill(&killArgs);
			nq = q;
			q = q->p_peers;
		}
		while (p->p_peers)
			tsleep((caddr_t)p, 0, "exit1", 0);
	}

#ifdef PGINPROF
	vmsizmon();
#endif
	STOPEVENT(p, S_EXIT, rv);
	wakeup(&p->p_stype);	/* Wakeup anyone in procfs' PIOCWAIT */

	/*
	 * Check if any loadable modules need anything done at process exit.
	 * e.g. SYSV IPC stuff
	 * XXX what if one of these generates an error?
	 */
	p->p_xstat = rv;
	EVENTHANDLER_INVOKE(process_exit, p);

	/*
	 * XXX: imho, the eventhandler stuff is much cleaner than this.
	 *	Maybe we should move everything to use eventhandler.
	 */
	TAILQ_FOREACH(ep, &exit_list, next)
		(*ep->function)(td);

	if (p->p_flag & P_PROFIL)
		stopprofclock(p);
	/*
	 * If parent is waiting for us to exit or exec,
	 * P_PPWAIT is set; we will wakeup the parent below.
	 */
	p->p_flag &= ~(P_TRACED | P_PPWAIT);
	SIGEMPTYSET(p->p_siglist);
	SIGEMPTYSET(lp->lwp_siglist);
	if (timevalisset(&p->p_realtimer.it_value))
		callout_stop(&p->p_ithandle);

	/*
	 * Reset any sigio structures pointing to us as a result of
	 * F_SETOWN with our pid.
	 */
	funsetownlst(&p->p_sigiolst);

	/*
	 * Close open files and release open-file table.
	 * This may block!
	 */
	fdfree(p, NULL);

	if(p->p_leader->p_peers) {
		q = p->p_leader;
		while(q->p_peers != p)
			q = q->p_peers;
		q->p_peers = p->p_peers;
		wakeup((caddr_t)p->p_leader);
	}

	/*
	 * XXX Shutdown SYSV semaphores
	 */
	semexit(p);

	KKASSERT(p->p_numposixlocks == 0);

	/* The next two chunks should probably be moved to vmspace_exit. */
	vm = p->p_vmspace;

	/*
	 * Release upcalls associated with this process
	 */
	if (vm->vm_upcalls)
		upc_release(vm, lp);

	/*
	 * Clean up data related to virtual kernel operation.  Clean up
	 * any vkernel context related to the current lwp now so we can
	 * destroy p_vkernel.
	 */
	if (p->p_vkernel) {
		vkernel_lwp_exit(lp);
		vkernel_exit(p);
	}

	/*
	 * Release user portion of address space.
	 * This releases references to vnodes,
	 * which could cause I/O if the file has been unlinked.
	 * Need to do this early enough that we can still sleep.
	 * Can't free the entire vmspace as the kernel stack
	 * may be mapped within that space also.
	 *
	 * Processes sharing the same vmspace may exit in one order, and
	 * get cleaned up by vmspace_exit() in a different order.  The
	 * last exiting process to reach this point releases as much of
	 * the environment as it can, and the last process cleaned up
	 * by vmspace_exit() (which decrements exitingcnt) cleans up the
	 * remainder.
	 */
	vmspace_exitbump(vm);
	sysref_put(&vm->vm_sysref);

	if (SESS_LEADER(p)) {
		struct session *sp = p->p_session;

		if (sp->s_ttyvp) {
			/*
			 * We are the controlling process.  Signal the
			 * foreground process group, drain the controlling
			 * terminal, and revoke access to the controlling
			 * terminal.
			 *
			 * NOTE: while waiting for the process group to exit
			 * it is possible that one of the processes in the
			 * group will revoke the tty, so the ttyclosesession()
			 * function will re-check sp->s_ttyvp.
			 */
			if (sp->s_ttyp && (sp->s_ttyp->t_session == sp)) {
				if (sp->s_ttyp->t_pgrp)
					pgsignal(sp->s_ttyp->t_pgrp, SIGHUP, 1);
				ttywait(sp->s_ttyp);
				ttyclosesession(sp, 1); /* also revoke */
			}
			/*
			 * Release the tty.  If someone has it open via
			 * /dev/tty then close it (since they no longer can
			 * once we've NULL'd it out).
			 */
			ttyclosesession(sp, 0);

			/*
			 * s_ttyp is not zero'd; we use this to indicate
			 * that the session once had a controlling terminal.
			 * (for logging and informational purposes)
			 */
		}
		sp->s_leader = NULL;
	}
	fixjobc(p, p->p_pgrp, 0);
	(void)acct_process(p);
#ifdef KTRACE
	/*
	 * release trace file
	 */
	if (p->p_tracenode)
		ktrdestroy(&p->p_tracenode);
	p->p_traceflag = 0;
#endif
	/*
	 * Release reference to text vnode
	 */
	if ((vtmp = p->p_textvp) != NULL) {
		p->p_textvp = NULL;
		vrele(vtmp);
	}

	/* Release namecache handle to text file */
	if (p->p_textnch.ncp)
		cache_drop(&p->p_textnch);

	/*
	 * Move the process to the zombie list.  This will block
	 * until the process p_lock count reaches 0.  The process will
	 * not be reaped until TDF_EXITING is set by cpu_thread_exit(),
	 * which is called from cpu_proc_exit().
	 */
	proc_move_allproc_zombie(p);

	q = LIST_FIRST(&p->p_children);
	if (q)		/* only need this if any child is S_ZOMB */
		wakeup((caddr_t) initproc);
	for (; q != 0; q = nq) {
		nq = LIST_NEXT(q, p_sibling);
		LIST_REMOVE(q, p_sibling);
		LIST_INSERT_HEAD(&initproc->p_children, q, p_sibling);
		q->p_pptr = initproc;
		q->p_sigparent = SIGCHLD;
		/*
		 * Traced processes are killed
		 * since their existence means someone is screwing up.
		 */
		if (q->p_flag & P_TRACED) {
			q->p_flag &= ~P_TRACED;
			ksignal(q, SIGKILL);
		}
	}

	/*
	 * Save exit status and final rusage info, adding in child rusage
	 * info and self times.
	 */
	calcru_proc(p, &p->p_ru);
	ruadd(&p->p_ru, &p->p_cru);

	/*
	 * notify interested parties of our demise.
	 */
	KNOTE(&p->p_klist, NOTE_EXIT);

	/*
	 * Notify parent that we're gone.  If parent has the PS_NOCLDWAIT
	 * flag set, notify process 1 instead (and hope it will handle
	 * this situation).
	 */
	if (p->p_pptr->p_sigacts->ps_flag & PS_NOCLDWAIT) {
		struct proc *pp = p->p_pptr;
		proc_reparent(p, initproc);
		/*
		 * If this was the last child of our parent, notify
		 * parent, so in case he was wait(2)ing, he will
		 * continue.
		 */
		if (LIST_EMPTY(&pp->p_children))
			wakeup((caddr_t)pp);
	}

	/* lwkt_gettoken(&proc_token); */
	q = p->p_pptr;
	if (p->p_sigparent && q != initproc) {
		PHOLD(q);
		ksignal(q, p->p_sigparent);
		PRELE(q);
	} else {
		ksignal(q, SIGCHLD);
	}
	/* lwkt_reltoken(&proc_token); */
	/* NOTE: p->p_pptr can get ripped out */

	wakeup(p->p_pptr);
	/*
	 * cpu_exit is responsible for clearing curproc, since
	 * it is heavily integrated with the thread/switching sequence.
	 *
	 * Other substructures are freed from wait().
	 */
	plimit_free(p);

	/*
	 * Release the current user process designation on the process so
	 * the userland scheduler can work in someone else.
	 */
	p->p_usched->release_curproc(lp);

	/*
	 * Finally, call machine-dependent code to release as many of the
	 * lwp's resources as we can and halt execution of this thread.
	 */
	lwp_exit(1);
}

/*
 * Eventually called by every exiting LWP
 */
void
lwp_exit(int masterexit)
{
	struct thread *td = curthread;
	struct lwp *lp = td->td_lwp;
	struct proc *p = lp->lwp_proc;

	/*
	 * lwp_exit() may be called without setting LWP_WEXIT, so
	 * make sure it is set here.
	 */
	lp->lwp_flag |= LWP_WEXIT;

	/*
	 * Clean up any virtualization
	 */
	if (lp->lwp_vkernel)
		vkernel_lwp_exit(lp);

	/*
	 * Clean up select/poll support
	 */
	kqueue_terminate(&lp->lwp_kqueue);

	/*
	 * Clean up any syscall-cached ucred
	 */
	if (td->td_ucred) {
		crfree(td->td_ucred);
		td->td_ucred = NULL;
	}

	/*
	 * Nobody actually wakes us when the lock
	 * count reaches zero, so just wait one tick.
	 */
	while (lp->lwp_lock > 0)
		tsleep(lp, 0, "lwpexit", 1);

	/* Hand down resource usage to our proc */
	ruadd(&p->p_ru, &lp->lwp_ru);

	/*
	 * If we don't hold the process until the LWP is reaped wait*()
	 * may try to dispose of its vmspace before all the LWPs have
	 * actually terminated.
	 */
	PHOLD(p);

	/*
	 * Do any remaining work that might block on us.  We should be
	 * coded such that further blocking is ok after decrementing
	 * p_nthreads but don't take the chance.
	 */
	dsched_exit_thread(td);
	biosched_done(curthread);

	/*
	 * We have to use the reaper for all the LWPs except the one doing
	 * the master exit.  The LWP doing the master exit can just be
	 * left on p_lwps and the process reaper will deal with it
	 * synchronously, which is much faster.
	 *
	 * Wakeup anyone waiting on p_nthreads to drop to 1 or 0.
	 */
	if (masterexit == 0) {
		lwp_rb_tree_RB_REMOVE(&p->p_lwp_tree, lp);
		--p->p_nthreads;
		if (p->p_nthreads <= 1)
			wakeup(&p->p_nthreads);
		LIST_INSERT_HEAD(&deadlwp_list[mycpuid], lp, u.lwp_reap_entry);
		taskqueue_enqueue(taskqueue_thread[mycpuid],
				  deadlwp_task[mycpuid]);
	} else {
		--p->p_nthreads;
		if (p->p_nthreads <= 1)
			wakeup(&p->p_nthreads);
	}
	cpu_lwp_exit();
}

/*
 * Wait until a lwp is completely dead.
 *
 * If the thread is still executing, which can't be waited upon,
 * return failure.  The caller is responsible for waiting a little
 * bit and checking again.
 *
 * Suggested use:
 * while (!lwp_wait(lp))
 *	tsleep(lp, 0, "lwpwait", 1);
 */
static int
lwp_wait(struct lwp *lp)
{
	struct thread *td = lp->lwp_thread;

	KKASSERT(lwkt_preempted_proc() != lp);

	while (lp->lwp_lock > 0)
		tsleep(lp, 0, "lwpwait1", 1);

	lwkt_wait_free(td);

	/*
	 * The lwp's thread may still be in the middle
	 * of switching away, we can't rip its stack out from
	 * under it until TDF_EXITING is set and both
	 * TDF_RUNNING and TDF_PREEMPT_LOCK are clear.
	 * TDF_PREEMPT_LOCK must be checked because TDF_RUNNING
	 * will be cleared temporarily if a thread gets
	 * preempted.
	 *
	 * YYY no wakeup occurs, so we simply return failure
	 * and let the caller deal with sleeping and calling
	 * us again.
	 */
	if ((td->td_flags & (TDF_RUNNING|TDF_PREEMPT_LOCK|
			     TDF_EXITING|TDF_RUNQ)) != TDF_EXITING) {
		return (0);
	}
	KASSERT((td->td_flags & TDF_TSLEEPQ) == 0,
		("lwp_wait: td %p (%s) still on sleep queue", td, td->td_comm));
	return (1);
}

/*
 * Release the resources associated with a lwp.
 * The lwp must be completely dead.
 */
void
lwp_dispose(struct lwp *lp)
{
	struct thread *td = lp->lwp_thread;

	KKASSERT(lwkt_preempted_proc() != lp);
	KKASSERT(td->td_refs == 0);
	KKASSERT((td->td_flags & (TDF_RUNNING|TDF_PREEMPT_LOCK|TDF_EXITING)) ==
		 TDF_EXITING);

	PRELE(lp->lwp_proc);
	lp->lwp_proc = NULL;
	if (td != NULL) {
		td->td_proc = NULL;
		td->td_lwp = NULL;
		lp->lwp_thread = NULL;
		lwkt_free_thread(td);
	}
	kfree(lp, M_LWP);
}

/*
 * MPSAFE
 */
int
sys_wait4(struct wait_args *uap)
{
	struct rusage rusage;
	int error, status;

	error = kern_wait(uap->pid, (uap->status ? &status : NULL),
			  uap->options, (uap->rusage ? &rusage : NULL),
			  &uap->sysmsg_result);

	if (error == 0 && uap->status)
		error = copyout(&status, uap->status, sizeof(*uap->status));
	if (error == 0 && uap->rusage)
		error = copyout(&rusage, uap->rusage, sizeof(*uap->rusage));
	return (error);
}

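/*
 * For reference, the familiar userland form of the path above (a sketch
 * only, not kernel code):
 *
 *	int status;
 *	struct rusage ru;
 *	pid_t pid = wait4(-1, &status, WNOHANG, &ru);
 *
 * kern_wait() returns the reaped pid through *res (0 when WNOHANG finds
 * nothing to report), the wait status through *status and the child's
 * accumulated rusage through *rusage, which sys_wait4() copies out.
 */
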
/*
 * wait1()
 *
 * wait_args(int pid, int *status, int options, struct rusage *rusage)
 *
 * MPALMOSTSAFE
 */
int
kern_wait(pid_t pid, int *status, int options, struct rusage *rusage, int *res)
{
	struct thread *td = curthread;
	struct lwp *lp;
	struct proc *q = td->td_proc;
	struct proc *p, *t;
	int nfound, error;

	if (pid == 0)
		pid = -q->p_pgid;
	if (options &~ (WUNTRACED|WNOHANG|WCONTINUED|WLINUXCLONE))
		return (EINVAL);
	get_mplock();
loop:
	/*
	 * Hack for backwards compatibility with badly written user code.
	 * Or perhaps we have to do this anyway, it is unclear. XXX
	 *
	 * The problem is that if a process group is stopped and the parent
	 * is doing a wait*(..., WUNTRACED, ...), it will see the STOP
	 * of the child and then stop itself when it tries to return from the
	 * system call.  When the process group is resumed the parent will
	 * then get the STOP status even though the child has now resumed
	 * (a followup wait*() will get the CONT status).
	 *
	 * Previously the CONT would overwrite the STOP because the tstop
	 * was handled within tsleep(), and the parent would only see
	 * the CONT when both are stopped and continued together.  This little
	 * two-line hack restores this effect.
	 */
	while (q->p_stat == SSTOP)
		tstop();

	nfound = 0;
	LIST_FOREACH(p, &q->p_children, p_sibling) {
		if (pid != WAIT_ANY &&
		    p->p_pid != pid && p->p_pgid != -pid)
			continue;

		/*
		 * This special case handles a kthread spawned by linux_clone
		 * (see linux_misc.c).  The linux_wait4 and linux_waitpid
		 * functions need to be able to distinguish between waiting
		 * on a process and waiting on a thread.  It is a thread if
		 * p_sigparent is not SIGCHLD, and the WLINUXCLONE option
		 * signifies we want to wait for threads and not processes.
		 */
		if ((p->p_sigparent != SIGCHLD) ^
		    ((options & WLINUXCLONE) != 0)) {
			continue;
		}

		nfound++;
		if (p->p_stat == SZOMB) {
			/*
			 * We may go into SZOMB with threads still present.
			 * We must wait for them to exit before we can reap
			 * the master thread, otherwise we may race reaping
			 * non-master threads.
			 */
			while (p->p_nthreads > 0) {
				tsleep(&p->p_nthreads, 0, "lwpzomb", hz);
			}

			/*
			 * Reap any LWPs left in p->p_lwps.  This is usually
			 * just the last LWP.  This must be done before
			 * we loop on p_lock since the lwps hold a ref on
			 * it as a vmspace interlock.
			 *
			 * Once that is accomplished p_nthreads had better
			 * be zero.
			 */
			while ((lp = RB_ROOT(&p->p_lwp_tree)) != NULL) {
				lwp_rb_tree_RB_REMOVE(&p->p_lwp_tree, lp);
				reaplwp(lp);
			}
			KKASSERT(p->p_nthreads == 0);

			/*
			 * Don't do anything really bad until all references
			 * to the process go away.  This may include other
			 * LWPs which are still in the process of being
			 * reaped.  We can't just pull the rug out from under
			 * them because they may still be using the VM space.
			 *
			 * Certain kernel facilities such as /proc will also
			 * put a hold on the process for short periods of
			 * time.
			 */
			while (p->p_lock)
				tsleep(p, 0, "reap3", hz);

			/* scheduling hook for heuristic */
			/* XXX no lwp available, we need a different heuristic */
			/*
			p->p_usched->heuristic_exiting(td->td_lwp, deadlp);
			*/

			/* Take care of our return values. */
			*res = p->p_pid;
			if (status)
				*status = p->p_xstat;
			if (rusage)
				*rusage = p->p_ru;
			/*
			 * If we got the child via a ptrace 'attach',
			 * we need to give it back to the old parent.
			 */
			if (p->p_oppid && (t = pfind(p->p_oppid))) {
				p->p_oppid = 0;
				proc_reparent(p, t);
				ksignal(t, SIGCHLD);
				wakeup((caddr_t)t);
				error = 0;
				goto done;
			}

			/*
			 * Unlink the proc from its process group so that
			 * the following operations won't lead to an
			 * inconsistent state for processes running down
			 * the zombie list.
			 */
			KKASSERT(p->p_lock == 0);
			proc_remove_zombie(p);
			leavepgrp(p);

			p->p_xstat = 0;
			ruadd(&q->p_cru, &p->p_ru);

			/*
			 * Decrement the count of procs running with this uid.
			 */
			chgproccnt(p->p_ucred->cr_ruidinfo, -1, 0);

			/*
			 * Free up credentials.
			 */
			crfree(p->p_ucred);
			p->p_ucred = NULL;

			/*
			 * Remove unused arguments
			 */
			if (p->p_args && --p->p_args->ar_ref == 0)
				FREE(p->p_args, M_PARGS);

			if (--p->p_sigacts->ps_refcnt == 0) {
				kfree(p->p_sigacts, M_SUBPROC);
				p->p_sigacts = NULL;
			}

			vm_waitproc(p);
			kfree(p, M_PROC);
			nprocs--;
			error = 0;
			goto done;
		}
		if (p->p_stat == SSTOP && (p->p_flag & P_WAITED) == 0 &&
		    (p->p_flag & P_TRACED || options & WUNTRACED)) {
			p->p_flag |= P_WAITED;

			*res = p->p_pid;
			if (status)
				*status = W_STOPCODE(p->p_xstat);
			/* Zero rusage so we get something consistent. */
			if (rusage)
				bzero(rusage, sizeof(*rusage));
			error = 0;
			goto done;
		}
		if (options & WCONTINUED && (p->p_flag & P_CONTINUED)) {
			*res = p->p_pid;
			p->p_flag &= ~P_CONTINUED;

			if (status)
				*status = SIGCONT;
			error = 0;
			goto done;
		}
	}
	if (nfound == 0) {
		error = ECHILD;
		goto done;
	}
	if (options & WNOHANG) {
		*res = 0;
		error = 0;
		goto done;
	}
	error = tsleep((caddr_t)q, PCATCH, "wait", 0);
	if (error) {
done:
		rel_mplock();
		return (error);
	}
	goto loop;
}

/*
 * make process 'parent' the new parent of process 'child'.
 */
void
proc_reparent(struct proc *child, struct proc *parent)
{

	if (child->p_pptr == parent)
		return;

	LIST_REMOVE(child, p_sibling);
	LIST_INSERT_HEAD(&parent->p_children, child, p_sibling);
	child->p_pptr = parent;
}

/*
 * The next two functions are to handle adding/deleting items on the
 * exit callout list
 *
 * at_exit():
 * Take the arguments given and put them onto the exit callout list.
 * However, first make sure that it's not already there.
 * Returns 0 on success.
 */

int
at_exit(exitlist_fn function)
{
	struct exitlist *ep;

#ifdef INVARIANTS
	/* Be noisy if the programmer has lost track of things */
	if (rm_at_exit(function))
		kprintf("WARNING: exit callout entry (%p) already present\n",
			function);
#endif
	ep = kmalloc(sizeof(*ep), M_ATEXIT, M_NOWAIT);
	if (ep == NULL)
		return (ENOMEM);
	ep->function = function;
	TAILQ_INSERT_TAIL(&exit_list, ep, next);
	return (0);
}

/*
 * Scan the exit callout list for the given item and remove it.
 * Returns the number of items removed (0 or 1)
 */
int
rm_at_exit(exitlist_fn function)
{
	struct exitlist *ep;

	TAILQ_FOREACH(ep, &exit_list, next) {
		if (ep->function == function) {
			TAILQ_REMOVE(&exit_list, ep, next);
			kfree(ep, M_ATEXIT);
			return(1);
		}
	}
	return (0);
}
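
/*
 * Illustrative usage of the exit callout list (a sketch only, with a
 * hypothetical callback name).  An exitlist_fn receives the exiting
 * thread, as seen at the TAILQ_FOREACH invocation in exit1():
 *
 *	static void
 *	mymod_exit_hook(struct thread *td)
 *	{
 *		(clean up per-process state owned by the module)
 *	}
 *
 *	at_exit(mymod_exit_hook);	(e.g. at module load)
 *	rm_at_exit(mymod_exit_hook);	(at module unload)
 */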

/*
 * LWP reaper related code.
 */
static void
reaplwps(void *context, int dummy)
{
	struct lwplist *lwplist = context;
	struct lwp *lp;

	get_mplock();
	while ((lp = LIST_FIRST(lwplist))) {
		LIST_REMOVE(lp, u.lwp_reap_entry);
		reaplwp(lp);
	}
	rel_mplock();
}

static void
reaplwp(struct lwp *lp)
{
	while (lwp_wait(lp) == 0)
		tsleep(lp, 0, "lwpreap", 1);
	lwp_dispose(lp);
}

static void
deadlwp_init(void)
{
	int cpu;

	for (cpu = 0; cpu < ncpus; cpu++) {
		LIST_INIT(&deadlwp_list[cpu]);
		deadlwp_task[cpu] = kmalloc(sizeof(*deadlwp_task[cpu]),
					    M_DEVBUF, M_WAITOK);
		TASK_INIT(deadlwp_task[cpu], 0, reaplwps, &deadlwp_list[cpu]);
	}
}

SYSINIT(deadlwpinit, SI_SUB_CONFIGURE, SI_ORDER_ANY, deadlwp_init, NULL);
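
/*
 * Summary of the reaper flow above: a non-master lwp calling lwp_exit()
 * queues itself on this cpu's deadlwp_list and enqueues deadlwp_task, so
 * the per-cpu taskqueue thread eventually runs reaplwps() -> reaplwp(),
 * which spins on lwp_wait() until the dying thread has finished switching
 * away and then frees it with lwp_dispose().
 */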