kernel - Fix exit races which can lead to a corrupt p_children list
[dragonfly.git] / sys / kern / kern_proc.c
984263bc 1/*
99ad9bc4
MD
2 * (MPSAFE)
3 *
984263bc
MD
4 * Copyright (c) 1982, 1986, 1989, 1991, 1993
5 * The Regents of the University of California. All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed by the University of
18 * California, Berkeley and its contributors.
19 * 4. Neither the name of the University nor the names of its contributors
20 * may be used to endorse or promote products derived from this software
21 * without specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
27 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 * SUCH DAMAGE.
34 *
35 * @(#)kern_proc.c 8.7 (Berkeley) 2/14/95
36 * $FreeBSD: src/sys/kern/kern_proc.c,v 1.63.2.9 2003/05/08 07:47:16 kbyanc Exp $
37 */
38
39#include <sys/param.h>
40#include <sys/systm.h>
41#include <sys/kernel.h>
42#include <sys/sysctl.h>
43#include <sys/malloc.h>
44#include <sys/proc.h>
56c703bd 45#include <sys/jail.h>
984263bc
MD
46#include <sys/filedesc.h>
47#include <sys/tty.h>
8c72e3d5 48#include <sys/dsched.h>
984263bc 49#include <sys/signalvar.h>
5bf0d9b5 50#include <sys/spinlock.h>
984263bc
MD
51#include <vm/vm.h>
52#include <sys/lock.h>
53#include <vm/pmap.h>
54#include <vm/vm_map.h>
55#include <sys/user.h>
630ccdeb 56#include <machine/smp.h>
984263bc 57
58c2553a 58#include <sys/refcount.h>
5bf0d9b5 59#include <sys/spinlock2.h>
684a93c4 60#include <sys/mplock2.h>
5bf0d9b5 61
984263bc
MD
62static MALLOC_DEFINE(M_PGRP, "pgrp", "process group header");
63MALLOC_DEFINE(M_SESSION, "session", "session header");
fb2a331e 64MALLOC_DEFINE(M_PROC, "proc", "Proc structures");
f6c36234 65MALLOC_DEFINE(M_LWP, "lwp", "lwp structures");
984263bc
MD
66MALLOC_DEFINE(M_SUBPROC, "subproc", "Proc sub-structures");
67
28c57d20 68int ps_showallprocs = 1;
73e4f7b9 69static int ps_showallthreads = 1;
43a0f7ae
MS
70SYSCTL_INT(_security, OID_AUTO, ps_showallprocs, CTLFLAG_RW,
71 &ps_showallprocs, 0,
72 "Unprivileged processes can see proccesses with different UID/GID");
73SYSCTL_INT(_security, OID_AUTO, ps_showallthreads, CTLFLAG_RW,
74 &ps_showallthreads, 0,
75 "Unprivileged processes can see kernel threads");
984263bc 76
51e64ff2
MD
77static void pgdelete(struct pgrp *);
78static void orphanpg(struct pgrp *pg);
79static pid_t proc_getnewpid_locked(int random_offset);
984263bc
MD
80
81/*
82 * Other process lists
83 */
84struct pidhashhead *pidhashtbl;
85u_long pidhash;
86struct pgrphashhead *pgrphashtbl;
87u_long pgrphash;
88struct proclist allproc;
89struct proclist zombproc;
984263bc
MD
90
91/*
51e64ff2
MD
92 * Random component to nextpid generation. We mix in a random factor to make
93 * it a little harder to predict. We sanity check the modulus value to avoid
94 * doing it in critical paths. Don't let it be too small or we pointlessly
95 * waste randomness entropy, and don't let it be impossibly large. Using a
96 * modulus that is too big causes a LOT more process table scans and slows
97 * down fork processing as the pidchecked caching is defeated.
98 */
99static int randompid = 0;
100
99ad9bc4
MD
101/*
102 * No requirements.
103 */
51e64ff2
MD
104static int
105sysctl_kern_randompid(SYSCTL_HANDLER_ARGS)
106{
107 int error, pid;
108
109 pid = randompid;
110 error = sysctl_handle_int(oidp, &pid, 0, req);
111 if (error || !req->newptr)
112 return (error);
113 if (pid < 0 || pid > PID_MAX - 100) /* out of range */
114 pid = PID_MAX - 100;
115 else if (pid < 2) /* NOP */
116 pid = 0;
117 else if (pid < 100) /* Make it reasonable */
118 pid = 100;
119 randompid = pid;
120 return (error);
121}
122
123SYSCTL_PROC(_kern, OID_AUTO, randompid, CTLTYPE_INT|CTLFLAG_RW,
124 0, 0, sysctl_kern_randompid, "I", "Random PID modulus");
125
126/*
984263bc 127 * Initialize global process hashing structures.
99ad9bc4
MD
128 *
129 * Called from the low level boot code only.
984263bc
MD
130 */
131void
77153250 132procinit(void)
984263bc 133{
984263bc
MD
134 LIST_INIT(&allproc);
135 LIST_INIT(&zombproc);
40aaf5fc 136 lwkt_init();
984263bc
MD
137 pidhashtbl = hashinit(maxproc / 4, M_PROC, &pidhash);
138 pgrphashtbl = hashinit(maxproc / 4, M_PROC, &pgrphash);
984263bc
MD
139 uihashinit();
140}
141
142/*
82354ad8
MD
143 * Process hold/release support functions. These functions must be MPSAFE.
144 * Called via the PHOLD(), PRELE(), and PSTALL() macros.
145 *
146 * p->p_lock is a simple hold count with a waiting interlock. No wakeup()
147 * is issued unless someone is actually waiting for the process.
148 *
149 * Most holds are short-term, allowing a process scan or other similar
150 * operation to access a proc structure without it getting ripped out from
151 * under us. procfs and process-list sysctl ops also use the hold function
152 * interlocked with various p_flags to keep the vmspace intact when reading
153 * or writing a user process's address space.
154 *
155 * There are two situations where a hold count can be longer. Exiting lwps
156 * hold the process until the lwp is reaped, and the parent will hold the
157 * child during vfork()/exec() sequences while the child is marked P_PPWAIT.
158 *
159 * The kernel waits for the hold count to drop to 0 (or 1 in some cases) at
160 * various critical points in the fork/exec and exit paths before proceeding.
161 */
1a3fa08b 162#define PLOCK_ZOMB 0x20000000
82354ad8 163#define PLOCK_WAITING 0x40000000
1a3fa08b 164#define PLOCK_MASK 0x1FFFFFFF
82354ad8
MD
165
166void
167pstall(struct proc *p, const char *wmesg, int count)
168{
169 int o;
170 int n;
171
172 for (;;) {
173 o = p->p_lock;
174 cpu_ccfence();
175 if ((o & PLOCK_MASK) <= count)
176 break;
177 n = o | PLOCK_WAITING;
178 tsleep_interlock(&p->p_lock, 0);
7c37ea07
MD
179
180 /*
2fe8ae78
MD
181 * If someone is trying to single-step the process during
182 * an exec or an exit they can deadlock us because procfs
183 * sleeps with the process held.
7c37ea07 184 */
2fe8ae78
MD
185 if (p->p_stops) {
186 if (p->p_flags & P_INEXEC) {
187 wakeup(&p->p_stype);
188 } else if (p->p_flags & P_POSTEXIT) {
189 spin_lock(&p->p_spin);
190 p->p_stops = 0;
191 p->p_step = 0;
192 spin_unlock(&p->p_spin);
193 wakeup(&p->p_stype);
194 }
7c37ea07
MD
195 }
196
82354ad8
MD
197 if (atomic_cmpset_int(&p->p_lock, o, n)) {
198 tsleep(&p->p_lock, PINTERLOCKED, wmesg, 0);
199 }
200 }
201}
202
203void
204phold(struct proc *p)
205{
1a3fa08b
MD
206 atomic_add_int(&p->p_lock, 1);
207}
208
209/*
210 * WARNING! On last release (p) can become instantly invalid due to
211 * MP races.
212 */
213void
214prele(struct proc *p)
215{
82354ad8
MD
216 int o;
217 int n;
218
1a3fa08b
MD
219 /*
220 * Fast path
221 */
222 if (atomic_cmpset_int(&p->p_lock, 1, 0))
223 return;
224
225 /*
226 * Slow path
227 */
82354ad8
MD
228 for (;;) {
229 o = p->p_lock;
1a3fa08b 230 KKASSERT((o & PLOCK_MASK) > 0);
82354ad8 231 cpu_ccfence();
1a3fa08b
MD
232 n = (o - 1) & ~PLOCK_WAITING;
233 if (atomic_cmpset_int(&p->p_lock, o, n)) {
234 if (o & PLOCK_WAITING)
235 wakeup(&p->p_lock);
82354ad8 236 break;
1a3fa08b 237 }
82354ad8
MD
238 }
239}
240
1a3fa08b
MD
241/*
 242 * Hold the process and acquire the zombie-reap serialization flag.
243 *
244 * This function will fail if it has to block, returning non-zero with
 245 * neither the flag set nor the hold count bumped. Note that we must block
246 * without holding a ref, meaning that the caller must ensure that (p)
247 * remains valid through some other interlock (typically on its parent
248 * process's p_token).
249 *
250 * Zero is returned on success. The hold count will be incremented and
251 * the serialization flag acquired. Note that serialization is only against
252 * other pholdzomb() calls, not against phold() calls.
253 */
254int
255pholdzomb(struct proc *p)
256{
257 int o;
258 int n;
259
260 /*
261 * Fast path
262 */
263 if (atomic_cmpset_int(&p->p_lock, 0, PLOCK_ZOMB | 1))
264 return(0);
265
266 /*
267 * Slow path
268 */
269 for (;;) {
270 o = p->p_lock;
271 cpu_ccfence();
272 if ((o & PLOCK_ZOMB) == 0) {
273 n = (o + 1) | PLOCK_ZOMB;
274 if (atomic_cmpset_int(&p->p_lock, o, n))
275 return(0);
276 } else {
277 KKASSERT((o & PLOCK_MASK) > 0);
278 n = o | PLOCK_WAITING;
279 tsleep_interlock(&p->p_lock, 0);
280 if (atomic_cmpset_int(&p->p_lock, o, n)) {
281 tsleep(&p->p_lock, PINTERLOCKED, "phldz", 0);
282 /* (p) can be ripped out at this point */
283 return(1);
284 }
285 }
286 }
287}
288
289/*
290 * Release PLOCK_ZOMB and the hold count, waking up any waiters.
291 *
292 * WARNING! On last release (p) can become instantly invalid due to
293 * MP races.
294 */
82354ad8 295void
1a3fa08b 296prelezomb(struct proc *p)
82354ad8
MD
297{
298 int o;
299 int n;
300
301 /*
302 * Fast path
303 */
1a3fa08b 304 if (atomic_cmpset_int(&p->p_lock, PLOCK_ZOMB | 1, 0))
82354ad8
MD
305 return;
306
307 /*
308 * Slow path
309 */
1a3fa08b 310 KKASSERT(p->p_lock & PLOCK_ZOMB);
82354ad8
MD
311 for (;;) {
312 o = p->p_lock;
313 KKASSERT((o & PLOCK_MASK) > 0);
314 cpu_ccfence();
1a3fa08b 315 n = (o - 1) & ~(PLOCK_ZOMB | PLOCK_WAITING);
82354ad8
MD
316 if (atomic_cmpset_int(&p->p_lock, o, n)) {
317 if (o & PLOCK_WAITING)
318 wakeup(&p->p_lock);
319 break;
320 }
321 }
322}
323
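/*
 * Illustrative sketch of the zombie-reap serialization contract described
 * above, as a wait()-style reaper might use it.  The function name is
 * hypothetical and the body is only a usage outline; the caller is assumed
 * to hold the parent's p_token so (q) stays valid if pholdzomb() blocks.
 */
#if 0
static int
example_try_reap(struct proc *q)
{
	if (pholdzomb(q))		/* blocked and lost the race */
		return (1);		/* caller should rescan its children */
	/* ... exclusive zombie handling (status harvest, etc) ... */
	prelezomb(q);			/* drop PLOCK_ZOMB and the hold */
	return (0);
}
#endif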
324/*
984263bc 325 * Is p an inferior of the current process?
99ad9bc4
MD
326 *
327 * No requirements.
328 * The caller must hold proc_token if the caller wishes a stable result.
984263bc
MD
329 */
330int
77153250 331inferior(struct proc *p)
984263bc 332{
99ad9bc4
MD
333 lwkt_gettoken(&proc_token);
334 while (p != curproc) {
335 if (p->p_pid == 0) {
336 lwkt_reltoken(&proc_token);
984263bc 337 return (0);
99ad9bc4
MD
338 }
339 p = p->p_pptr;
340 }
341 lwkt_reltoken(&proc_token);
984263bc
MD
342 return (1);
343}
344
345/*
58c2553a
MD
346 * Locate a process by number. The returned process will be referenced and
347 * must be released with PRELE().
99ad9bc4
MD
348 *
349 * No requirements.
984263bc
MD
350 */
351struct proc *
77153250 352pfind(pid_t pid)
984263bc 353{
1fd87d54 354 struct proc *p;
984263bc 355
99ad9bc4 356 lwkt_gettoken(&proc_token);
5bf0d9b5 357 LIST_FOREACH(p, PIDHASH(pid), p_hash) {
99ad9bc4 358 if (p->p_pid == pid) {
58c2553a 359 PHOLD(p);
99ad9bc4 360 lwkt_reltoken(&proc_token);
984263bc 361 return (p);
99ad9bc4 362 }
5bf0d9b5 363 }
99ad9bc4 364 lwkt_reltoken(&proc_token);
984263bc
MD
365 return (NULL);
366}
367
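/*
 * Illustrative sketch of the pfind() contract documented above: the
 * returned process is held, so the caller pairs it with PRELE().
 * Hypothetical function name, shown only as a usage example.
 */
#if 0
static int
example_pid_exists(pid_t pid)
{
	struct proc *p;

	if ((p = pfind(pid)) == NULL)	/* returns a held proc or NULL */
		return (0);
	PRELE(p);			/* drop the reference from pfind() */
	return (1);
}
#endif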
368/*
58c2553a
MD
369 * Locate a process by number. The returned process is NOT referenced.
370 * The caller should hold proc_token if the caller wishes a stable result.
371 *
372 * No requirements.
373 */
374struct proc *
375pfindn(pid_t pid)
376{
377 struct proc *p;
378
379 lwkt_gettoken(&proc_token);
380 LIST_FOREACH(p, PIDHASH(pid), p_hash) {
381 if (p->p_pid == pid) {
382 lwkt_reltoken(&proc_token);
383 return (p);
384 }
385 }
386 lwkt_reltoken(&proc_token);
387 return (NULL);
388}
389
390void
391pgref(struct pgrp *pgrp)
392{
393 refcount_acquire(&pgrp->pg_refs);
394}
395
396void
397pgrel(struct pgrp *pgrp)
398{
399 if (refcount_release(&pgrp->pg_refs))
400 pgdelete(pgrp);
401}
402
403/*
404 * Locate a process group by number. The returned process group will be
405 * referenced w/pgref() and must be released with pgrel() (or assigned
406 * somewhere if you wish to keep the reference).
99ad9bc4
MD
407 *
408 * No requirements.
984263bc
MD
409 */
410struct pgrp *
77153250 411pgfind(pid_t pgid)
984263bc 412{
1fd87d54 413 struct pgrp *pgrp;
984263bc 414
99ad9bc4 415 lwkt_gettoken(&proc_token);
5bf0d9b5 416 LIST_FOREACH(pgrp, PGRPHASH(pgid), pg_hash) {
46270ec6 417 if (pgrp->pg_id == pgid) {
58c2553a 418 refcount_acquire(&pgrp->pg_refs);
46270ec6 419 lwkt_reltoken(&proc_token);
984263bc 420 return (pgrp);
46270ec6 421 }
5bf0d9b5 422 }
99ad9bc4 423 lwkt_reltoken(&proc_token);
984263bc
MD
424 return (NULL);
425}
426
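/*
 * Illustrative sketch of the pgfind()/pgrel() contract above.  The process
 * group is returned referenced, and pg_members is assumed stable while
 * pg_token is held, as in fixjobc() below.  Hypothetical function name.
 */
#if 0
static int
example_count_pgrp_members(pid_t pgid)
{
	struct pgrp *pgrp;
	struct proc *p;
	int count = 0;

	if ((pgrp = pgfind(pgid)) == NULL)	/* referenced pgrp or NULL */
		return (0);
	lwkt_gettoken(&pgrp->pg_token);
	LIST_FOREACH(p, &pgrp->pg_members, p_pglist)
		++count;
	lwkt_reltoken(&pgrp->pg_token);
	pgrel(pgrp);				/* drop the pgfind() reference */
	return (count);
}
#endif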
427/*
428 * Move p to a new or existing process group (and session)
99ad9bc4
MD
429 *
430 * No requirements.
984263bc
MD
431 */
432int
77153250 433enterpgrp(struct proc *p, pid_t pgid, int mksess)
984263bc 434{
99ad9bc4 435 struct pgrp *pgrp;
58c2553a 436 struct pgrp *opgrp;
99ad9bc4
MD
437 int error;
438
99ad9bc4 439 pgrp = pgfind(pgid);
984263bc
MD
440
441 KASSERT(pgrp == NULL || !mksess,
99ad9bc4 442 ("enterpgrp: setsid into non-empty pgrp"));
984263bc 443 KASSERT(!SESS_LEADER(p),
99ad9bc4 444 ("enterpgrp: session leader attempted setpgrp"));
984263bc
MD
445
446 if (pgrp == NULL) {
447 pid_t savepid = p->p_pid;
448 struct proc *np;
449 /*
450 * new process group
451 */
452 KASSERT(p->p_pid == pgid,
99ad9bc4 453 ("enterpgrp: new pgrp and pid != pgid"));
58c2553a 454 if ((np = pfindn(savepid)) == NULL || np != p) {
99ad9bc4
MD
455 error = ESRCH;
456 goto fatal;
457 }
884717e1 458 pgrp = kmalloc(sizeof(struct pgrp), M_PGRP, M_WAITOK);
984263bc 459 if (mksess) {
1fd87d54 460 struct session *sess;
984263bc
MD
461
462 /*
463 * new session
464 */
884717e1
SW
465 sess = kmalloc(sizeof(struct session), M_SESSION,
466 M_WAITOK);
984263bc
MD
467 sess->s_leader = p;
468 sess->s_sid = p->p_pid;
469 sess->s_count = 1;
470 sess->s_ttyvp = NULL;
471 sess->s_ttyp = NULL;
472 bcopy(p->p_session->s_login, sess->s_login,
99ad9bc4 473 sizeof(sess->s_login));
984263bc
MD
474 pgrp->pg_session = sess;
475 KASSERT(p == curproc,
99ad9bc4 476 ("enterpgrp: mksession and p != curproc"));
616516c8 477 lwkt_gettoken(&p->p_token);
4643740a 478 p->p_flags &= ~P_CONTROLT;
616516c8 479 lwkt_reltoken(&p->p_token);
984263bc
MD
480 } else {
481 pgrp->pg_session = p->p_session;
8b90699b 482 sess_hold(pgrp->pg_session);
984263bc
MD
483 }
484 pgrp->pg_id = pgid;
485 LIST_INIT(&pgrp->pg_members);
486 LIST_INSERT_HEAD(PGRPHASH(pgid), pgrp, pg_hash);
487 pgrp->pg_jobc = 0;
488 SLIST_INIT(&pgrp->pg_sigiolst);
58c2553a
MD
489 lwkt_token_init(&pgrp->pg_token, "pgrp_token");
490 refcount_init(&pgrp->pg_refs, 1);
167e6ecb 491 lockinit(&pgrp->pg_lock, "pgwt", 0, 0);
99ad9bc4 492 } else if (pgrp == p->p_pgrp) {
58c2553a 493 pgrel(pgrp);
99ad9bc4 494 goto done;
58c2553a 495 } /* else pgfind() referenced the pgrp */
984263bc
MD
496
497 /*
498 * Adjust eligibility of affected pgrps to participate in job control.
499 * Increment eligibility counts before decrementing, otherwise we
500 * could reach 0 spuriously during the first call.
501 */
58c2553a
MD
502 lwkt_gettoken(&pgrp->pg_token);
503 lwkt_gettoken(&p->p_token);
984263bc
MD
504 fixjobc(p, pgrp, 1);
505 fixjobc(p, p->p_pgrp, 0);
58c2553a
MD
506 while ((opgrp = p->p_pgrp) != NULL) {
507 opgrp = p->p_pgrp;
508 lwkt_gettoken(&opgrp->pg_token);
509 LIST_REMOVE(p, p_pglist);
510 p->p_pgrp = NULL;
511 lwkt_reltoken(&opgrp->pg_token);
512 pgrel(opgrp);
513 }
984263bc
MD
514 p->p_pgrp = pgrp;
515 LIST_INSERT_HEAD(&pgrp->pg_members, p, p_pglist);
58c2553a
MD
516 lwkt_reltoken(&p->p_token);
517 lwkt_reltoken(&pgrp->pg_token);
99ad9bc4
MD
518done:
519 error = 0;
520fatal:
99ad9bc4 521 return (error);
984263bc
MD
522}
523
524/*
99ad9bc4
MD
525 * Remove process from process group
526 *
527 * No requirements.
984263bc
MD
528 */
529int
77153250 530leavepgrp(struct proc *p)
984263bc 531{
58c2553a
MD
532 struct pgrp *pg = p->p_pgrp;
533
534 lwkt_gettoken(&p->p_token);
535 pg = p->p_pgrp;
536 if (pg) {
537 pgref(pg);
538 lwkt_gettoken(&pg->pg_token);
539 if (p->p_pgrp == pg) {
540 p->p_pgrp = NULL;
541 LIST_REMOVE(p, p_pglist);
542 pgrel(pg);
543 }
544 lwkt_reltoken(&pg->pg_token);
545 lwkt_reltoken(&p->p_token); /* avoid chaining on rel */
546 pgrel(pg);
547 } else {
548 lwkt_reltoken(&p->p_token);
549 }
984263bc
MD
550 return (0);
551}
552
553/*
58c2553a
MD
554 * Delete a process group. Must be called only after the last ref has been
555 * released.
984263bc
MD
556 */
557static void
77153250 558pgdelete(struct pgrp *pgrp)
984263bc 559{
984263bc
MD
560 /*
561 * Reset any sigio structures pointing to us as a result of
562 * F_SETOWN with our pgid.
563 */
564 funsetownlst(&pgrp->pg_sigiolst);
565
566 if (pgrp->pg_session->s_ttyp != NULL &&
567 pgrp->pg_session->s_ttyp->t_pgrp == pgrp)
568 pgrp->pg_session->s_ttyp->t_pgrp = NULL;
569 LIST_REMOVE(pgrp, pg_hash);
8b90699b 570 sess_rele(pgrp->pg_session);
efda3bd0 571 kfree(pgrp, M_PGRP);
8b90699b
MD
572}
573
574/*
575 * Adjust the ref count on a session structure. When the ref count falls to
576 * zero the tty is disassociated from the session and the session structure
 577 * is freed. Note that tty association is not itself ref-counted.
99ad9bc4
MD
578 *
579 * No requirements.
8b90699b
MD
580 */
581void
582sess_hold(struct session *sp)
583{
99ad9bc4 584 lwkt_gettoken(&tty_token);
8b90699b 585 ++sp->s_count;
99ad9bc4 586 lwkt_reltoken(&tty_token);
8b90699b
MD
587}
588
99ad9bc4
MD
589/*
590 * No requirements.
591 */
8b90699b
MD
592void
593sess_rele(struct session *sp)
594{
94a6eea8
MD
595 struct tty *tp;
596
8b90699b 597 KKASSERT(sp->s_count > 0);
99ad9bc4 598 lwkt_gettoken(&tty_token);
8b90699b
MD
599 if (--sp->s_count == 0) {
600 if (sp->s_ttyp && sp->s_ttyp->t_session) {
601#ifdef TTY_DO_FULL_CLOSE
602 /* FULL CLOSE, see ttyclearsession() */
603 KKASSERT(sp->s_ttyp->t_session == sp);
604 sp->s_ttyp->t_session = NULL;
605#else
606 /* HALF CLOSE, see ttyclearsession() */
607 if (sp->s_ttyp->t_session == sp)
608 sp->s_ttyp->t_session = NULL;
609#endif
610 }
94a6eea8
MD
611 if ((tp = sp->s_ttyp) != NULL) {
612 sp->s_ttyp = NULL;
613 ttyunhold(tp);
614 }
efda3bd0 615 kfree(sp, M_SESSION);
8b90699b 616 }
99ad9bc4 617 lwkt_reltoken(&tty_token);
984263bc
MD
618}
619
620/*
621 * Adjust pgrp jobc counters when specified process changes process group.
622 * We count the number of processes in each process group that "qualify"
623 * the group for terminal job control (those with a parent in a different
624 * process group of the same session). If that count reaches zero, the
625 * process group becomes orphaned. Check both the specified process'
626 * process group and that of its children.
627 * entering == 0 => p is leaving specified group.
628 * entering == 1 => p is entering specified group.
99ad9bc4
MD
629 *
630 * No requirements.
984263bc
MD
631 */
632void
77153250 633fixjobc(struct proc *p, struct pgrp *pgrp, int entering)
984263bc 634{
1fd87d54 635 struct pgrp *hispgrp;
99ad9bc4 636 struct session *mysession;
b5c4d81f 637 struct proc *np;
984263bc
MD
638
639 /*
640 * Check p's parent to see whether p qualifies its own process
641 * group; if so, adjust count for p's process group.
642 */
b5c4d81f 643 lwkt_gettoken(&p->p_token); /* p_children scan */
58c2553a 644 lwkt_gettoken(&pgrp->pg_token);
b5c4d81f 645
99ad9bc4 646 mysession = pgrp->pg_session;
984263bc
MD
647 if ((hispgrp = p->p_pptr->p_pgrp) != pgrp &&
648 hispgrp->pg_session == mysession) {
649 if (entering)
650 pgrp->pg_jobc++;
651 else if (--pgrp->pg_jobc == 0)
652 orphanpg(pgrp);
653 }
654
655 /*
656 * Check this process' children to see whether they qualify
657 * their process groups; if so, adjust counts for children's
658 * process groups.
659 */
b5c4d81f 660 LIST_FOREACH(np, &p->p_children, p_sibling) {
58c2553a
MD
661 PHOLD(np);
662 lwkt_gettoken(&np->p_token);
b5c4d81f 663 if ((hispgrp = np->p_pgrp) != pgrp &&
984263bc 664 hispgrp->pg_session == mysession &&
b5c4d81f 665 np->p_stat != SZOMB) {
58c2553a
MD
666 pgref(hispgrp);
667 lwkt_gettoken(&hispgrp->pg_token);
984263bc
MD
668 if (entering)
669 hispgrp->pg_jobc++;
670 else if (--hispgrp->pg_jobc == 0)
671 orphanpg(hispgrp);
58c2553a
MD
672 lwkt_reltoken(&hispgrp->pg_token);
673 pgrel(hispgrp);
984263bc 674 }
58c2553a
MD
675 lwkt_reltoken(&np->p_token);
676 PRELE(np);
99ad9bc4 677 }
58c2553a
MD
678 KKASSERT(pgrp->pg_refs > 0);
679 lwkt_reltoken(&pgrp->pg_token);
b5c4d81f 680 lwkt_reltoken(&p->p_token);
984263bc
MD
681}
682
683/*
684 * A process group has become orphaned;
685 * if there are any stopped processes in the group,
 686 * hang up all processes in that group.
99ad9bc4 687 *
58c2553a 688 * The caller must hold pg_token.
984263bc
MD
689 */
690static void
77153250 691orphanpg(struct pgrp *pg)
984263bc 692{
1fd87d54 693 struct proc *p;
984263bc
MD
694
695 LIST_FOREACH(p, &pg->pg_members, p_pglist) {
164b8401 696 if (p->p_stat == SSTOP) {
984263bc 697 LIST_FOREACH(p, &pg->pg_members, p_pglist) {
84204577
MD
698 ksignal(p, SIGHUP);
699 ksignal(p, SIGCONT);
984263bc
MD
700 }
701 return;
702 }
703 }
704}
705
5bf0d9b5 706/*
51e64ff2
MD
707 * Add a new process to the allproc list and the PID hash. This
708 * also assigns a pid to the new process.
709 *
99ad9bc4 710 * No requirements.
51e64ff2
MD
711 */
712void
713proc_add_allproc(struct proc *p)
714{
715 int random_offset;
716
717 if ((random_offset = randompid) != 0) {
718 get_mplock();
0ced1954 719 random_offset = karc4random() % random_offset;
51e64ff2
MD
720 rel_mplock();
721 }
722
99ad9bc4 723 lwkt_gettoken(&proc_token);
51e64ff2
MD
724 p->p_pid = proc_getnewpid_locked(random_offset);
725 LIST_INSERT_HEAD(&allproc, p, p_list);
726 LIST_INSERT_HEAD(PIDHASH(p->p_pid), p, p_hash);
99ad9bc4 727 lwkt_reltoken(&proc_token);
51e64ff2
MD
728}
729
730/*
731 * Calculate a new process pid. This function is integrated into
 732 * proc_add_allproc() to guarantee that the new pid is not reused before
733 * the new process can be added to the allproc list.
734 *
99ad9bc4 735 * The caller must hold proc_token.
51e64ff2
MD
736 */
737static
738pid_t
739proc_getnewpid_locked(int random_offset)
740{
741 static pid_t nextpid;
742 static pid_t pidchecked;
743 struct proc *p;
744
745 /*
746 * Find an unused process ID. We remember a range of unused IDs
747 * ready to use (from nextpid+1 through pidchecked-1).
748 */
749 nextpid = nextpid + 1 + random_offset;
750retry:
751 /*
752 * If the process ID prototype has wrapped around,
753 * restart somewhat above 0, as the low-numbered procs
754 * tend to include daemons that don't exit.
755 */
756 if (nextpid >= PID_MAX) {
757 nextpid = nextpid % PID_MAX;
758 if (nextpid < 100)
759 nextpid += 100;
760 pidchecked = 0;
761 }
762 if (nextpid >= pidchecked) {
763 int doingzomb = 0;
764
765 pidchecked = PID_MAX;
0d78b86e 766
51e64ff2
MD
767 /*
768 * Scan the active and zombie procs to check whether this pid
769 * is in use. Remember the lowest pid that's greater
770 * than nextpid, so we can avoid checking for a while.
0d78b86e
MD
771 *
772 * NOTE: Processes in the midst of being forked may not
773 * yet have p_pgrp and p_pgrp->pg_session set up
774 * yet, so we have to check for NULL.
775 *
776 * Processes being torn down should be interlocked
777 * with proc_token prior to the clearing of their
778 * p_pgrp.
51e64ff2
MD
779 */
780 p = LIST_FIRST(&allproc);
781again:
0d78b86e 782 for (; p != NULL; p = LIST_NEXT(p, p_list)) {
51e64ff2 783 while (p->p_pid == nextpid ||
0d78b86e
MD
784 (p->p_pgrp && p->p_pgrp->pg_id == nextpid) ||
785 (p->p_pgrp && p->p_session &&
786 p->p_session->s_sid == nextpid)) {
51e64ff2
MD
787 nextpid++;
788 if (nextpid >= pidchecked)
789 goto retry;
790 }
791 if (p->p_pid > nextpid && pidchecked > p->p_pid)
792 pidchecked = p->p_pid;
0d78b86e
MD
793 if (p->p_pgrp &&
794 p->p_pgrp->pg_id > nextpid &&
795 pidchecked > p->p_pgrp->pg_id) {
51e64ff2 796 pidchecked = p->p_pgrp->pg_id;
0d78b86e
MD
797 }
798 if (p->p_pgrp && p->p_session &&
799 p->p_session->s_sid > nextpid &&
800 pidchecked > p->p_session->s_sid) {
51e64ff2 801 pidchecked = p->p_session->s_sid;
0d78b86e 802 }
51e64ff2
MD
803 }
804 if (!doingzomb) {
805 doingzomb = 1;
806 p = LIST_FIRST(&zombproc);
807 goto again;
808 }
809 }
810 return(nextpid);
811}
812
813/*
5bf0d9b5
MD
814 * Called from exit1 to remove a process from the allproc
815 * list and move it to the zombie list.
816 *
eb2adbf5
MD
817 * Caller must hold p->p_token. We are required to wait until p_lock
818 * becomes zero before we can manipulate the list, allowing allproc
819 * scans to guarantee consistency during a list scan.
5bf0d9b5
MD
820 */
821void
822proc_move_allproc_zombie(struct proc *p)
823{
99ad9bc4 824 lwkt_gettoken(&proc_token);
82354ad8 825 PSTALL(p, "reap1", 0);
5bf0d9b5
MD
826 LIST_REMOVE(p, p_list);
827 LIST_INSERT_HEAD(&zombproc, p, p_list);
828 LIST_REMOVE(p, p_hash);
416d05d7 829 p->p_stat = SZOMB;
99ad9bc4 830 lwkt_reltoken(&proc_token);
8c72e3d5 831 dsched_exit_proc(p);
5bf0d9b5
MD
832}
833
834/*
835 * This routine is called from kern_wait() and will remove the process
836 * from the zombie list and the sibling list. This routine will block
 837 * if someone has a lock on the process (p_lock).
838 *
eb2adbf5
MD
839 * Caller must hold p->p_token. We are required to wait until p_lock
840 * becomes zero before we can manipulate the list, allowing allproc
841 * scans to guarantee consistency during a list scan.
5bf0d9b5
MD
842 */
843void
844proc_remove_zombie(struct proc *p)
845{
99ad9bc4 846 lwkt_gettoken(&proc_token);
82354ad8 847 PSTALL(p, "reap2", 0);
5bf0d9b5
MD
848 LIST_REMOVE(p, p_list); /* off zombproc */
849 LIST_REMOVE(p, p_sibling);
1a3fa08b 850 p->p_pptr = NULL;
99ad9bc4 851 lwkt_reltoken(&proc_token);
5bf0d9b5
MD
852}
853
854/*
855 * Scan all processes on the allproc list. The process is automatically
856 * held for the callback. A return value of -1 terminates the loop.
857 *
99ad9bc4 858 * The callback is made with the process held and proc_token held.
c5d9d575
MD
859 *
 860 * We limit the scan to the number of processes as of the start of
861 * the scan so as not to get caught up in an endless loop if new processes
862 * are created more quickly than we can scan the old ones. Add a little
863 * slop to try to catch edge cases since nprocs can race.
864 *
865 * No requirements.
5bf0d9b5
MD
866 */
867void
868allproc_scan(int (*callback)(struct proc *, void *), void *data)
869{
870 struct proc *p;
871 int r;
c5d9d575 872 int limit = nprocs + ncpus;
5bf0d9b5 873
eb2adbf5
MD
874 /*
875 * proc_token protects the allproc list and PHOLD() prevents the
876 * process from being removed from the allproc list or the zombproc
877 * list.
878 */
99ad9bc4 879 lwkt_gettoken(&proc_token);
5bf0d9b5
MD
880 LIST_FOREACH(p, &allproc, p_list) {
881 PHOLD(p);
5bf0d9b5 882 r = callback(p, data);
5bf0d9b5
MD
883 PRELE(p);
884 if (r < 0)
885 break;
c5d9d575
MD
886 if (--limit < 0)
887 break;
5bf0d9b5 888 }
99ad9bc4 889 lwkt_reltoken(&proc_token);
5bf0d9b5
MD
890}
891
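/*
 * Illustrative sketch of an allproc_scan() consumer.  Per the comment
 * above, the callback runs with the process held and proc_token held,
 * and a return value of -1 stops the scan.  Hypothetical names.
 */
#if 0
static int
example_count_stopped_cb(struct proc *p, void *data)
{
	int *countp = data;

	if (p->p_stat == SSTOP)
		++*countp;
	return (0);			/* continue the scan */
}

static int
example_count_stopped(void)
{
	int count = 0;

	allproc_scan(example_count_stopped_cb, &count);
	return (count);
}
#endif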
892/*
c7e98b2f
SS
893 * Scan all lwps of processes on the allproc list. The lwp is automatically
894 * held for the callback. A return value of -1 terminates the loop.
895 *
99ad9bc4 896 * The callback is made with the proces and lwp both held, and proc_token held.
eb2adbf5
MD
897 *
898 * No requirements.
c7e98b2f
SS
899 */
900void
901alllwp_scan(int (*callback)(struct lwp *, void *), void *data)
902{
903 struct proc *p;
904 struct lwp *lp;
905 int r = 0;
906
eb2adbf5
MD
907 /*
908 * proc_token protects the allproc list and PHOLD() prevents the
909 * process from being removed from the allproc list or the zombproc
910 * list.
911 */
99ad9bc4 912 lwkt_gettoken(&proc_token);
c7e98b2f
SS
913 LIST_FOREACH(p, &allproc, p_list) {
914 PHOLD(p);
c7e98b2f
SS
915 FOREACH_LWP_IN_PROC(lp, p) {
916 LWPHOLD(lp);
917 r = callback(lp, data);
918 LWPRELE(lp);
919 }
c7e98b2f
SS
920 PRELE(p);
921 if (r < 0)
922 break;
923 }
99ad9bc4 924 lwkt_reltoken(&proc_token);
c7e98b2f
SS
925}
926
927/*
5bf0d9b5
MD
928 * Scan all processes on the zombproc list. The process is automatically
929 * held for the callback. A return value of -1 terminates the loop.
930 *
99ad9bc4
MD
931 * No requirements.
 932 * The callback is made with the process held and proc_token held.
5bf0d9b5
MD
933 */
934void
935zombproc_scan(int (*callback)(struct proc *, void *), void *data)
936{
937 struct proc *p;
938 int r;
939
99ad9bc4 940 lwkt_gettoken(&proc_token);
5bf0d9b5
MD
941 LIST_FOREACH(p, &zombproc, p_list) {
942 PHOLD(p);
5bf0d9b5 943 r = callback(p, data);
5bf0d9b5
MD
944 PRELE(p);
945 if (r < 0)
946 break;
947 }
99ad9bc4 948 lwkt_reltoken(&proc_token);
5bf0d9b5
MD
949}
950
984263bc
MD
951#include "opt_ddb.h"
952#ifdef DDB
953#include <ddb/ddb.h>
954
99ad9bc4
MD
955/*
956 * Debugging only
957 */
984263bc
MD
958DB_SHOW_COMMAND(pgrpdump, pgrpdump)
959{
1fd87d54
RG
960 struct pgrp *pgrp;
961 struct proc *p;
962 int i;
984263bc
MD
963
964 for (i = 0; i <= pgrphash; i++) {
965 if (!LIST_EMPTY(&pgrphashtbl[i])) {
6ea70f76 966 kprintf("\tindx %d\n", i);
984263bc 967 LIST_FOREACH(pgrp, &pgrphashtbl[i], pg_hash) {
6ea70f76 968 kprintf(
984263bc
MD
969 "\tpgrp %p, pgid %ld, sess %p, sesscnt %d, mem %p\n",
970 (void *)pgrp, (long)pgrp->pg_id,
971 (void *)pgrp->pg_session,
972 pgrp->pg_session->s_count,
973 (void *)LIST_FIRST(&pgrp->pg_members));
974 LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
6ea70f76 975 kprintf("\t\tpid %ld addr %p pgrp %p\n",
984263bc
MD
976 (long)p->p_pid, (void *)p,
977 (void *)p->p_pgrp);
978 }
979 }
980 }
981 }
982}
983#endif /* DDB */
984
5bf0d9b5 985/*
6f9db615 986 * Locate a process on the zombie list. Return a process or NULL.
58c2553a
MD
987 * The returned process will be referenced and the caller must release
988 * it with PRELE().
99ad9bc4 989 *
99ad9bc4 990 * No other requirements.
5bf0d9b5 991 */
984263bc
MD
992struct proc *
993zpfind(pid_t pid)
994{
995 struct proc *p;
996
99ad9bc4
MD
997 lwkt_gettoken(&proc_token);
998 LIST_FOREACH(p, &zombproc, p_list) {
46270ec6 999 if (p->p_pid == pid) {
58c2553a 1000 PHOLD(p);
46270ec6 1001 lwkt_reltoken(&proc_token);
984263bc 1002 return (p);
46270ec6 1003 }
99ad9bc4
MD
1004 }
1005 lwkt_reltoken(&proc_token);
984263bc
MD
1006 return (NULL);
1007}
1008
99ad9bc4
MD
1009/*
1010 * The caller must hold proc_token.
1011 */
984263bc 1012static int
5dfd06ac 1013sysctl_out_proc(struct proc *p, struct sysctl_req *req, int flags)
984263bc 1014{
5dfd06ac
SS
1015 struct kinfo_proc ki;
1016 struct lwp *lp;
fe14f34e 1017 int skp = 0, had_output = 0;
984263bc 1018 int error;
d9d6cb99 1019
fa2217dc 1020 bzero(&ki, sizeof(ki));
b0c15cdf 1021 lwkt_gettoken(&p->p_token);
5dfd06ac
SS
1022 fill_kinfo_proc(p, &ki);
1023 if ((flags & KERN_PROC_FLAG_LWP) == 0)
1024 skp = 1;
fa2217dc 1025 error = 0;
5dfd06ac 1026 FOREACH_LWP_IN_PROC(lp, p) {
fa2217dc 1027 LWPHOLD(lp);
5dfd06ac 1028 fill_kinfo_lwp(lp, &ki.kp_lwp);
5dfd06ac
SS
1029 had_output = 1;
1030 error = SYSCTL_OUT(req, &ki, sizeof(ki));
fa2217dc 1031 LWPRELE(lp);
5dfd06ac 1032 if (error)
fa2217dc 1033 break;
5dfd06ac
SS
1034 if (skp)
1035 break;
73e4f7b9 1036 }
b0c15cdf 1037 lwkt_reltoken(&p->p_token);
5dfd06ac 1038 /* We need to output at least the proc, even if there is no lwp. */
fa2217dc
MD
1039 if (had_output == 0) {
1040 error = SYSCTL_OUT(req, &ki, sizeof(ki));
1041 }
1042 return (error);
984263bc
MD
1043}
1044
99ad9bc4
MD
1045/*
1046 * The caller must hold proc_token.
1047 */
984263bc 1048static int
ef02d0e1
TS
1049sysctl_out_proc_kthread(struct thread *td, struct sysctl_req *req, int flags)
1050{
1051 struct kinfo_proc ki;
1052 int error;
1053
1054 fill_kinfo_proc_kthread(td, &ki);
1055 error = SYSCTL_OUT(req, &ki, sizeof(ki));
1056 if (error)
1057 return error;
1058 return(0);
1059}
1060
99ad9bc4
MD
1061/*
1062 * No requirements.
1063 */
ef02d0e1 1064static int
984263bc
MD
1065sysctl_kern_proc(SYSCTL_HANDLER_ARGS)
1066{
1067 int *name = (int*) arg1;
5dfd06ac 1068 int oid = oidp->oid_number;
984263bc 1069 u_int namelen = arg2;
7cd8fd20 1070 struct proc *p;
5dfd06ac 1071 struct proclist *plist;
ef02d0e1 1072 struct thread *td;
eb2adbf5 1073 struct thread *marker;
5dfd06ac 1074 int doingzomb, flags = 0;
984263bc 1075 int error = 0;
ef02d0e1 1076 int n;
630ccdeb 1077 int origcpu;
41c20dac 1078 struct ucred *cr1 = curproc->p_ucred;
984263bc 1079
5dfd06ac
SS
1080 flags = oid & KERN_PROC_FLAGMASK;
1081 oid &= ~KERN_PROC_FLAGMASK;
1082
1083 if ((oid == KERN_PROC_ALL && namelen != 0) ||
eb2adbf5 1084 (oid != KERN_PROC_ALL && namelen != 1)) {
5dfd06ac 1085 return (EINVAL);
eb2adbf5 1086 }
5dfd06ac 1087
eb2adbf5
MD
1088 /*
1089 * proc_token protects the allproc list and PHOLD() prevents the
1090 * process from being removed from the allproc list or the zombproc
1091 * list.
1092 */
99ad9bc4 1093 lwkt_gettoken(&proc_token);
5dfd06ac 1094 if (oid == KERN_PROC_PID) {
58c2553a 1095 p = pfindn((pid_t)name[0]);
99ad9bc4
MD
1096 if (p == NULL)
1097 goto post_threads;
41c20dac 1098 if (!PRISON_CHECK(cr1, p->p_ucred))
99ad9bc4 1099 goto post_threads;
e7093b07 1100 PHOLD(p);
5dfd06ac 1101 error = sysctl_out_proc(p, req, flags);
e7093b07 1102 PRELE(p);
99ad9bc4 1103 goto post_threads;
984263bc 1104 }
5dfd06ac 1105
984263bc
MD
1106 if (!req->oldptr) {
1107 /* overestimate by 5 procs */
1108 error = SYSCTL_OUT(req, 0, sizeof (struct kinfo_proc) * 5);
1109 if (error)
99ad9bc4 1110 goto post_threads;
984263bc 1111 }
5dfd06ac
SS
1112 for (doingzomb = 0; doingzomb <= 1; doingzomb++) {
1113 if (doingzomb)
1114 plist = &zombproc;
984263bc 1115 else
5dfd06ac 1116 plist = &allproc;
7cd8fd20 1117 LIST_FOREACH(p, plist, p_list) {
984263bc
MD
1118 /*
1119 * Show a user only their processes.
1120 */
41c20dac 1121 if ((!ps_showallprocs) && p_trespass(cr1, p->p_ucred))
984263bc
MD
1122 continue;
1123 /*
1124 * Skip embryonic processes.
1125 */
1126 if (p->p_stat == SIDL)
1127 continue;
1128 /*
1129 * TODO - make more efficient (see notes below).
1130 * do by session.
1131 */
5dfd06ac 1132 switch (oid) {
984263bc
MD
1133 case KERN_PROC_PGRP:
1134 /* could do this by traversing pgrp */
1135 if (p->p_pgrp == NULL ||
1136 p->p_pgrp->pg_id != (pid_t)name[0])
1137 continue;
1138 break;
1139
1140 case KERN_PROC_TTY:
4643740a 1141 if ((p->p_flags & P_CONTROLT) == 0 ||
984263bc
MD
1142 p->p_session == NULL ||
1143 p->p_session->s_ttyp == NULL ||
1144 dev2udev(p->p_session->s_ttyp->t_dev) !=
1145 (udev_t)name[0])
1146 continue;
1147 break;
1148
1149 case KERN_PROC_UID:
1150 if (p->p_ucred == NULL ||
1151 p->p_ucred->cr_uid != (uid_t)name[0])
1152 continue;
1153 break;
1154
1155 case KERN_PROC_RUID:
1156 if (p->p_ucred == NULL ||
41c20dac 1157 p->p_ucred->cr_ruid != (uid_t)name[0])
984263bc
MD
1158 continue;
1159 break;
1160 }
1161
41c20dac 1162 if (!PRISON_CHECK(cr1, p->p_ucred))
984263bc 1163 continue;
c008d3ad 1164 PHOLD(p);
5dfd06ac 1165 error = sysctl_out_proc(p, req, flags);
c008d3ad 1166 PRELE(p);
984263bc 1167 if (error)
99ad9bc4 1168 goto post_threads;
984263bc
MD
1169 }
1170 }
630ccdeb
MD
1171
1172 /*
1173 * Iterate over all active cpus and scan their thread list. Start
1174 * with the next logical cpu and end with our original cpu. We
1175 * migrate our own thread to each target cpu in order to safely scan
1176 * its thread list. In the last loop we migrate back to our original
1177 * cpu.
1178 */
1179 origcpu = mycpu->gd_cpuid;
56c703bd
JS
1180 if (!ps_showallthreads || jailed(cr1))
1181 goto post_threads;
99ad9bc4 1182
eb2adbf5 1183 marker = kmalloc(sizeof(struct thread), M_TEMP, M_WAITOK|M_ZERO);
42b33868 1184 marker->td_flags = TDF_MARKER;
eb2adbf5
MD
1185 error = 0;
1186
56c703bd 1187 for (n = 1; n <= ncpus; ++n) {
630ccdeb
MD
1188 globaldata_t rgd;
1189 int nid;
1190
1191 nid = (origcpu + n) % ncpus;
da23a592 1192 if ((smp_active_mask & CPUMASK(nid)) == 0)
630ccdeb
MD
1193 continue;
1194 rgd = globaldata_find(nid);
1195 lwkt_setcpu_self(rgd);
630ccdeb 1196
eb2adbf5
MD
1197 crit_enter();
1198 TAILQ_INSERT_TAIL(&rgd->gd_tdallq, marker, td_allq);
eb2adbf5
MD
1199
1200 while ((td = TAILQ_PREV(marker, lwkt_queue, td_allq)) != NULL) {
eb2adbf5
MD
1201 TAILQ_REMOVE(&rgd->gd_tdallq, marker, td_allq);
1202 TAILQ_INSERT_BEFORE(td, marker, td_allq);
42b33868 1203 if (td->td_flags & TDF_MARKER)
eb2adbf5 1204 continue;
42b33868 1205 if (td->td_proc)
eb2adbf5 1206 continue;
42b33868
MD
1207
1208 lwkt_hold(td);
1209 crit_exit();
eb2adbf5 1210
ef02d0e1 1211 switch (oid) {
73e4f7b9
MD
1212 case KERN_PROC_PGRP:
1213 case KERN_PROC_TTY:
1214 case KERN_PROC_UID:
1215 case KERN_PROC_RUID:
eb2adbf5 1216 break;
73e4f7b9 1217 default:
eb2adbf5
MD
1218 error = sysctl_out_proc_kthread(td, req,
1219 doingzomb);
73e4f7b9
MD
1220 break;
1221 }
73e4f7b9 1222 lwkt_rele(td);
42b33868 1223 crit_enter();
73e4f7b9 1224 if (error)
eb2adbf5 1225 break;
73e4f7b9 1226 }
eb2adbf5
MD
1227 TAILQ_REMOVE(&rgd->gd_tdallq, marker, td_allq);
1228 crit_exit();
1229
1230 if (error)
1231 break;
73e4f7b9 1232 }
eb2adbf5
MD
1233 kfree(marker, M_TEMP);
1234
56c703bd 1235post_threads:
99ad9bc4
MD
1236 lwkt_reltoken(&proc_token);
1237 return (error);
984263bc
MD
1238}
1239
1240/*
1241 * This sysctl allows a process to retrieve the argument list or process
1242 * title for another process without groping around in the address space
 1243 * of the other process. It also allows a process to set its own "process
 1244 * title" to a string of its own choice.
99ad9bc4
MD
1245 *
1246 * No requirements.
984263bc
MD
1247 */
1248static int
1249sysctl_kern_proc_args(SYSCTL_HANDLER_ARGS)
1250{
1251 int *name = (int*) arg1;
1252 u_int namelen = arg2;
1253 struct proc *p;
19bfc8ab 1254 struct pargs *opa;
984263bc
MD
1255 struct pargs *pa;
1256 int error = 0;
41c20dac 1257 struct ucred *cr1 = curproc->p_ucred;
984263bc
MD
1258
1259 if (namelen != 1)
1260 return (EINVAL);
1261
46fb7ae4 1262 p = pfind((pid_t)name[0]);
99ad9bc4 1263 if (p == NULL)
46fb7ae4 1264 goto done;
19bfc8ab 1265 lwkt_gettoken(&p->p_token);
984263bc 1266
41c20dac 1267 if ((!ps_argsopen) && p_trespass(cr1, p->p_ucred))
99ad9bc4 1268 goto done;
984263bc 1269
99ad9bc4
MD
1270 if (req->newptr && curproc != p) {
1271 error = EPERM;
1272 goto done;
1273 }
46fb7ae4
MD
1274 if (req->oldptr && (pa = p->p_args) != NULL) {
1275 refcount_acquire(&pa->ar_ref);
1276 error = SYSCTL_OUT(req, pa->ar_args, pa->ar_length);
1277 if (refcount_release(&pa->ar_ref))
1278 kfree(pa, M_PARGS);
99ad9bc4 1279 }
19bfc8ab 1280 if (req->newptr == NULL)
99ad9bc4 1281 goto done;
984263bc 1282
99ad9bc4 1283 if (req->newlen + sizeof(struct pargs) > ps_arg_cache_limit) {
99ad9bc4
MD
1284 goto done;
1285 }
984263bc 1286
19bfc8ab
MD
1287 pa = kmalloc(sizeof(struct pargs) + req->newlen, M_PARGS, M_WAITOK);
1288 refcount_init(&pa->ar_ref, 1);
984263bc
MD
1289 pa->ar_length = req->newlen;
1290 error = SYSCTL_IN(req, pa->ar_args, req->newlen);
19bfc8ab
MD
1291 if (error) {
1292 kfree(pa, M_PARGS);
1293 goto done;
1294 }
1295
c7506e9d
MD
1296
1297 /*
1298 * Replace p_args with the new pa. p_args may have previously
1299 * been NULL.
1300 */
19bfc8ab
MD
1301 opa = p->p_args;
1302 p->p_args = pa;
1303
c7506e9d
MD
1304 if (opa) {
1305 KKASSERT(opa->ar_ref > 0);
1306 if (refcount_release(&opa->ar_ref)) {
1307 kfree(opa, M_PARGS);
1308 /* opa = NULL; */
1309 }
19bfc8ab 1310 }
99ad9bc4 1311done:
46fb7ae4
MD
1312 if (p) {
1313 lwkt_reltoken(&p->p_token);
1314 PRELE(p);
1315 }
984263bc
MD
1316 return (error);
1317}
1318
a45611c5
AH
1319static int
1320sysctl_kern_proc_cwd(SYSCTL_HANDLER_ARGS)
1321{
1322 int *name = (int*) arg1;
1323 u_int namelen = arg2;
1324 struct proc *p;
1325 int error = 0;
1326 char *fullpath, *freepath;
1327 struct ucred *cr1 = curproc->p_ucred;
1328
1329 if (namelen != 1)
1330 return (EINVAL);
1331
46fb7ae4 1332 p = pfind((pid_t)name[0]);
a45611c5
AH
1333 if (p == NULL)
1334 goto done;
46fb7ae4 1335 lwkt_gettoken(&p->p_token);
a45611c5
AH
1336
1337 /*
1338 * If we are not allowed to see other args, we certainly shouldn't
1339 * get the cwd either. Also check the usual trespassing.
1340 */
1341 if ((!ps_argsopen) && p_trespass(cr1, p->p_ucred))
1342 goto done;
1343
46fb7ae4
MD
1344 if (req->oldptr && p->p_fd != NULL && p->p_fd->fd_ncdir.ncp) {
1345 struct nchandle nch;
1346
1347 cache_copy(&p->p_fd->fd_ncdir, &nch);
1348 error = cache_fullpath(p, &nch, &fullpath, &freepath, 0);
1349 cache_drop(&nch);
a45611c5
AH
1350 if (error)
1351 goto done;
1352 error = SYSCTL_OUT(req, fullpath, strlen(fullpath) + 1);
1353 kfree(freepath, M_TEMP);
1354 }
1355
a45611c5 1356done:
46fb7ae4
MD
1357 if (p) {
1358 lwkt_reltoken(&p->p_token);
1359 PRELE(p);
1360 }
a45611c5
AH
1361 return (error);
1362}
1363
984263bc
MD
1364SYSCTL_NODE(_kern, KERN_PROC, proc, CTLFLAG_RD, 0, "Process table");
1365
1366SYSCTL_PROC(_kern_proc, KERN_PROC_ALL, all, CTLFLAG_RD|CTLTYPE_STRUCT,
1367 0, 0, sysctl_kern_proc, "S,proc", "Return entire process table");
1368
1369SYSCTL_NODE(_kern_proc, KERN_PROC_PGRP, pgrp, CTLFLAG_RD,
1370 sysctl_kern_proc, "Process table");
1371
1372SYSCTL_NODE(_kern_proc, KERN_PROC_TTY, tty, CTLFLAG_RD,
1373 sysctl_kern_proc, "Process table");
1374
1375SYSCTL_NODE(_kern_proc, KERN_PROC_UID, uid, CTLFLAG_RD,
1376 sysctl_kern_proc, "Process table");
1377
1378SYSCTL_NODE(_kern_proc, KERN_PROC_RUID, ruid, CTLFLAG_RD,
1379 sysctl_kern_proc, "Process table");
1380
1381SYSCTL_NODE(_kern_proc, KERN_PROC_PID, pid, CTLFLAG_RD,
1382 sysctl_kern_proc, "Process table");
1383
5dfd06ac
SS
1384SYSCTL_NODE(_kern_proc, (KERN_PROC_ALL | KERN_PROC_FLAG_LWP), all_lwp, CTLFLAG_RD,
1385 sysctl_kern_proc, "Process table");
1386
1387SYSCTL_NODE(_kern_proc, (KERN_PROC_PGRP | KERN_PROC_FLAG_LWP), pgrp_lwp, CTLFLAG_RD,
1388 sysctl_kern_proc, "Process table");
1389
1390SYSCTL_NODE(_kern_proc, (KERN_PROC_TTY | KERN_PROC_FLAG_LWP), tty_lwp, CTLFLAG_RD,
1391 sysctl_kern_proc, "Process table");
1392
1393SYSCTL_NODE(_kern_proc, (KERN_PROC_UID | KERN_PROC_FLAG_LWP), uid_lwp, CTLFLAG_RD,
1394 sysctl_kern_proc, "Process table");
1395
1396SYSCTL_NODE(_kern_proc, (KERN_PROC_RUID | KERN_PROC_FLAG_LWP), ruid_lwp, CTLFLAG_RD,
1397 sysctl_kern_proc, "Process table");
1398
1399SYSCTL_NODE(_kern_proc, (KERN_PROC_PID | KERN_PROC_FLAG_LWP), pid_lwp, CTLFLAG_RD,
1400 sysctl_kern_proc, "Process table");
1401
984263bc
MD
1402SYSCTL_NODE(_kern_proc, KERN_PROC_ARGS, args, CTLFLAG_RW | CTLFLAG_ANYBODY,
1403 sysctl_kern_proc_args, "Process argument list");
a45611c5
AH
1404
1405SYSCTL_NODE(_kern_proc, KERN_PROC_CWD, cwd, CTLFLAG_RD | CTLFLAG_ANYBODY,
 1406 sysctl_kern_proc_cwd, "Process current working directory");