/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/jail.h>
#include <sys/filedesc.h>
#include <sys/tty.h>
#include <sys/dsched.h>
#include <sys/signalvar.h>
#include <sys/spinlock.h>
#include <sys/random.h>
#include <vm/vm.h>
#include <vm/vm_map.h>
#include <sys/user.h>
#include <machine/smp.h>

#include <sys/refcount.h>
#include <sys/spinlock2.h>
#include <sys/mplock2.h>
/*
 * Hash table size must be a power of two and is not currently dynamically
 * sized.  There is a trade-off between the linear scans which must iterate
 * all HSIZE elements and the number of elements which might accumulate
 * within each hash chain.
 */
#define ALLPROC_HSIZE	256
#define ALLPROC_HMASK	(ALLPROC_HSIZE - 1)
#define ALLPROC_HASH(pid)	(pid & ALLPROC_HMASK)
#define PGRP_HASH(pid)		(pid & ALLPROC_HMASK)
#define SESS_HASH(pid)		(pid & ALLPROC_HMASK)
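
/*
 * Editorial note (not from the original source): all three hash macros
 * simply mask the low bits of the id, e.g. pid 1234 hashes to bucket
 * 1234 & 255 == 210.  Pids, pgids and sessids that differ by a multiple
 * of 256 therefore share a bucket and its proc_tokens[] token.
 */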
/*
 * pid_doms[] management, used to control how quickly a PID can be recycled.
 * Must be a multiple of ALLPROC_HSIZE for the proc_makepid() inner loops.
 *
 * WARNING! PIDDOM_DELAY should not be defined > 20 or so unless you change
 *	    the array from int8_t's to int16_t's.
 */
#define PIDDOM_COUNT	10	/* 10 pids per domain - reduce array size */
#define PIDDOM_DELAY	10	/* min 10 seconds after exit before reuse */
#define PIDSEL_DOMAINS	(PID_MAX / PIDDOM_COUNT / ALLPROC_HSIZE * ALLPROC_HSIZE)
int allproc_hsize = ALLPROC_HSIZE;

LIST_HEAD(pidhashhead, proc);

static MALLOC_DEFINE(M_PGRP, "pgrp", "process group header");
MALLOC_DEFINE(M_SESSION, "session", "session header");
MALLOC_DEFINE(M_PROC, "proc", "Proc structures");
MALLOC_DEFINE(M_LWP, "lwp", "lwp structures");
MALLOC_DEFINE(M_SUBPROC, "subproc", "Proc sub-structures");
int ps_showallprocs = 1;
static int ps_showallthreads = 1;
SYSCTL_INT(_security, OID_AUTO, ps_showallprocs, CTLFLAG_RW,
    &ps_showallprocs, 0,
    "Unprivileged processes can see processes with different UID/GID");
SYSCTL_INT(_security, OID_AUTO, ps_showallthreads, CTLFLAG_RW,
    &ps_showallthreads, 0,
    "Unprivileged processes can see kernel threads");
static u_int pid_domain_skips;
SYSCTL_UINT(_kern, OID_AUTO, pid_domain_skips, CTLFLAG_RW,
    &pid_domain_skips, 0,
    "Number of pid_doms[] domains skipped");
static u_int pid_inner_skips;
SYSCTL_UINT(_kern, OID_AUTO, pid_inner_skips, CTLFLAG_RW,
    &pid_inner_skips, 0,
    "Number of pid_doms[] inner-loop skips");
static void orphanpg(struct pgrp *pg);
static void proc_makepid(struct proc *p, int random_offset);
/*
 * Other process lists
 */
static struct lwkt_token proc_tokens[ALLPROC_HSIZE];
static struct proclist allprocs[ALLPROC_HSIZE];	/* locked by proc_tokens */
static struct pgrplist allpgrps[ALLPROC_HSIZE];	/* locked by proc_tokens */
static struct sesslist allsessn[ALLPROC_HSIZE];	/* locked by proc_tokens */
/*
 * We try our best to avoid recycling a PID too quickly.  We do this by
 * storing (uint8_t)time_second in the related pid domain on-reap and then
 * using that to skip-over the domain on-allocate.
 *
 * This array has to be fairly large to support a high fork/exec rate.
 * We want ~100,000 entries or so to support a 10-second reuse latency
 * at 10,000 execs/second, worst case.  Best-case multiply by PIDDOM_COUNT
 * (approximately 100,000 execs/second).
 */
static uint8_t pid_doms[PIDSEL_DOMAINS];	/* ~100,000 entries */
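
/*
 * Worked example (editorial; assumes PID_MAX is 999999): PIDSEL_DOMAINS
 * evaluates to 999999 / 10 / 256 * 256 = 99840 using integer division,
 * which is the "~100,000 entries" cited above rounded down to a multiple
 * of ALLPROC_HSIZE.
 */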
/*
 * Random component to nextpid generation.  We mix in a random factor to make
 * it a little harder to predict.  We sanity check the modulus value to avoid
 * doing it in critical paths.  Don't let it be too small or we pointlessly
 * waste randomness entropy, and don't let it be impossibly large.  Using a
 * modulus that is too big causes a LOT more process table scans and slows
 * down fork processing as the pidchecked caching is defeated.
 */
static int randompid = 0;
static int
sysctl_kern_randompid(SYSCTL_HANDLER_ARGS)
{
	int error, pid;

	pid = randompid;
	error = sysctl_handle_int(oidp, &pid, 0, req);
	if (error || !req->newptr)
		return (error);
	if (pid < 0 || pid > PID_MAX - 100)	/* out of range */
		pid = PID_MAX - 100;
	else if (pid < 2)			/* NOP */
		pid = 0;
	else if (pid < 100)			/* Make it reasonable */
		pid = 100;
	randompid = pid;
	return (error);
}

SYSCTL_PROC(_kern, OID_AUTO, randompid, CTLTYPE_INT|CTLFLAG_RW,
	    0, 0, sysctl_kern_randompid, "I", "Random PID modulus");
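
/*
 * Usage example (editorial): pid randomization is off by default and can
 * be enabled from userland, e.g.:
 *
 *	# sysctl kern.randompid=10000
 *
 * after which proc_add_allproc() mixes a random offset in [0, 10000)
 * into the sequential pid base.
 */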
/*
 * Initialize global process hashing structures.
 *
 * These functions are ONLY called from the low level boot code and do
 * not lock their operations.
 */
void
procinit(void)
{
	u_long i;

	/*
	 * Avoid unnecessary stalls due to pid_doms[] values all being
	 * the same.  Make sure that the allocation of pid 1 and pid 2
	 * succeeds.
	 */
	for (i = 0; i < PIDSEL_DOMAINS; ++i)
		pid_doms[i] = (int8_t)i - (int8_t)(PIDDOM_DELAY + 1);

	for (i = 0; i < ALLPROC_HSIZE; ++i) {
		LIST_INIT(&allprocs[i]);
		LIST_INIT(&allsessn[i]);
		LIST_INIT(&allpgrps[i]);
		lwkt_token_init(&proc_tokens[i], "allproc");
	}
}
void
procinsertinit(struct proc *p)
{
	LIST_INSERT_HEAD(&allprocs[ALLPROC_HASH(p->p_pid)], p, p_list);
}

void
pgrpinsertinit(struct pgrp *pg)
{
	LIST_INSERT_HEAD(&allpgrps[ALLPROC_HASH(pg->pg_id)], pg, pg_list);
}

void
sessinsertinit(struct session *sess)
{
	LIST_INSERT_HEAD(&allsessn[ALLPROC_HASH(sess->s_sid)], sess, s_list);
}
/*
 * Process hold/release support functions.  Called via the PHOLD(),
 * PRELE(), and PSTALL() macros.
 *
 * p->p_lock is a simple hold count with a waiting interlock.  No wakeup()
 * is issued unless someone is actually waiting for the process.
 *
 * Most holds are short-term, allowing a process scan or other similar
 * operation to access a proc structure without it getting ripped out from
 * under us.  procfs and process-list sysctl ops also use the hold function
 * interlocked with various p_flags to keep the vmspace intact when reading
 * or writing a user process's address space.
 *
 * There are two situations where a hold count can be longer.  Exiting lwps
 * hold the process until the lwp is reaped, and the parent will hold the
 * child during vfork()/exec() sequences while the child is marked P_PPWAIT.
 *
 * The kernel waits for the hold count to drop to 0 (or 1 in some cases) at
 * various critical points in the fork/exec and exit paths before proceeding.
 */
#define PLOCK_ZOMB	0x20000000
#define PLOCK_WAITING	0x40000000
#define PLOCK_MASK	0x1FFFFFFF
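
/*
 * Editorial sketch (not from the original source): the common short-term
 * hold pattern, assuming the usual PHOLD()/PRELE() macro expansion into
 * phold()/prele():
 *
 *	PHOLD(p);
 *	... p cannot be ripped out while the hold is active ...
 *	PRELE(p);
 *
 * The low 29 bits (PLOCK_MASK) form the hold count; the top bits flag
 * waiters (PLOCK_WAITING) and zombie-reap serialization (PLOCK_ZOMB).
 */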
void
pstall(struct proc *p, const char *wmesg, int count)
{
	int o;
	int n;

	for (;;) {
		o = p->p_lock;
		cpu_ccfence();
		if ((o & PLOCK_MASK) <= count)
			break;
		n = o | PLOCK_WAITING;
		tsleep_interlock(&p->p_lock, 0);

		/*
		 * If someone is trying to single-step the process during
		 * an exec or an exit they can deadlock us because procfs
		 * sleeps with the process held.
		 */
		if (p->p_stops) {
			if (p->p_flags & P_INEXEC) {
				wakeup(&p->p_stype);
			} else if (p->p_flags & P_POSTEXIT) {
				spin_lock(&p->p_spin);
				p->p_stops = 0;
				p->p_step = 0;
				spin_unlock(&p->p_spin);
				wakeup(&p->p_stype);
			}
		}

		if (atomic_cmpset_int(&p->p_lock, o, n)) {
			tsleep(&p->p_lock, PINTERLOCKED, wmesg, 0);
		}
	}
}
void
phold(struct proc *p)
{
	atomic_add_int(&p->p_lock, 1);
}

/*
 * WARNING!  On last release (p) can become instantly invalid due to
 *	     MP races.
 */
void
prele(struct proc *p)
{
	int o;
	int n;

	/*
	 * Fast path
	 */
	if (atomic_cmpset_int(&p->p_lock, 1, 0))
		return;

	/*
	 * Slow path
	 */
	for (;;) {
		o = p->p_lock;
		KKASSERT((o & PLOCK_MASK) > 0);
		cpu_ccfence();
		n = (o - 1) & ~PLOCK_WAITING;
		if (atomic_cmpset_int(&p->p_lock, o, n)) {
			if (o & PLOCK_WAITING)
				wakeup(&p->p_lock);
			break;
		}
	}
}
/*
 * Hold and flag serialized for zombie reaping purposes.
 *
 * This function will fail if it has to block, returning non-zero with
 * neither the flag set nor the hold count bumped.  Note that we must block
 * without holding a ref, meaning that the caller must ensure that (p)
 * remains valid through some other interlock (typically on its parent
 * process's p_token).
 *
 * Zero is returned on success.  The hold count will be incremented and
 * the serialization flag acquired.  Note that serialization is only against
 * other pholdzomb() calls, not against phold() calls.
 */
int
pholdzomb(struct proc *p)
{
	int o;
	int n;

	/*
	 * Fast path
	 */
	if (atomic_cmpset_int(&p->p_lock, 0, PLOCK_ZOMB | 1))
		return(0);

	/*
	 * Slow path
	 */
	for (;;) {
		o = p->p_lock;
		cpu_ccfence();
		if ((o & PLOCK_ZOMB) == 0) {
			n = (o + 1) | PLOCK_ZOMB;
			if (atomic_cmpset_int(&p->p_lock, o, n))
				return(0);
		} else {
			KKASSERT((o & PLOCK_MASK) > 0);
			n = o | PLOCK_WAITING;
			tsleep_interlock(&p->p_lock, 0);
			if (atomic_cmpset_int(&p->p_lock, o, n)) {
				tsleep(&p->p_lock, PINTERLOCKED, "phldz", 0);
				/* (p) can be ripped out at this point */
				return(1);
			}
		}
	}
}
/*
 * Release PLOCK_ZOMB and the hold count, waking up any waiters.
 *
 * WARNING!  On last release (p) can become instantly invalid due to
 *	     MP races.
 */
void
prelezomb(struct proc *p)
{
	int o;
	int n;

	/*
	 * Fast path
	 */
	if (atomic_cmpset_int(&p->p_lock, PLOCK_ZOMB | 1, 0))
		return;

	/*
	 * Slow path
	 */
	KKASSERT(p->p_lock & PLOCK_ZOMB);
	for (;;) {
		o = p->p_lock;
		KKASSERT((o & PLOCK_MASK) > 0);
		cpu_ccfence();
		n = (o - 1) & ~(PLOCK_ZOMB | PLOCK_WAITING);
		if (atomic_cmpset_int(&p->p_lock, o, n)) {
			if (o & PLOCK_WAITING)
				wakeup(&p->p_lock);
			break;
		}
	}
}
/*
 * Is p an inferior of the current process?
 *
 * No requirements.
 */
int
inferior(struct proc *p)
{
	struct proc *p2;

	PHOLD(p);
	lwkt_gettoken_shared(&p->p_token);
	while (p != curproc) {
		if (p->p_pid == 0) {
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return (0);
		}
		p2 = p->p_pptr;
		PHOLD(p2);
		lwkt_reltoken(&p->p_token);
		PRELE(p);
		lwkt_gettoken_shared(&p2->p_token);
		p = p2;
	}
	lwkt_reltoken(&p->p_token);
	PRELE(p);

	return (1);
}
/*
 * Locate a process by number.  The returned process will be referenced and
 * must be released with PRELE().
 *
 * No requirements.
 */
struct proc *
pfind(pid_t pid)
{
	struct proc *p = curproc;
	int n;

	/*
	 * Shortcut the current process
	 */
	if (p && p->p_pid == pid) {
		PHOLD(p);
		return (p);
	}

	/*
	 * Otherwise find it in the hash table.
	 */
	n = ALLPROC_HASH(pid);

	lwkt_gettoken_shared(&proc_tokens[n]);
	LIST_FOREACH(p, &allprocs[n], p_list) {
		if (p->p_stat == SZOMB)
			continue;
		if (p->p_pid == pid) {
			PHOLD(p);
			lwkt_reltoken(&proc_tokens[n]);
			return (p);
		}
	}
	lwkt_reltoken(&proc_tokens[n]);

	return (NULL);
}
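
/*
 * Usage example (editorial): a pfind() reference must always be dropped
 * with PRELE():
 *
 *	struct proc *p;
 *
 *	if ((p = pfind(pid)) != NULL) {
 *		... use p ...
 *		PRELE(p);
 *	}
 */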
/*
 * Locate a process by number.  The returned process is NOT referenced.
 * The result will not be stable and is typically only used to validate
 * against a process that the caller has in-hand.
 *
 * No requirements.
 */
struct proc *
pfindn(pid_t pid)
{
	struct proc *p = curproc;
	int n;

	/*
	 * Shortcut the current process
	 */
	if (p && p->p_pid == pid)
		return (p);

	/*
	 * Otherwise find it in the hash table.
	 */
	n = ALLPROC_HASH(pid);

	lwkt_gettoken_shared(&proc_tokens[n]);
	LIST_FOREACH(p, &allprocs[n], p_list) {
		if (p->p_stat == SZOMB)
			continue;
		if (p->p_pid == pid) {
			lwkt_reltoken(&proc_tokens[n]);
			return (p);
		}
	}
	lwkt_reltoken(&proc_tokens[n]);

	return (NULL);
}
/*
 * Locate a process on the zombie list.  Return a process or NULL.
 * The returned process will be referenced and the caller must release
 * it with PRELE().
 *
 * No other requirements.
 */
struct proc *
zpfind(pid_t pid)
{
	struct proc *p = curproc;
	int n;

	/*
	 * Shortcut the current process
	 */
	if (p && p->p_pid == pid) {
		PHOLD(p);
		return (p);
	}

	/*
	 * Otherwise find it in the hash table.
	 */
	n = ALLPROC_HASH(pid);

	lwkt_gettoken_shared(&proc_tokens[n]);
	LIST_FOREACH(p, &allprocs[n], p_list) {
		if (p->p_stat != SZOMB)
			continue;
		if (p->p_pid == pid) {
			PHOLD(p);
			lwkt_reltoken(&proc_tokens[n]);
			return (p);
		}
	}
	lwkt_reltoken(&proc_tokens[n]);

	return (NULL);
}
/*
 * Hold a process group reference.
 */
void
pgref(struct pgrp *pgrp)
{
	refcount_acquire(&pgrp->pg_refs);
}

/*
 * Release a process group reference.  On the 1->0 transition the
 * pgrp is destroyed.
 */
void
pgrel(struct pgrp *pgrp)
{
	int count;
	int n;

	n = PGRP_HASH(pgrp->pg_id);
	for (;;) {
		count = pgrp->pg_refs;
		cpu_ccfence();
		KKASSERT(count > 0);
		if (count == 1) {
			lwkt_gettoken(&proc_tokens[n]);
			if (atomic_cmpset_int(&pgrp->pg_refs, 1, 0))
				break;
			lwkt_reltoken(&proc_tokens[n]);
			/* retry */
		} else {
			if (atomic_cmpset_int(&pgrp->pg_refs, count, count - 1))
				return;
			/* retry */
		}
	}

	/*
	 * Successful 1->0 transition, proc_tokens[n] is held.
	 */
	LIST_REMOVE(pgrp, pg_list);
	pid_doms[pgrp->pg_id % PIDSEL_DOMAINS] = (uint8_t)time_second;

	/*
	 * Reset any sigio structures pointing to us as a result of
	 * F_SETOWN with our pgid.
	 */
	funsetownlst(&pgrp->pg_sigiolst);

	if (pgrp->pg_session->s_ttyp != NULL &&
	    pgrp->pg_session->s_ttyp->t_pgrp == pgrp) {
		pgrp->pg_session->s_ttyp->t_pgrp = NULL;
	}
	lwkt_reltoken(&proc_tokens[n]);

	sess_rele(pgrp->pg_session);
	kfree(pgrp, M_PGRP);
}
/*
 * Locate a process group by number.  The returned process group will be
 * referenced w/pgref() and must be released with pgrel() (or assigned
 * somewhere if you wish to keep the reference).
 *
 * No requirements.
 */
struct pgrp *
pgfind(pid_t pgid)
{
	struct pgrp *pgrp;
	int n;

	n = PGRP_HASH(pgid);
	lwkt_gettoken_shared(&proc_tokens[n]);

	LIST_FOREACH(pgrp, &allpgrps[n], pg_list) {
		if (pgrp->pg_id == pgid) {
			refcount_acquire(&pgrp->pg_refs);
			lwkt_reltoken(&proc_tokens[n]);
			return (pgrp);
		}
	}
	lwkt_reltoken(&proc_tokens[n]);
	return (NULL);
}
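
/*
 * Usage example (editorial): the matching lookup/release pair for
 * process groups:
 *
 *	struct pgrp *pgrp;
 *
 *	if ((pgrp = pgfind(pgid)) != NULL) {
 *		... use pgrp ...
 *		pgrel(pgrp);
 *	}
 */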
/*
 * Move p to a new or existing process group (and session)
 *
 * No requirements.
 */
int
enterpgrp(struct proc *p, pid_t pgid, int mksess)
{
	struct pgrp *pgrp;
	struct pgrp *opgrp;
	int error;

	pgrp = pgfind(pgid);

	KASSERT(pgrp == NULL || !mksess,
		("enterpgrp: setsid into non-empty pgrp"));
	KASSERT(!SESS_LEADER(p),
		("enterpgrp: session leader attempted setpgrp"));

	if (pgrp == NULL) {
		pid_t savepid = p->p_pid;
		struct proc *np;
		int n;

		/*
		 * new process group
		 */
		KASSERT(p->p_pid == pgid,
			("enterpgrp: new pgrp and pid != pgid"));
		pgrp = kmalloc(sizeof(struct pgrp), M_PGRP, M_WAITOK | M_ZERO);
		pgrp->pg_id = pgid;
		LIST_INIT(&pgrp->pg_members);
		SLIST_INIT(&pgrp->pg_sigiolst);
		lwkt_token_init(&pgrp->pg_token, "pgrp_token");
		refcount_init(&pgrp->pg_refs, 1);
		lockinit(&pgrp->pg_lock, "pgwt", 0, 0);

		n = PGRP_HASH(pgid);
		lwkt_gettoken(&proc_tokens[n]);

		if ((np = pfindn(savepid)) == NULL || np != p) {
			lwkt_reltoken(&proc_tokens[n]);
			kfree(pgrp, M_PGRP);
			error = ESRCH;
			goto fatal;
		}

		if (mksess) {
			struct session *sess;

			/*
			 * new session
			 */
			sess = kmalloc(sizeof(struct session), M_SESSION,
				       M_WAITOK | M_ZERO);
			lwkt_gettoken(&p->p_token);
			sess->s_leader = p;
			sess->s_sid = p->p_pid;
			sess->s_count = 1;
			sess->s_ttyvp = NULL;
			sess->s_ttyp = NULL;
			bcopy(p->p_session->s_login, sess->s_login,
			      sizeof(sess->s_login));
			pgrp->pg_session = sess;
			KASSERT(p == curproc,
				("enterpgrp: mksession and p != curproc"));
			p->p_flags &= ~P_CONTROLT;
			LIST_INSERT_HEAD(&allsessn[n], sess, s_list);
			lwkt_reltoken(&p->p_token);
		} else {
			lwkt_gettoken(&p->p_token);
			pgrp->pg_session = p->p_session;
			sess_hold(pgrp->pg_session);
			lwkt_reltoken(&p->p_token);
		}
		LIST_INSERT_HEAD(&allpgrps[n], pgrp, pg_list);

		lwkt_reltoken(&proc_tokens[n]);
	} else if (pgrp == p->p_pgrp) {
		pgrel(pgrp);
		goto done;
	} /* else pgfind() referenced the pgrp */

	lwkt_gettoken(&pgrp->pg_token);
	lwkt_gettoken(&p->p_token);

	/*
	 * Replace p->p_pgrp, handling any races that occur.
	 */
	while ((opgrp = p->p_pgrp) != NULL) {
		pgref(opgrp);
		lwkt_gettoken(&opgrp->pg_token);
		if (opgrp != p->p_pgrp) {
			lwkt_reltoken(&opgrp->pg_token);
			pgrel(opgrp);
			continue;
		}
		LIST_REMOVE(p, p_pglist);
		break;
	}
	p->p_pgrp = pgrp;
	LIST_INSERT_HEAD(&pgrp->pg_members, p, p_pglist);

	/*
	 * Adjust eligibility of affected pgrps to participate in job control.
	 * Increment eligibility counts before decrementing, otherwise we
	 * could reach 0 spuriously during the first call.
	 */
	fixjobc(p, pgrp, 1);
	if (opgrp) {
		fixjobc(p, opgrp, 0);
		lwkt_reltoken(&opgrp->pg_token);
		pgrel(opgrp);		/* manual pgref */
		pgrel(opgrp);		/* p->p_pgrp ref */
	}
	lwkt_reltoken(&p->p_token);
	lwkt_reltoken(&pgrp->pg_token);
done:
	error = 0;
fatal:
	return (error);
}
/*
 * Remove process from process group
 *
 * No requirements.
 */
int
leavepgrp(struct proc *p)
{
	struct pgrp *pg = p->p_pgrp;

	lwkt_gettoken(&p->p_token);
	while ((pg = p->p_pgrp) != NULL) {
		pgref(pg);
		lwkt_gettoken(&pg->pg_token);
		if (p->p_pgrp != pg) {
			lwkt_reltoken(&pg->pg_token);
			pgrel(pg);
			continue;
		}
		p->p_pgrp = NULL;
		LIST_REMOVE(p, p_pglist);
		lwkt_reltoken(&pg->pg_token);
		pgrel(pg);	/* manual pgref */
		pgrel(pg);	/* p->p_pgrp ref */
		break;
	}
	lwkt_reltoken(&p->p_token);

	return (0);
}
/*
 * Adjust the ref count on a session structure.  When the ref count falls to
 * zero the tty is disassociated from the session and the session structure
 * is freed.  Note that tty association is not itself ref-counted.
 *
 * No requirements.
 */
void
sess_hold(struct session *sp)
{
	atomic_add_int(&sp->s_count, 1);
}
/*
 * No requirements.
 */
void
sess_rele(struct session *sess)
{
	struct tty *tp;
	int count;
	int n;

	n = SESS_HASH(sess->s_sid);
	for (;;) {
		count = sess->s_count;
		cpu_ccfence();
		KKASSERT(count > 0);
		if (count == 1) {
			lwkt_gettoken(&tty_token);
			lwkt_gettoken(&proc_tokens[n]);
			if (atomic_cmpset_int(&sess->s_count, 1, 0))
				break;
			lwkt_reltoken(&proc_tokens[n]);
			lwkt_reltoken(&tty_token);
			/* retry */
		} else {
			if (atomic_cmpset_int(&sess->s_count, count, count - 1))
				return;
			/* retry */
		}
	}

	/*
	 * Successful 1->0 transition and tty_token is held.
	 */
	LIST_REMOVE(sess, s_list);
	pid_doms[sess->s_sid % PIDSEL_DOMAINS] = (uint8_t)time_second;

	if (sess->s_ttyp && sess->s_ttyp->t_session) {
#ifdef TTY_DO_FULL_CLOSE
		/* FULL CLOSE, see ttyclearsession() */
		KKASSERT(sess->s_ttyp->t_session == sess);
		sess->s_ttyp->t_session = NULL;
#else
		/* HALF CLOSE, see ttyclearsession() */
		if (sess->s_ttyp->t_session == sess)
			sess->s_ttyp->t_session = NULL;
#endif
	}
	if ((tp = sess->s_ttyp) != NULL) {
		sess->s_ttyp = NULL;
		ttyunhold(tp);
	}

	lwkt_reltoken(&proc_tokens[n]);
	lwkt_reltoken(&tty_token);

	kfree(sess, M_SESSION);
}
/*
 * Adjust pgrp jobc counters when specified process changes process group.
 * We count the number of processes in each process group that "qualify"
 * the group for terminal job control (those with a parent in a different
 * process group of the same session).  If that count reaches zero, the
 * process group becomes orphaned.  Check both the specified process'
 * process group and that of its children.
 *
 * entering == 0 => p is leaving specified group.
 * entering == 1 => p is entering specified group.
 *
 * No requirements.
 */
void
fixjobc(struct proc *p, struct pgrp *pgrp, int entering)
{
	struct pgrp *hispgrp;
	struct session *mysession;
	struct proc *np;

	/*
	 * Check p's parent to see whether p qualifies its own process
	 * group; if so, adjust count for p's process group.
	 */
	lwkt_gettoken(&p->p_token);	/* p_children scan */
	lwkt_gettoken(&pgrp->pg_token);

	mysession = pgrp->pg_session;
	if ((hispgrp = p->p_pptr->p_pgrp) != pgrp &&
	    hispgrp->pg_session == mysession) {
		if (entering)
			pgrp->pg_jobc++;
		else if (--pgrp->pg_jobc == 0)
			orphanpg(pgrp);
	}

	/*
	 * Check this process' children to see whether they qualify
	 * their process groups; if so, adjust counts for children's
	 * process groups.
	 */
	LIST_FOREACH(np, &p->p_children, p_sibling) {
		PHOLD(np);
		lwkt_gettoken(&np->p_token);
		if ((hispgrp = np->p_pgrp) != pgrp &&
		    hispgrp->pg_session == mysession &&
		    np->p_stat != SZOMB) {
			pgref(hispgrp);
			lwkt_gettoken(&hispgrp->pg_token);
			if (entering)
				hispgrp->pg_jobc++;
			else if (--hispgrp->pg_jobc == 0)
				orphanpg(hispgrp);
			lwkt_reltoken(&hispgrp->pg_token);
			pgrel(hispgrp);
		}
		lwkt_reltoken(&np->p_token);
		PRELE(np);
	}
	KKASSERT(pgrp->pg_refs > 0);
	lwkt_reltoken(&pgrp->pg_token);
	lwkt_reltoken(&p->p_token);
}
/*
 * A process group has become orphaned; if there are any stopped processes
 * in the group, hang up all processes in that group.
 *
 * The caller must hold pg_token.
 */
static void
orphanpg(struct pgrp *pg)
{
	struct proc *p;

	LIST_FOREACH(p, &pg->pg_members, p_pglist) {
		if (p->p_stat == SSTOP) {
			LIST_FOREACH(p, &pg->pg_members, p_pglist) {
				ksignal(p, SIGHUP);
				ksignal(p, SIGCONT);
			}
			return;
		}
	}
}
/*
 * Add a new process to the allproc list and the PID hash.  This
 * also assigns a pid to the new process.
 *
 * No requirements.
 */
void
proc_add_allproc(struct proc *p)
{
	int random_offset;

	if ((random_offset = randompid) != 0) {
		read_random(&random_offset, sizeof(random_offset));
		random_offset = (random_offset & 0x7FFFFFFF) % randompid;
	}
	proc_makepid(p, random_offset);
}
/*
 * Calculate a new process pid.  This function is integrated into
 * proc_add_allproc() to guarantee that the new pid is not reused before
 * the new process can be added to the allproc list.
 *
 * p_pid is assigned and the process is added to the allproc hash table.
 *
 * WARNING! We need to allocate PIDs sequentially during early boot.
 *	    In particular, init needs to have a pid of 1.
 */
static
void
proc_makepid(struct proc *p, int random_offset)
{
	static pid_t nextpid = 1;	/* heuristic, allowed to race */
	struct pgrp *pg;
	struct proc *ps;
	struct session *sess;
	pid_t base;
	int8_t delta8;
	int retries;
	int n;

	/*
	 * Select the next pid base candidate.
	 *
	 * Check for cycling (wrap), do not allow a pid < 100.
	 */
	retries = 0;
retry:
	base = atomic_fetchadd_int(&nextpid, 1) + random_offset;
	if (base <= 0 || base >= PID_MAX) {
		base = base % PID_MAX;
		if (base < 0)
			base = 100;
		if (base < 100)
			base += 100;
		nextpid = base;		/* reset (SMP race ok) */
	}

	/*
	 * Do not allow a base pid to be selected from a domain that has
	 * recently seen a pid/pgid/sessid reap.  Sleep a little if we looped
	 * through all available domains.
	 *
	 * WARNING: We want the early pids to be allocated linearly,
	 *	    particularly pid 1 and pid 2.
	 */
	if (++retries >= PIDSEL_DOMAINS)
		tsleep(&nextpid, 0, "makepid", 1);
	delta8 = (int8_t)time_second -
		 (int8_t)pid_doms[base % PIDSEL_DOMAINS];
	if (delta8 >= 0 && delta8 <= PIDDOM_DELAY) {
		++pid_domain_skips;
		goto retry;
	}

	/*
	 * Calculate a hash index and find an unused process id within
	 * the table, looping if we cannot find one.
	 *
	 * The inner loop increments by ALLPROC_HSIZE which keeps the
	 * PID at the same pid_doms[] index as well as the same hash index.
	 */
	n = ALLPROC_HASH(base);
	lwkt_gettoken(&proc_tokens[n]);

restart1:
	LIST_FOREACH(ps, &allprocs[n], p_list) {
		if (ps->p_pid == base) {
			base += ALLPROC_HSIZE;
			if (base >= PID_MAX) {
				lwkt_reltoken(&proc_tokens[n]);
				goto retry;
			}
			++pid_inner_skips;
			goto restart1;
		}
	}
	LIST_FOREACH(pg, &allpgrps[n], pg_list) {
		if (pg->pg_id == base) {
			base += ALLPROC_HSIZE;
			if (base >= PID_MAX) {
				lwkt_reltoken(&proc_tokens[n]);
				goto retry;
			}
			++pid_inner_skips;
			goto restart1;
		}
	}
	LIST_FOREACH(sess, &allsessn[n], s_list) {
		if (sess->s_sid == base) {
			base += ALLPROC_HSIZE;
			if (base >= PID_MAX) {
				lwkt_reltoken(&proc_tokens[n]);
				goto retry;
			}
			++pid_inner_skips;
			goto restart1;
		}
	}

	/*
	 * Assign the pid and insert the process.
	 */
	p->p_pid = base;
	LIST_INSERT_HEAD(&allprocs[n], p, p_list);
	lwkt_reltoken(&proc_tokens[n]);
}
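
/*
 * Editorial note: because the inner loops step by ALLPROC_HSIZE, a
 * colliding candidate such as base 466 is retried as 722, 978, ...,
 * all of which mask to bucket 466 & 255 == 210, so proc_tokens[n]
 * never has to be re-acquired while searching.
 */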
/*
 * Called from exit1 to place the process into a zombie state.
 * The process is removed from the pid hash and p_stat is set
 * to SZOMB.  Normal pfind[n]() calls will not find it any more.
 *
 * Caller must hold p->p_token.  We are required to wait until p_lock
 * becomes zero before we can manipulate the list, allowing allproc
 * scans to guarantee consistency during a list scan.
 */
void
proc_move_allproc_zombie(struct proc *p)
{
	int n;

	n = ALLPROC_HASH(p->p_pid);
	PSTALL(p, "reap1", 0);
	lwkt_gettoken(&proc_tokens[n]);

	PSTALL(p, "reap1a", 0);
	p->p_stat = SZOMB;

	lwkt_reltoken(&proc_tokens[n]);
	dsched_exit_proc(p);
}
/*
 * This routine is called from kern_wait() and will remove the process
 * from the zombie list and the sibling list.  This routine will block
 * if someone has a lock on the process (p_lock).
 *
 * Caller must hold p->p_token.  We are required to wait until p_lock
 * becomes zero before we can manipulate the list, allowing allproc
 * scans to guarantee consistency during a list scan.
 */
void
proc_remove_zombie(struct proc *p)
{
	int n;

	n = ALLPROC_HASH(p->p_pid);

	PSTALL(p, "reap2", 0);
	lwkt_gettoken(&proc_tokens[n]);
	PSTALL(p, "reap2a", 0);
	LIST_REMOVE(p, p_list);		/* remove from master list */
	LIST_REMOVE(p, p_sibling);	/* and from sibling list */
	pid_doms[p->p_pid % PIDSEL_DOMAINS] = (uint8_t)time_second;
	lwkt_reltoken(&proc_tokens[n]);
}
/*
 * Handle various requirements prior to returning to usermode.  Called from
 * platform trap and system call code.
 */
void
lwpuserret(struct lwp *lp)
{
	struct proc *p = lp->lwp_proc;

	if (lp->lwp_mpflags & LWP_MP_VNLRU) {
		atomic_clear_int(&lp->lwp_mpflags, LWP_MP_VNLRU);
		allocvnode_gc();
	}
	if (lp->lwp_mpflags & LWP_MP_WEXIT) {
		lwkt_gettoken(&p->p_token);
		lwp_exit(0, NULL);
		lwkt_reltoken(&p->p_token);	/* NOT REACHED */
	}
}
/*
 * Kernel threads run from user processes can also accumulate deferred
 * actions which need to be acted upon.  Callers include:
 *
 * nfsd - Can allocate lots of vnodes
 */
void
lwpkthreaddeferred(void)
{
	struct lwp *lp = curthread->td_lwp;

	if (lp) {
		if (lp->lwp_mpflags & LWP_MP_VNLRU) {
			atomic_clear_int(&lp->lwp_mpflags, LWP_MP_VNLRU);
			allocvnode_gc();
		}
	}
}
/*
 * Scan all processes on the allproc list.  The process is automatically
 * held for the callback.  A return value of -1 terminates the loop.
 * Zombie procs are skipped.
 *
 * The callback is made with the process held and proc_token held.
 *
 * We limit the scan to the number of processes as-of the start of
 * the scan so as not to get caught up in an endless loop if new processes
 * are created more quickly than we can scan the old ones.  Add a little
 * slop to try to catch edge cases since nprocs can race.
 *
 * No requirements.
 */
void
allproc_scan(int (*callback)(struct proc *, void *), void *data)
{
	int limit = nprocs + ncpus;
	struct proc *p;
	int r;
	int n;

	/*
	 * proc_tokens[n] protects the allproc list and PHOLD() prevents the
	 * process from being removed from the allproc list or the zombproc
	 * list.
	 */
	for (n = 0; n < ALLPROC_HSIZE; ++n) {
		if (LIST_FIRST(&allprocs[n]) == NULL)
			continue;
		lwkt_gettoken(&proc_tokens[n]);
		LIST_FOREACH(p, &allprocs[n], p_list) {
			if (p->p_stat == SZOMB)
				continue;
			PHOLD(p);
			r = callback(p, data);
			PRELE(p);
			if (r < 0)
				break;
			if (--limit < 0)
				break;
		}
		lwkt_reltoken(&proc_tokens[n]);

		/*
		 * Check if asked to stop early
		 */
		if (p)
			break;
	}
}
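
/*
 * Usage sketch (editorial): a hypothetical callback that counts
 * non-zombie processes; returning -1 from the callback would terminate
 * the scan early:
 *
 *	static int
 *	count_cb(struct proc *p, void *data)
 *	{
 *		++*(int *)data;
 *		return (0);
 *	}
 *
 *	int count = 0;
 *	allproc_scan(count_cb, &count);
 */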
/*
 * Scan all lwps of processes on the allproc list.  The lwp is automatically
 * held for the callback.  A return value of -1 terminates the loop.
 *
 * The callback is made with the process and lwp both held, and proc_token
 * held.
 *
 * No requirements.
 */
void
alllwp_scan(int (*callback)(struct lwp *, void *), void *data)
{
	struct proc *p;
	struct lwp *lp;
	int r = 0;
	int n;

	for (n = 0; n < ALLPROC_HSIZE; ++n) {
		if (LIST_FIRST(&allprocs[n]) == NULL)
			continue;
		lwkt_gettoken(&proc_tokens[n]);
		LIST_FOREACH(p, &allprocs[n], p_list) {
			if (p->p_stat == SZOMB)
				continue;
			PHOLD(p);
			lwkt_gettoken(&p->p_token);
			FOREACH_LWP_IN_PROC(lp, p) {
				LWPHOLD(lp);
				r = callback(lp, data);
				LWPRELE(lp);
			}
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			if (r < 0)
				break;
		}
		lwkt_reltoken(&proc_tokens[n]);

		/*
		 * Asked to exit early
		 */
		if (r < 0)
			break;
	}
}
/*
 * Scan all processes on the zombproc list.  The process is automatically
 * held for the callback.  A return value of -1 terminates the loop.
 *
 * The callback is made with the process held and proc_token held.
 *
 * No requirements.
 */
void
zombproc_scan(int (*callback)(struct proc *, void *), void *data)
{
	struct proc *p;
	int r;
	int n;

	/*
	 * proc_tokens[n] protects the allproc list and PHOLD() prevents the
	 * process from being removed from the allproc list or the zombproc
	 * list.
	 */
	for (n = 0; n < ALLPROC_HSIZE; ++n) {
		if (LIST_FIRST(&allprocs[n]) == NULL)
			continue;
		lwkt_gettoken(&proc_tokens[n]);
		LIST_FOREACH(p, &allprocs[n], p_list) {
			if (p->p_stat != SZOMB)
				continue;
			PHOLD(p);
			r = callback(p, data);
			PRELE(p);
			if (r < 0)
				break;
		}
		lwkt_reltoken(&proc_tokens[n]);

		/*
		 * Check if asked to stop early
		 */
		if (p)
			break;
	}
}
#include "opt_ddb.h"
#ifdef DDB
#include <ddb/ddb.h>

DB_SHOW_COMMAND(pgrpdump, pgrpdump)
{
	struct pgrp *pgrp;
	struct proc *p;
	int i;

	for (i = 0; i < ALLPROC_HSIZE; ++i) {
		if (LIST_EMPTY(&allpgrps[i]))
			continue;
		kprintf("\tindx %d\n", i);
		LIST_FOREACH(pgrp, &allpgrps[i], pg_list) {
			kprintf("\tpgrp %p, pgid %ld, sess %p, "
				"sesscnt %d, mem %p\n",
				(void *)pgrp, (long)pgrp->pg_id,
				(void *)pgrp->pg_session,
				pgrp->pg_session->s_count,
				(void *)LIST_FIRST(&pgrp->pg_members));
			LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
				kprintf("\t\tpid %ld addr %p pgrp %p\n",
					(long)p->p_pid, (void *)p,
					(void *)p->p_pgrp);
			}
		}
	}
}
#endif /* DDB */
/*
 * The caller must hold proc_token.
 */
static int
sysctl_out_proc(struct proc *p, struct sysctl_req *req, int flags)
{
	struct kinfo_proc ki;
	struct lwp *lp;
	int skp = 0, had_output = 0;
	int error;

	bzero(&ki, sizeof(ki));
	lwkt_gettoken_shared(&p->p_token);
	fill_kinfo_proc(p, &ki);
	if ((flags & KERN_PROC_FLAG_LWP) == 0)
		skp = 1;
	error = 0;
	FOREACH_LWP_IN_PROC(lp, p) {
		LWPHOLD(lp);
		fill_kinfo_lwp(lp, &ki.kp_lwp);
		had_output = 1;
		error = SYSCTL_OUT(req, &ki, sizeof(ki));
		LWPRELE(lp);
		if (error)
			break;
		if (skp)
			break;
	}
	lwkt_reltoken(&p->p_token);

	/* We need to output at least the proc, even if there is no lwp. */
	if (had_output == 0) {
		error = SYSCTL_OUT(req, &ki, sizeof(ki));
	}
	return (error);
}
/*
 * The caller must hold proc_token.
 */
static int
sysctl_out_proc_kthread(struct thread *td, struct sysctl_req *req)
{
	struct kinfo_proc ki;
	int error;

	fill_kinfo_proc_kthread(td, &ki);
	error = SYSCTL_OUT(req, &ki, sizeof(ki));

	return (error);
}
static int
sysctl_kern_proc(SYSCTL_HANDLER_ARGS)
{
	int *name = (int *)arg1;
	int oid = oidp->oid_number;
	u_int namelen = arg2;
	struct proc *p;
	struct thread *td;
	struct thread *marker;
	int flags = 0;
	int error = 0;
	int n;
	int origcpu;
	struct ucred *cr1 = curproc->p_ucred;

	flags = oid & KERN_PROC_FLAGMASK;
	oid &= ~KERN_PROC_FLAGMASK;

	if ((oid == KERN_PROC_ALL && namelen != 0) ||
	    (oid != KERN_PROC_ALL && namelen != 1)) {
		return (EINVAL);
	}

	/*
	 * proc_token protects the allproc list and PHOLD() prevents the
	 * process from being removed from the allproc list or the zombproc
	 * list.
	 */
	if (oid == KERN_PROC_PID) {
		p = pfind((pid_t)name[0]);
		if (p) {
			if (PRISON_CHECK(cr1, p->p_ucred))
				error = sysctl_out_proc(p, req, flags);
			PRELE(p);
		}
		goto post_threads;
	}

	if (!req->oldptr) {
		/* overestimate by 5 procs */
		error = SYSCTL_OUT(req, 0, sizeof (struct kinfo_proc) * 5);
		if (error)
			goto post_threads;
	}

	for (n = 0; n < ALLPROC_HSIZE; ++n) {
		if (LIST_EMPTY(&allprocs[n]))
			continue;
		lwkt_gettoken_shared(&proc_tokens[n]);
		LIST_FOREACH(p, &allprocs[n], p_list) {
			/*
			 * Show a user only their processes.
			 */
			if ((!ps_showallprocs) &&
			    (p->p_ucred == NULL ||
			     p_trespass(cr1, p->p_ucred))) {
				continue;
			}

			/*
			 * Skip embryonic processes.
			 */
			if (p->p_stat == SIDL)
				continue;

			/*
			 * TODO - make more efficient (see notes below).
			 */
			switch (oid) {
			case KERN_PROC_PGRP:
				/* could do this by traversing pgrp */
				if (p->p_pgrp == NULL ||
				    p->p_pgrp->pg_id != (pid_t)name[0])
					continue;
				break;

			case KERN_PROC_TTY:
				if ((p->p_flags & P_CONTROLT) == 0 ||
				    p->p_session == NULL ||
				    p->p_session->s_ttyp == NULL ||
				    dev2udev(p->p_session->s_ttyp->t_dev) !=
					(udev_t)name[0])
					continue;
				break;

			case KERN_PROC_UID:
				if (p->p_ucred == NULL ||
				    p->p_ucred->cr_uid != (uid_t)name[0])
					continue;
				break;

			case KERN_PROC_RUID:
				if (p->p_ucred == NULL ||
				    p->p_ucred->cr_ruid != (uid_t)name[0])
					continue;
				break;
			}

			if (!PRISON_CHECK(cr1, p->p_ucred))
				continue;
			PHOLD(p);
			error = sysctl_out_proc(p, req, flags);
			PRELE(p);
			if (error) {
				lwkt_reltoken(&proc_tokens[n]);
				goto post_threads;
			}
		}
		lwkt_reltoken(&proc_tokens[n]);
	}

	/*
	 * Iterate over all active cpus and scan their thread list.  Start
	 * with the next logical cpu and end with our original cpu.  We
	 * migrate our own thread to each target cpu in order to safely scan
	 * its thread list.  In the last loop we migrate back to our original
	 * cpu.
	 */
	origcpu = mycpu->gd_cpuid;
	if (!ps_showallthreads || jailed(cr1))
		goto post_threads;

	marker = kmalloc(sizeof(struct thread), M_TEMP, M_WAITOK|M_ZERO);
	marker->td_flags = TDF_MARKER;
	error = 0;

	for (n = 1; n <= ncpus; ++n) {
		globaldata_t rgd;
		int nid;

		nid = (origcpu + n) % ncpus;
		if (CPUMASK_TESTBIT(smp_active_mask, nid) == 0)
			continue;
		rgd = globaldata_find(nid);
		lwkt_setcpu_self(rgd);

		crit_enter();
		TAILQ_INSERT_TAIL(&rgd->gd_tdallq, marker, td_allq);

		while ((td = TAILQ_PREV(marker, lwkt_queue, td_allq)) != NULL) {
			TAILQ_REMOVE(&rgd->gd_tdallq, marker, td_allq);
			TAILQ_INSERT_BEFORE(td, marker, td_allq);
			if (td->td_flags & TDF_MARKER)
				continue;
			if (td->td_proc)
				continue;

			switch (oid) {
			case KERN_PROC_PGRP:
			case KERN_PROC_TTY:
			case KERN_PROC_UID:
			case KERN_PROC_RUID:
				continue;
			}

			error = sysctl_out_proc_kthread(td, req);
			if (error)
				break;
		}
		TAILQ_REMOVE(&rgd->gd_tdallq, marker, td_allq);
		crit_exit();

		if (error)
			break;
	}

	/*
	 * Userland scheduler expects us to return on the same cpu we
	 * started on.
	 */
	if (mycpu->gd_cpuid != origcpu)
		lwkt_setcpu_self(globaldata_find(origcpu));

	kfree(marker, M_TEMP);

post_threads:
	return (error);
}
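
/*
 * Userland example (editorial): these process-table sysctls are consumed
 * via sysctl(3), e.g. fetching a single process by pid:
 *
 *	int mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_PID, pid };
 *	struct kinfo_proc kp;
 *	size_t len = sizeof(kp);
 *
 *	if (sysctl(mib, 4, &kp, &len, NULL, 0) == 0)
 *		... kp describes the process ...
 */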
/*
 * This sysctl allows a process to retrieve the argument list or process
 * title for another process without groping around in the address space
 * of the other process.  It also allows a process to set its own "process
 * title" to a string of its own choice.
 *
 * No requirements.
 */
static int
sysctl_kern_proc_args(SYSCTL_HANDLER_ARGS)
{
	int *name = (int*) arg1;
	u_int namelen = arg2;
	struct proc *p;
	struct pargs *opa;
	struct pargs *pa;
	int error = 0;
	struct ucred *cr1 = curproc->p_ucred;

	if (namelen != 1)
		return (EINVAL);

	p = pfind((pid_t)name[0]);
	if (p == NULL)
		goto done;
	lwkt_gettoken(&p->p_token);

	if ((!ps_argsopen) && p_trespass(cr1, p->p_ucred))
		goto done;

	if (req->newptr && curproc != p) {
		error = EPERM;
		goto done;
	}
	if (req->oldptr && (pa = p->p_args) != NULL) {
		refcount_acquire(&pa->ar_ref);
		error = SYSCTL_OUT(req, pa->ar_args, pa->ar_length);
		if (refcount_release(&pa->ar_ref))
			kfree(pa, M_PARGS);
	}
	if (req->newptr == NULL)
		goto done;

	if (req->newlen + sizeof(struct pargs) > ps_arg_cache_limit) {
		goto done;
	}

	pa = kmalloc(sizeof(struct pargs) + req->newlen, M_PARGS, M_WAITOK);
	refcount_init(&pa->ar_ref, 1);
	pa->ar_length = req->newlen;
	error = SYSCTL_IN(req, pa->ar_args, req->newlen);
	if (error) {
		kfree(pa, M_PARGS);
		goto done;
	}

	/*
	 * Replace p_args with the new pa.  p_args may have previously
	 * been NULL.
	 */
	opa = p->p_args;
	p->p_args = pa;

	if (opa) {
		KKASSERT(opa->ar_ref > 0);
		if (refcount_release(&opa->ar_ref)) {
			kfree(opa, M_PARGS);
			/* (opa) is now invalid */
		}
	}
done:
	if (p) {
		lwkt_reltoken(&p->p_token);
		PRELE(p);
	}
	return (error);
}
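
/*
 * Userland example (editorial): setproctitle(3) sets a process's own
 * title through this sysctl, and ps(1) fetches argument lists the same
 * way:
 *
 *	int mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_ARGS, pid };
 *	char buf[2048];
 *	size_t len = sizeof(buf);
 *
 *	if (sysctl(mib, 4, buf, &len, NULL, 0) == 0)
 *		... buf holds the NUL-separated argument strings ...
 */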
static int
sysctl_kern_proc_cwd(SYSCTL_HANDLER_ARGS)
{
	int *name = (int*) arg1;
	u_int namelen = arg2;
	struct proc *p;
	int error = 0;
	char *fullpath, *freepath;
	struct ucred *cr1 = curproc->p_ucred;

	if (namelen != 1)
		return (EINVAL);

	p = pfind((pid_t)name[0]);
	if (p == NULL)
		goto done;
	lwkt_gettoken_shared(&p->p_token);

	/*
	 * If we are not allowed to see other args, we certainly shouldn't
	 * get the cwd either.  Also check the usual trespassing.
	 */
	if ((!ps_argsopen) && p_trespass(cr1, p->p_ucred))
		goto done;

	if (req->oldptr && p->p_fd != NULL && p->p_fd->fd_ncdir.ncp) {
		struct nchandle nch;

		cache_copy(&p->p_fd->fd_ncdir, &nch);
		error = cache_fullpath(p, &nch, NULL,
				       &fullpath, &freepath, 0);
		cache_drop(&nch);
		if (error)
			goto done;
		error = SYSCTL_OUT(req, fullpath, strlen(fullpath) + 1);
		kfree(freepath, M_TEMP);
	}
done:
	if (p) {
		lwkt_reltoken(&p->p_token);
		PRELE(p);
	}
	return (error);
}
/*
 * This sysctl allows a process to retrieve the path of the executable for
 * itself or another process.
 */
static int
sysctl_kern_proc_pathname(SYSCTL_HANDLER_ARGS)
{
	pid_t *pidp = (pid_t *)arg1;
	unsigned int arglen = arg2;
	struct proc *p;
	struct vnode *vp;
	char *retbuf, *freebuf;
	int error;

	if (arglen != 1)
		return (EINVAL);
	if (*pidp == -1) {	/* -1 means this process */
		p = curproc;
	} else {
		p = pfind(*pidp);
		if (p == NULL)
			return (ESRCH);
	}

	vp = p->p_textvp;
	if (vp == NULL) {
		if (p != curproc)
			PRELE(p);
		return (0);
	}
	vref(vp);
	error = vn_fullpath(p, vp, &retbuf, &freebuf, 0);
	vrele(vp);
	if (p != curproc)
		PRELE(p);
	if (error)
		return (error);
	error = SYSCTL_OUT(req, retbuf, strlen(retbuf) + 1);
	kfree(freebuf, M_TEMP);
	return (error);
}
SYSCTL_NODE(_kern, KERN_PROC, proc, CTLFLAG_RD, 0, "Process table");

SYSCTL_PROC(_kern_proc, KERN_PROC_ALL, all, CTLFLAG_RD|CTLTYPE_STRUCT,
	0, 0, sysctl_kern_proc, "S,proc", "Return entire process table");

SYSCTL_NODE(_kern_proc, KERN_PROC_PGRP, pgrp, CTLFLAG_RD,
	sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, KERN_PROC_TTY, tty, CTLFLAG_RD,
	sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, KERN_PROC_UID, uid, CTLFLAG_RD,
	sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, KERN_PROC_RUID, ruid, CTLFLAG_RD,
	sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, KERN_PROC_PID, pid, CTLFLAG_RD,
	sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, (KERN_PROC_ALL | KERN_PROC_FLAG_LWP), all_lwp,
	CTLFLAG_RD, sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, (KERN_PROC_PGRP | KERN_PROC_FLAG_LWP), pgrp_lwp,
	CTLFLAG_RD, sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, (KERN_PROC_TTY | KERN_PROC_FLAG_LWP), tty_lwp,
	CTLFLAG_RD, sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, (KERN_PROC_UID | KERN_PROC_FLAG_LWP), uid_lwp,
	CTLFLAG_RD, sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, (KERN_PROC_RUID | KERN_PROC_FLAG_LWP), ruid_lwp,
	CTLFLAG_RD, sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, (KERN_PROC_PID | KERN_PROC_FLAG_LWP), pid_lwp,
	CTLFLAG_RD, sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, KERN_PROC_ARGS, args, CTLFLAG_RW | CTLFLAG_ANYBODY,
	sysctl_kern_proc_args, "Process argument list");

SYSCTL_NODE(_kern_proc, KERN_PROC_CWD, cwd, CTLFLAG_RD | CTLFLAG_ANYBODY,
	sysctl_kern_proc_cwd, "Process current working directory");

static SYSCTL_NODE(_kern_proc, KERN_PROC_PATHNAME, pathname, CTLFLAG_RD,
	sysctl_kern_proc_pathname, "Process executable path");