2 * Copyright (c) 2014 John Baldwin
3 * Copyright (c) 2014, 2016 The FreeBSD Foundation
5 * Portions of this software were developed by Konstantin Belousov
6 * under sponsorship from the FreeBSD Foundation.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/capsicum.h>
38 #include <sys/mutex.h>
41 #include <sys/procctl.h>
43 #include <sys/syscallsubr.h>
44 #include <sys/sysproto.h>
49 #include <vm/vm_map.h>
50 #include <vm/vm_extern.h>
/*
 * Apply or clear process-protection on a single process.
 * Caller holds the process lock.  System processes, and processes the
 * calling thread lacks scheduling authority over (p_cansched), are skipped.
 * PPROT_SET sets P_PROTECTED (optionally P2_INHERIT_PROTECTED with
 * PPROT_INHERIT); otherwise both flags are cleared.
 * NOTE(review): extract is fragmentary — return statements and braces
 * from the original are not visible here.
 */
53 protect_setchild(struct thread *td, struct proc *p, int flags)
56 PROC_LOCK_ASSERT(p, MA_OWNED);
57 if (p->p_flag & P_SYSTEM || p_cansched(td, p) != 0)
59 if (flags & PPROT_SET) {
60 p->p_flag |= P_PROTECTED;
61 if (flags & PPROT_INHERIT)
62 p->p_flag2 |= P2_INHERIT_PROTECTED;
/* Clear path: remove protection and the inherit bit together. */
64 p->p_flag &= ~P_PROTECTED;
65 p->p_flag2 &= ~P2_INHERIT_PROTECTED;
/*
 * Apply protect_setchild() to 'top' and its descendants, walking the
 * process tree iteratively (no recursion) under proctree_lock.
 * Results are OR-ed into 'ret'.
 * NOTE(review): extract is fragmentary — the loop header, the back-up
 * traversal, and the return are not visible here.
 */
71 protect_setchildren(struct thread *td, struct proc *top, int flags)
78 sx_assert(&proctree_lock, SX_LOCKED);
80 ret |= protect_setchild(td, p, flags);
83 * If this process has children, descend to them next,
84 * otherwise do any siblings, and if done with this level,
85 * follow back up the tree (but not past top).
87 if (!LIST_EMPTY(&p->p_children))
88 p = LIST_FIRST(&p->p_children);
94 if (LIST_NEXT(p, p_sibling)) {
95 p = LIST_NEXT(p, p_sibling);
/*
 * PROC_SPROTECT handler: validate the PPROT operation and flags
 * (only PPROT_DESCEND and PPROT_INHERIT are accepted), check
 * PRIV_VM_MADV_PROTECT privilege, then apply to the single process
 * or to the whole subtree when PPROT_DESCEND is given.
 */
105 protect_set(struct thread *td, struct proc *p, int flags)
109 switch (PPROT_OP(flags)) {
117 if ((PPROT_FLAGS(flags) & ~(PPROT_DESCEND | PPROT_INHERIT)) != 0)
120 error = priv_check(td, PRIV_VM_MADV_PROTECT);
124 if (flags & PPROT_DESCEND)
125 ret = protect_setchildren(td, p, flags);
127 ret = protect_setchild(td, p, flags);
/*
 * PROC_REAP_ACQUIRE: mark the process as a reaper (P_TREE_REAPER)
 * under the exclusive proctree lock.  A process that is already a
 * reaper is rejected (error path elided in this extract).
 */
134 reap_acquire(struct thread *td, struct proc *p)
137 sx_assert(&proctree_lock, SX_XLOCKED);
140 if ((p->p_treeflag & P_TREE_REAPER) != 0)
142 p->p_treeflag |= P_TREE_REAPER;
144 * We do not reattach existing children and the whole tree
145 * under them to us, since p->p_reaper already seen them.
/*
 * PROC_REAP_RELEASE: give up the reaper role.  Requires the exclusive
 * proctree lock; fails if the process is not currently a reaper.
 * Existing reap-list children are handed off via
 * reaper_abandon_children().
 */
151 reap_release(struct thread *td, struct proc *p)
154 sx_assert(&proctree_lock, SX_XLOCKED);
159 if ((p->p_treeflag & P_TREE_REAPER) == 0)
161 reaper_abandon_children(p, false);
/*
 * PROC_REAP_STATUS: fill *rs with the reaper state visible from p.
 * Sets REAPER_STATUS_OWNED when p is not itself a reaper, and
 * REAPER_STATUS_REALINIT when the effective reaper is init.  Also
 * reports the reaper pid, a first descendant pid, and a count of
 * descendants that are not direct children of the reaper.
 * NOTE(review): extract is fragmentary — the assignment of 'reap'
 * and several control-flow lines are not visible here.
 */
166 reap_status(struct thread *td, struct proc *p,
167 struct procctl_reaper_status *rs)
169 struct proc *reap, *p2, *first_p;
171 sx_assert(&proctree_lock, SX_LOCKED);
172 bzero(rs, sizeof(*rs));
173 if ((p->p_treeflag & P_TREE_REAPER) == 0) {
177 rs->rs_flags |= REAPER_STATUS_OWNED;
179 if (reap == initproc)
180 rs->rs_flags |= REAPER_STATUS_REALINIT;
181 rs->rs_reaper = reap->p_pid;
182 rs->rs_descendants = 0;
184 if (!LIST_EMPTY(&reap->p_reaplist)) {
185 first_p = LIST_FIRST(&reap->p_children);
187 first_p = LIST_FIRST(&reap->p_reaplist);
188 rs->rs_pid = first_p->p_pid;
189 LIST_FOREACH(p2, &reap->p_reaplist, p_reapsibling) {
/* Direct children of the reaper do not count as "descendants". */
190 if (proc_realparent(p2) == reap)
192 rs->rs_descendants++;
/*
 * PROC_REAP_GETPIDS: copy out one procctl_reaper_pidinfo record per
 * process on the reaper's reap list.  The reap list is counted under
 * the shared proctree lock, the lock is dropped to malloc() the
 * temporary array (M_WAITOK may sleep), then retaken to fill it in;
 * the list may have changed in between, which the caller tolerates.
 * NOTE(review): extract is fragmentary — the count loop body, the
 * per-entry cursor advance, and the free()/return are not visible.
 */
201 reap_getpids(struct thread *td, struct proc *p, struct procctl_reaper_pids *rp)
203 struct proc *reap, *p2;
204 struct procctl_reaper_pidinfo *pi, *pip;
208 sx_assert(&proctree_lock, SX_LOCKED);
/* A non-reaper queries its own reaper's list. */
210 reap = (p->p_treeflag & P_TREE_REAPER) == 0 ? p->p_reaper : p;
213 LIST_FOREACH(p2, &reap->p_reaplist, p_reapsibling)
215 sx_unlock(&proctree_lock);
216 if (rp->rp_count < n)
218 pi = malloc(n * sizeof(*pi), M_TEMP, M_WAITOK);
219 sx_slock(&proctree_lock);
220 LIST_FOREACH(p2, &reap->p_reaplist, p_reapsibling) {
224 bzero(pip, sizeof(*pip));
225 pip->pi_pid = p2->p_pid;
226 pip->pi_subtree = p2->p_reapsubtree;
227 pip->pi_flags = REAPER_PIDINFO_VALID;
228 if (proc_realparent(p2) == reap)
229 pip->pi_flags |= REAPER_PIDINFO_CHILD;
230 if ((p2->p_treeflag & P_TREE_REAPER) != 0)
231 pip->pi_flags |= REAPER_PIDINFO_REAPER;
234 sx_sunlock(&proctree_lock);
235 error = copyout(pi, rp->rp_pids, i * sizeof(*pi));
237 sx_slock(&proctree_lock);
/*
 * Deliver rk->rk_sig to a single process p2 on behalf of reap_kill():
 * check signalling authority with p_cansignal(), send via pksignal()
 * on success, and record the pid of the first process that failed the
 * permission check in rk->rk_fpid (while *error is still ESRCH).
 * NOTE(review): extract is fragmentary — locking and the success
 * bookkeeping lines are not visible here.
 */
243 reap_kill_proc(struct thread *td, struct proc *p2, ksiginfo_t *ksi,
244 struct procctl_reaper_kill *rk, int *error)
249 error1 = p_cansignal(td, p2, rk->rk_sig);
251 pksignal(p2, rk->rk_sig, ksi);
254 } else if (*error == ESRCH) {
255 rk->rk_fpid = p2->p_pid;
/*
 * Work-queue node used by reap_kill() to walk nested reapers
 * iteratively: each tracker holds one reaper whose reap list still
 * needs to be visited.
 */
261 struct reap_kill_tracker {
263 TAILQ_ENTRY(reap_kill_tracker) link;
266 TAILQ_HEAD(reap_kill_tracker_head, reap_kill_tracker);
/*
 * Queue reaper p2 for later traversal by reap_kill().  The tracker
 * node is allocated with M_WAITOK and appended to the tail, giving a
 * breadth-first walk of nested reapers.
 */
269 reap_kill_sched(struct reap_kill_tracker_head *tracker, struct proc *p2)
271 struct reap_kill_tracker *t;
273 t = malloc(sizeof(struct reap_kill_tracker), M_TEMP, M_WAITOK);
275 TAILQ_INSERT_TAIL(tracker, t, link);
/*
 * PROC_REAP_KILL: signal the reaper's descendants.
 *
 * Rejected in capability mode.  rk_sig must be a valid signal number
 * and rk_flags may contain at most one of REAPER_KILL_CHILDREN /
 * REAPER_KILL_SUBTREE (both set is invalid).  A ksiginfo identifying
 * the caller (SI_USER, caller pid/ruid) accompanies each signal.
 *
 * REAPER_KILL_CHILDREN signals only the reaper's direct children;
 * otherwise the whole reap subtree is walked breadth-first using the
 * reap_kill_tracker queue, descending into nested reapers, optionally
 * restricted at the top level to rk_subtree (REAPER_KILL_SUBTREE).
 * NOTE(review): extract is fragmentary — tracker freeing and the
 * return are not visible here.
 */
279 reap_kill(struct thread *td, struct proc *p, struct procctl_reaper_kill *rk)
281 struct proc *reap, *p2;
283 struct reap_kill_tracker_head tracker;
284 struct reap_kill_tracker *t;
287 sx_assert(&proctree_lock, SX_LOCKED);
288 if (IN_CAPABILITY_MODE(td))
290 if (rk->rk_sig <= 0 || rk->rk_sig > _SIG_MAXSIG ||
291 (rk->rk_flags & ~(REAPER_KILL_CHILDREN |
292 REAPER_KILL_SUBTREE)) != 0 || (rk->rk_flags &
293 (REAPER_KILL_CHILDREN | REAPER_KILL_SUBTREE)) ==
294 (REAPER_KILL_CHILDREN | REAPER_KILL_SUBTREE))
/* A non-reaper acts on its own reaper. */
297 reap = (p->p_treeflag & P_TREE_REAPER) == 0 ? p->p_reaper : p;
299 ksi.ksi_signo = rk->rk_sig;
300 ksi.ksi_code = SI_USER;
301 ksi.ksi_pid = td->td_proc->p_pid;
302 ksi.ksi_uid = td->td_ucred->cr_ruid;
306 if ((rk->rk_flags & REAPER_KILL_CHILDREN) != 0) {
307 for (p2 = LIST_FIRST(&reap->p_children); p2 != NULL;
308 p2 = LIST_NEXT(p2, p_sibling)) {
309 reap_kill_proc(td, p2, &ksi, rk, &error);
311 * Do not end the loop on error, signal
316 TAILQ_INIT(&tracker);
317 reap_kill_sched(&tracker, reap);
318 while ((t = TAILQ_FIRST(&tracker)) != NULL) {
319 MPASS((t->parent->p_treeflag & P_TREE_REAPER) != 0);
320 TAILQ_REMOVE(&tracker, t, link);
321 for (p2 = LIST_FIRST(&t->parent->p_reaplist); p2 != NULL;
322 p2 = LIST_NEXT(p2, p_reapsibling)) {
/* rk_subtree filter applies only at the top-level reaper. */
323 if (t->parent == reap &&
324 (rk->rk_flags & REAPER_KILL_SUBTREE) != 0 &&
325 p2->p_reapsubtree != rk->rk_subtree)
327 if ((p2->p_treeflag & P_TREE_REAPER) != 0)
328 reap_kill_sched(&tracker, p2);
329 reap_kill_proc(td, p2, &ksi, rk, &error);
/*
 * PROC_TRACE_CTL handler: adjust the P2_NOTRACE / P2_NOTRACE_EXEC
 * debugging-permission flags under the process lock.  Rejected while
 * the process is being traced (P_TRACED) or ktraced (p_traceflag).
 * ENABLE is self-only and clears both flags; DISABLE_EXEC sets both;
 * DISABLE upgrades an exec-scoped disable (self-only) or sets
 * P2_NOTRACE outright.
 * NOTE(review): extract is fragmentary — returns/breaks and the
 * default case are not visible here.
 */
339 trace_ctl(struct thread *td, struct proc *p, int state)
342 PROC_LOCK_ASSERT(p, MA_OWNED);
345 * Ktrace changes p_traceflag from or to zero under the
346 * process lock, so the test does not need to acquire ktrace
349 if ((p->p_flag & P_TRACED) != 0 || p->p_traceflag != 0)
353 case PROC_TRACE_CTL_ENABLE:
354 if (td->td_proc != p)
356 p->p_flag2 &= ~(P2_NOTRACE | P2_NOTRACE_EXEC);
358 case PROC_TRACE_CTL_DISABLE_EXEC:
359 p->p_flag2 |= P2_NOTRACE_EXEC | P2_NOTRACE;
361 case PROC_TRACE_CTL_DISABLE:
362 if ((p->p_flag2 & P2_NOTRACE_EXEC) != 0) {
363 KASSERT((p->p_flag2 & P2_NOTRACE) != 0,
364 ("dandling P2_NOTRACE_EXEC"));
365 if (td->td_proc != p)
367 p->p_flag2 &= ~P2_NOTRACE_EXEC;
369 p->p_flag2 |= P2_NOTRACE;
/*
 * PROC_TRACE_STATUS handler: report tracing state.  With P2_NOTRACE
 * set the process cannot be traced (and must not currently be
 * P_TRACED); if the process is traced, *data receives the pid of its
 * current parent (the tracer).
 * NOTE(review): the values written on the not-traced paths are elided
 * in this extract.
 */
379 trace_status(struct thread *td, struct proc *p, int *data)
382 if ((p->p_flag2 & P2_NOTRACE) != 0) {
383 KASSERT((p->p_flag & P_TRACED) == 0,
384 ("%d traced but tracing disabled", p->p_pid));
386 } else if ((p->p_flag & P_TRACED) != 0) {
387 *data = p->p_pptr->p_pid;
/*
 * PROC_TRAPCAP_CTL handler: set or clear P2_TRAPCAP (SIGTRAP on
 * capability-mode ENOTCAPABLE/ECAPMODE errors) under the process lock.
 */
395 trapcap_ctl(struct thread *td, struct proc *p, int state)
398 PROC_LOCK_ASSERT(p, MA_OWNED);
401 case PROC_TRAPCAP_CTL_ENABLE:
402 p->p_flag2 |= P2_TRAPCAP;
404 case PROC_TRAPCAP_CTL_DISABLE:
405 p->p_flag2 &= ~P2_TRAPCAP;
/*
 * PROC_TRAPCAP_STATUS handler: report the current P2_TRAPCAP setting
 * as PROC_TRAPCAP_CTL_ENABLE or PROC_TRAPCAP_CTL_DISABLE.
 */
414 trapcap_status(struct thread *td, struct proc *p, int *data)
417 *data = (p->p_flag2 & P2_TRAPCAP) != 0 ? PROC_TRAPCAP_CTL_ENABLE :
418 PROC_TRAPCAP_CTL_DISABLE;
/*
 * PROC_NO_NEW_PRIVS_CTL handler: one-way switch — only ENABLE is
 * accepted, which sets P2_NO_NEW_PRIVS.  There is deliberately no
 * disable path.
 */
423 no_new_privs_ctl(struct thread *td, struct proc *p, int state)
426 PROC_LOCK_ASSERT(p, MA_OWNED);
428 if (state != PROC_NO_NEW_PRIVS_ENABLE)
430 p->p_flag2 |= P2_NO_NEW_PRIVS;
/*
 * PROC_NO_NEW_PRIVS_STATUS handler: report whether P2_NO_NEW_PRIVS
 * is set.
 */
435 no_new_privs_status(struct thread *td, struct proc *p, int *data)
438 *data = (p->p_flag2 & P2_NO_NEW_PRIVS) != 0 ?
439 PROC_NO_NEW_PRIVS_ENABLE : PROC_NO_NEW_PRIVS_DISABLE;
/*
 * PROC_PROTMAX_CTL handler: select the implicit-PROT_MAX policy for
 * the process.  The two P2_PROTMAX_* flags are kept mutually
 * exclusive: FORCE_ENABLE/FORCE_DISABLE set one and clear the other,
 * NOFORCE clears both (fall back to the global default).
 */
444 protmax_ctl(struct thread *td, struct proc *p, int state)
446 PROC_LOCK_ASSERT(p, MA_OWNED);
449 case PROC_PROTMAX_FORCE_ENABLE:
450 p->p_flag2 &= ~P2_PROTMAX_DISABLE;
451 p->p_flag2 |= P2_PROTMAX_ENABLE;
453 case PROC_PROTMAX_FORCE_DISABLE:
454 p->p_flag2 |= P2_PROTMAX_DISABLE;
455 p->p_flag2 &= ~P2_PROTMAX_ENABLE;
457 case PROC_PROTMAX_NOFORCE:
458 p->p_flag2 &= ~(P2_PROTMAX_ENABLE | P2_PROTMAX_DISABLE);
/*
 * PROC_PROTMAX_STATUS handler: report the per-process policy flag
 * plus PROC_PROTMAX_ACTIVE when kern_mmap_maxprot() shows the policy
 * currently takes effect for this process.
 * NOTE(review): the default case and the final *data assignment are
 * elided in this extract.
 */
467 protmax_status(struct thread *td, struct proc *p, int *data)
471 switch (p->p_flag2 & (P2_PROTMAX_ENABLE | P2_PROTMAX_DISABLE)) {
473 d = PROC_PROTMAX_NOFORCE;
475 case P2_PROTMAX_ENABLE:
476 d = PROC_PROTMAX_FORCE_ENABLE;
478 case P2_PROTMAX_DISABLE:
479 d = PROC_PROTMAX_FORCE_DISABLE;
482 if (kern_mmap_maxprot(p, PROT_READ) == PROT_READ)
483 d |= PROC_PROTMAX_ACTIVE;
/*
 * PROC_ASLR_CTL handler: select the ASLR policy for future execs.
 * Mirrors protmax_ctl(): FORCE_ENABLE/FORCE_DISABLE set one of the
 * mutually-exclusive P2_ASLR_* flags and clear the other; NOFORCE
 * clears both.
 */
489 aslr_ctl(struct thread *td, struct proc *p, int state)
492 PROC_LOCK_ASSERT(p, MA_OWNED);
495 case PROC_ASLR_FORCE_ENABLE:
496 p->p_flag2 &= ~P2_ASLR_DISABLE;
497 p->p_flag2 |= P2_ASLR_ENABLE;
499 case PROC_ASLR_FORCE_DISABLE:
500 p->p_flag2 |= P2_ASLR_DISABLE;
501 p->p_flag2 &= ~P2_ASLR_ENABLE;
503 case PROC_ASLR_NOFORCE:
504 p->p_flag2 &= ~(P2_ASLR_ENABLE | P2_ASLR_DISABLE);
/*
 * PROC_ASLR_STATUS handler: report the policy flag and, for a live
 * process (not P_WEXIT), OR in PROC_ASLR_ACTIVE when the current
 * vmspace map has MAP_ASLR set.  Takes a vmspace reference to read
 * the map flags safely.
 * NOTE(review): vmspace release, the *data assignment, and the
 * default case are elided in this extract.
 */
513 aslr_status(struct thread *td, struct proc *p, int *data)
518 switch (p->p_flag2 & (P2_ASLR_ENABLE | P2_ASLR_DISABLE)) {
520 d = PROC_ASLR_NOFORCE;
523 d = PROC_ASLR_FORCE_ENABLE;
525 case P2_ASLR_DISABLE:
526 d = PROC_ASLR_FORCE_DISABLE;
529 if ((p->p_flag & P_WEXIT) == 0) {
532 vm = vmspace_acquire_ref(p);
534 if ((vm->vm_map.flags & MAP_ASLR) != 0)
535 d |= PROC_ASLR_ACTIVE;
/*
 * PROC_STACKGAP_CTL handler: control the stack-gap for the current
 * image and for future execs.  State may combine one ENABLE/DISABLE
 * bit with one ENABLE_EXEC/DISABLE_EXEC bit; anything else is
 * rejected.  Re-enabling a disabled gap for the current image is
 * refused (error path elided) — the gap cannot be reinstated once
 * P2_STKGAP_DISABLE is set; the *_EXEC variants toggle
 * P2_STKGAP_DISABLE_EXEC freely.
 */
546 stackgap_ctl(struct thread *td, struct proc *p, int state)
548 PROC_LOCK_ASSERT(p, MA_OWNED);
550 if ((state & ~(PROC_STACKGAP_ENABLE | PROC_STACKGAP_DISABLE |
551 PROC_STACKGAP_ENABLE_EXEC | PROC_STACKGAP_DISABLE_EXEC)) != 0)
553 switch (state & (PROC_STACKGAP_ENABLE | PROC_STACKGAP_DISABLE)) {
554 case PROC_STACKGAP_ENABLE:
555 if ((p->p_flag2 & P2_STKGAP_DISABLE) != 0)
558 case PROC_STACKGAP_DISABLE:
559 p->p_flag2 |= P2_STKGAP_DISABLE;
566 switch (state & (PROC_STACKGAP_ENABLE_EXEC |
567 PROC_STACKGAP_DISABLE_EXEC)) {
568 case PROC_STACKGAP_ENABLE_EXEC:
569 p->p_flag2 &= ~P2_STKGAP_DISABLE_EXEC;
571 case PROC_STACKGAP_DISABLE_EXEC:
572 p->p_flag2 |= P2_STKGAP_DISABLE_EXEC;
/*
 * PROC_STACKGAP_STATUS handler: report both the current-image and
 * the on-exec stack-gap settings as a combined bitmask.
 */
583 stackgap_status(struct thread *td, struct proc *p, int *data)
585 PROC_LOCK_ASSERT(p, MA_OWNED);
587 *data = (p->p_flag2 & P2_STKGAP_DISABLE) != 0 ? PROC_STACKGAP_DISABLE :
588 PROC_STACKGAP_ENABLE;
589 *data |= (p->p_flag2 & P2_STKGAP_DISABLE_EXEC) != 0 ?
590 PROC_STACKGAP_DISABLE_EXEC : PROC_STACKGAP_ENABLE_EXEC;
/*
 * PROC_WXMAP_CTL handler: control W^X mapping enforcement.  Rejected
 * on an exiting process (P_WEXIT).  PERMIT sets P2_WXORX_DISABLE and
 * clears MAP_WXORX on the live vm_map (taking a vmspace reference);
 * DISALLOW_EXEC sets P2_WXORX_ENABLE_EXEC so enforcement starts at
 * the next exec.
 * NOTE(review): map locking, vmspace release, and breaks/returns are
 * elided in this extract.
 */
595 wxmap_ctl(struct thread *td, struct proc *p, int state)
600 PROC_LOCK_ASSERT(p, MA_OWNED);
601 if ((p->p_flag & P_WEXIT) != 0)
605 case PROC_WX_MAPPINGS_PERMIT:
606 p->p_flag2 |= P2_WXORX_DISABLE;
609 vm = vmspace_acquire_ref(p);
613 map->flags &= ~MAP_WXORX;
620 case PROC_WX_MAPPINGS_DISALLOW_EXEC:
621 p->p_flag2 |= P2_WXORX_ENABLE_EXEC;
/*
 * PROC_WXMAP_STATUS handler: build a bitmask from the per-process
 * W^X flags (PERMIT, DISALLOW_EXEC) and OR in PROC_WXORX_ENFORCE
 * when the live vm_map actually has MAP_WXORX set.  Rejected on an
 * exiting process.
 * NOTE(review): vmspace release and the *data assignment are elided
 * in this extract.
 */
631 wxmap_status(struct thread *td, struct proc *p, int *data)
636 PROC_LOCK_ASSERT(p, MA_OWNED);
637 if ((p->p_flag & P_WEXIT) != 0)
641 if ((p->p_flag2 & P2_WXORX_DISABLE) != 0)
642 d |= PROC_WX_MAPPINGS_PERMIT;
643 if ((p->p_flag2 & P2_WXORX_ENABLE_EXEC) != 0)
644 d |= PROC_WX_MAPPINGS_DISALLOW_EXEC;
647 vm = vmspace_acquire_ref(p);
649 if ((vm->vm_map.flags & MAP_WXORX) != 0)
650 d |= PROC_WXORX_ENFORCE;
/*
 * Per-command dispatch metadata, indexed by the PROC_* command value:
 * lock_tree selects how kern_procctl() must hold proctree_lock
 * (SA_UNLOCKED / SA_SLOCKED / SA_XLOCKED), and one_proc restricts the
 * command to a single process (idtype P_PID) rather than a group.
 */
659 struct procctl_cmd_info {
663 static const struct procctl_cmd_info procctl_cmds_info[] = {
665 { .lock_tree = SA_SLOCKED, .one_proc = false, },
/* Reaper acquire/release mutate the tree: exclusive lock. */
666 [PROC_REAP_ACQUIRE] =
667 { .lock_tree = SA_XLOCKED, .one_proc = true, },
668 [PROC_REAP_RELEASE] =
669 { .lock_tree = SA_XLOCKED, .one_proc = true, },
671 { .lock_tree = SA_SLOCKED, .one_proc = true, },
672 [PROC_REAP_GETPIDS] =
673 { .lock_tree = SA_SLOCKED, .one_proc = true, },
675 { .lock_tree = SA_SLOCKED, .one_proc = true, },
677 { .lock_tree = SA_SLOCKED, .one_proc = false, },
678 [PROC_TRACE_STATUS] =
679 { .lock_tree = SA_UNLOCKED, .one_proc = true, },
681 { .lock_tree = SA_SLOCKED, .one_proc = false, },
682 [PROC_TRAPCAP_STATUS] =
683 { .lock_tree = SA_UNLOCKED, .one_proc = true, },
684 [PROC_PDEATHSIG_CTL] =
685 { .lock_tree = SA_UNLOCKED, .one_proc = true, },
686 [PROC_PDEATHSIG_STATUS] =
687 { .lock_tree = SA_UNLOCKED, .one_proc = true, },
689 { .lock_tree = SA_UNLOCKED, .one_proc = true, },
691 { .lock_tree = SA_UNLOCKED, .one_proc = true, },
693 { .lock_tree = SA_UNLOCKED, .one_proc = true, },
694 [PROC_PROTMAX_STATUS] =
695 { .lock_tree = SA_UNLOCKED, .one_proc = true, },
696 [PROC_STACKGAP_CTL] =
697 { .lock_tree = SA_UNLOCKED, .one_proc = true, },
698 [PROC_STACKGAP_STATUS] =
699 { .lock_tree = SA_UNLOCKED, .one_proc = true, },
700 [PROC_NO_NEW_PRIVS_CTL] =
701 { .lock_tree = SA_SLOCKED, .one_proc = true, },
702 [PROC_NO_NEW_PRIVS_STATUS] =
703 { .lock_tree = SA_UNLOCKED, .one_proc = true, },
705 { .lock_tree = SA_UNLOCKED, .one_proc = true, },
706 [PROC_WXMAP_STATUS] =
707 { .lock_tree = SA_UNLOCKED, .one_proc = true, },
/*
 * procctl(2) system call entry point.
 *
 * Machine-dependent commands (>= PROC_PROCCTL_MD_MIN) are forwarded
 * to cpu_procctl().  Otherwise: copy in the per-command argument
 * from uap->data (an int, a reaper struct, or nothing), call
 * kern_procctl(), then copy results back out for the *_STATUS /
 * REAP_* commands.  REAP_KILL copies rk back out even on error so
 * userland sees rk_fpid.
 * NOTE(review): extract is fragmentary — several case labels, the
 * union 'x' declaration, 'data' setup, and returns are elided.
 */
711 sys_procctl(struct thread *td, struct procctl_args *uap)
715 struct procctl_reaper_status rs;
716 struct procctl_reaper_pids rp;
717 struct procctl_reaper_kill rk;
719 int error, error1, flags, signum;
721 if (uap->com >= PROC_PROCCTL_MD_MIN)
722 return (cpu_procctl(td, uap->idtype, uap->id,
723 uap->com, uap->data));
/* Commands taking a plain int argument: copy it in. */
727 case PROC_PROTMAX_CTL:
729 case PROC_STACKGAP_CTL:
731 case PROC_TRAPCAP_CTL:
732 case PROC_NO_NEW_PRIVS_CTL:
734 error = copyin(uap->data, &flags, sizeof(flags));
/* Reaper acquire/release take no argument at all. */
739 case PROC_REAP_ACQUIRE:
740 case PROC_REAP_RELEASE:
741 if (uap->data != NULL)
745 case PROC_REAP_STATUS:
748 case PROC_REAP_GETPIDS:
749 error = copyin(uap->data, &x.rp, sizeof(x.rp));
755 error = copyin(uap->data, &x.rk, sizeof(x.rk));
/* Status queries need no input; output is copied out below. */
760 case PROC_ASLR_STATUS:
761 case PROC_PROTMAX_STATUS:
762 case PROC_STACKGAP_STATUS:
763 case PROC_TRACE_STATUS:
764 case PROC_TRAPCAP_STATUS:
765 case PROC_NO_NEW_PRIVS_STATUS:
766 case PROC_WXMAP_STATUS:
769 case PROC_PDEATHSIG_CTL:
770 error = copyin(uap->data, &signum, sizeof(signum));
775 case PROC_PDEATHSIG_STATUS:
781 error = kern_procctl(td, uap->idtype, uap->id, uap->com, data);
/* Copy results back to userland where the command produces output. */
783 case PROC_REAP_STATUS:
785 error = copyout(&x.rs, uap->data, sizeof(x.rs));
788 error1 = copyout(&x.rk, uap->data, sizeof(x.rk));
792 case PROC_ASLR_STATUS:
793 case PROC_PROTMAX_STATUS:
794 case PROC_STACKGAP_STATUS:
795 case PROC_TRACE_STATUS:
796 case PROC_TRAPCAP_STATUS:
797 case PROC_NO_NEW_PRIVS_STATUS:
798 case PROC_WXMAP_STATUS:
800 error = copyout(&flags, uap->data, sizeof(flags));
802 case PROC_PDEATHSIG_STATUS:
804 error = copyout(&signum, uap->data, sizeof(signum));
/*
 * Dispatch a procctl command to its handler for one process.
 * Called with the process locked; each handler is listed in
 * procctl_cmds_info for its proctree_lock requirements.
 */
811 kern_procctl_single(struct thread *td, struct proc *p, int com, void *data)
814 PROC_LOCK_ASSERT(p, MA_OWNED);
817 return (aslr_ctl(td, p, *(int *)data));
818 case PROC_ASLR_STATUS:
819 return (aslr_status(td, p, data));
821 return (protect_set(td, p, *(int *)data));
822 case PROC_PROTMAX_CTL:
823 return (protmax_ctl(td, p, *(int *)data));
824 case PROC_PROTMAX_STATUS:
825 return (protmax_status(td, p, data));
826 case PROC_STACKGAP_CTL:
827 return (stackgap_ctl(td, p, *(int *)data));
828 case PROC_STACKGAP_STATUS:
829 return (stackgap_status(td, p, data));
830 case PROC_REAP_ACQUIRE:
831 return (reap_acquire(td, p));
832 case PROC_REAP_RELEASE:
833 return (reap_release(td, p));
834 case PROC_REAP_STATUS:
835 return (reap_status(td, p, data));
836 case PROC_REAP_GETPIDS:
837 return (reap_getpids(td, p, data));
839 return (reap_kill(td, p, data));
841 return (trace_ctl(td, p, *(int *)data));
842 case PROC_TRACE_STATUS:
843 return (trace_status(td, p, data));
844 case PROC_TRAPCAP_CTL:
845 return (trapcap_ctl(td, p, *(int *)data));
846 case PROC_TRAPCAP_STATUS:
847 return (trapcap_status(td, p, data));
848 case PROC_NO_NEW_PRIVS_CTL:
849 return (no_new_privs_ctl(td, p, *(int *)data));
850 case PROC_NO_NEW_PRIVS_STATUS:
851 return (no_new_privs_status(td, p, data));
853 return (wxmap_ctl(td, p, *(int *)data));
854 case PROC_WXMAP_STATUS:
855 return (wxmap_status(td, p, data));
/*
 * Kernel entry for procctl: resolve the target (single pid or a
 * process group per idtype), acquire proctree_lock in the mode the
 * command's procctl_cmds_info entry demands, and invoke
 * kern_procctl_single() on each target.
 *
 * PROC_PDEATHSIG_CTL/STATUS are handled inline: they apply only to
 * the calling process (id 0 or the caller's own pid) and just
 * read/write p_pdeathsig after validating the signal number.
 *
 * For a group, p_cansee()-invisible and PRS_NEW members are skipped;
 * the first error is remembered but the operation succeeds if at
 * least one member completes.
 * NOTE(review): this function continues past the end of the visible
 * chunk; the pfind/pgfind lookup and final return are not shown.
 */
862 kern_procctl(struct thread *td, idtype_t idtype, id_t id, int com, void *data)
866 const struct procctl_cmd_info *cmd_info;
867 int error, first_error, ok;
870 MPASS(com > 0 && com < nitems(procctl_cmds_info));
871 cmd_info = &procctl_cmds_info[com];
872 if (idtype != P_PID && cmd_info->one_proc)
876 case PROC_PDEATHSIG_CTL:
877 signum = *(int *)data;
879 if ((id != 0 && id != p->p_pid) ||
880 (signum != 0 && !_SIG_VALID(signum)))
883 p->p_pdeathsig = signum;
886 case PROC_PDEATHSIG_STATUS:
888 if (id != 0 && id != p->p_pid)
891 *(int *)data = p->p_pdeathsig;
/* Take proctree_lock as the command's table entry requires. */
896 switch (cmd_info->lock_tree) {
898 sx_xlock(&proctree_lock);
901 sx_slock(&proctree_lock);
912 error = p_cansee(td, p);
914 error = kern_procctl_single(td, p, com, data);
919 * Attempt to apply the operation to all members of the
920 * group. Ignore processes in the group that can't be
921 * seen. Ignore errors so long as at least one process is
922 * able to complete the request successfully.
932 LIST_FOREACH(p, &pg->pg_members, p_pglist) {
934 if (p->p_state == PRS_NEW || p_cansee(td, p) != 0) {
938 error = kern_procctl_single(td, p, com, data);
942 else if (first_error == 0)
947 else if (first_error != 0)
951 * Was not able to see any processes in the
/* Drop proctree_lock in the matching mode. */
961 switch (cmd_info->lock_tree) {
963 sx_xunlock(&proctree_lock);
966 sx_sunlock(&proctree_lock);