/*
 * Copyright (c) 2007 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Simon 'corecode' Schubert <corecode@fs.ei.tum.de>
 * by Thomas E. Spanjaard <tgen@netphreax.net>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/kern_kinfo.c,v 1.17 2008/01/07 23:41:55 dillon Exp $
 */
/*
 * This is a source file used by both the kernel and libkvm.
 */

#ifndef _KERNEL
#define _KERNEL_STRUCTURES
#endif

/*
 * NOTE(review): this include block was reconstructed from a garbled listing
 * with missing lines (gaps in the original numbering); confirm the full
 * include list against repository history.
 */
#include <vm/vm_map.h>
#include <sys/kinfo.h>
#include <sys/mplock2.h>
#include <sys/globaldata.h>
#include <sys/systm.h>

dev_t dev2udev(cdev_t dev);	/* kvm_proc.c */
64 * Fill in a struct kinfo_proc.
66 * NOTE! We may be asked to fill in kinfo_proc for a zombied process, and
67 * the process may be in the middle of being deallocated. Check all pointers
71 fill_kinfo_proc(struct proc *p, struct kinfo_proc *kp)
77 sess = pgrp ? pgrp->pg_session : NULL;
79 bzero(kp, sizeof(*kp));
81 kp->kp_paddr = (uintptr_t)p;
82 kp->kp_fd = (uintptr_t)p->p_fd;
84 kp->kp_flags = p->p_flag;
85 kp->kp_stat = p->p_stat;
86 kp->kp_lock = p->p_lock;
87 kp->kp_acflag = p->p_acflag;
88 kp->kp_traceflag = p->p_traceflag;
89 kp->kp_siglist = p->p_siglist;
91 kp->kp_sigignore = p->p_sigignore; /* p_sigacts-> */
92 kp->kp_sigcatch = p->p_sigcatch; /* p_sigacts-> */
93 kp->kp_sigflag = p->p_sigacts->ps_flag;
95 kp->kp_start = p->p_start;
97 strncpy(kp->kp_comm, p->p_comm, sizeof(kp->kp_comm) - 1);
98 kp->kp_comm[sizeof(kp->kp_comm) - 1] = 0;
101 kp->kp_uid = p->p_ucred->cr_uid;
102 kp->kp_ngroups = p->p_ucred->cr_ngroups;
103 if (p->p_ucred->cr_groups) {
104 bcopy(p->p_ucred->cr_groups, kp->kp_groups,
105 NGROUPS * sizeof(kp->kp_groups[0]));
107 kp->kp_ruid = p->p_ucred->cr_ruid;
108 kp->kp_svuid = p->p_ucred->cr_svuid;
109 kp->kp_rgid = p->p_ucred->cr_rgid;
110 kp->kp_svgid = p->p_ucred->cr_svgid;
113 kp->kp_pid = p->p_pid;
115 kp->kp_ppid = p->p_oppid;
117 kp->kp_ppid = p->p_pptr != NULL ? p->p_pptr->p_pid : -1;
119 kp->kp_pgid = pgrp->pg_id;
120 kp->kp_jobc = pgrp->pg_jobc;
123 kp->kp_sid = sess->s_sid;
124 bcopy(sess->s_login, kp->kp_login, MAXLOGNAME);
125 if (sess->s_ttyvp != NULL)
126 kp->kp_auxflags |= KI_CTTY;
128 kp->kp_auxflags |= KI_SLEADER;
130 if (sess && (p->p_flag & P_CONTROLT) != 0 && sess->s_ttyp != NULL) {
131 kp->kp_tdev = dev2udev(sess->s_ttyp->t_dev);
132 if (sess->s_ttyp->t_pgrp != NULL)
133 kp->kp_tpgid = sess->s_ttyp->t_pgrp->pg_id;
136 if (sess->s_ttyp->t_session != NULL)
137 kp->kp_tsid = sess->s_ttyp->t_session->s_sid;
141 kp->kp_tdev = NOUDEV;
143 kp->kp_exitstat = p->p_xstat;
144 kp->kp_nthreads = p->p_nthreads;
145 kp->kp_nice = p->p_nice;
146 kp->kp_swtime = p->p_swtime;
149 kp->kp_vm_map_size = p->p_vmspace->vm_map.size;
150 kp->kp_vm_rssize = vmspace_resident_count(p->p_vmspace);
151 kp->kp_vm_prssize = vmspace_president_count(p->p_vmspace);
152 kp->kp_vm_swrss = p->p_vmspace->vm_swrss;
153 kp->kp_vm_tsize = p->p_vmspace->vm_tsize;
154 kp->kp_vm_dsize = p->p_vmspace->vm_dsize;
155 kp->kp_vm_ssize = p->p_vmspace->vm_ssize;
158 if (p->p_ucred && jailed(p->p_ucred))
159 kp->kp_jailid = p->p_ucred->cr_prison->pr_id;
162 kp->kp_cru = p->p_cru;
166 * Fill in a struct kinfo_lwp.
169 fill_kinfo_lwp(struct lwp *lwp, struct kinfo_lwp *kl)
171 bzero(kl, sizeof(*kl));
173 kl->kl_pid = lwp->lwp_proc->p_pid;
174 kl->kl_tid = lwp->lwp_tid;
176 kl->kl_flags = lwp->lwp_flag;
177 kl->kl_stat = lwp->lwp_stat;
178 kl->kl_lock = lwp->lwp_lock;
179 kl->kl_tdflags = lwp->lwp_thread->td_flags;
182 * The process/lwp stat may not reflect whether the process is
183 * actually sleeping or not if the related thread was directly
184 * descheduled by LWKT. Adjust the stat if the thread is not
185 * runnable and not waiting to be scheduled on a cpu by the
186 * user process scheduler.
188 if (kl->kl_stat == LSRUN) {
189 if ((kl->kl_tdflags & TDF_RUNQ) == 0 &&
190 (lwp->lwp_flag & LWP_ONRUNQ) == 0) {
191 kl->kl_stat = LSSLEEP;
195 kl->kl_mpcount = get_mplock_count(lwp->lwp_thread);
200 kl->kl_prio = lwp->lwp_usdata.bsd4.priority; /* XXX TGEN dangerous assumption */
201 kl->kl_tdprio = lwp->lwp_thread->td_pri;
202 kl->kl_rtprio = lwp->lwp_rtprio;
204 kl->kl_uticks = lwp->lwp_thread->td_uticks;
205 kl->kl_sticks = lwp->lwp_thread->td_sticks;
206 kl->kl_iticks = lwp->lwp_thread->td_iticks;
207 kl->kl_cpticks = lwp->lwp_cpticks;
208 kl->kl_pctcpu = lwp->lwp_pctcpu;
209 kl->kl_slptime = lwp->lwp_slptime;
210 kl->kl_origcpu = lwp->lwp_usdata.bsd4.batch;
211 kl->kl_estcpu = lwp->lwp_usdata.bsd4.estcpu;
212 kl->kl_cpuid = lwp->lwp_thread->td_gd->gd_cpuid;
214 kl->kl_ru = lwp->lwp_ru;
216 kl->kl_siglist = lwp->lwp_siglist;
217 kl->kl_sigmask = lwp->lwp_sigmask;
219 kl->kl_wchan = (uintptr_t)lwp->lwp_thread->td_wchan;
220 if (lwp->lwp_thread->td_wmesg) {
221 strncpy(kl->kl_wmesg, lwp->lwp_thread->td_wmesg, WMESGLEN);
222 kl->kl_wmesg[WMESGLEN] = 0;
227 * Fill in a struct kinfo_proc for kernel threads (i.e. those without proc).
230 fill_kinfo_proc_kthread(struct thread *td, struct kinfo_proc *kp)
232 bzero(kp, sizeof(*kp));
235 * Fill in fake proc information and semi-fake lwp info.
238 kp->kp_tdev = NOUDEV;
239 strncpy(kp->kp_comm, td->td_comm, sizeof(kp->kp_comm) - 1);
240 kp->kp_comm[sizeof(kp->kp_comm) - 1] = 0;
241 kp->kp_flags = P_SYSTEM;
242 kp->kp_stat = SACTIVE;
244 kp->kp_lwp.kl_pid = -1;
245 kp->kp_lwp.kl_tid = -1;
246 kp->kp_lwp.kl_tdflags = td->td_flags;
248 kp->kp_lwp.kl_mpcount = get_mplock_count(td);
250 kp->kp_lwp.kl_mpcount = 0;
253 kp->kp_lwp.kl_tdprio = td->td_pri;
254 kp->kp_lwp.kl_rtprio.type = RTP_PRIO_THREAD;
255 kp->kp_lwp.kl_rtprio.prio = td->td_pri;
257 kp->kp_lwp.kl_uticks = td->td_uticks;
258 kp->kp_lwp.kl_sticks = td->td_sticks;
259 kp->kp_lwp.kl_iticks = td->td_iticks;
260 kp->kp_lwp.kl_cpuid = td->td_gd->gd_cpuid;
262 kp->kp_lwp.kl_wchan = (uintptr_t)td->td_wchan;
263 if (td->td_flags & TDF_RUNQ)
264 kp->kp_lwp.kl_stat = LSRUN;
266 kp->kp_lwp.kl_stat = LSSLEEP;
268 strncpy(kp->kp_lwp.kl_wmesg, td->td_wmesg, WMESGLEN);
269 kp->kp_lwp.kl_wmesg[WMESGLEN] = 0;