4 * cc -I/usr/src/sys vnodeinfo.c -o /usr/local/bin/vnodeinfo -lkvm
8 * Dump the mountlist and related vnodes.
11 * Copyright (c) 2004 The DragonFly Project. All rights reserved.
13 * This code is derived from software contributed to The DragonFly Project
14 * by Matthew Dillon <dillon@backplane.com>
16 * Redistribution and use in source and binary forms, with or without
17 * modification, are permitted provided that the following conditions
20 * 1. Redistributions of source code must retain the above copyright
21 * notice, this list of conditions and the following disclaimer.
22 * 2. Redistributions in binary form must reproduce the above copyright
23 * notice, this list of conditions and the following disclaimer in
24 * the documentation and/or other materials provided with the
26 * 3. Neither the name of The DragonFly Project nor the names of its
27 * contributors may be used to endorse or promote products derived
28 * from this software without specific, prior written permission.
30 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
31 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
32 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
33 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
34 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
35 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
36 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
37 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
38 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
39 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
40 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
45 #define _KERNEL_STRUCTURES
46 #include <sys/param.h>
48 #include <sys/malloc.h>
49 #include <sys/signalvar.h>
50 #include <sys/namecache.h>
51 #include <sys/mount.h>
52 #include <sys/vnode.h>
56 #include <vm/vm_page.h>
57 #include <vm/vm_kern.h>
58 #include <vm/vm_object.h>
59 #include <vm/swap_pager.h>
60 #include <vm/vnode_pager.h>
62 #include <vfs/ufs/quota.h>
63 #include <vfs/ufs/inode.h>
/*
 * Kernel symbol resolved via kvm_nlist().  NOTE(review): from the kkread()
 * calls in main(), Nl[0] appears to be the mountlist head, Nl[1] this
 * vnode_list_hash base, and Nl[2] ncpus — confirm against the full table.
 */
{ "_vnode_list_hash" },
/*
 * Forward declarations.  All helpers take the open kvm(3) handle; kkread()
 * is the common wrapper used to copy kernel structures into local storage.
 */
static void kkread(kvm_t *kd, u_long addr, void *buf, size_t nbytes);
static struct mount *dumpmount(kvm_t *kd, struct mount *mp);
static struct vnode *dumpvp(kvm_t *kd, struct vnode *vp, int whichlist, char *vfc_name);
static void dumpbufs(kvm_t *kd, void *bufp, const char *id);
static void dumplocks(kvm_t *kd, struct lockf *lockf);
static void dumplockinfo(kvm_t *kd, struct lockf_range *item);
static int getobjpages(kvm_t *kd, struct vm_object *obj);
static int getobjvnpsize(kvm_t *kd, struct vm_object *obj);
/*
 * Table mapping a filesystem name (as found in vfsconf.vfc_name) to a
 * callback that can dump that filesystem's private per-vnode data
 * (v_data).  dumpvp() scans this table when -p is in effect; the table
 * is terminated by an entry whose dumpfn is NULL (see the scan loop in
 * dumpvp()).
 */
static const struct dump_private_data {
	char vfc_name[MFSNAMELEN];	/* filesystem type name to match */
	void (*dumpfn)(kvm_t *, void *);	/* dumper for that fs's v_data */
/*
 * main:
 *	Open live kernel memory (or a crash dump given with -M/-N) via
 *	kvm_open(), resolve the symbols in Nl[], then produce three dumps:
 *	every mount point on the mountlist with its vnodes, the per-cpu
 *	inactive vnode lists, and the per-cpu active vnode lists.
 *
 *	Options (from the getopt string): -a -l -n -b -p plus -M core and
 *	-N system.  NOTE(review): the option-handling cases are not visible
 *	in this excerpt; presumably they set the withnames/fsprivate/etc.
 *	flags referenced in dumpvp() — confirm against the full source.
 */
102 main(int ac, char **av)
105 struct vnode_index *vib;
106 struct vnode_index vni;
110 const char *corefile = NULL;
111 const char *sysfile = NULL;
113 while ((ch = getopt(ac, av, "alnbM:N:p")) != -1) {
140 fprintf(stderr, "%s [-pbnla] [-M core] [-N system]\n", av[0]);
145 if ((kd = kvm_open(sysfile, corefile, NULL, O_RDONLY, "kvm:")) == NULL) {
149 if (kvm_nlist(kd, Nl) != 0) {
154 /* Mount points and their private data */
155 kkread(kd, Nl[0].n_value, &mp, sizeof(mp));
/* dumpmount() returns the next mountlist entry, so this walks the list */
157 mp = dumpmount(kd, mp);
160 * Get ncpus for the vnode lists, we could get it with a sysctl
161 * but since we're reading kernel memory, take advantage of it.
162 * Also read the base address of vnode_list_hash.
164 kkread(kd, Nl[1].n_value, &vib, sizeof(vib));
165 kkread(kd, Nl[2].n_value, &ncpus, sizeof(ncpus));
167 /* Per-CPU list of inactive vnodes */
168 printf("INACTIVELIST {\n");
170 for (int i = 0; i < ncpus; i++) {
/* copy cpu i's vnode_index out of the kernel, then walk its TAILQ */
173 kkread(kd, (u_long)(vib + i), &vni, sizeof(vni));
174 vp = vni.inactive_list.tqh_first;
/* whichlist=0: follow v_list links; NULL vfc_name = no private dump */
176 vp = dumpvp(kd, vp, 0, NULL);
180 /* Per-CPU list of active vnodes */
181 printf("ACTIVELIST {\n");
182 for (int i = 0; i < ncpus; i++) {
185 kkread(kd, (u_long)(vib + i), &vni, sizeof(vni));
186 vp = vni.active_list.tqh_first;
188 vp = dumpvp(kd, vp, 0,
/*
 * dumpmount:
 *	Copy one struct mount out of the kernel and print its identity
 *	(from/on names), lock state, flags, vnode-list size and fsid,
 *	then dump every vnode on its mnt_nvnodelist.
 *
 *	Returns the kernel address of the next mount on the mountlist
 *	(mnt_list.tqe_next) so the caller can iterate.
 */
195 static struct mount *
196 dumpmount(kvm_t *kd, struct mount *mp)
/* mp is a *kernel* address; copy the struct into local mnt first */
202 kkread(kd, (u_long)mp, &mnt, sizeof(mnt));
203 printf("MOUNTPOINT %s on %s {\n",
204 mnt.mnt_stat.f_mntfromname, mnt.mnt_stat.f_mntonname);
205 printf(" lk_flags %08x count %016jx holder = %p\n",
206 mnt.mnt_lock.lk_flags, mnt.mnt_lock.lk_count,
207 mnt.mnt_lock.lk_lockholder);
208 printf(" mnt_flag %08x mnt_kern_flag %08x\n",
209 mnt.mnt_flag, mnt.mnt_kern_flag);
210 printf(" mnt_nvnodelistsize %d\n", mnt.mnt_nvnodelistsize);
211 printf(" mnt_stat.f_fsid %08x %08x\n", mnt.mnt_stat.f_fsid.val[0],
212 mnt.mnt_stat.f_fsid.val[1]);
214 /* Dump fs private node data */
/* fetch vfsconf so dumpvp() can match vfc_name against dumplist[] */
215 kkread(kd, (u_long)mnt.mnt_vfc, &vfc, sizeof(vfc));
216 vp = mnt.mnt_nvnodelist.tqh_first;
/* whichlist=1: dumpvp() follows the v_nmntvnodes links */
218 vp = dumpvp(kd, vp, 1, vfc.vfc_name);
222 return(mnt.mnt_list.tqe_next);
/*
 * vtype:
 *	Map an enum vtype to a printable name for dumpvp()'s output.
 *	Unrecognized values fall through to a numeric rendering in a
 *	static buffer (so the returned pointer must be consumed before
 *	the next call).  NOTE(review): the per-type cases are not visible
 *	in this excerpt.
 */
226 vtype(enum vtype type)
252 snprintf(buf, sizeof(buf), "%d", (int)type);
/*
 * dumpvp:
 *	Copy one struct vnode out of the kernel and print everything known
 *	about it: state/refcounts/type, decoded v_flag bits, VM object page
 *	counts, lock state, the first namecache name (with -n), clean/dirty
 *	buffer RB-trees, UFS advisory locks, and optionally filesystem
 *	private data (with -p, via dumplist[]).
 *
 *	whichlist selects which TAILQ link is returned for iteration:
 *	the two return statements below hand back v_nmntvnodes.tqe_next or
 *	v_list.tqe_next.  NOTE(review): the branch choosing between them is
 *	not visible in this excerpt; presumably nonzero whichlist (mount
 *	traversal) selects v_nmntvnodes — confirm against the full source.
 */
256 static struct vnode *
257 dumpvp(kvm_t *kd, struct vnode *vp, int whichlist, char *vfc_name)
261 kkread(kd, (u_long)vp, &vn, sizeof(vn));
263 printf(" vnode %p.%d refcnt %08x auxcnt %d type=%s flags %08x",
264 vp, vn.v_state, vn.v_refcnt, vn.v_auxrefs, vtype(vn.v_type), vn.v_flag);
/* only consult the VM object when one is attached and VOBJBUF is set */
266 if ((vn.v_flag & VOBJBUF) && vn.v_object) {
267 int npages = getobjpages(kd, vn.v_object);
268 int vnpsize = getobjvnpsize(kd, vn.v_object);
269 if (npages || vnpsize)
270 printf(" vmobjpgs=%d vnpsize=%d", npages, vnpsize);
/* decode v_flag one bit at a time into symbolic names */
273 if (vn.v_flag & VROOT)
275 if (vn.v_flag & VTEXT)
277 if (vn.v_flag & VSYSTEM)
279 if (vn.v_flag & VISTTY)
282 if (vn.v_flag & VXLOCK)
284 if (vn.v_flag & VXWANT)
288 if (vn.v_flag & VRECLAIMED)
289 printf(" VRECLAIMED");
290 if (vn.v_flag & VINACTIVE)
291 printf(" VINACTIVE");
293 if (vn.v_flag & VOBJBUF)
296 if (vn.v_flag & VSWAPCACHE)
297 printf(" VSWAPCACHE");
/* VAGE0/VAGE1 form a 2-bit aging counter, decoded as a unit */
299 switch(vn.v_flag & (VAGE0 | VAGE1)) {
314 if (vn.v_flag & VDOOMED)
318 if (vn.v_flag & VINFREE)
321 if (vn.v_flag & VONWORKLST)
322 printf(" VONWORKLST");
323 if (vn.v_flag & VOBJDIRTY)
324 printf(" VOBJDIRTY");
325 if (vn.v_flag & VMAYHAVELOCKS)
326 printf(" VMAYHAVELOCKS");
/* only print lock details when the vnode lock is actually held/contended */
330 if (vn.v_lock.lk_count || vn.v_lock.lk_lockholder != NULL) {
331 printf("\tlk_flags %08x count %016jx holder = %p\n",
332 vn.v_lock.lk_flags, vn.v_lock.lk_count,
333 vn.v_lock.lk_lockholder);
/* -n: resolve the first namecache entry to a filename */
336 if (withnames && TAILQ_FIRST(&vn.v_namecache)) {
337 struct namecache ncp;
341 kkread(kd, (u_long)TAILQ_FIRST(&vn.v_namecache), &ncp, sizeof(ncp));
/* clamp the name length so the kkread below cannot overrun buf */
342 if ((nlen = ncp.nc_nlen) >= sizeof(buf))
343 nlen = sizeof(buf) - 1;
347 kkread(kd, (u_long)ncp.nc_name, buf, nlen);
349 printf("\tfilename %s\n", buf);
/* recursively dump the clean and dirty buffer RB-trees */
354 if (vn.v_rbclean_tree.rbh_root) {
355 printf("\tCLEAN BUFFERS\n");
356 dumpbufs(kd, vn.v_rbclean_tree.rbh_root, "ROOT");
358 if (vn.v_rbdirty_tree.rbh_root) {
359 printf("\tDIRTY BUFFERS\n");
360 dumpbufs(kd, vn.v_rbdirty_tree.rbh_root, "ROOT");
/* UFS vnodes: v_data is a struct inode; dump its advisory lock lists */
365 if (vn.v_tag == VT_UFS && vn.v_data) {
366 struct inode *ip = vn.v_data;
369 kkread(kd, (u_long)&ip->i_lockf, &lockf, sizeof(lockf));
370 dumplocks(kd, &lockf);
/* -p: look for a filesystem-specific private-data dumper */
374 if (fsprivate && vfc_name) {
376 * Actually find whether the filesystem can dump
377 * detailed inode information out of the vnode
379 const struct dump_private_data *dpd;
/* dumplist[] is NULL-dumpfn terminated; match on vfc_name */
381 for (dpd = dumplist; dpd->dumpfn != NULL; dpd++) {
382 if ((strcmp(dpd->vfc_name, vfc_name) == 0) &&
384 dpd->dumpfn(kd, vn.v_data);
389 return(vn.v_nmntvnodes.tqe_next);
391 return(vn.v_list.tqe_next);
/*
 * dumpbufs:
 *	Recursively dump a vnode's buffer RB-tree.  bufp is the kernel
 *	address of a struct buf (the tree node is embedded in it); id
 *	labels the node's position ("ROOT", "LEFT", "RIGHT") in the output.
 *	Recursion depth is bounded by the RB-tree height.
 */
395 dumpbufs(kvm_t *kd, void *bufp, const char *id)
399 kkread(kd, (u_long)bufp, &buf, sizeof(buf));
400 printf("\t %-8s %p loffset %012lx/%05x foffset %08lx",
402 buf.b_bio1.bio_offset,
404 buf.b_bio2.bio_offset);
405 printf(" q=%d count=%016jx flags=%08x refs=%08x dep=%p",
406 buf.b_qindex, buf.b_lock.lk_count,
407 buf.b_flags, buf.b_refs, buf.b_dep.lh_first);
/* descend into both subtrees; child pointers are kernel addresses */
410 if (buf.b_rbnode.rbe_left)
411 dumpbufs(kd, buf.b_rbnode.rbe_left, "LEFT");
412 if (buf.b_rbnode.rbe_right)
413 dumpbufs(kd, buf.b_rbnode.rbe_right, "RIGHT");
/*
 * dumplocks:
 *	Print both advisory-lock lists hanging off an inode's struct lockf:
 *	the granted ranges (lf_range) and the blocked waiters (lf_blocked).
 *
 *	Note the traversal idiom: each kernel lockf_range is copied into
 *	the local 'item', and TAILQ_NEXT(&item, ...) then reads the *kernel*
 *	next-pointer out of that local copy — so the walk follows kernel
 *	addresses without ever dereferencing them directly.
 */
417 dumplocks(kvm_t *kd, struct lockf *lockf)
419 struct lockf_range item;
420 struct lockf_range *scan;
/* granted lock ranges */
422 if ((scan = TAILQ_FIRST(&lockf->lf_range)) != NULL) {
425 kkread(kd, (u_long)scan, &item, sizeof(item));
426 dumplockinfo(kd, &item);
427 } while ((scan = TAILQ_NEXT(&item, lf_link)) != NULL);
/* lock requests currently blocked waiting on the ranges above */
430 if ((scan = TAILQ_FIRST(&lockf->lf_blocked)) != NULL) {
433 kkread(kd, (u_long)scan, &item, sizeof(item));
434 dumplockinfo(kd, &item);
435 } while ((scan = TAILQ_NEXT(&item, lf_link)) != NULL);
/*
 * dumplockinfo:
 *	Print one advisory lock range (type, flags, byte range, owner).
 *	item is a local copy already kkread() in by dumplocks(); lf_owner
 *	inside it is still a kernel proc pointer, so the owning pid is
 *	fetched with another kkread() — but only for F_POSIX locks, where
 *	the owner is a process.
 */
442 dumplockinfo(kvm_t *kd, struct lockf_range *item)
446 if (item->lf_owner && (item->lf_flags & F_POSIX)) {
447 kkread(kd, (u_long)&item->lf_owner->p_pid,
448 &ownerpid, sizeof(ownerpid));
453 printf("\t ty=%d flgs=%04x %ld-%ld owner=%d\n",
454 item->lf_type, item->lf_flags,
455 item->lf_start, item->lf_end,
/*
 * getobjpages:
 *	Return the resident page count of a VM object given its kernel
 *	address (copies the whole vm_object just to read one field).
 */
462 getobjpages(kvm_t *kd, struct vm_object *obj)
464 struct vm_object vmobj;
466 kkread(kd, (u_long)obj, &vmobj, sizeof(vmobj));
467 return(vmobj.resident_page_count);
/*
 * getobjvnpsize:
 *	Return a VM object's size (in pages) given its kernel address.
 *	The cast narrows vm_object.size to int, matching the caller's
 *	printf format; large objects would be truncated in the output.
 */
472 getobjvnpsize(kvm_t *kd, struct vm_object *obj)
474 struct vm_object vmobj;
476 kkread(kd, (u_long)obj, &vmobj, sizeof(vmobj));
477 return ((int)vmobj.size);
481 kkread(kvm_t *kd, u_long addr, void *buf, size_t nbytes)
483 if (kvm_read(kd, addr, buf, nbytes) != nbytes) {