4 * cc -I/usr/src/sys vnodeinfo.c -o /usr/local/bin/vnodeinfo -lkvm
8 * Dump the mountlist and related vnodes.
11 * Copyright (c) 2004 The DragonFly Project. All rights reserved.
13 * This code is derived from software contributed to The DragonFly Project
14 * by Matthew Dillon <dillon@backplane.com>
16 * Redistribution and use in source and binary forms, with or without
17 * modification, are permitted provided that the following conditions
20 * 1. Redistributions of source code must retain the above copyright
21 * notice, this list of conditions and the following disclaimer.
22 * 2. Redistributions in binary form must reproduce the above copyright
23 * notice, this list of conditions and the following disclaimer in
24 * the documentation and/or other materials provided with the
26 * 3. Neither the name of The DragonFly Project nor the names of its
27 * contributors may be used to endorse or promote products derived
28 * from this software without specific, prior written permission.
30 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
31 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
32 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
33 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
34 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
35 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
36 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
37 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
38 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
39 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
40 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
43 * $DragonFly: src/test/debug/vnodeinfo.c,v 1.13 2007/05/06 20:45:01 dillon Exp $
46 #define _KERNEL_STRUCTURES
47 #include <sys/param.h>
49 #include <sys/malloc.h>
50 #include <sys/signalvar.h>
51 #include <sys/namecache.h>
52 #include <sys/mount.h>
53 #include <sys/vnode.h>
57 #include <vm/vm_page.h>
58 #include <vm/vm_kern.h>
59 #include <vm/vm_object.h>
60 #include <vm/swap_pager.h>
61 #include <vm/vnode_pager.h>
63 #include <vfs/ufs/quota.h>
64 #include <vfs/ufs/inode.h>
/*
 * Kernel symbols resolved via kvm_nlist() in main().  main() indexes
 * Nl[0]..Nl[2]; the table header and the first entry (presumably the
 * mount list symbol) are not visible in this view -- TODO confirm.
 */
76 { "_vnode_inactive_list" },
77 { "_vnode_active_list" },
/*
 * Forward declarations for the local helpers below.  All kernel
 * memory is fetched through kkread(), a checked wrapper around
 * kvm_read(); the dump* helpers each copy one kernel structure into
 * a local and pretty-print it.
 */
81 static void kkread(kvm_t *kd, u_long addr, void *buf, size_t nbytes);
82 static struct mount *dumpmount(kvm_t *kd, struct mount *mp);
83 static struct vnode *dumpvp(kvm_t *kd, struct vnode *vp, int whichlist);
84 static void dumpbufs(kvm_t *kd, void *bufp, const char *id);
85 static void dumplocks(kvm_t *kd, struct lockf *lockf);
86 static void dumplockinfo(kvm_t *kd, struct lockf_range *item);
87 static int getobjpages(kvm_t *kd, struct vm_object *obj);
88 static int getobjvnpsize(kvm_t *kd, struct vm_object *obj);
/*
 * Entry point: parse options, open the kernel memory image (live
 * kernel or crash dump) with kvm_open(), resolve the symbols in Nl[]
 * with kvm_nlist(), then dump the mount list followed by the
 * inactive and active vnode lists.
 */
95 main(int ac, char **av)
102 const char *corefile = NULL; /* -M: core/crash-dump file */
103 const char *sysfile = NULL; /* -N: kernel image for symbols */
105 while ((ch = getopt(ac, av, "alnbM:N:")) != -1) {
128 fprintf(stderr, "%s [-M core] [-N system]\n", av[0]); /* usage on bad option */
133 if ((kd = kvm_open(sysfile, corefile, NULL, O_RDONLY, "kvm:")) == NULL) {
137 if (kvm_nlist(kd, Nl) != 0) {
/* Walk the mount list: Nl[0] holds the list head's kernel address. */
141 kkread(kd, Nl[0].n_value, &mp, sizeof(mp));
143 mp = dumpmount(kd, mp);
144 printf("INACTIVELIST {\n");
145 kkread(kd, Nl[1].n_value, &vp, sizeof(vp));
147 vp = dumpvp(kd, vp, 0); /* whichlist=0: follow v_list links */
149 printf("ACTIVELIST {\n");
150 kkread(kd, Nl[2].n_value, &vp, sizeof(vp));
152 vp = dumpvp(kd, vp, 0);
/*
 * dumpmount - copy the struct mount at kernel address mp into a
 * local, print its identification (from/on names), lock state, flags
 * and fsid, then dump every vnode on its mnt_nvnodelist.  Returns
 * the next mount on the kernel's mount list (mnt_list.tqe_next) so
 * the caller can iterate.
 */
157 static struct mount *
158 dumpmount(kvm_t *kd, struct mount *mp)
163 kkread(kd, (u_long)mp, &mnt, sizeof(mnt)); /* snapshot the mount */
164 printf("MOUNTPOINT %s on %s {\n",
165 mnt.mnt_stat.f_mntfromname, mnt.mnt_stat.f_mntonname);
166 printf(" lk_flags %08x count %08x holder = %p\n",
167 mnt.mnt_lock.lk_flags, mnt.mnt_lock.lk_count,
168 mnt.mnt_lock.lk_lockholder);
169 printf(" mnt_flag %08x mnt_kern_flag %08x\n",
170 mnt.mnt_flag, mnt.mnt_kern_flag);
171 printf(" mnt_nvnodelistsize %d\n", mnt.mnt_nvnodelistsize);
172 printf(" mnt_stat.f_fsid %08x %08x\n", mnt.mnt_stat.f_fsid.val[0],
173 mnt.mnt_stat.f_fsid.val[1]);
/* Dump each vnode belonging to this mount (per-mount list links). */
174 vp = mnt.mnt_nvnodelist.tqh_first;
176 vp = dumpvp(kd, vp, 1); /* whichlist=1: follow v_nmntvnodes */
180 return(mnt.mnt_list.tqe_next);
/*
 * vtype - return a printable name for an enum vtype value; values
 * without a name are formatted numerically into `buf`.
 * NOTE(review): the switch body and the declaration of `buf` are not
 * visible here -- presumably buf is static so the pointer remains
 * valid after return; confirm against the full source.
 */
184 vtype(enum vtype type)
210 snprintf(buf, sizeof(buf), "%d", (int)type);
/*
 * dumpvp - copy the struct vnode at kernel address vp into a local
 * and print it: state, refcount/auxrefs, type, decoded v_flag bits,
 * VM object page counts, lock state, one cached name (when enabled),
 * the clean/dirty buffer red-black trees, and (for UFS vnodes) any
 * advisory byte-range locks.  Returns the next vnode: the per-mount
 * link (v_nmntvnodes) or the global list link (v_list) -- presumably
 * selected by `whichlist`; the conditional is not visible here.
 */
214 static struct vnode *
215 dumpvp(kvm_t *kd, struct vnode *vp, int whichlist)
219 kkread(kd, (u_long)vp, &vn, sizeof(vn)); /* snapshot the vnode */
221 printf(" vnode %p.%d refcnt %08x auxcnt %d type=%s flags %08x",
222 vp, vn.v_state, vn.v_refcnt, vn.v_auxrefs, vtype(vn.v_type), vn.v_flag);
/* Backing VM object: resident pages and vnode-pager size, if any. */
224 if ((vn.v_flag & VOBJBUF) && vn.v_object) {
225 int npages = getobjpages(kd, vn.v_object);
226 int vnpsize = getobjvnpsize(kd, vn.v_object);
227 if (npages || vnpsize)
228 printf(" vmobjpgs=%d vnpsize=%d", npages, vnpsize);
/* Decode individual v_flag bits into their symbolic names. */
231 if (vn.v_flag & VROOT)
233 if (vn.v_flag & VTEXT)
235 if (vn.v_flag & VSYSTEM)
237 if (vn.v_flag & VISTTY)
240 if (vn.v_flag & VXLOCK)
242 if (vn.v_flag & VXWANT)
246 if (vn.v_flag & VRECLAIMED)
247 printf(" VRECLAIMED");
248 if (vn.v_flag & VINACTIVE)
249 printf(" VINACTIVE");
251 if (vn.v_flag & VOBJBUF)
254 if (vn.v_flag & VSWAPCACHE)
255 printf(" VSWAPCACHE");
257 switch(vn.v_flag & (VAGE0 | VAGE1)) {
272 if (vn.v_flag & VDOOMED)
276 if (vn.v_flag & VINFREE)
279 if (vn.v_flag & VONWORKLST)
280 printf(" VONWORKLST");
281 if (vn.v_flag & VOBJDIRTY)
282 printf(" VOBJDIRTY");
283 if (vn.v_flag & VMAYHAVELOCKS)
284 printf(" VMAYHAVELOCKS");
/* Report the vnode lock only when it is held or contended. */
288 if (vn.v_lock.lk_count || vn.v_lock.lk_lockholder != LK_NOTHREAD) {
289 printf("\tlk_flags %08x count %08x holder = %p\n",
290 vn.v_lock.lk_flags, vn.v_lock.lk_count,
291 vn.v_lock.lk_lockholder);
/* With names enabled, print the first cached name for this vnode. */
294 if (withnames && TAILQ_FIRST(&vn.v_namecache)) {
295 struct namecache ncp;
299 kkread(kd, (u_long)TAILQ_FIRST(&vn.v_namecache), &ncp, sizeof(ncp));
300 if ((nlen = ncp.nc_nlen) >= sizeof(buf)) /* clamp to local buffer */
301 nlen = sizeof(buf) - 1;
305 kkread(kd, (u_long)ncp.nc_name, buf, nlen);
307 printf("\tfilename %s\n", buf);
/* Dump the clean and dirty buffer cache RB trees, if non-empty. */
312 if (vn.v_rbclean_tree.rbh_root) {
313 printf("\tCLEAN BUFFERS\n");
314 dumpbufs(kd, vn.v_rbclean_tree.rbh_root, "ROOT");
316 if (vn.v_rbdirty_tree.rbh_root) {
317 printf("\tDIRTY BUFFERS\n");
318 dumpbufs(kd, vn.v_rbdirty_tree.rbh_root, "ROOT");
/* UFS only: v_data is the inode; fetch and dump its advisory locks. */
323 if (vn.v_tag == VT_UFS && vn.v_data) {
324 struct inode *ip = vn.v_data;
327 kkread(kd, (u_long)&ip->i_lockf, &lockf, sizeof(lockf));
328 dumplocks(kd, &lockf);
334 return(vn.v_nmntvnodes.tqe_next);
336 return(vn.v_list.tqe_next);
/*
 * dumpbufs - recursively print a buffer-cache red-black tree.  bufp
 * is the kernel address of a struct buf; id labels the node's
 * position ("ROOT", "LEFT", "RIGHT").  Recurses depth-first through
 * b_rbnode's child pointers (which are kernel addresses).
 */
340 dumpbufs(kvm_t *kd, void *bufp, const char *id)
344 kkread(kd, (u_long)bufp, &buf, sizeof(buf)); /* snapshot the buf */
345 printf("\t %-8s %p loffset %012llx/%05x foffset %08llx",
347 buf.b_bio1.bio_offset,
349 buf.b_bio2.bio_offset);
350 printf(" q=%d count=%08x flags=%08x refs=%08x dep=%p",
351 buf.b_qindex, buf.b_lock.lk_count,
352 buf.b_flags, buf.b_refs, buf.b_dep.lh_first);
355 if (buf.b_rbnode.rbe_left)
356 dumpbufs(kd, buf.b_rbnode.rbe_left, "LEFT");
357 if (buf.b_rbnode.rbe_right)
358 dumpbufs(kd, buf.b_rbnode.rbe_right, "RIGHT");
/*
 * dumplocks - walk and print both lists of a struct lockf (which was
 * already copied out of the kernel by the caller): the active range
 * list (lf_range) and the blocked-waiters list (lf_blocked).  Each
 * kernel lockf_range is copied into the local `item`; TAILQ_NEXT()
 * then pulls the next *kernel* address out of that local copy.
 */
362 dumplocks(kvm_t *kd, struct lockf *lockf)
364 struct lockf_range item;
365 struct lockf_range *scan;
367 if ((scan = TAILQ_FIRST(&lockf->lf_range)) != NULL) {
370 kkread(kd, (u_long)scan, &item, sizeof(item));
371 dumplockinfo(kd, &item);
372 } while ((scan = TAILQ_NEXT(&item, lf_link)) != NULL);
375 if ((scan = TAILQ_FIRST(&lockf->lf_blocked)) != NULL) {
378 kkread(kd, (u_long)scan, &item, sizeof(item));
379 dumplockinfo(kd, &item);
380 } while ((scan = TAILQ_NEXT(&item, lf_link)) != NULL);
/*
 * dumplockinfo - print one lockf_range (already copied into local
 * memory).  For POSIX locks the owning pid is fetched from the
 * kernel's struct proc, since lf_owner is a kernel address that must
 * not be dereferenced directly.
 */
387 dumplockinfo(kvm_t *kd, struct lockf_range *item)
391 if (item->lf_owner && (item->lf_flags & F_POSIX)) {
392 kkread(kd, (u_long)&item->lf_owner->p_pid, /* address arithmetic only */
393 &ownerpid, sizeof(ownerpid));
398 printf("\t ty=%d flgs=%04x %lld-%lld owner=%d\n",
399 item->lf_type, item->lf_flags,
400 item->lf_start, item->lf_end,
/*
 * getobjpages - copy the vm_object at kernel address obj and return
 * its resident page count.
 */
407 getobjpages(kvm_t *kd, struct vm_object *obj)
409 struct vm_object vmobj;
411 kkread(kd, (u_long)obj, &vmobj, sizeof(vmobj));
412 return(vmobj.resident_page_count);
/*
 * getobjvnpsize - copy the vm_object at kernel address obj and
 * return its size field truncated to int.
 */
417 getobjvnpsize(kvm_t *kd, struct vm_object *obj)
419 struct vm_object vmobj;
421 kkread(kd, (u_long)obj, &vmobj, sizeof(vmobj));
422 return ((int)vmobj.size);
426 kkread(kvm_t *kd, u_long addr, void *buf, size_t nbytes)
428 if (kvm_read(kd, addr, buf, nbytes) != nbytes) {