4 * cc -I/usr/src/sys vmpageinfo.c -o /usr/local/bin/vmpageinfo -lkvm
8 * Validate the vm_page_buckets[] hash array against the vm_page_array
11 * Copyright (c) 2004 The DragonFly Project. All rights reserved.
13 * This code is derived from software contributed to The DragonFly Project
14 * by Matthew Dillon <dillon@backplane.com>
16 * Redistribution and use in source and binary forms, with or without
17 * modification, are permitted provided that the following conditions
20 * 1. Redistributions of source code must retain the above copyright
21 * notice, this list of conditions and the following disclaimer.
22 * 2. Redistributions in binary form must reproduce the above copyright
23 * notice, this list of conditions and the following disclaimer in
24 * the documentation and/or other materials provided with the
26 * 3. Neither the name of The DragonFly Project nor the names of its
27 * contributors may be used to endorse or promote products derived
28 * from this software without specific, prior written permission.
30 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
31 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
32 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
33 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
34 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
35 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
36 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
37 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
38 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
39 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
40 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
43 * $DragonFly: src/test/debug/vmpageinfo.c,v 1.2 2006/05/23 01:00:05 dillon Exp $
46 #define _KERNEL_STRUCTURES_
47 #include <sys/param.h>
49 #include <sys/malloc.h>
50 #include <sys/signalvar.h>
51 #include <sys/vnode.h>
52 #include <sys/namecache.h>
53 #include <sys/slaballoc.h>
56 #include <vm/vm_page.h>
57 #include <vm/vm_kern.h>
58 #include <vm/vm_page.h>
59 #include <vm/vm_object.h>
60 #include <vm/swap_pager.h>
61 #include <vm/vnode_pager.h>
/*
 * Nl[]: kernel symbols resolved with kvm_nlist().  NOTE(review): only a
 * subset of the table is visible in this fragment; the "_vm_page_array"
 * entry (and possibly others) is not shown here.
 */
{ "_vm_page_buckets" },
{ "_vm_page_hash_mask" },
{ "_vm_page_array_size" },
/*
 * Userspace mirrors of the kernel globals, filled in by kkread() in
 * main() from the addresses resolved in Nl[].
 */
struct vm_page **vm_page_buckets;	/* kernel hash table of page chains */
int vm_page_hash_mask;			/* hash mask; buckets indexed 0..mask */
struct vm_page *vm_page_array;		/* kernel address of the page array */
int vm_page_array_size;			/* number of entries in vm_page_array */
/* Verify a page appears on the hash bucket chain it hashes to. */
void checkpage(kvm_t *kd, vm_page_t mptr, vm_page_t m, struct vm_object *obj);
/* Cached bulk read of one struct vm_page out of the kernel page array. */
static void kkread_vmpage(kvm_t *kd, u_long addr, vm_page_t m);
/* Read kernel memory; kkread() treats failure as fatal, kkread_err() reports it. */
static void kkread(kvm_t *kd, u_long addr, void *buf, size_t nbytes);
static int kkread_err(kvm_t *kd, u_long addr, void *buf, size_t nbytes);
/* Slab-zone tracking helpers: record candidate zone addresses, dump summary. */
static void addsltrack(vm_page_t m);
static void dumpsltrack(kvm_t *kd);
101 main(int ac, char **av)
103 const char *corefile = NULL;
104 const char *sysfile = NULL;
106 struct vm_object obj;
117 while ((ch = getopt(ac, av, "M:N:dv")) != -1) {
132 fprintf(stderr, "%s [-M core] [-N system]\n", av[0]);
139 if ((kd = kvm_open(sysfile, corefile, NULL, O_RDONLY, "kvm:")) == NULL) {
143 if (kvm_nlist(kd, Nl) != 0) {
149 kkread(kd, Nl[0].n_value, &vm_page_buckets, sizeof(vm_page_buckets));
150 kkread(kd, Nl[1].n_value, &vm_page_hash_mask, sizeof(vm_page_hash_mask));
152 kkread(kd, Nl[0].n_value, &vm_page_array, sizeof(vm_page_array));
153 kkread(kd, Nl[1].n_value, &vm_page_array_size, sizeof(vm_page_array_size));
156 * Scan the vm_page_array validating all pages with associated objects
158 for (i = 0; i < vm_page_array_size; ++i) {
160 printf("page %d\r", i);
163 kkread_vmpage(kd, (u_long)&vm_page_array[i], &m);
165 kkread(kd, (u_long)m.object, &obj, sizeof(obj));
166 checkpage(kd, &vm_page_array[i], &m, &obj);
169 if (m.queue >= PQ_HOLD) {
171 } else if (m.queue >= PQ_CACHE) {
173 } else if (m.queue >= PQ_ACTIVE) {
175 } else if (m.queue >= PQ_INACTIVE) {
177 } else if (m.queue >= PQ_FREE) {
182 printf("page %p obj %p/%-8ju(%016jx) val=%02x dty=%02x hold=%d "
183 "wire=%-2d act=%-3d busy=%d %8s",
187 (intmax_t)m.pindex * PAGE_SIZE,
223 printf(" %-7s", ostr);
224 if (m.flags & PG_BUSY)
226 if (m.flags & PG_WANTED)
228 if (m.flags & PG_WINATCFLS)
229 printf(" WINATCFLS");
230 if (m.flags & PG_FICTITIOUS)
231 printf(" FICTITIOUS");
232 if (m.flags & PG_WRITEABLE)
233 printf(" WRITEABLE");
234 if (m.flags & PG_MAPPED)
236 if (m.flags & PG_NEED_COMMIT)
237 printf(" NEED_COMMIT");
238 if (m.flags & PG_REFERENCED)
239 printf(" REFERENCED");
240 if (m.flags & PG_CLEANCHK)
242 if (m.flags & PG_SWAPINPROG)
243 printf(" SWAPINPROG");
244 if (m.flags & PG_NOSYNC)
246 if (m.flags & PG_UNMANAGED)
247 printf(" UNMANAGED");
248 if (m.flags & PG_MARKER)
250 if (m.flags & PG_RAM)
252 if (m.flags & PG_SWAPPED)
255 if (m.flags & PG_SLAB)
260 if (m.flags & PG_SLAB)
265 if (debugopt || verboseopt)
270 * Scan the vm_page_buckets array validating all pages found
272 for (i = 0; i <= vm_page_hash_mask; ++i) {
274 printf("index %d\r", i);
277 kkread(kd, (u_long)&vm_page_buckets[i], &mptr, sizeof(mptr));
279 kkread(kd, (u_long)mptr, &m, sizeof(m));
281 kkread(kd, (u_long)m.object, &obj, sizeof(obj));
282 hv = ((uintptr_t)m.object + m.pindex) ^ obj.hash_rand;
283 hv &= vm_page_hash_mask;
285 printf("vm_page_buckets[%d] ((struct vm_page *)%p)"
286 " should be in bucket %d\n", i, mptr, hv);
287 checkpage(kd, mptr, &m, &obj);
289 printf("vm_page_buckets[%d] ((struct vm_page *)%p)"
290 " has no object\n", i, mptr);
* A page with an object.
/*
 * checkpage: verify that page mptr (userspace copy *m, with its owning
 * object copied into *obj) is present on the vm_page_buckets chain it
 * hashes to.  The hash must match the kernel's computation:
 * ((object + pindex) ^ obj->hash_rand) & vm_page_hash_mask.
 */
checkpage(kvm_t *kd, vm_page_t mptr, vm_page_t m, struct vm_object *obj)
hv = ((uintptr_t)m->object + m->pindex) ^ obj->hash_rand;
hv &= vm_page_hash_mask;
kkread(kd, (u_long)&vm_page_buckets[hv], &scanptr, sizeof(scanptr));
/* Walk the bucket chain (linked via hnext) looking for mptr. */
kkread(kd, (u_long)scanptr, &scan, sizeof(scan));
scanptr = scan.hnext;
printf("good checkpage %p bucket %d\n", mptr, hv);
printf("vm_page_buckets[%d] ((struct vm_page *)%p)"
" page not found in bucket list\n", hv, mptr);
* Accelerate the reading of VM pages
/*
 * kkread_vmpage: copy the struct vm_page at kernel address addr into *m
 * through a 1024-entry read-ahead cache (vpcache).  When addr falls
 * outside the cached window [vpbeg, vpend) the window is refilled with
 * one bulk kkread(), clipped so it never reads past the end of the
 * kernel's vm_page_array.  Assumes addr lies within vm_page_array.
 */
kkread_vmpage(kvm_t *kd, u_long addr, vm_page_t m)
static struct vm_page vpcache[1024];
if (addr < vpbeg || addr >= vpend) {
vpend = addr + 1024 * sizeof(*m);
/* Clip the window to the end of the page array. */
if (vpend > (u_long)(uintptr_t)vm_page_array +
vm_page_array_size * sizeof(*m)) {
vpend = (u_long)(uintptr_t)vm_page_array +
vm_page_array_size * sizeof(*m);
kkread(kd, vpbeg, vpcache, vpend - vpbeg);
*m = vpcache[(addr - vpbeg) / sizeof(*m)];
/*
 * kkread: read nbytes of kernel memory at addr into buf.  A short read
 * is an error (the error path is not visible in this fragment --
 * presumably fatal, unlike kkread_err() below which returns the status).
 */
kkread(kvm_t *kd, u_long addr, void *buf, size_t nbytes)
if (kvm_read(kd, addr, buf, nbytes) != nbytes) {
/*
 * kkread_err: like kkread() but reports failure through the return value
 * instead of aborting; used by dumpsltrack() to probe addresses that may
 * not be mapped.
 */
kkread_err(kvm_t *kd, u_long addr, void *buf, size_t nbytes)
if (kvm_read(kd, addr, buf, nbytes) != nbytes) {
/*
 * struct SLTrack chains track distinct candidate slab-zone base
 * addresses (the struct header and its other members are not visible
 * in this fragment).
 */
struct SLTrack *next;
#define SLHMASK (SLHSIZE - 1)	/* hash mask; SLHSIZE presumably a power of 2 */
struct SLTrack *SLHash[SLHSIZE];	/* SLTrack chains hashed by zone address */
/*
 * addsltrack: record the 128KB-aligned address derived from page m in
 * the SLHash chains, once per distinct address.  Unwired or unmapped
 * pages are skipped (the skip condition is truncated in this fragment).
 */
addsltrack(vm_page_t m)
u_long addr = (m->pindex * PAGE_SIZE) & ~131071L;	/* round down to 128KB */
if (m->wire_count == 0 || (m->flags & PG_MAPPED) == 0 ||
i = (addr / 131072) & SLHMASK;
/* Already tracked?  Scan the chain before allocating. */
for (slt = SLHash[i]; slt; slt = slt->next) {
if (slt->addr == addr)
/* New address: prepend a tracking node to the hash chain. */
slt = malloc(sizeof(*slt));
slt->next = SLHash[i];
/*
 * dumpsltrack: walk every SLHash chain; for each tracked address read
 * the SLZone header out of the kernel (kkread_err() tolerates unmapped
 * addresses) and print a per-zone line, then overall totals.
 */
dumpsltrack(kvm_t *kd)
long total_zones = 0;
for (i = 0; i < SLHSIZE; ++i) {
for (slt = SLHash[i]; slt; slt = slt->next) {
if (kkread_err(kd, slt->addr, &z, sizeof(z))) {
printf("SLZone 0x%016lx not mapped\n",
printf("SLZone 0x%016lx { mag=%08x cpu=%-2d NFree=%-3d "
printf("FullZones/TotalZones: %ld/%ld\n", full_zones, total_zones);