4 * cc -I/usr/src/sys vmpageinfo.c -o ~/bin/vmpageinfo -lkvm
8 * Validate the vm_page_buckets[] hash array against the vm_page_array
11 * Copyright (c) 2004 The DragonFly Project. All rights reserved.
13 * This code is derived from software contributed to The DragonFly Project
14 * by Matthew Dillon <dillon@backplane.com>
16 * Redistribution and use in source and binary forms, with or without
17 * modification, are permitted provided that the following conditions
20 * 1. Redistributions of source code must retain the above copyright
21 * notice, this list of conditions and the following disclaimer.
22 * 2. Redistributions in binary form must reproduce the above copyright
23 * notice, this list of conditions and the following disclaimer in
24 * the documentation and/or other materials provided with the
26 * 3. Neither the name of The DragonFly Project nor the names of its
27 * contributors may be used to endorse or promote products derived
28 * from this software without specific, prior written permission.
30 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
31 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
32 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
33 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
34 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
35 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
36 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
37 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
38 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
39 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
40 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
44 #define _KERNEL_STRUCTURES_
45 #include <sys/param.h>
47 #include <sys/malloc.h>
48 #include <sys/signalvar.h>
49 #include <sys/vnode.h>
50 #include <sys/namecache.h>
51 #include <sys/slaballoc.h>
54 #include <vm/vm_page.h>
55 #include <vm/vm_kern.h>
56 #include <vm/vm_object.h>
57 #include <vm/swap_pager.h>
58 #include <vm/vnode_pager.h>
/*
 * NOTE(review): this file is a line-sampled excerpt; each line carries its
 * original line number as a prefix and many intermediate lines are missing.
 */
/* Fragment of the struct nlist Nl[] table of kernel symbols to resolve. */
{ "_vm_page_array_size" },
/*
 * Kernel state mirrored into this process via kkread()/kvm_read().
 */
struct vm_page **vm_page_buckets;	/* kernel hash buckets of page chains */
int vm_page_hash_mask;			/* bucket index mask */
struct vm_page *vm_page_array;		/* kernel address of the page array */
struct vm_object *kernel_object_ptr;	/* kernel address of kernel_object */
int vm_page_array_size;			/* number of vm_page_array entries */
/* Forward declarations. */
void checkpage(kvm_t *kd, vm_page_t mptr, vm_page_t m, struct vm_object *obj);
static void kkread_vmpage(kvm_t *kd, u_long addr, vm_page_t m);
static void kkread(kvm_t *kd, u_long addr, void *buf, size_t nbytes);
static int kkread_err(kvm_t *kd, u_long addr, void *buf, size_t nbytes);
static void addsltrack(vm_page_t m);
static void dumpsltrack(kvm_t *kd);
static int unique_object(void *ptr);
/*
 * Page-accounting counters, in pages; converted to MB (page * 4096 / 1048576)
 * when reported by main().
 */
long count_wired;		/* total */
long count_wired_vnode;
long count_wired_anon;
long count_wired_in_pmap;
long count_wired_pgtable;
long count_wired_other;
long count_wired_kernel;
long count_wired_obj_other;
long count_anon_in_pmap;
long count_noobj_offqueue;	/* no object, not on any page queue */
long count_noobj_onqueue;	/* no object, on a non-free page queue */
/*
 * Entry point: open a kvm(3) handle on the running kernel (or a crash dump
 * via -M core / -N system), resolve the Nl[] symbols, then scan the entire
 * vm_page_array classifying every page, and finally cross-check the
 * vm_page_buckets[] hash chains.
 *
 * NOTE(review): fragmentary excerpt — many statements between the lines
 * below are missing from this view.
 */
128 main(int ac, char **av)
130 const char *corefile = NULL;
131 const char *sysfile = NULL;
133 struct vm_object obj;
/* Option parsing: -M core file, -N system file, -d debug, -v verbose. */
144 while ((ch = getopt(ac, av, "M:N:dv")) != -1) {
159 fprintf(stderr, "%s [-M core] [-N system]\n", av[0]);
166 if ((kd = kvm_open(sysfile, corefile, NULL, O_RDONLY, "kvm:")) == NULL) {
170 if (kvm_nlist(kd, Nl) != 0) {
/*
 * Pull the kernel globals this tool needs through the resolved nlist
 * entries.  Nl[] index order must match the symbol table above.
 */
175 kkread(kd, Nl[0].n_value, &vm_page_array, sizeof(vm_page_array));
176 kkread(kd, Nl[1].n_value, &vm_page_array_size, sizeof(vm_page_array_size));
177 kernel_object_ptr = (void *)Nl[2].n_value;
178 kkread(kd, Nl[3].n_value, &nbuf, sizeof(nbuf));
179 kkread(kd, Nl[4].n_value, &nswbuf_mem, sizeof(nswbuf_mem));
180 kkread(kd, Nl[5].n_value, &nswbuf_kva, sizeof(nswbuf_kva));
181 kkread(kd, Nl[6].n_value, &nswbuf_raw, sizeof(nswbuf_raw));
/* Kernel text/data footprint = span between two boundary symbols. */
182 kern_size = Nl[8].n_value - Nl[7].n_value;
185 * Scan the vm_page_array validating all pages with associated objects
187 for (i = 0; i < vm_page_array_size; ++i) {
189 printf("page %d\r", i);
/* Batched read of the page entry (see kkread_vmpage() cache). */
192 kkread_vmpage(kd, (u_long)&vm_page_array[i], &m);
194 kkread(kd, (u_long)m.object, &obj, sizeof(obj));
195 checkpage(kd, &vm_page_array[i], &m, &obj);
/* Classify by page queue (PQ_* constants are ordered ranges). */
197 if (m.queue >= PQ_HOLD) {
199 } else if (m.queue >= PQ_CACHE) {
201 } else if (m.queue >= PQ_ACTIVE) {
203 } else if (m.queue >= PQ_INACTIVE) {
205 } else if (m.queue >= PQ_FREE) {
/*
 * Wired-page accounting.  A mapped+writeable+unqueued page with no
 * object is counted as a page-table page.
 */
213 if (m.object == NULL) {
214 if ((m.flags & PG_MAPPED) &&
215 (m.flags & PG_WRITEABLE) &&
216 (m.flags & PG_UNQUEUED)) {
217 ++count_wired_pgtable;
221 } else if (m.object == kernel_object_ptr) {
222 ++count_wired_kernel;
231 ++count_wired_in_pmap;
236 ++count_wired_obj_other;
/* Non-wired pages present in a pmap. */
240 } else if (m.md.pmap_count) {
241 if (m.object && m.object != kernel_object_ptr) {
245 ++count_anon_in_pmap;
/* Verbose per-page dump. */
254 printf("page %p obj %p/%-8ju(%016jx) val=%02x dty=%02x hold=%d "
255 "wire=%-2d act=%-3d busy=%d w/pmapcnt=%d/%d %8s",
259 (intmax_t)m.pindex * PAGE_SIZE,
266 m.md.writeable_count,
/*
 * Per-object resident page totals, counted once per unique object
 * (see unique_object()), bucketed by object type.
 */
272 if (m.object == kernel_object_ptr) {
274 if (unique_object(m.object))
275 count_kernel += obj.resident_page_count;
276 } else if (m.object) {
280 if (unique_object(m.object))
281 count_anon += obj.resident_page_count;
285 if (unique_object(m.object))
286 count_anon += obj.resident_page_count;
290 if (unique_object(m.object))
291 count_vnode += obj.resident_page_count;
295 if (unique_object(m.object))
296 count_device += obj.resident_page_count;
300 if (unique_object(m.object))
301 count_phys += obj.resident_page_count;
305 if (unique_object(m.object))
306 count_unknown += obj.resident_page_count;
309 if (unique_object(m.object))
310 count_unknown += obj.resident_page_count;
/* Pages with no object: split by whether they sit on a queue. */
316 if (m.queue == PQ_NONE)
317 ++count_noobj_offqueue;
318 else if (m.queue - m.pc != PQ_FREE)
319 ++count_noobj_onqueue;
/* Decode busy/flag bits into the verbose line. */
323 printf(" %-7s", ostr);
324 if (m.busy_count & PBUSY_LOCKED)
326 if (m.busy_count & PBUSY_WANTED)
328 if (m.flags & PG_WINATCFLS)
329 printf(" WINATCFLS");
330 if (m.flags & PG_FICTITIOUS)
331 printf(" FICTITIOUS");
332 if (m.flags & PG_WRITEABLE)
333 printf(" WRITEABLE");
334 if (m.flags & PG_MAPPED)
336 if (m.flags & PG_NEED_COMMIT)
337 printf(" NEED_COMMIT");
338 if (m.flags & PG_REFERENCED)
339 printf(" REFERENCED");
340 if (m.flags & PG_CLEANCHK)
342 if (m.busy_count & PBUSY_SWAPINPROG)
343 printf(" SWAPINPROG");
344 if (m.flags & PG_NOSYNC)
346 if (m.flags & PG_UNQUEUED)
348 if (m.flags & PG_MARKER)
350 if (m.flags & PG_RAM)
352 if (m.flags & PG_SWAPPED)
355 if (m.flags & PG_SLAB)
360 if (m.flags & PG_SLAB)
365 if (debugopt || verboseopt)
/*
 * Summary report.  All page counters are multiplied by 4096 (page
 * size) and divided by 1048576 to print megabytes.
 */
367 printf("%8.2fM free\n", count_free * 4096.0 / 1048576.0);
369 printf("%8.2fM wired vnode (in buffer cache)\n",
370 count_wired_vnode * 4096.0 / 1048576.0);
371 printf("%8.2fM wired in-pmap (probably vnode pages also in buffer cache)\n",
372 count_wired_in_pmap * 4096.0 / 1048576.0);
373 printf("%8.2fM wired pgtable\n",
374 count_wired_pgtable * 4096.0 / 1048576.0);
375 printf("%8.2fM wired anon\n",
376 count_wired_anon * 4096.0 / 1048576.0);
377 printf("%8.2fM wired kernel_object\n",
378 count_wired_kernel * 4096.0 / 1048576.0);
380 printf("\t%8.2fM vm_page_array\n",
381 vm_page_array_size * sizeof(struct vm_page) / 1048576.0);
382 printf("\t%8.2fM buf, swbuf_mem, swbuf_kva, swbuf_raw\n",
383 (nbuf + nswbuf_mem + nswbuf_kva + nswbuf_raw) *
384 sizeof(struct buf) / 1048576.0);
385 printf("\t%8.2fM kernel binary\n", kern_size / 1048576.0);
386 printf("\t(also add in KMALLOC id kmapinfo, or loosely, vmstat -m)\n");
388 printf("%8.2fM wired other (unknown object)\n",
389 count_wired_obj_other * 4096.0 / 1048576.0);
390 printf("%8.2fM wired other (no object, probably kernel)\n",
391 count_wired_other * 4096.0 / 1048576.0);
393 printf("%8.2fM WIRED TOTAL\n",
394 count_wired * 4096.0 / 1048576.0);
397 printf("%8.2fM anonymous (total, includes in-pmap)\n",
398 count_anon * 4096.0 / 1048576.0);
399 printf("%8.2fM anonymous memory in-pmap\n",
400 count_anon_in_pmap * 4096.0 / 1048576.0);
401 printf("%8.2fM vnode (includes wired)\n",
402 count_vnode * 4096.0 / 1048576.0);
403 printf("%8.2fM device\n", count_device * 4096.0 / 1048576.0);
404 printf("%8.2fM phys\n", count_phys * 4096.0 / 1048576.0);
405 printf("%8.2fM kernel (includes wired)\n",
406 count_kernel * 4096.0 / 1048576.0);
407 printf("%8.2fM unknown\n", count_unknown * 4096.0 / 1048576.0);
408 printf("%8.2fM no_object, off queue (includes wired w/o object)\n",
409 count_noobj_offqueue * 4096.0 / 1048576.0);
410 printf("%8.2fM no_object, on non-free queue (includes wired w/o object)\n",
411 count_noobj_onqueue * 4096.0 / 1048576.0);
415 * Scan the vm_page_buckets array validating all pages found
417 for (i = 0; i <= vm_page_hash_mask; ++i) {
419 printf("index %d\r", i);
422 kkread(kd, (u_long)&vm_page_buckets[i], &mptr, sizeof(mptr));
424 kkread(kd, (u_long)mptr, &m, sizeof(m));
426 kkread(kd, (u_long)m.object, &obj, sizeof(obj));
/* Recompute the page's hash bucket and verify it matches index i. */
427 hv = ((uintptr_t)m.object + m.pindex) ^ obj.hash_rand;
428 hv &= vm_page_hash_mask;
430 printf("vm_page_buckets[%d] ((struct vm_page *)%p)"
431 " should be in bucket %d\n", i, mptr, hv);
432 checkpage(kd, mptr, &m, &obj);
434 printf("vm_page_buckets[%d] ((struct vm_page *)%p)"
435 " has no object\n", i, mptr);
450 * A page with an object.
/*
 * checkpage: verify that a page (kernel address mptr, locally-copied body m,
 * object copy obj) is present on the hash-bucket chain it hashes to.
 * Recomputes the bucket with the same hash as main()'s bucket scan, then
 * walks the chain via hnext reading each entry through kkread().
 *
 * NOTE(review): fragmentary excerpt — loop structure and locals are not
 * fully visible here.
 */
453 checkpage(kvm_t *kd, vm_page_t mptr, vm_page_t m, struct vm_object *obj)
460 hv = ((uintptr_t)m->object + m->pindex) ^ obj->hash_rand;
461 hv &= vm_page_hash_mask;
462 kkread(kd, (u_long)&vm_page_buckets[hv], &scanptr, sizeof(scanptr));
/* Follow the singly-linked hash chain in kernel memory. */
466 kkread(kd, (u_long)scanptr, &scan, sizeof(scan));
467 scanptr = scan.hnext;
471 printf("good checkpage %p bucket %d\n", mptr, hv);
473 printf("vm_page_buckets[%d] ((struct vm_page *)%p)"
474 " page not found in bucket list\n", hv, mptr);
480 * Accelerate the reading of VM pages
/*
 * kkread_vmpage: cached read of a single struct vm_page at kernel address
 * addr into *m.  Reads 1024 entries at a time into a static window
 * (vpcache spanning [vpbeg, vpend)) so sequential scans of vm_page_array
 * issue far fewer kvm_read() calls; the window is clamped to the end of
 * the array.
 */
483 kkread_vmpage(kvm_t *kd, u_long addr, vm_page_t m)
485 static struct vm_page vpcache[1024];
/* Cache miss: refill the window starting at addr. */
489 if (addr < vpbeg || addr >= vpend) {
491 vpend = addr + 1024 * sizeof(*m);
/* Clamp so the bulk read never runs past vm_page_array's end. */
492 if (vpend > (u_long)(uintptr_t)vm_page_array +
493 vm_page_array_size * sizeof(*m)) {
494 vpend = (u_long)(uintptr_t)vm_page_array +
495 vm_page_array_size * sizeof(*m);
497 kkread(kd, vpbeg, vpcache, vpend - vpbeg);
/* Serve the request out of the cached window. */
499 *m = vpcache[(addr - vpbeg) / sizeof(*m)];
/*
 * kkread: read nbytes of kernel memory at addr into buf; a short or failed
 * kvm_read() is treated as fatal (error path not visible in this excerpt).
 */
503 kkread(kvm_t *kd, u_long addr, void *buf, size_t nbytes)
505 if (kvm_read(kd, addr, buf, nbytes) != nbytes) {
/*
 * kkread_err: non-fatal variant of kkread(); returns a non-zero error
 * indication on a failed read so the caller can skip unmapped addresses
 * (used by dumpsltrack()).
 */
512 kkread_err(kvm_t *kd, u_long addr, void *buf, size_t nbytes)
514 if (kvm_read(kd, addr, buf, nbytes) != nbytes) {
/*
 * Slab-zone tracking: SLTrack records one 128KB-aligned zone address,
 * chained into the SLHash[] open-hash table (SLHSIZE buckets, power of 2).
 */
521 struct SLTrack *next;
526 #define SLHMASK (SLHSIZE - 1)
528 struct SLTrack *SLHash[SLHSIZE];
/*
 * addsltrack: record the 128KB-aligned slab zone address derived from a
 * page's pindex, de-duplicated through SLHash[].  Pages that are not
 * wired+mapped slab candidates are filtered out first.
 *
 * NOTE(review): fragmentary excerpt; the full filter condition and insert
 * tail are not visible.
 */
533 addsltrack(vm_page_t m)
/* ~131071L masks down to a 128KB zone boundary. */
536 u_long addr = (m->pindex * PAGE_SIZE) & ~131071L;
539 if (m->wire_count == 0 || (m->flags & PG_MAPPED) == 0 ||
543 i = (addr / 131072) & SLHMASK;
/* Already tracked? */
544 for (slt = SLHash[i]; slt; slt = slt->next) {
545 if (slt->addr == addr)
/* New zone: prepend to the bucket chain. */
549 slt = malloc(sizeof(*slt));
551 slt->next = SLHash[i];
/*
 * dumpsltrack: walk every SLHash[] chain, read each tracked SLZone from
 * kernel memory (tolerating unmapped addresses via kkread_err()), print
 * per-zone stats and a final full/total summary.
 */
559 dumpsltrack(kvm_t *kd)
563 long total_zones = 0;
566 for (i = 0; i < SLHSIZE; ++i) {
567 for (slt = SLHash[i]; slt; slt = slt->next) {
/* Zone address may no longer be mapped; skip rather than abort. */
570 if (kkread_err(kd, slt->addr, &z, sizeof(z))) {
571 printf("SLZone 0x%016lx not mapped\n",
575 printf("SLZone 0x%016lx { mag=%08x cpu=%-2d NFree=%-3d "
588 printf("FullZones/TotalZones: %ld/%ld\n", full_zones, total_zones);
/*
 * Object de-duplication hash used by unique_object(): 1M buckets of
 * dup_entry chains keyed by vm_object pointer.
 */
591 #define HASH_SIZE (1024*1024)
592 #define HASH_MASK (HASH_SIZE - 1)
595 struct dup_entry *next;
599 struct dup_entry *dup_hash[HASH_SIZE];
/*
 * unique_object: return non-zero the first time a given object pointer is
 * seen, zero thereafter.  Lets main() add each object's
 * resident_page_count exactly once.
 */
602 unique_object(void *ptr)
604 struct dup_entry *hen;
/* Fold high pointer bits into the bucket index. */
607 hv = (intptr_t)ptr ^ ((intptr_t)ptr >> 20);
609 for (hen = dup_hash[hv]; hen; hen = hen->next) {
/* First sighting: remember it. */
613 hen = malloc(sizeof(*hen));
614 hen->next = dup_hash[hv];