4 * cc -I/usr/src/sys vmpagehash.c -o ~/bin/vmpagehash -lkvm
8 * Validate the vm_page_buckets[] hash array against the vm_page_array
10 * Copyright (c) 2019 The DragonFly Project. All rights reserved.
12 * This code is derived from software contributed to The DragonFly Project
13 * by Matthew Dillon <dillon@backplane.com>
15 * Redistribution and use in source and binary forms, with or without
16 * modification, are permitted provided that the following conditions
19 * 1. Redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer.
21 * 2. Redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in
23 * the documentation and/or other materials provided with the
25 * 3. Neither the name of The DragonFly Project nor the names of its
26 * contributors may be used to endorse or promote products derived
27 * from this software without specific, prior written permission.
29 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
30 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
31 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
32 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
33 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
34 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
35 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
36 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
37 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
38 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
39 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
43 #define _KERNEL_STRUCTURES_
44 #include <sys/param.h>
46 #include <sys/malloc.h>
47 #include <sys/signalvar.h>
48 #include <sys/vnode.h>
49 #include <sys/namecache.h>
50 #include <sys/slaballoc.h>
53 #include <vm/vm_page.h>
54 #include <vm/vm_kern.h>
55 #include <vm/vm_object.h>
56 #include <vm/swap_pager.h>
57 #include <vm/vnode_pager.h>
/*
 * Userland mirror of the kernel's vm_page hash element, plus the nlist
 * symbol table used to locate the kernel variables.
 * NOTE(review): this is an excerpt — the struct members and most Nl[]
 * entries are elided from this view.
 */
67 struct vm_page_hash_elm {
76 { "_vm_page_hash_size" },
79 { "_vm_page_array_size" },
/*
 * Values resolved from the running kernel (or core) via kvm_nlist() and
 * kkread() in main(): kernel addresses of vm_page_array[] and the page
 * hash, their element counts, and a local malloc'd copy of the hash.
 */
85 struct vm_page *vm_page_array_ptr;
86 struct vm_page_hash_elm *vm_page_hash_ptr;
87 struct vm_page_hash_elm *vm_page_hash;
88 int vm_page_hash_size;
89 int vm_page_array_size;
/*
 * Forward declarations.  Both kkread() and kkread_err() validate that
 * kvm_read() transferred the requested byte count; kkread_err() reports
 * the failure via its return value instead.
 */
92 void checkpage(kvm_t *kd, vm_page_t mptr, vm_page_t m, struct vm_object *obj);
93 static void kkread_vmpage(kvm_t *kd, u_long addr, vm_page_t m);
94 static void kkread(kvm_t *kd, u_long addr, void *buf, size_t nbytes);
95 static int kkread_err(kvm_t *kd, u_long addr, void *buf, size_t nbytes);
/*
 * main - open the kernel (live or corefile), resolve symbols, copy the
 * page hash into userland, then walk it printing per-page state and
 * cross-checking each page against its hash bucket via checkpage().
 * NOTE(review): excerpt — option parsing, error paths and several locals
 * are elided from this view.
 */
98 main(int ac, char **av)
100 const char *corefile = NULL;
101 const char *sysfile = NULL;
103 struct vm_object obj;
/* -M selects a corefile, -N a kernel image; -d/-v adjust output. */
114 while ((ch = getopt(ac, av, "M:N:dv")) != -1) {
129 fprintf(stderr, "%s [-M core] [-N system]\n", av[0]);
136 if ((kd = kvm_open(sysfile, corefile, NULL, O_RDONLY, "kvm:")) == NULL) {
140 if (kvm_nlist(kd, Nl) != 0) {
/* Pull the kernel-side pointers and sizes resolved by kvm_nlist(). */
145 kkread(kd, Nl[0].n_value, &vm_page_hash_ptr, sizeof(vm_page_hash_ptr));
146 kkread(kd, Nl[1].n_value, &vm_page_hash_size, sizeof(vm_page_hash_size));
147 kkread(kd, Nl[2].n_value, &ticks, sizeof(ticks));
148 kkread(kd, Nl[3].n_value, &vm_page_array_ptr, sizeof(vm_page_array_ptr));
149 kkread(kd, Nl[4].n_value, &vm_page_array_size, sizeof(vm_page_array_size));
/* Snapshot the whole kernel hash array into a local malloc'd copy. */
151 vm_page_hash = malloc(vm_page_hash_size * sizeof(*vm_page_hash));
152 kkread(kd, (intptr_t)vm_page_hash_ptr, vm_page_hash,
153 vm_page_hash_size * sizeof(*vm_page_hash));
156 * Scan the vm_page_hash validating all pages with associated objects
158 printf("vm_page_hash[%d]\n", vm_page_hash_size);
159 for (i = 0; i < vm_page_hash_size; ++i) {
160 struct vm_page_hash_elm *elm;
162 elm = &vm_page_hash[i];
/* Hash entries are reported in groups of 4 (see elm[0..3] test). */
164 printf(" group(%d-%d) ", i, i + 3);
165 if (elm[0].m && elm[1].m && elm[2].m && elm[3].m)
/*
 * NOTE(review): elm->m is a pointer printed with %016jx; it should be
 * cast to (uintmax_t)/(intmax_t) for a conforming varargs call — verify
 * against the full source.
 */
169 printf(" %016jx %9d ", elm->m, elm->ticks);
/* Fetch the vm_page itself (cached reader) and its vm_object. */
172 kkread_vmpage(kd, (u_long)elm->m, &m);
174 kkread(kd, (u_long)m.object, &obj, sizeof(obj));
175 checkpage(kd, elm->m, &m, &obj);
/* Classify by page queue; elided branches set the queue-name string. */
177 if (m.queue >= PQ_HOLD) {
179 } else if (m.queue >= PQ_CACHE) {
181 } else if (m.queue >= PQ_ACTIVE) {
183 } else if (m.queue >= PQ_INACTIVE) {
185 } else if (m.queue >= PQ_FREE) {
190 printf("obj %p/%016jx\n\t\t\t\tval=%02x dty=%02x hold=%d "
191 "wire=%-2d act=%-3d busy=%d %8s",
230 printf(" %-7s", ostr);
/* Decode busy_count bits and page flags into readable tags. */
231 if (m.busy_count & PBUSY_LOCKED)
233 if (m.busy_count & PBUSY_WANTED)
235 if (m.flags & PG_WINATCFLS)
236 printf(" WINATCFLS");
237 if (m.flags & PG_FICTITIOUS)
238 printf(" FICTITIOUS");
239 if (m.flags & PG_WRITEABLE)
240 printf(" WRITEABLE");
241 if (m.flags & PG_MAPPED)
243 if (m.flags & PG_NEED_COMMIT)
244 printf(" NEED_COMMIT");
245 if (m.flags & PG_REFERENCED)
246 printf(" REFERENCED");
247 if (m.flags & PG_CLEANCHK)
249 if (m.busy_count & PBUSY_SWAPINPROG)
250 printf(" SWAPINPROG");
251 if (m.flags & PG_NOSYNC)
253 if (m.flags & PG_UNMANAGED)
254 printf(" UNMANAGED");
255 if (m.flags & PG_MARKER)
257 if (m.flags & PG_RAM)
259 if (m.flags & PG_SWAPPED)
268 * A page with an object.
/*
 * checkpage - verify that page m (kernel address mptr) is reachable on
 * its vm_page_buckets[] hash chain.  The bucket index is computed the
 * same way the kernel does: (object + pindex) ^ obj->hash_rand, masked
 * by vm_page_hash_mask.  Prints "good" on a match, otherwise reports the
 * page as missing from the bucket list.
 * NOTE(review): excerpt — locals and the chain-walk loop header are
 * elided from this view.
 */
271 checkpage(kvm_t *kd, vm_page_t mptr, vm_page_t m, struct vm_object *obj)
278 hv = ((uintptr_t)m->object + m->pindex) ^ obj->hash_rand;
279 hv &= vm_page_hash_mask;
/* Read the chain head, then follow hnext links through kernel memory. */
280 kkread(kd, (u_long)&vm_page_buckets[hv], &scanptr, sizeof(scanptr));
284 kkread(kd, (u_long)scanptr, &scan, sizeof(scan));
285 scanptr = scan.hnext;
289 printf("good checkpage %p bucket %d\n", mptr, hv);
291 printf("vm_page_buckets[%d] ((struct vm_page *)%p)"
292 " page not found in bucket list\n", hv, mptr);
298 * Accelerate the reading of VM pages
/*
 * kkread_vmpage - read one struct vm_page at kernel address addr into
 * *m through a 1024-entry read-ahead cache, amortizing kvm_read() calls
 * across sequential page fetches.
 * NOTE(review): excerpt — the vpbeg/vpend declarations and the vpbeg
 * assignment on a cache miss are elided from this view; presumably
 * vpbeg is set to addr before vpend — confirm against the full source.
 */
301 kkread_vmpage(kvm_t *kd, u_long addr, vm_page_t m)
303 static struct vm_page vpcache[1024];
/* Cache miss: refill a window of up to 1024 pages starting near addr. */
307 if (addr < vpbeg || addr >= vpend) {
309 vpend = addr + 1024 * sizeof(*m);
/* Clamp the window so it never reads past the kernel's vm_page_array. */
310 if (vpend > (u_long)(uintptr_t)vm_page_array_ptr +
311 vm_page_array_size * sizeof(*m)) {
312 vpend = (u_long)(uintptr_t)vm_page_array_ptr +
313 vm_page_array_size * sizeof(*m);
315 kkread(kd, vpbeg, vpcache, vpend - vpbeg);
/* Serve the request out of the cached window. */
317 *m = vpcache[(addr - vpbeg) / sizeof(*m)];
/*
 * kkread - read nbytes of kernel memory at addr into buf.  A short or
 * failed kvm_read() takes the error branch (elided in this excerpt);
 * unlike kkread_err() this variant does not return a status to callers.
 */
321 kkread(kvm_t *kd, u_long addr, void *buf, size_t nbytes)
323 if (kvm_read(kd, addr, buf, nbytes) != nbytes) {
/*
 * kkread_err - like kkread() but reports a short/failed kvm_read() via
 * its int return value (error path elided in this excerpt) so callers
 * can recover instead of aborting.
 */
330 kkread_err(kvm_t *kd, u_long addr, void *buf, size_t nbytes)
332 if (kvm_read(kd, addr, buf, nbytes) != nbytes) {
/*
 * Slab-tracking hash table (excerpt): SLTrack records chained through
 * ->next into SLHash[] buckets; SLHMASK assumes SLHSIZE is a power of
 * two.  NOTE(review): SLHSIZE and the rest of struct SLTrack are elided
 * from this view.
 */
339 struct SLTrack *next;
344 #define SLHMASK (SLHSIZE - 1)
346 struct SLTrack *SLHash[SLHSIZE];
351 addsltrack(vm_page_t m)
354 u_long addr = (m->pindex * PAGE_SIZE) & ~131071L;
357 if (m->wire_count == 0 || (m->flags & PG_MAPPED) == 0 ||
361 i = (addr / 131072) & SLHMASK;
362 for (slt = SLHash[i]; slt; slt = slt->next) {
363 if (slt->addr == addr)
367 slt = malloc(sizeof(*slt));
369 slt->next = SLHash[i];