4 * cc -I/usr/src/sys vmpqinactive.c -o ~/bin/vmpqinactive -lkvm
8 * Calculate how many inactive pages are dirty
10 * Copyright (c) 2004-2020 The DragonFly Project. All rights reserved.
12 * This code is derived from software contributed to The DragonFly Project
13 * by Matthew Dillon <dillon@backplane.com>
15 * Redistribution and use in source and binary forms, with or without
16 * modification, are permitted provided that the following conditions
19 * 1. Redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer.
21 * 2. Redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in
23 * the documentation and/or other materials provided with the
25 * 3. Neither the name of The DragonFly Project nor the names of its
26 * contributors may be used to endorse or promote products derived
27 * from this software without specific, prior written permission.
29 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
30 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
31 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
32 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
33 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
34 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
35 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
36 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
37 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
38 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
39 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
43 #define _KERNEL_STRUCTURES_
44 #include <sys/param.h>
46 #include <sys/malloc.h>
47 #include <sys/signalvar.h>
48 #include <sys/vnode.h>
50 #include <sys/namecache.h>
51 #include <sys/slaballoc.h>
54 #include <vm/vm_page.h>
55 #include <vm/vm_kern.h>
56 #include <vm/vm_object.h>
57 #include <vm/swap_pager.h>
58 #include <vm/vnode_pager.h>
70 { "_vm_page_array_size" },
/*
 * Copies of kernel globals, filled in from the Nl[] symbol values at
 * startup.  kernel_object_ptr is kept as a raw kernel virtual address
 * and used only for pointer comparisons, never dereferenced directly.
 */
84 struct vm_page **vm_page_buckets;
85 int vm_page_hash_mask;
87 struct vm_page *vm_page_array;
88 struct vm_object *kernel_object_ptr;
89 int vm_page_array_size;
/* forward declarations for the scan/read helpers defined below */
96 void checkpage(kvm_t *kd, vm_page_t mptr, vm_page_t m, struct vm_object *obj);
97 static void kkread_vmpage(kvm_t *kd, u_long addr, vm_page_t m);
98 static void kkread(kvm_t *kd, u_long addr, void *buf, size_t nbytes);
99 static int kkread_err(kvm_t *kd, u_long addr, void *buf, size_t nbytes);
102 static void addsltrack(vm_page_t m);
103 static void dumpsltrack(kvm_t *kd);
105 static int unique_object(void *ptr);
/*
 * Page-count accumulators (units are pages, not bytes); main() converts
 * them to megabytes assuming 4096-byte pages when printing the report.
 */
108 long count_wired; /* total */
109 long count_wired_vnode;
110 long count_wired_anon;
111 long count_wired_in_pmap;
112 long count_wired_pgtable;
113 long count_wired_other;
114 long count_wired_kernel;
115 long count_wired_obj_other;
118 long count_anon_in_pmap;
124 long count_noobj_offqueue;
125 long count_noobj_onqueue;
/*
 * main() - open the live kernel (or a crash dump) via libkvm, resolve
 * the kernel symbols listed in Nl[], then scan vm_page_array and
 * classify every page: by page queue (hold/cache/active/inactive/free),
 * by wired-page ownership, and for inactive pages by clean vs dirty
 * state.  Totals are reported in megabytes (4096-byte pages assumed
 * throughout the report printf()s).
 *
 * NOTE(review): this extraction is missing many original source lines;
 * the comments below describe only what the visible fragments establish.
 */
128 main(int ac, char **av)
/* -M core file / -N system image; NULL means the running kernel */
130 const char *corefile = NULL;
131 const char *sysfile = NULL;
133 struct vm_object obj;
/* inactive-queue breakdown accumulators */
143 long pqinactive_clean;
144 long pqinactive_dirty1;
145 long pqinactive_dirty2;
146 long pqinactive_refd;
147 long pqinactive_ready;
149 pqinactive_clean = 0;
150 pqinactive_dirty1 = 0;
151 pqinactive_dirty2 = 0;
153 pqinactive_ready = 0;
/* option parsing: -M core, -N system, -d debug, -v verbose */
155 while ((ch = getopt(ac, av, "M:N:dv")) != -1) {
170 fprintf(stderr, "%s [-M core] [-N system]\n", av[0]);
/* open the kvm descriptor and resolve all required kernel symbols */
177 if ((kd = kvm_open(sysfile, corefile, NULL, O_RDONLY, "kvm:")) == NULL) {
181 if (kvm_nlist(kd, Nl) != 0) {
/*
 * Pull the scalar globals out of the kernel.  kernel_object_ptr is
 * the symbol's kernel address itself (used for pointer compares only).
 */
186 kkread(kd, Nl[0].n_value, &vm_page_array, sizeof(vm_page_array));
187 kkread(kd, Nl[1].n_value, &vm_page_array_size, sizeof(vm_page_array_size));
188 kernel_object_ptr = (void *)Nl[2].n_value;
189 kkread(kd, Nl[3].n_value, &nbuf, sizeof(nbuf));
190 kkread(kd, Nl[4].n_value, &nswbuf_mem, sizeof(nswbuf_mem));
191 kkread(kd, Nl[5].n_value, &nswbuf_kva, sizeof(nswbuf_kva));
192 kkread(kd, Nl[6].n_value, &nswbuf_raw, sizeof(nswbuf_raw));
/* kernel binary size from the difference of two symbol addresses */
193 kern_size = Nl[8].n_value - Nl[7].n_value;
196 * Scan the vm_page_array validating all pages with associated objects
198 for (i = 0; i < vm_page_array_size; ++i) {
199 if (debugopt && (i & 1023) == 0) {
200 printf("page %d/%d\r", i, vm_page_array_size);
/* copy the page (cached read) and, when it has one, its object */
203 kkread_vmpage(kd, (u_long)&vm_page_array[i], &m);
205 kkread(kd, (u_long)m.object, &obj, sizeof(obj));
206 checkpage(kd, &vm_page_array[i], &m, &obj);
/*
 * Classify by page queue.  Inactive pages that are dirty, wired,
 * busied, held, or flagged PG_NEED_COMMIT count as dirty (split
 * into first/second LRU by PG_WINATCFLS); the rest are clean,
 * with PG_REFERENCED pages tallied separately.
 */
208 if (m.queue >= PQ_HOLD) {
210 } else if (m.queue >= PQ_CACHE) {
212 } else if (m.queue >= PQ_ACTIVE) {
214 } else if (m.queue >= PQ_INACTIVE) {
216 if (m.dirty || m.wire_count || m.busy_count || m.hold_count ||
217 (m.flags & PG_NEED_COMMIT)) {
218 if (m.flags & PG_WINATCFLS)
224 if (m.flags & PG_REFERENCED)
229 } else if (m.queue >= PQ_FREE) {
/*
 * Wired-page accounting, broken down by owning object.  Pages
 * with no object that are MAPPED+WRITEABLE+UNQUEUED are counted
 * as pmap page-table pages.
 */
237 if (m.object == NULL) {
238 if ((m.flags & PG_MAPPED) &&
239 (m.flags & PG_WRITEABLE) &&
240 (m.flags & PG_UNQUEUED)) {
241 ++count_wired_pgtable;
245 } else if (m.object == kernel_object_ptr) {
246 ++count_wired_kernel;
255 if (m.flags & PG_MAPPED)
259 ++count_wired_in_pmap;
264 ++count_wired_obj_other;
/* anonymous pages currently entered into a pmap */
270 if (m.flags & PG_MAPPED) {
272 if (m.md.pmap_count) {
274 if (m.object && m.object != kernel_object_ptr) {
278 ++count_anon_in_pmap;
/* verbose mode: per-page dump line */
287 printf("page %p obj %p/%-8ju(%016jx) val=%02x dty=%02x hold=%d "
288 "wire=%-2d act=%-3d busy=%d w/pmapcnt=%d/%d %8s",
292 (intmax_t)m.pindex * PAGE_SIZE,
300 ((m.flags & PG_WRITEABLE) != 0),
301 ((m.flags & PG_MAPPED) != 0),
303 m.md.writeable_count,
/*
 * Per-object resident page totals, counted once per unique object
 * (unique_object() dedups) and bucketed by object type.
 */
310 if (m.object == kernel_object_ptr) {
312 if (unique_object(m.object))
313 count_kernel += obj.resident_page_count;
314 } else if (m.object) {
318 if (unique_object(m.object))
319 count_anon += obj.resident_page_count;
323 if (unique_object(m.object))
324 count_anon += obj.resident_page_count;
328 if (unique_object(m.object))
329 count_vnode += obj.resident_page_count;
333 if (unique_object(m.object))
334 count_device += obj.resident_page_count;
338 if (unique_object(m.object))
339 count_phys += obj.resident_page_count;
343 if (unique_object(m.object))
344 count_unknown += obj.resident_page_count;
347 if (unique_object(m.object))
348 count_unknown += obj.resident_page_count;
/* pages with no object: off-queue vs chained on a non-free queue */
354 if (m.queue == PQ_NONE)
355 ++count_noobj_offqueue;
356 else if (m.queue - m.pc != PQ_FREE)
357 ++count_noobj_onqueue;
/* verbose mode: decode busy_count/flags bits into symbolic names */
361 printf(" %-7s", ostr);
362 if (m.busy_count & PBUSY_LOCKED)
364 if (m.busy_count & PBUSY_WANTED)
366 if (m.flags & PG_WINATCFLS)
367 printf(" WINATCFLS");
368 if (m.flags & PG_FICTITIOUS)
369 printf(" FICTITIOUS");
370 if (m.flags & PG_WRITEABLE)
371 printf(" WRITEABLE");
372 if (m.flags & PG_MAPPED)
374 if (m.flags & PG_NEED_COMMIT)
375 printf(" NEED_COMMIT");
376 if (m.flags & PG_REFERENCED)
377 printf(" REFERENCED");
378 if (m.flags & PG_CLEANCHK)
380 if (m.busy_count & PBUSY_SWAPINPROG)
381 printf(" SWAPINPROG");
382 if (m.flags & PG_NOSYNC)
384 if (m.flags & PG_UNQUEUED)
386 if (m.flags & PG_MARKER)
388 if (m.flags & PG_RAM)
390 if (m.flags & PG_SWAPPED)
393 if (m.flags & PG_SLAB)
398 if (m.flags & PG_SLAB)
/* summary report: page counts converted to MB (count * 4096 / 1MB) */
403 if (debugopt || verboseopt)
405 printf("%8.2fM free\n",
406 count_free * 4096.0 / 1048576.0);
407 printf("%8.2fM inactive-clean\n",
408 pqinactive_clean * 4096.0 / 1048576.0);
409 printf("%8.2fM inactive-clean-and-referenced\n",
410 pqinactive_refd * 4096.0 / 1048576.0);
411 printf("%8.2fM inactive-clean-and-ready\n",
412 pqinactive_ready * 4096.0 / 1048576.0);
413 printf("%8.2fM inactive-dirty/first-LRU\n",
414 pqinactive_dirty1 * 4096.0 / 1048576.0);
415 printf("%8.2fM inactive-dirty/second-LRU\n",
416 pqinactive_dirty2 * 4096.0 / 1048576.0);
418 printf("%8.2fM wired vnode (in buffer cache)\n",
419 count_wired_vnode * 4096.0 / 1048576.0);
420 printf("%8.2fM wired in-pmap (probably vnode pages also in buffer cache)\n",
421 count_wired_in_pmap * 4096.0 / 1048576.0);
422 printf("%8.2fM wired pgtable\n",
423 count_wired_pgtable * 4096.0 / 1048576.0);
424 printf("%8.2fM wired anon\n",
425 count_wired_anon * 4096.0 / 1048576.0);
426 printf("%8.2fM wired kernel_object\n",
427 count_wired_kernel * 4096.0 / 1048576.0);
429 printf("\t%8.2fM vm_page_array\n",
430 vm_page_array_size * sizeof(struct vm_page) / 1048576.0);
431 printf("\t%8.2fM buf, swbuf_mem, swbuf_kva, swbuf_raw\n",
432 (nbuf + nswbuf_mem + nswbuf_kva + nswbuf_raw) *
433 sizeof(struct buf) / 1048576.0);
434 printf("\t%8.2fM kernel binary\n", kern_size / 1048576.0);
435 printf("\t(also add in KMALLOC id kmapinfo, or loosely, vmstat -m)\n");
437 printf("%8.2fM wired other (unknown object)\n",
438 count_wired_obj_other * 4096.0 / 1048576.0);
439 printf("%8.2fM wired other (no object, probably kernel)\n",
440 count_wired_other * 4096.0 / 1048576.0);
442 printf("%8.2fM WIRED TOTAL\n",
443 count_wired * 4096.0 / 1048576.0);
446 printf("%8.2fM anonymous (total, includes in-pmap)\n",
447 count_anon * 4096.0 / 1048576.0);
448 printf("%8.2fM anonymous memory in-pmap\n",
449 count_anon_in_pmap * 4096.0 / 1048576.0);
450 printf("%8.2fM vnode (includes wired)\n",
451 count_vnode * 4096.0 / 1048576.0);
452 printf("%8.2fM device\n", count_device * 4096.0 / 1048576.0);
453 printf("%8.2fM phys\n", count_phys * 4096.0 / 1048576.0);
454 printf("%8.2fM kernel (includes wired)\n",
455 count_kernel * 4096.0 / 1048576.0);
456 printf("%8.2fM unknown\n", count_unknown * 4096.0 / 1048576.0);
457 printf("%8.2fM no_object, off queue (includes wired w/o object)\n",
458 count_noobj_offqueue * 4096.0 / 1048576.0);
459 printf("%8.2fM no_object, on non-free queue (includes wired w/o object)\n",
460 count_noobj_onqueue * 4096.0 / 1048576.0);
/*
 * Consistency pass over the vm_page hash table: recompute each chained
 * page's hash and verify it matches the bucket it was found on.
 */
464 * Scan the vm_page_buckets array validating all pages found
466 for (i = 0; i <= vm_page_hash_mask; ++i) {
468 printf("index %d\r", i);
471 kkread(kd, (u_long)&vm_page_buckets[i], &mptr, sizeof(mptr));
473 kkread(kd, (u_long)mptr, &m, sizeof(m));
475 kkread(kd, (u_long)m.object, &obj, sizeof(obj));
/* same hash function the kernel uses; must select bucket i */
476 hv = ((uintptr_t)m.object + m.pindex) ^ obj.hash_rand;
477 hv &= vm_page_hash_mask;
479 printf("vm_page_buckets[%d] ((struct vm_page *)%p)"
480 " should be in bucket %d\n", i, mptr, hv);
481 checkpage(kd, mptr, &m, &obj);
483 printf("vm_page_buckets[%d] ((struct vm_page *)%p)"
484 " has no object\n", i, mptr);
499 * A page with an object.
/*
 * checkpage() - verify that page 'm' (kernel address 'mptr', with its
 * vm_object already copied into 'obj') is chained on the vm_page hash
 * bucket selected by hashing (object, pindex, hash_rand).  Prints a
 * diagnostic when the page is not found on the expected chain, and a
 * "good" line in (presumably) debug/verbose mode.
 */
502 checkpage(kvm_t *kd, vm_page_t mptr, vm_page_t m, struct vm_object *obj)
/* same hash computation as the bucket-scan pass in main() */
509 hv = ((uintptr_t)m->object + m->pindex) ^ obj->hash_rand;
510 hv &= vm_page_hash_mask;
511 kkread(kd, (u_long)&vm_page_buckets[hv], &scanptr, sizeof(scanptr));
/* walk the hnext chain in kernel memory looking for mptr */
515 kkread(kd, (u_long)scanptr, &scan, sizeof(scan));
516 scanptr = scan.hnext;
520 printf("good checkpage %p bucket %d\n", mptr, hv);
522 printf("vm_page_buckets[%d] ((struct vm_page *)%p)"
523 " page not found in bucket list\n", hv, mptr);
529 * Accelerate the reading of VM pages
531 #define VPCACHE_SIZE 65536
/*
 * kkread_vmpage() - copy one struct vm_page at kernel address 'addr'
 * into *m through a sliding-window cache (vpcache) so the sequential
 * vm_page_array scan in main() does not issue one kvm_read() per page.
 */
534 kkread_vmpage(kvm_t *kd, u_long addr, vm_page_t m)
536 static struct vm_page vpcache[VPCACHE_SIZE];
/* refill the window when addr falls outside the cached [vpbeg, vpend) */
540 if (addr < vpbeg || addr >= vpend) {
542 vpend = addr + VPCACHE_SIZE * sizeof(*m);
/* clamp the window so it never reads past the end of vm_page_array */
543 if (vpend > (u_long)(uintptr_t)vm_page_array +
544 vm_page_array_size * sizeof(*m)) {
545 vpend = (u_long)(uintptr_t)vm_page_array +
546 vm_page_array_size * sizeof(*m);
548 kkread(kd, vpbeg, vpcache, vpend - vpbeg);
/* serve the request from the cached window */
550 *m = vpcache[(addr - vpbeg) / sizeof(*m)];
/*
 * kkread() - read nbytes of kernel memory at 'addr' into 'buf'.  A
 * short or failed kvm_read() is treated as an error (error path not
 * visible in this extraction; presumably fatal -- contrast with
 * kkread_err() below, which reports failure to the caller).
 */
554 kkread(kvm_t *kd, u_long addr, void *buf, size_t nbytes)
556 if (kvm_read(kd, addr, buf, nbytes) != nbytes) {
/*
 * kkread_err() - like kkread() but non-fatal: returns non-zero when the
 * kvm_read() fails or is short, so callers (see dumpsltrack()) can
 * probe addresses that may not be mapped.
 */
563 kkread_err(kvm_t *kd, u_long addr, void *buf, size_t nbytes)
565 if (kvm_read(kd, addr, buf, nbytes) != nbytes) {
572 struct SLTrack *next;
577 #define SLHMASK (SLHSIZE - 1)
579 struct SLTrack *SLHash[SLHSIZE];
/*
 * addsltrack() - record the 128KB-aligned address derived from the
 * page's pindex so dumpsltrack() can later inspect each SLAB zone
 * exactly once.  Pages that are not wired or not PG_MAPPED are skipped
 * (full filter condition not visible in this extraction).
 */
584 addsltrack(vm_page_t m)
/* 131071 = 128KB - 1: mask pindex*PAGE_SIZE down to a zone boundary */
587 u_long addr = (m->pindex * PAGE_SIZE) & ~131071L;
590 if (m->wire_count == 0 || (m->flags & PG_MAPPED) == 0 ||
/* hash the zone base; bail if this zone was already recorded */
594 i = (addr / 131072) & SLHMASK;
595 for (slt = SLHash[i]; slt; slt = slt->next) {
596 if (slt->addr == addr)
/* first sighting: push a new tracking entry onto the chain head */
600 slt = malloc(sizeof(*slt));
602 slt->next = SLHash[i];
/*
 * dumpsltrack() - walk every SLHash chain built by addsltrack(), read
 * the SLZone header at each tracked kernel address, print per-zone
 * statistics, and finish with a FullZones/TotalZones summary.
 */
610 dumpsltrack(kvm_t *kd)
614 long total_zones = 0;
617 for (i = 0; i < SLHSIZE; ++i) {
618 for (slt = SLHash[i]; slt; slt = slt->next) {
/* use the non-fatal reader: a tracked address may be unmapped */
621 if (kkread_err(kd, slt->addr, &z, sizeof(z))) {
622 printf("SLZone 0x%016lx not mapped\n",
626 printf("SLZone 0x%016lx { mag=%08x cpu=%-2d NFree=%-3d "
639 printf("FullZones/TotalZones: %ld/%ld\n", full_zones, total_zones);
642 #define HASH_SIZE (1024*1024)
643 #define HASH_MASK (HASH_SIZE - 1)
646 struct dup_entry *next;
650 struct dup_entry *dup_hash[HASH_SIZE];
653 unique_object(void *ptr)
655 struct dup_entry *hen;
658 hv = (intptr_t)ptr ^ ((intptr_t)ptr >> 20);
660 for (hen = dup_hash[hv]; hen; hen = hen->next) {
664 hen = malloc(sizeof(*hen));
665 hen->next = dup_hash[hv];