Merge dhcpcd-8.0.4 from branch 'vendor/DHCPCD'
[dragonfly.git] / test / debug / vmpageinfo.c
1 /*
2  * VMPAGEINFO.C
3  *
4  * cc -I/usr/src/sys vmpageinfo.c -o ~/bin/vmpageinfo -lkvm
5  *
6  * vmpageinfo
7  *
8  * Validate the vm_page_buckets[] hash array against the vm_page_array
9  *
10  *
11  * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
12  * 
13  * This code is derived from software contributed to The DragonFly Project
14  * by Matthew Dillon <dillon@backplane.com>
15  * 
16  * Redistribution and use in source and binary forms, with or without
17  * modification, are permitted provided that the following conditions
18  * are met:
19  * 
20  * 1. Redistributions of source code must retain the above copyright
21  *    notice, this list of conditions and the following disclaimer.
22  * 2. Redistributions in binary form must reproduce the above copyright
23  *    notice, this list of conditions and the following disclaimer in
24  *    the documentation and/or other materials provided with the
25  *    distribution.
26  * 3. Neither the name of The DragonFly Project nor the names of its
27  *    contributors may be used to endorse or promote products derived
28  *    from this software without specific, prior written permission.
29  * 
30  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
31  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
32  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
33  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
34  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
35  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
36  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
37  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
38  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
39  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
40  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
41  * SUCH DAMAGE.
42  *
43  * $DragonFly: src/test/debug/vmpageinfo.c,v 1.2 2006/05/23 01:00:05 dillon Exp $
44  */
45
46 #define _KERNEL_STRUCTURES_
47 #include <sys/param.h>
48 #include <sys/user.h>
49 #include <sys/malloc.h>
50 #include <sys/signalvar.h>
51 #include <sys/vnode.h>
52 #include <sys/namecache.h>
53 #include <sys/slaballoc.h>
54
55 #include <vm/vm.h>
56 #include <vm/vm_page.h>
57 #include <vm/vm_kern.h>
58 #include <vm/vm_page.h>
59 #include <vm/vm_object.h>
60 #include <vm/swap_pager.h>
61 #include <vm/vnode_pager.h>
62
63 #include <stdio.h>
64 #include <stdlib.h>
65 #include <string.h>
66 #include <fcntl.h>
67 #include <kvm.h>
68 #include <nlist.h>
69 #include <getopt.h>
70
/*
 * Kernel symbols resolved via kvm_nlist().  The index order matters:
 * main() reads Nl[0]..Nl[8] positionally after the lookup.
 */
struct nlist Nl[] = {
    { "_vm_page_array" },		/* Nl[0]: base of the vm_page array */
    { "_vm_page_array_size" },		/* Nl[1]: number of entries */
    { "_kernel_object" },		/* Nl[2]: address of kernel_object */
    { "_nbuf" },			/* Nl[3..6]: buffer sizing globals */
    { "_nswbuf_mem" },
    { "_nswbuf_kva" },
    { "_nswbuf_raw" },
    { "_kernbase" },			/* Nl[7]: kernel image start ... */
    { "__end" },			/* Nl[8]: ... and end (binary size) */
    { NULL }
};
83
int debugopt;		/* -d: per-page progress output */
int verboseopt;		/* -v: dump each page's state */
#if 0
/* Remnants of the old vm_page hash validation, compiled out */
struct vm_page **vm_page_buckets;
int vm_page_hash_mask;
#endif
struct vm_page *vm_page_array;		/* kernel address of the page array */
struct vm_object *kernel_object_ptr;	/* kernel address of kernel_object */
int vm_page_array_size;			/* number of vm_page entries */
long nbuf;
long nswbuf_mem;
long nswbuf_kva;
long nswbuf_raw;
long kern_size;				/* _end - kernbase */

void checkpage(kvm_t *kd, vm_page_t mptr, vm_page_t m, struct vm_object *obj);
static void kkread_vmpage(kvm_t *kd, u_long addr, vm_page_t m);
static void kkread(kvm_t *kd, u_long addr, void *buf, size_t nbytes);
static int kkread_err(kvm_t *kd, u_long addr, void *buf, size_t nbytes);

#if 0
/*
 * NOTE(review): dumpsltrack() is still defined below even though this
 * prototype and its only call site in main() are #if 0'd.
 */
static void addsltrack(vm_page_t m);
static void dumpsltrack(kvm_t *kd);
#endif
static int unique_object(void *ptr);

/* Page accounting tallies, accumulated over the whole vm_page_array scan */
long count_free;
long count_wired;               /* total */
long count_wired_vnode;
long count_wired_anon;
long count_wired_in_pmap;
long count_wired_pgtable;
long count_wired_other;
long count_wired_kernel;
long count_wired_obj_other;

/* Per-object-type totals (resident_page_count summed once per object) */
long count_anon;
long count_anon_in_pmap;
long count_vnode;
long count_device;
long count_phys;
long count_kernel;
long count_unknown;
long count_noobj_offqueue;
long count_noobj_onqueue;
129
/*
 * Open the kernel (live or core), walk the entire vm_page_array, and
 * print memory-usage totals broken down by queue, wiring, and backing
 * object type.  -v dumps each page; -d shows scan progress.
 */
int
main(int ac, char **av)
{
    const char *corefile = NULL;	/* -M: core file (or live kmem) */
    const char *sysfile = NULL;		/* -N: kernel image for symbols */
    struct vm_page m;			/* local copy of the current page */
    struct vm_object obj;		/* local copy of its object */
    kvm_t *kd;
    int ch;
#if 0
    vm_page_t mptr;
    int hv;
#endif
    int i;
    const char *qstr;			/* page-queue name for -v output */
    const char *ostr;			/* object-type name for -v output */

    while ((ch = getopt(ac, av, "M:N:dv")) != -1) {
        switch(ch) {
        case 'd':
            ++debugopt;
            break;
        case 'v':
            ++verboseopt;
            break;
        case 'M':
            corefile = optarg;
            break;
        case 'N':
            sysfile = optarg;
            break;
        default:
            fprintf(stderr, "%s [-M core] [-N system]\n", av[0]);
            exit(1);
        }
    }
    ac -= optind;
    av += optind;

    if ((kd = kvm_open(sysfile, corefile, NULL, O_RDONLY, "kvm:")) == NULL) {
        perror("kvm_open");
        exit(1);
    }
    if (kvm_nlist(kd, Nl) != 0) {
        perror("kvm_nlist");
        exit(1);
    }

    /* Pull the kernel globals we need (see Nl[] for the index order) */
    kkread(kd, Nl[0].n_value, &vm_page_array, sizeof(vm_page_array));
    kkread(kd, Nl[1].n_value, &vm_page_array_size, sizeof(vm_page_array_size));
    kernel_object_ptr = (void *)Nl[2].n_value;
    kkread(kd, Nl[3].n_value, &nbuf, sizeof(nbuf));
    kkread(kd, Nl[4].n_value, &nswbuf_mem, sizeof(nswbuf_mem));
    kkread(kd, Nl[5].n_value, &nswbuf_kva, sizeof(nswbuf_kva));
    kkread(kd, Nl[6].n_value, &nswbuf_raw, sizeof(nswbuf_raw));
    kern_size = Nl[8].n_value - Nl[7].n_value;	/* _end - kernbase */

    /*
     * Scan the vm_page_array validating all pages with associated objects
     */
    for (i = 0; i < vm_page_array_size; ++i) {
        if (debugopt) {
            printf("page %d\r", i);
            fflush(stdout);
        }
        kkread_vmpage(kd, (u_long)&vm_page_array[i], &m);
        if (m.object) {
            kkread(kd, (u_long)m.object, &obj, sizeof(obj));
            checkpage(kd, &vm_page_array[i], &m, &obj);
        }
        /*
         * Classify by page queue.  The PQ_* constants are compared as
         * group base indices, highest group first.
         */
        if (m.queue >= PQ_HOLD) {
            qstr = "HOLD";
        } else if (m.queue >= PQ_CACHE) {
            qstr = "CACHE";
        } else if (m.queue >= PQ_ACTIVE) {
            qstr = "ACTIVE";
        } else if (m.queue >= PQ_INACTIVE) {
            qstr = "INACTIVE";
        } else if (m.queue >= PQ_FREE) {
            qstr = "FREE";
            ++count_free;
        } else {
            qstr = "NONE";
        }
        /* Wired-page accounting, broken down by what the page backs */
        if (m.wire_count) {
                ++count_wired;
                if (m.object == NULL) {
                        /*
                         * Mapped + writeable + unqueued with no object
                         * is taken to be a page-table page.
                         */
                        if ((m.flags & PG_MAPPED) &&
                            (m.flags & PG_WRITEABLE) &&
                            (m.flags & PG_UNQUEUED)) {
                                ++count_wired_pgtable;
                        } else {
                                ++count_wired_other;
                        }
                } else if (m.object == kernel_object_ptr) {
                        ++count_wired_kernel;
                } else {
                        switch(obj.type) {
                        case OBJT_VNODE:
                                ++count_wired_vnode;
                                break;
                        case OBJT_DEFAULT:
                        case OBJT_SWAP:
                                if (m.md.pmap_count)
                                        ++count_wired_in_pmap;
                                else
                                        ++count_wired_anon;
                                break;
                        default:
                                ++count_wired_obj_other;
                                break;
                        }
                }
        } else if (m.md.pmap_count) {
                /* Not wired but present in a pmap: anonymous in-pmap count */
                if (m.object && m.object != kernel_object_ptr) {
                        switch(obj.type) {
                        case OBJT_DEFAULT:
                        case OBJT_SWAP:
                                ++count_anon_in_pmap;
                                break;
                        default:
                                break;
                        }
                }
        }

        if (verboseopt) {
            printf("page %p obj %p/%-8ju(%016jx) val=%02x dty=%02x hold=%d "
                   "wire=%-2d act=%-3d busy=%d w/pmapcnt=%d/%d %8s",
                &vm_page_array[i],
                m.object,
                (intmax_t)m.pindex,
                (intmax_t)m.pindex * PAGE_SIZE,
                m.valid,
                m.dirty,
                m.hold_count,
                m.wire_count,
                m.act_count,
                m.busy_count,
                m.md.writeable_count,
                m.md.pmap_count,
                qstr
            );
        }

        /*
         * Per-object-type accounting.  resident_page_count is summed
         * only the first time each object is seen (unique_object()).
         */
        if (m.object == kernel_object_ptr) {
                ostr = "kernel";
                if (unique_object(m.object))
                        count_kernel += obj.resident_page_count;
        } else if (m.object) {
            switch(obj.type) {
            case OBJT_DEFAULT:
                ostr = "default";
                if (unique_object(m.object))
                        count_anon += obj.resident_page_count;
                break;
            case OBJT_SWAP:
                ostr = "swap";
                if (unique_object(m.object))
                        count_anon += obj.resident_page_count;
                break;
            case OBJT_VNODE:
                ostr = "vnode";
                if (unique_object(m.object))
                        count_vnode += obj.resident_page_count;
                break;
            case OBJT_DEVICE:
                ostr = "device";
                if (unique_object(m.object))
                        count_device += obj.resident_page_count;
                break;
            case OBJT_PHYS:
                ostr = "phys";
                if (unique_object(m.object))
                        count_phys += obj.resident_page_count;
                break;
            case OBJT_DEAD:
                ostr = "dead";
                if (unique_object(m.object))
                        count_unknown += obj.resident_page_count;
                break;
            default:
                if (unique_object(m.object))
                        count_unknown += obj.resident_page_count;
                ostr = "unknown";
                break;
            }
        } else {
            ostr = "-";
            /* m.queue - m.pc normalizes a per-color queue to its base */
            if (m.queue == PQ_NONE)
                    ++count_noobj_offqueue;
            else if (m.queue - m.pc != PQ_FREE)
                    ++count_noobj_onqueue;
        }

        /* -v: decode the busy/flag bits symbolically */
        if (verboseopt) {
            printf(" %-7s", ostr);
            if (m.busy_count & PBUSY_LOCKED)
                printf(" BUSY");
            if (m.busy_count & PBUSY_WANTED)
                printf(" WANTED");
            if (m.flags & PG_WINATCFLS)
                printf(" WINATCFLS");
            if (m.flags & PG_FICTITIOUS)
                printf(" FICTITIOUS");
            if (m.flags & PG_WRITEABLE)
                printf(" WRITEABLE");
            if (m.flags & PG_MAPPED)
                printf(" MAPPED");
            if (m.flags & PG_NEED_COMMIT)
                printf(" NEED_COMMIT");
            if (m.flags & PG_REFERENCED)
                printf(" REFERENCED");
            if (m.flags & PG_CLEANCHK)
                printf(" CLEANCHK");
            if (m.busy_count & PBUSY_SWAPINPROG)
                printf(" SWAPINPROG");
            if (m.flags & PG_NOSYNC)
                printf(" NOSYNC");
            if (m.flags & PG_UNQUEUED)
                printf(" UNQUEUED");
            if (m.flags & PG_MARKER)
                printf(" MARKER");
            if (m.flags & PG_RAM)
                printf(" RAM");
            if (m.flags & PG_SWAPPED)
                printf(" SWAPPED");
#if 0
            if (m.flags & PG_SLAB)
                printf(" SLAB");
#endif
            printf("\n");
#if 0
            if (m.flags & PG_SLAB)
                addsltrack(&m);
#endif
        }
    }
    if (debugopt || verboseopt)
        printf("\n");

    /*
     * Summary report.  NOTE(review): the totals hardwire a 4096-byte
     * page (4096.0 / 1048576.0) instead of using PAGE_SIZE -- confirm
     * this tool only targets 4KB-page configurations.
     */
    printf("%8.2fM free\n", count_free * 4096.0 / 1048576.0);

    printf("%8.2fM wired vnode (in buffer cache)\n",
        count_wired_vnode * 4096.0 / 1048576.0);
    printf("%8.2fM wired in-pmap (probably vnode pages also in buffer cache)\n",
        count_wired_in_pmap * 4096.0 / 1048576.0);
    printf("%8.2fM wired pgtable\n",
        count_wired_pgtable * 4096.0 / 1048576.0);
    printf("%8.2fM wired anon\n",
        count_wired_anon * 4096.0 / 1048576.0);
    printf("%8.2fM wired kernel_object\n",
        count_wired_kernel * 4096.0 / 1048576.0);

        printf("\t%8.2fM vm_page_array\n",
            vm_page_array_size * sizeof(struct vm_page) / 1048576.0);
        printf("\t%8.2fM buf, swbuf_mem, swbuf_kva, swbuf_raw\n",
            (nbuf + nswbuf_mem + nswbuf_kva + nswbuf_raw) *
            sizeof(struct buf) / 1048576.0);
        printf("\t%8.2fM kernel binary\n", kern_size / 1048576.0);
        printf("\t(also add in KMALLOC id kmapinfo, or loosely, vmstat -m)\n");

    printf("%8.2fM wired other (unknown object)\n",
        count_wired_obj_other * 4096.0 / 1048576.0);
    printf("%8.2fM wired other (no object, probably kernel)\n",
        count_wired_other * 4096.0 / 1048576.0);

    printf("%8.2fM WIRED TOTAL\n",
        count_wired * 4096.0 / 1048576.0);

    printf("\n");
    printf("%8.2fM anonymous (total, includes in-pmap)\n",
        count_anon * 4096.0 / 1048576.0);
    printf("%8.2fM anonymous memory in-pmap\n",
        count_anon_in_pmap * 4096.0 / 1048576.0);
    printf("%8.2fM vnode (includes wired)\n",
        count_vnode * 4096.0 / 1048576.0);
    printf("%8.2fM device\n", count_device * 4096.0 / 1048576.0);
    printf("%8.2fM phys\n", count_phys * 4096.0 / 1048576.0);
    printf("%8.2fM kernel (includes wired)\n",
        count_kernel * 4096.0 / 1048576.0);
    printf("%8.2fM unknown\n", count_unknown * 4096.0 / 1048576.0);
    printf("%8.2fM no_object, off queue (includes wired w/o object)\n",
        count_noobj_offqueue * 4096.0 / 1048576.0);
    printf("%8.2fM no_object, on non-free queue (includes wired w/o object)\n",
        count_noobj_onqueue * 4096.0 / 1048576.0);

#if 0
    /*
     * Scan the vm_page_buckets array validating all pages found
     */
    for (i = 0; i <= vm_page_hash_mask; ++i) {
        if (debugopt) {
            printf("index %d\r", i);
            fflush(stdout);
        }
        kkread(kd, (u_long)&vm_page_buckets[i], &mptr, sizeof(mptr));
        while (mptr) {
            kkread(kd, (u_long)mptr, &m, sizeof(m));
            if (m.object) {
                kkread(kd, (u_long)m.object, &obj, sizeof(obj));
                hv = ((uintptr_t)m.object + m.pindex) ^ obj.hash_rand;
                hv &= vm_page_hash_mask;
                if (i != hv)
                    printf("vm_page_buckets[%d] ((struct vm_page *)%p)"
                        " should be in bucket %d\n", i, mptr, hv);
                checkpage(kd, mptr, &m, &obj);
            } else {
                printf("vm_page_buckets[%d] ((struct vm_page *)%p)"
                        " has no object\n", i, mptr);
            }
            mptr = m.hnext;
        }
    }
#endif
    if (debugopt)
        printf("\n");
#if 0
    dumpsltrack(kd);
#endif
    return(0);
}
451
/*
 * A page with an object.
 *
 * The body (validating the page against its vm_page_buckets hash chain)
 * is compiled out.  NOTE(review): presumably the vm_page hash table no
 * longer exists in the kernel; the function is kept as a hook for
 * future per-page consistency checks -- confirm before removing.
 */
void
checkpage(kvm_t *kd, vm_page_t mptr, vm_page_t m, struct vm_object *obj)
{
#if 0
    struct vm_page scan;
    vm_page_t scanptr;
    int hv;

    /* Recompute the page's bucket and verify it is on that chain */
    hv = ((uintptr_t)m->object + m->pindex) ^ obj->hash_rand;
    hv &= vm_page_hash_mask;
    kkread(kd, (u_long)&vm_page_buckets[hv], &scanptr, sizeof(scanptr));
    while (scanptr) {
        if (scanptr == mptr)
            break;
        kkread(kd, (u_long)scanptr, &scan, sizeof(scan));
        scanptr = scan.hnext;
    }
    if (scanptr) {
        if (debugopt > 1)
            printf("good checkpage %p bucket %d\n", mptr, hv);
    } else {
        printf("vm_page_buckets[%d] ((struct vm_page *)%p)"
                " page not found in bucket list\n", hv, mptr);
    }
#endif
}
481
/*
 * Accelerate the reading of VM pages: fetch the vm_page_array from the
 * kernel in 1024-entry slabs and satisfy per-page requests out of the
 * cached slab, refilling on a miss.  addr must lie within
 * vm_page_array[0..vm_page_array_size).
 */
static void
kkread_vmpage(kvm_t *kd, u_long addr, vm_page_t m)
{
    static struct vm_page vpcache[1024];	/* cached slab contents */
    static u_long vpbeg;			/* kernel va of slab start */
    static u_long vpend;			/* kernel va one past slab end */

    if (addr < vpbeg || addr >= vpend) {
        /* Miss: refill starting at addr, clamped to the end of the array */
        vpbeg = addr;
        vpend = addr + 1024 * sizeof(*m);
        if (vpend > (u_long)(uintptr_t)vm_page_array +
                    vm_page_array_size * sizeof(*m)) {
            vpend = (u_long)(uintptr_t)vm_page_array +
                    vm_page_array_size * sizeof(*m);
        }
        kkread(kd, vpbeg, vpcache, vpend - vpbeg);
    }
    *m = vpcache[(addr - vpbeg) / sizeof(*m)];
}
504
505 static void
506 kkread(kvm_t *kd, u_long addr, void *buf, size_t nbytes)
507 {
508     if (kvm_read(kd, addr, buf, nbytes) != nbytes) {
509         perror("kvm_read");
510         exit(1);
511     }
512 }
513
514 static int
515 kkread_err(kvm_t *kd, u_long addr, void *buf, size_t nbytes)
516 {
517     if (kvm_read(kd, addr, buf, nbytes) != nbytes) {
518         return 1;
519     }
520     return 0;
521 }
522
/*
 * Slab-zone tracking used by the (currently #if 0'd) addsltrack() /
 * dumpsltrack() debugging code: a chained hash of 128KB zone addresses.
 */
struct SLTrack {
        struct SLTrack *next;	/* hash-chain link */
        u_long addr;		/* 128KB-aligned zone address */
};

#define SLHSIZE 1024
#define SLHMASK (SLHSIZE - 1)

struct SLTrack *SLHash[SLHSIZE];
532
#if 0
/*
 * Record the 128KB-aligned zone containing a wired, mapped page that
 * belongs to an object.  Compiled out along with its caller in main().
 */
static
void
addsltrack(vm_page_t m)
{
        struct SLTrack *slt;
        u_long addr = (m->pindex * PAGE_SIZE) & ~131071L;
        int i;

        /* Only wired, mapped pages with a backing object are tracked */
        if (m->wire_count == 0 || (m->flags & PG_MAPPED) == 0 ||
            m->object == NULL)
                return;

        /* One SLTrack entry per unique 128KB zone */
        i = (addr / 131072) & SLHMASK;
        for (slt = SLHash[i]; slt; slt = slt->next) {
                if (slt->addr == addr)
                        break;
        }
        if (slt == NULL) {
                slt = malloc(sizeof(*slt));
                slt->addr = addr;
                slt->next = SLHash[i];
                SLHash[i] = slt;
        }
}
#endif
559
560 static
561 void
562 dumpsltrack(kvm_t *kd)
563 {
564         struct SLTrack *slt;
565         int i;
566         long total_zones = 0;
567         long full_zones = 0;
568
569         for (i = 0; i < SLHSIZE; ++i) {
570                 for (slt = SLHash[i]; slt; slt = slt->next) {
571                         SLZone z;
572
573                         if (kkread_err(kd, slt->addr, &z, sizeof(z))) {
574                                 printf("SLZone 0x%016lx not mapped\n",
575                                         slt->addr);
576                                 continue;
577                         }
578                         printf("SLZone 0x%016lx { mag=%08x cpu=%-2d NFree=%-3d "
579                                "chunksz=%-5d }\n",
580                                slt->addr,
581                                z.z_Magic,
582                                z.z_Cpu,
583                                z.z_NFree,
584                                z.z_ChunkSize
585                         );
586                         ++total_zones;
587                         if (z.z_NFree == 0)
588                                 ++full_zones;
589                 }
590         }
591         printf("FullZones/TotalZones: %ld/%ld\n", full_zones, total_zones);
592 }
593
/*
 * Object de-duplication table.  unique_object() remembers every object
 * pointer it has been handed so per-object statistics (the
 * resident_page_count sums in main()) are accumulated only once per
 * object.  Entries are never freed; the table lives for the run.
 */
#define HASH_SIZE       (1024*1024)
#define HASH_MASK       (HASH_SIZE - 1)

struct dup_entry {
        struct dup_entry *next;	/* hash-chain link */
        void    *ptr;		/* object pointer already seen */
};

struct dup_entry *dup_hash[HASH_SIZE];

/*
 * Return 1 the first time ptr is seen, 0 on any subsequent call with
 * the same pointer.
 */
static int
unique_object(void *ptr)
{
        struct dup_entry *hen;
        int hv;

        /* Fold the high bits in so nearby pointers spread across buckets */
        hv = (intptr_t)ptr ^ ((intptr_t)ptr >> 20);
        hv &= HASH_MASK;
        for (hen = dup_hash[hv]; hen; hen = hen->next) {
                if (hen->ptr == ptr)
                        return 0;
        }
        hen = malloc(sizeof(*hen));
        if (hen == NULL) {
                /* Fatal, matching the error policy of kkread() */
                perror("malloc");
                exit(1);
        }
        hen->next = dup_hash[hv];
        hen->ptr = ptr;
        dup_hash[hv] = hen;

        return 1;
}