Upgrade libressl. 1/2
[dragonfly.git] / sys / vm / vm_meter.c
1 /*
2  * (MPSAFE)
3  *
4  * Copyright (c) 1982, 1986, 1989, 1993
5  *      The Regents of the University of California.  All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. Neither the name of the University nor the names of its contributors
16  *    may be used to endorse or promote products derived from this software
17  *    without specific prior written permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29  * SUCH DAMAGE.
30  *
31  *      @(#)vm_meter.c  8.4 (Berkeley) 1/4/94
32  * $FreeBSD: src/sys/vm/vm_meter.c,v 1.34.2.7 2002/10/10 19:28:22 dillon Exp $
33  */
34
35 #include <sys/param.h>
36 #include <sys/proc.h>
37 #include <sys/systm.h>
38 #include <sys/kernel.h>
39 #include <sys/malloc.h>
40 #include <sys/resource.h>
41 #include <sys/vmmeter.h>
42 #include <sys/kcollect.h>
43
44 #include <vm/vm.h>
45 #include <vm/vm_page.h>
46 #include <vm/vm_extern.h>
47 #include <vm/vm_param.h>
48 #include <sys/lock.h>
49 #include <vm/pmap.h>
50 #include <vm/vm_map.h>
51 #include <vm/vm_object.h>
52 #include <sys/sysctl.h>
53
54 /*
55  * WARNING: vmstats represents the final say, but individual cpu's may
56  *          accumulate adjustments in gd->gd_vmstats_adj.  These are
57  *          synchronized to the global vmstats in hardclock.
58  *
59  *          In addition, most individual cpus check vmstats using a local
60  *          copy of the global vmstats in gd->gd_vmstats.  Hardclock also
61  *          synchronizes the copy.  The pageout code and vm_page_alloc will
62  *          also synchronize their local copies as necessary.
63  *
64  *          Other consumers should not expect perfect values.
65  */
/*
 * The authoritative global VM statistics structure.  Per-cpu adjustments
 * may lag behind it (see the WARNING comment above).
 */
__exclusive_cache_line struct vmstats vmstats;

/*
 * Sleep-time threshold (MAXSLP from the system headers); an lwp whose
 * lwp_slptime has reached this value is no longer counted as sleeping
 * by do_vmtotal_callback() below.
 */
static int maxslp = MAXSLP;

/* Writable VM paging tuning knobs exported under the vm.* sysctl tree */
SYSCTL_ULONG(_vm, VM_V_FREE_MIN, v_free_min,
	CTLFLAG_RW, &vmstats.v_free_min, 0,
	"Minimum number of pages desired free");
SYSCTL_ULONG(_vm, VM_V_FREE_TARGET, v_free_target,
	CTLFLAG_RW, &vmstats.v_free_target, 0,
	"Number of pages desired free");
SYSCTL_ULONG(_vm, VM_V_FREE_RESERVED, v_free_reserved,
	CTLFLAG_RW, &vmstats.v_free_reserved, 0,
	"Number of pages reserved for deadlock");
SYSCTL_ULONG(_vm, VM_V_INACTIVE_TARGET, v_inactive_target,
	CTLFLAG_RW, &vmstats.v_inactive_target, 0,
	"Number of pages desired inactive");
SYSCTL_ULONG(_vm, VM_V_CACHE_MIN, v_cache_min,
	CTLFLAG_RW, &vmstats.v_cache_min, 0,
	"Min number of pages desired on cache queue");
SYSCTL_ULONG(_vm, VM_V_CACHE_MAX, v_cache_max,
	CTLFLAG_RW, &vmstats.v_cache_max, 0,
	"Max number of pages in cached obj");
SYSCTL_ULONG(_vm, VM_V_PAGEOUT_FREE_MIN, v_pageout_free_min,
	CTLFLAG_RW, &vmstats.v_pageout_free_min, 0,
	"Min number pages reserved for kernel");
SYSCTL_ULONG(_vm, OID_AUTO, v_free_severe,
	CTLFLAG_RW, &vmstats.v_free_severe, 0, "");

/* Read-only export of the load average history */
SYSCTL_STRUCT(_vm, VM_LOADAVG, loadavg,
	CTLFLAG_RD | CTLFLAG_NOLOCK,
	&averunnable, loadavg, "Machine loadaverage history");

static int do_vmtotal_callback(struct proc *p, void *data);
99
100 /*
101  * No requirements.
102  */
103 static int
104 do_vmtotal(SYSCTL_HANDLER_ARGS)
105 {
106         struct vmtotal total;
107         globaldata_t gd;
108         int n;
109
110         bzero(&total, sizeof(total));
111         for (n = 0; n < ncpus; ++n) {
112                 gd = globaldata_find(n);
113
114                 /* total.t_rq calculated separately */
115                 /* total.t_dw calculated separately */
116                 /* total.t_pw calculated separately */
117                 /* total.t_sl calculated separately */
118                 /* total.t_sw calculated separately */
119                 total.t_vm += gd->gd_vmtotal.t_vm;
120                 total.t_avm += gd->gd_vmtotal.t_avm;
121                 total.t_rm += gd->gd_vmtotal.t_rm;
122                 total.t_arm += gd->gd_vmtotal.t_arm;
123                 total.t_vmshr += gd->gd_vmtotal.t_vmshr;
124                 total.t_avmshr += gd->gd_vmtotal.t_avmshr;
125                 total.t_rmshr += gd->gd_vmtotal.t_rmshr;
126                 total.t_armshr += gd->gd_vmtotal.t_armshr;
127                 /* total.t_free calculated separately */
128         }
129
130         /*
131          * Calculate process statistics.
132          */
133         allproc_scan(do_vmtotal_callback, &total, 0);
134
135         /*
136          * Adjust for sysctl return.  Add real memory into virtual memory.
137          * Set t_free.
138          *
139          * t_rm - Real memory
140          * t_vm - Virtual memory (real + swap)
141          */
142         total.t_vm += total.t_rm;
143         total.t_free = vmstats.v_free_count + vmstats.v_cache_count;
144
145         return (sysctl_handle_opaque(oidp, &total, sizeof(total), req));
146 }
147
/*
 * allproc_scan() callback for do_vmtotal().  Classifies each lwp of a
 * non-system process into the vmtotal scheduler-state counters.
 *
 * Returns 0 to continue the scan.
 */
static int
do_vmtotal_callback(struct proc *p, void *data)
{
	struct vmtotal *totalp = data;
	struct lwp *lp;

	/* Kernel/system processes are not accounted */
	if (p->p_flags & P_SYSTEM)
		return(0);

	/* Shared token is sufficient; lwp state is only read */
	lwkt_gettoken_shared(&p->p_token);

	FOREACH_LWP_IN_PROC(lp, p) {
		switch (lp->lwp_stat) {
		case LSSTOP:
		case LSSLEEP:
			if ((lp->lwp_flags & LWP_SINTR) == 0)
				totalp->t_dw++;	/* uninterruptible wait */
			else if (lp->lwp_slptime < maxslp)
				totalp->t_sl++;	/* interruptible sleep */
			/*
			 * NOTE(review): a long-sleeping lwp terminates
			 * accounting for this process's remaining lwps
			 * entirely, not just this lwp -- appears to be
			 * intentional, inherited from the historical BSD
			 * vmtotal logic; confirm before changing.
			 */
			if (lp->lwp_slptime >= maxslp)
				goto out;
			break;
		case LSRUN:
			totalp->t_rq++;		/* runnable */
			if (p->p_stat == SIDL)
				goto out;
			break;
		default:
			goto out;
		}

		/*
		 * Set while in vm_fault()
		 */
		if (lp->lwp_flags & LWP_PAGING)
			totalp->t_pw++;
	}
out:
	lwkt_reltoken(&p->p_token);
	return(0);
}
189
190 /*
191  * No requirements.
192  */
193 static int
194 do_vmstats(SYSCTL_HANDLER_ARGS)
195 {
196         struct vmstats vms = vmstats;
197         return (sysctl_handle_opaque(oidp, &vms, sizeof(vms), req));
198 }
199
200 /*
201  * No requirements.
202  */
203 static int
204 do_vmmeter(SYSCTL_HANDLER_ARGS)
205 {
206         int boffset = offsetof(struct vmmeter, vmmeter_uint_begin);
207         int eoffset = offsetof(struct vmmeter, vmmeter_uint_end);
208         struct vmmeter vmm;
209         int i;
210
211         bzero(&vmm, sizeof(vmm));
212         for (i = 0; i < ncpus; ++i) {
213                 int off;
214                 struct globaldata *gd = globaldata_find(i);
215
216                 for (off = boffset; off <= eoffset; off += sizeof(u_int)) {
217                         *(u_int *)((char *)&vmm + off) +=
218                                 *(u_int *)((char *)&gd->gd_cnt + off);
219                 }
220
221         }
222         vmm.v_intr += vmm.v_ipi + vmm.v_timer;
223         return (sysctl_handle_opaque(oidp, &vmm, sizeof(vmm), req));
224 }
225
226 /*
227  * vcnt() -     accumulate statistics from the cnt structure for each cpu
228  *
229  *      The vmmeter structure is now per-cpu as well as global.  Those
230  *      statistics which can be kept on a per-cpu basis (to avoid cache
231  *      stalls between cpus) can be moved to the per-cpu vmmeter.  Remaining
232  *      statistics, such as v_free_reserved, are left in the global
233  *      structure.
234  *
235  * (sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req)
236  *
237  * No requirements.
238  */
239 static int
240 vcnt(SYSCTL_HANDLER_ARGS)
241 {
242         int i;
243         int count = 0;
244         int offset = arg2;
245
246         for (i = 0; i < ncpus; ++i) {
247                 struct globaldata *gd = globaldata_find(i);
248                 count += *(int *)((char *)&gd->gd_cnt + offset);
249         }
250         return(SYSCTL_OUT(req, &count, sizeof(int)));
251 }
252
253 /*
254  * No requirements.
255  */
256 static int
257 vcnt_intr(SYSCTL_HANDLER_ARGS)
258 {
259         int i;
260         int count = 0;
261
262         for (i = 0; i < ncpus; ++i) {
263                 struct globaldata *gd = globaldata_find(i);
264
265                 count += gd->gd_cnt.v_intr + gd->gd_cnt.v_ipi +
266                          gd->gd_cnt.v_timer;
267         }
268         return(SYSCTL_OUT(req, &count, sizeof(int)));
269 }
270
/*
 * VMMETEROFF(var) - byte offset of a field within struct vmmeter,
 * passed as arg2 to the vcnt()/vcnt_intr() handlers below.
 */
#define VMMETEROFF(var) offsetof(struct vmmeter, var)

/* Aggregate opaque-structure sysctls */
SYSCTL_PROC(_vm, OID_AUTO, vmtotal, CTLTYPE_OPAQUE|CTLFLAG_RD,
    0, sizeof(struct vmtotal), do_vmtotal, "S,vmtotal",
    "System virtual memory aggregate");
SYSCTL_PROC(_vm, OID_AUTO, vmstats, CTLTYPE_OPAQUE|CTLFLAG_RD,
    0, sizeof(struct vmstats), do_vmstats, "S,vmstats",
    "System virtual memory statistics");
SYSCTL_PROC(_vm, OID_AUTO, vmmeter, CTLTYPE_OPAQUE|CTLFLAG_RD,
    0, sizeof(struct vmmeter), do_vmmeter, "S,vmmeter",
    "System statistics");
/* Container nodes for the individual counter sysctls */
SYSCTL_NODE(_vm, OID_AUTO, stats, CTLFLAG_RW, 0, "VM meter stats");
SYSCTL_NODE(_vm_stats, OID_AUTO, sys, CTLFLAG_RW, 0, "VM meter sys stats");
SYSCTL_NODE(_vm_stats, OID_AUTO, vm, CTLFLAG_RW, 0, "VM meter vm stats");
SYSCTL_NODE(_vm_stats, OID_AUTO, misc, CTLFLAG_RW, 0, "VM meter misc stats");

/*
 * Per-field counters, each summed across cpus by vcnt() using the
 * VMMETEROFF() offset (v_intr uses vcnt_intr() to fold in IPI/timer).
 */
SYSCTL_PROC(_vm_stats_sys, OID_AUTO, v_swtch, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_swtch), vcnt, "IU", "Context switches");
SYSCTL_PROC(_vm_stats_sys, OID_AUTO, v_intrans_coll, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_intrans_coll), vcnt, "IU", "Intransit map collisions (total)");
SYSCTL_PROC(_vm_stats_sys, OID_AUTO, v_intrans_wait, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_intrans_wait), vcnt, "IU", "Intransit map collisions which blocked");
SYSCTL_PROC(_vm_stats_sys, OID_AUTO, v_forwarded_ints, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_forwarded_ints), vcnt, "IU", "Forwarded interrupts due to MP lock");
SYSCTL_PROC(_vm_stats_sys, OID_AUTO, v_forwarded_hits, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_forwarded_hits), vcnt, "IU", "Forwarded hits due to MP lock");
SYSCTL_PROC(_vm_stats_sys, OID_AUTO, v_forwarded_misses, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_forwarded_misses), vcnt, "IU", "Forwarded misses due to MP lock");
SYSCTL_PROC(_vm_stats_sys, OID_AUTO, v_trap, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_trap), vcnt, "IU", "Traps");
SYSCTL_PROC(_vm_stats_sys, OID_AUTO, v_syscall, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_syscall), vcnt, "IU", "Syscalls");
SYSCTL_PROC(_vm_stats_sys, OID_AUTO, v_intr, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_intr), vcnt_intr, "IU", "Hardware interrupts");
SYSCTL_PROC(_vm_stats_sys, OID_AUTO, v_ipi, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_ipi), vcnt, "IU", "Inter-processor interrupts");
SYSCTL_PROC(_vm_stats_sys, OID_AUTO, v_timer, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_timer), vcnt, "IU", "LAPIC timer interrupts");
SYSCTL_PROC(_vm_stats_sys, OID_AUTO, v_soft, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_soft), vcnt, "IU", "Software interrupts");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_vm_faults, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_vm_faults), vcnt, "IU", "VM faults");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_cow_faults, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_cow_faults), vcnt, "IU", "COW faults");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_cow_optim, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_cow_optim), vcnt, "IU", "Optimized COW faults");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_zfod, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_zfod), vcnt, "IU", "Zero fill");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_ozfod, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_ozfod), vcnt, "IU", "Optimized zero fill");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_swapin, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_swapin), vcnt, "IU", "Swapin operations");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_swapout, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_swapout), vcnt, "IU", "Swapout operations");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_swappgsin, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_swappgsin), vcnt, "IU", "Swapin pages");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_swappgsout, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_swappgsout), vcnt, "IU", "Swapout pages");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_vnodein, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_vnodein), vcnt, "IU", "Vnodein operations");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_vnodeout, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_vnodeout), vcnt, "IU", "Vnodeout operations");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_vnodepgsin, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_vnodepgsin), vcnt, "IU", "Vnodein pages");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_vnodepgsout, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_vnodepgsout), vcnt, "IU", "Vnodeout pages");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_intrans, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_intrans), vcnt, "IU", "In transit page blocking");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_reactivated, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_reactivated), vcnt, "IU", "Reactivated pages");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_pdwakeups, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_pdwakeups), vcnt, "IU", "Pagedaemon wakeups");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_ppwakeups, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_ppwakeups), vcnt, "IU", "vm_wait wakeups");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_pdpages, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_pdpages), vcnt, "IU", "Pagedaemon page scans");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_dfree, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_dfree), vcnt, "IU", "Pages freed by daemon");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_pfree, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_pfree), vcnt, "IU", "Pages freed by exiting processes");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_tfree, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_tfree), vcnt, "IU", "Total pages freed");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_forks, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_forks), vcnt, "IU", "Number of fork() calls");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_vforks, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_vforks), vcnt, "IU", "Number of vfork() calls");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_rforks, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_rforks), vcnt, "IU", "Number of rfork() calls");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_kthreads, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_kthreads), vcnt, "IU", "Number of fork() calls by kernel");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_forkpages, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_forkpages), vcnt, "IU", "VM pages affected by fork()");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_vforkpages, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_vforkpages), vcnt, "IU", "VM pages affected by vfork()");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_rforkpages, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_rforkpages), vcnt, "IU", "VM pages affected by rfork()");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_kthreadpages, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_kthreadpages), vcnt, "IU", "VM pages affected by fork() by kernel");

/* Read-only exports of global vmstats fields (no per-cpu summation) */
SYSCTL_UINT(_vm_stats_vm, OID_AUTO,
	v_page_size, CTLFLAG_RD, &vmstats.v_page_size, 0,
	"Page size in bytes");
SYSCTL_ULONG(_vm_stats_vm, OID_AUTO,
	v_page_count, CTLFLAG_RD, &vmstats.v_page_count, 0,
	"Total number of pages in system");
SYSCTL_ULONG(_vm_stats_vm, OID_AUTO,
	v_free_reserved, CTLFLAG_RD, &vmstats.v_free_reserved, 0,
	"Number of pages reserved for deadlock");
SYSCTL_ULONG(_vm_stats_vm, OID_AUTO,
	v_free_target, CTLFLAG_RD, &vmstats.v_free_target, 0,
	"Number of pages desired free");
SYSCTL_ULONG(_vm_stats_vm, OID_AUTO,
	v_free_min, CTLFLAG_RD, &vmstats.v_free_min, 0,
	"Minimum number of pages desired free");
SYSCTL_ULONG(_vm_stats_vm, OID_AUTO,
	v_free_count, CTLFLAG_RD, &vmstats.v_free_count, 0,
	"Number of pages free");
SYSCTL_ULONG(_vm_stats_vm, OID_AUTO,
	v_wire_count, CTLFLAG_RD, &vmstats.v_wire_count, 0,
	"Number of pages wired down");
SYSCTL_ULONG(_vm_stats_vm, OID_AUTO,
	v_active_count, CTLFLAG_RD, &vmstats.v_active_count, 0,
	"Number of pages active");
SYSCTL_ULONG(_vm_stats_vm, OID_AUTO,
	v_inactive_target, CTLFLAG_RD, &vmstats.v_inactive_target, 0,
	"Number of pages desired inactive");
SYSCTL_ULONG(_vm_stats_vm, OID_AUTO,
	v_inactive_count, CTLFLAG_RD, &vmstats.v_inactive_count, 0,
	"Number of pages inactive");
SYSCTL_ULONG(_vm_stats_vm, OID_AUTO,
	v_cache_count, CTLFLAG_RD, &vmstats.v_cache_count, 0,
	"Number of pages on buffer cache queue");
SYSCTL_ULONG(_vm_stats_vm, OID_AUTO,
	v_cache_min, CTLFLAG_RD, &vmstats.v_cache_min, 0,
	"Min number of pages desired on cache queue");
SYSCTL_ULONG(_vm_stats_vm, OID_AUTO,
	v_cache_max, CTLFLAG_RD, &vmstats.v_cache_max, 0,
	"Max number of pages in cached obj");
SYSCTL_ULONG(_vm_stats_vm, OID_AUTO,
	v_pageout_free_min, CTLFLAG_RD, &vmstats.v_pageout_free_min, 0,
	"Min number pages reserved for kernel");
SYSCTL_ULONG(_vm_stats_vm, OID_AUTO,
	v_interrupt_free_min, CTLFLAG_RD, &vmstats.v_interrupt_free_min, 0,
	"Reserved number of pages for int code");
415
416 /*
417  * No requirements.
418  */
419 static int
420 do_vmmeter_pcpu(SYSCTL_HANDLER_ARGS)
421 {
422         int boffset = offsetof(struct vmmeter, vmmeter_uint_begin);
423         int eoffset = offsetof(struct vmmeter, vmmeter_uint_end);
424         struct globaldata *gd = arg1;
425         struct vmmeter vmm;
426         int off;
427
428         bzero(&vmm, sizeof(vmm));
429         for (off = boffset; off <= eoffset; off += sizeof(u_int)) {
430                 *(u_int *)((char *)&vmm + off) +=
431                         *(u_int *)((char *)&gd->gd_cnt + off);
432         }
433         vmm.v_intr += vmm.v_ipi + vmm.v_timer;
434         return (sysctl_handle_opaque(oidp, &vmm, sizeof(vmm), req));
435 }
436
/*
 * Callback for long-term slow data collection on 10-second interval.
 *
 * Return faults, set data for other entries.  The returned value is the
 * vm-fault delta since the previous invocation; the kcollect framework
 * records it under the id this callback was registered with
 * (KCOLLECT_VMFAULT, see vmmeter_init()).
 */
#define PTOB(value)	((uint64_t)(value) << PAGE_SHIFT)

static uint64_t
collect_vmstats_callback(int n)
{
	/* Previous snapshot, retained across calls for delta computation */
	static struct vmmeter last_vmm;
	struct vmmeter cur_vmm;
	const int boffset = offsetof(struct vmmeter, vmmeter_uint_begin);
	const int eoffset = offsetof(struct vmmeter, vmmeter_uint_end);
	uint64_t total;

	/*
	 * The hardclock already rolls up vmstats for us.
	 */
	kcollect_setvalue(KCOLLECT_MEMFRE, PTOB(vmstats.v_free_count));
	kcollect_setvalue(KCOLLECT_MEMCAC, PTOB(vmstats.v_cache_count));
	kcollect_setvalue(KCOLLECT_MEMINA, PTOB(vmstats.v_inactive_count));
	kcollect_setvalue(KCOLLECT_MEMACT, PTOB(vmstats.v_active_count));
	kcollect_setvalue(KCOLLECT_MEMWIR, PTOB(vmstats.v_wire_count));

	/*
	 * Collect pcpu statistics for things like faults.
	 *
	 * NOTE(review): the parameter 'n' is reused as the cpu loop index;
	 * its incoming value is never read and is clobbered here.
	 */
	bzero(&cur_vmm, sizeof(cur_vmm));
	for (n = 0; n < ncpus; ++n) {
		struct globaldata *gd = globaldata_find(n);
		int off;

		for (off = boffset; off <= eoffset; off += sizeof(u_int)) {
			*(u_int *)((char *)&cur_vmm + off) +=
				*(u_int *)((char *)&gd->gd_cnt + off);
		}

	}

	/* Per-interval deltas: publish (cur - last) and save cur as last */
	total = cur_vmm.v_cow_faults - last_vmm.v_cow_faults;
	last_vmm.v_cow_faults = cur_vmm.v_cow_faults;
	kcollect_setvalue(KCOLLECT_COWFAULT, total);

	total = cur_vmm.v_zfod - last_vmm.v_zfod;
	last_vmm.v_zfod = cur_vmm.v_zfod;
	kcollect_setvalue(KCOLLECT_ZFILL, total);

	total = cur_vmm.v_syscall - last_vmm.v_syscall;
	last_vmm.v_syscall = cur_vmm.v_syscall;
	kcollect_setvalue(KCOLLECT_SYSCALLS, total);

	total = cur_vmm.v_intr - last_vmm.v_intr;
	last_vmm.v_intr = cur_vmm.v_intr;
	kcollect_setvalue(KCOLLECT_INTR, total);

	total = cur_vmm.v_ipi - last_vmm.v_ipi;
	last_vmm.v_ipi = cur_vmm.v_ipi;
	kcollect_setvalue(KCOLLECT_IPI, total);

	total = cur_vmm.v_timer - last_vmm.v_timer;
	last_vmm.v_timer = cur_vmm.v_timer;
	kcollect_setvalue(KCOLLECT_TIMER, total);

	/* The vm-fault delta is returned rather than set explicitly */
	total = cur_vmm.v_vm_faults - last_vmm.v_vm_faults;
	last_vmm.v_vm_faults = cur_vmm.v_vm_faults;

	return total;
}
506
/*
 * Called from the low level boot code only.
 *
 * Registers a per-cpu vm.cpuN.vmmeter sysctl node for every cpu and
 * hooks the slow-interval kcollect statistics (memory usage, faults,
 * syscalls, interrupt counts).
 */
static void
vmmeter_init(void *dummy __unused)
{
	int i;

	for (i = 0; i < ncpus; ++i) {
		struct sysctl_ctx_list *ctx;
		struct sysctl_oid *oid;
		struct globaldata *gd;
		char name[32];

		ksnprintf(name, sizeof(name), "cpu%d", i);

		/*
		 * NOTE(review): the context is never freed -- presumably
		 * intentional since the sysctl nodes persist for the life
		 * of the system.
		 */
		ctx = kmalloc(sizeof(*ctx), M_TEMP, M_WAITOK);
		sysctl_ctx_init(ctx);
		oid = SYSCTL_ADD_NODE(ctx, SYSCTL_STATIC_CHILDREN(_vm),
				      OID_AUTO, name, CTLFLAG_RD, 0, "");

		gd = globaldata_find(i);
		SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
				"vmmeter", CTLTYPE_OPAQUE|CTLFLAG_RD,
				gd, sizeof(struct vmmeter), do_vmmeter_pcpu,
				"S,vmmeter", "System per-cpu statistics");
	}
	/*
	 * Only the "fault" entry carries an active callback;
	 * collect_vmstats_callback() sets the values for the passive
	 * (NULL-callback) entries below as a side effect.
	 */
	kcollect_register(KCOLLECT_VMFAULT, "fault", collect_vmstats_callback,
			  KCOLLECT_SCALE(KCOLLECT_VMFAULT_FORMAT, 0));
	kcollect_register(KCOLLECT_COWFAULT, "cow", NULL,
			  KCOLLECT_SCALE(KCOLLECT_COWFAULT_FORMAT, 0));
	kcollect_register(KCOLLECT_ZFILL, "zfill", NULL,
			  KCOLLECT_SCALE(KCOLLECT_ZFILL_FORMAT, 0));

	/* Memory gauges, scaled against total physical memory */
	kcollect_register(KCOLLECT_MEMFRE, "free", NULL,
			  KCOLLECT_SCALE(KCOLLECT_MEMFRE_FORMAT,
					 PTOB(vmstats.v_page_count)));
	kcollect_register(KCOLLECT_MEMCAC, "cache", NULL,
			  KCOLLECT_SCALE(KCOLLECT_MEMCAC_FORMAT,
					 PTOB(vmstats.v_page_count)));
	kcollect_register(KCOLLECT_MEMINA, "inact", NULL,
			  KCOLLECT_SCALE(KCOLLECT_MEMINA_FORMAT,
					 PTOB(vmstats.v_page_count)));
	kcollect_register(KCOLLECT_MEMACT, "act", NULL,
			  KCOLLECT_SCALE(KCOLLECT_MEMACT_FORMAT,
					 PTOB(vmstats.v_page_count)));
	kcollect_register(KCOLLECT_MEMWIR, "wired", NULL,
			  KCOLLECT_SCALE(KCOLLECT_MEMWIR_FORMAT,
					 PTOB(vmstats.v_page_count)));

	kcollect_register(KCOLLECT_SYSCALLS, "syscalls", NULL,
			  KCOLLECT_SCALE(KCOLLECT_SYSCALLS_FORMAT, 0));

	kcollect_register(KCOLLECT_INTR, "intr", NULL,
			  KCOLLECT_SCALE(KCOLLECT_INTR_FORMAT, 0));
	kcollect_register(KCOLLECT_IPI, "ipi", NULL,
			  KCOLLECT_SCALE(KCOLLECT_IPI_FORMAT, 0));
	kcollect_register(KCOLLECT_TIMER, "timer", NULL,
			  KCOLLECT_SCALE(KCOLLECT_TIMER_FORMAT, 0));
}
SYSINIT(vmmeter, SI_SUB_PSEUDO, SI_ORDER_ANY, vmmeter_init, 0);
568
569 /*
570  * Rolls up accumulated pcpu adjustments to vmstats counts into the global
571  * structure, copy the global structure into our pcpu structure.  Critical
572  * path checks will use our pcpu structure.
573  *
574  * This is somewhat expensive and only called when needed, and by the
575  * hardclock.
576  */
577 void
578 vmstats_rollup(void)
579 {
580         int cpu;
581
582         for (cpu = 0; cpu < ncpus; ++cpu) {
583                 vmstats_rollup_cpu(globaldata_find(cpu));
584         }
585         mycpu->gd_vmstats = vmstats;
586 }
587
588 void
589 vmstats_rollup_cpu(globaldata_t gd)
590 {
591         long value;
592
593         if (gd->gd_vmstats_adj.v_free_count) {
594                 value = atomic_swap_long(&gd->gd_vmstats_adj.v_free_count, 0);
595                 atomic_add_long(&vmstats.v_free_count, value);
596         }
597         if (gd->gd_vmstats_adj.v_cache_count) {
598                 value = atomic_swap_long(&gd->gd_vmstats_adj.v_cache_count, 0);
599                 atomic_add_long(&vmstats.v_cache_count, value);
600         }
601         if (gd->gd_vmstats_adj.v_inactive_count) {
602                 value=atomic_swap_long(&gd->gd_vmstats_adj.v_inactive_count, 0);
603                 atomic_add_long(&vmstats.v_inactive_count, value);
604         }
605         if (gd->gd_vmstats_adj.v_active_count) {
606                 value = atomic_swap_long(&gd->gd_vmstats_adj.v_active_count, 0);
607                 atomic_add_long(&vmstats.v_active_count, value);
608         }
609         if (gd->gd_vmstats_adj.v_wire_count) {
610                 value = atomic_swap_long(&gd->gd_vmstats_adj.v_wire_count, 0);
611                 atomic_add_long(&vmstats.v_wire_count, value);
612         }
613 }