kernel - Fix DEBUG_LOCKS races
[dragonfly.git] / sys / vm / vm_meter.c
... / ...
CommitLineData
1/*
2 * (MPSAFE)
3 *
4 * Copyright (c) 1982, 1986, 1989, 1993
5 * The Regents of the University of California. All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed by the University of
18 * California, Berkeley and its contributors.
19 * 4. Neither the name of the University nor the names of its contributors
20 * may be used to endorse or promote products derived from this software
21 * without specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
27 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 * SUCH DAMAGE.
34 *
35 * @(#)vm_meter.c 8.4 (Berkeley) 1/4/94
36 * $FreeBSD: src/sys/vm/vm_meter.c,v 1.34.2.7 2002/10/10 19:28:22 dillon Exp $
37 * $DragonFly: src/sys/vm/vm_meter.c,v 1.15 2008/04/28 18:04:08 dillon Exp $
38 */
39
40#include <sys/param.h>
41#include <sys/proc.h>
42#include <sys/systm.h>
43#include <sys/kernel.h>
44#include <sys/resource.h>
45#include <sys/vmmeter.h>
46
47#include <vm/vm.h>
48#include <vm/vm_page.h>
49#include <vm/vm_extern.h>
50#include <vm/vm_param.h>
51#include <sys/lock.h>
52#include <vm/pmap.h>
53#include <vm/vm_map.h>
54#include <vm/vm_object.h>
55#include <sys/sysctl.h>
56
/*
 * Global VM statistics structure, maintained by the VM subsystem and
 * exported (in part) through the sysctl nodes below.
 */
struct vmstats vmstats;

/*
 * Sleep-time threshold (in seconds); a thread asleep at least this long
 * is not counted in the short-sleeper totals by do_vmtotal_callback().
 */
static int maxslp = MAXSLP;

/* Read-write tunables for the paging watermarks kept in vmstats */
SYSCTL_UINT(_vm, VM_V_FREE_MIN, v_free_min,
	CTLFLAG_RW, &vmstats.v_free_min, 0,
	"Minimum number of pages desired free");
SYSCTL_UINT(_vm, VM_V_FREE_TARGET, v_free_target,
	CTLFLAG_RW, &vmstats.v_free_target, 0,
	"Number of pages desired free");
SYSCTL_UINT(_vm, VM_V_FREE_RESERVED, v_free_reserved,
	CTLFLAG_RW, &vmstats.v_free_reserved, 0,
	"Number of pages reserved for deadlock");
SYSCTL_UINT(_vm, VM_V_INACTIVE_TARGET, v_inactive_target,
	CTLFLAG_RW, &vmstats.v_inactive_target, 0,
	"Number of pages desired inactive");
SYSCTL_UINT(_vm, VM_V_CACHE_MIN, v_cache_min,
	CTLFLAG_RW, &vmstats.v_cache_min, 0,
	"Min number of pages desired on cache queue");
SYSCTL_UINT(_vm, VM_V_CACHE_MAX, v_cache_max,
	CTLFLAG_RW, &vmstats.v_cache_max, 0,
	"Max number of pages in cached obj");
SYSCTL_UINT(_vm, VM_V_PAGEOUT_FREE_MIN, v_pageout_free_min,
	CTLFLAG_RW, &vmstats.v_pageout_free_min, 0,
	"Min number pages reserved for kernel");
SYSCTL_UINT(_vm, OID_AUTO, v_free_severe,
	CTLFLAG_RW, &vmstats.v_free_severe, 0, "");

/* Load average history, read-only */
SYSCTL_STRUCT(_vm, VM_LOADAVG, loadavg, CTLFLAG_RD,
    &averunnable, loadavg, "Machine loadaverage history");

/* Per-process accumulator used by do_vmtotal() via allproc_scan() */
static int do_vmtotal_callback(struct proc *p, void *data);
89
/*
 * Sysctl handler for vm.vmtotal: build an aggregate struct vmtotal
 * describing system-wide virtual memory usage and copy it out.
 *
 * The handler makes two passes over vm_object_list (each under
 * vmobj_token) with a process scan in between:
 *   1. clear OBJ_ACTIVE on every object,
 *   2. accumulate per-process/thread counts via allproc_scan(),
 *   3. total up object memory, honoring whatever OBJ_ACTIVE flags
 *      are set at that point.
 *
 * No requirements.
 */
static int
do_vmtotal(SYSCTL_HANDLER_ARGS)
{
	struct vmtotal total;
	struct vmtotal *totalp;
	vm_object_t object;

	bzero(&total, sizeof(total));
	totalp = &total;

	/*
	 * Mark all objects as inactive.
	 */
	lwkt_gettoken(&vmobj_token);
	for (object = TAILQ_FIRST(&vm_object_list);
	    object != NULL;
	    object = TAILQ_NEXT(object,object_list)) {
		/* markers aren't real objects; skip them */
		if (object->type == OBJT_MARKER)
			continue;
		vm_object_clear_flag(object, OBJ_ACTIVE);
	}
	lwkt_reltoken(&vmobj_token);

	/*
	 * Calculate process statistics.
	 */
	allproc_scan(do_vmtotal_callback, totalp);

	/*
	 * Calculate object memory usage statistics.
	 */
	lwkt_gettoken(&vmobj_token);
	for (object = TAILQ_FIRST(&vm_object_list);
	    object != NULL;
	    object = TAILQ_NEXT(object, object_list)) {
		/*
		 * devices, like /dev/mem, will badly skew our totals.
		 * markers aren't real objects.
		 */
		if (object->type == OBJT_MARKER)
			continue;
		if (object->type == OBJT_DEVICE)
			continue;
		if (object->size >= 0x7FFFFFFF) {
			/*
			 * Probably unbounded anonymous memory (really
			 * bounded by related vm_map_entry structures which
			 * we do not have access to in this loop).
			 */
			totalp->t_vm += object->resident_page_count;
		} else {
			/*
			 * It's questionable how useful this is but...
			 */
			totalp->t_vm += object->size;
		}
		totalp->t_rm += object->resident_page_count;
		if (object->flags & OBJ_ACTIVE) {
			totalp->t_avm += object->size;
			totalp->t_arm += object->resident_page_count;
		}
		if (object->shadow_count > 1) {
			/* shared object */
			totalp->t_vmshr += object->size;
			totalp->t_rmshr += object->resident_page_count;
			if (object->flags & OBJ_ACTIVE) {
				totalp->t_avmshr += object->size;
				totalp->t_armshr += object->resident_page_count;
			}
		}
	}
	lwkt_reltoken(&vmobj_token);

	/* free = pages on the free queue plus pages on the cache queue */
	totalp->t_free = vmstats.v_free_count + vmstats.v_cache_count;

	return (sysctl_handle_opaque(oidp, totalp, sizeof total, req));
}
169
/*
 * allproc_scan() callback for do_vmtotal(): classify each LWP of the
 * process into the vmtotal counters (t_dw/t_sl/t_sw/t_rq/t_pw).
 *
 * System processes are skipped entirely.  Note that several paths
 * return 0 from inside the LWP loop, which stops accounting for the
 * remaining LWPs of this process as well (long sleepers and SIDL
 * processes short-circuit the scan of that process).
 *
 * The caller must hold proc_token.
 */
static int
do_vmtotal_callback(struct proc *p, void *data)
{
	struct vmtotal *totalp = data;
	struct lwp *lp;

	/* kernel/system processes do not contribute to the totals */
	if (p->p_flag & P_SYSTEM)
		return(0);

	FOREACH_LWP_IN_PROC(lp, p) {
		switch (lp->lwp_stat) {
		case LSSTOP:
		case LSSLEEP:
			if ((p->p_flag & P_SWAPPEDOUT) == 0) {
				/* in-core: disk wait vs. short sleeper */
				if ((lp->lwp_flag & LWP_SINTR) == 0)
					totalp->t_dw++;
				else if (lp->lwp_slptime < maxslp)
					totalp->t_sl++;
			} else if (lp->lwp_slptime < maxslp) {
				/* swapped out but not idle */
				totalp->t_sw++;
			}
			/* long sleepers terminate accounting for this proc */
			if (lp->lwp_slptime >= maxslp)
				return(0);
			break;

		case LSRUN:
			if (p->p_flag & P_SWAPPEDOUT)
				totalp->t_sw++;
			else
				totalp->t_rq++;
			/* processes still being created are not counted further */
			if (p->p_stat == SIDL)
				return(0);
			break;

		default:
			/* any other LWP state: skip rest of this process */
			return (0);
		}

		/*
		 * Set while in vm_fault()
		 */
		if (lp->lwp_flag & LWP_PAGING)
			totalp->t_pw++;
	}
	return(0);
}
219
220/*
221 * No requirements.
222 */
223static int
224do_vmstats(SYSCTL_HANDLER_ARGS)
225{
226 struct vmstats vms = vmstats;
227 return (sysctl_handle_opaque(oidp, &vms, sizeof(vms), req));
228}
229
230/*
231 * No requirements.
232 */
233static int
234do_vmmeter(SYSCTL_HANDLER_ARGS)
235{
236 int boffset = offsetof(struct vmmeter, vmmeter_uint_begin);
237 int eoffset = offsetof(struct vmmeter, vmmeter_uint_end);
238 struct vmmeter vmm;
239 int i;
240
241 bzero(&vmm, sizeof(vmm));
242 for (i = 0; i < ncpus; ++i) {
243 int off;
244 struct globaldata *gd = globaldata_find(i);
245
246 for (off = boffset; off <= eoffset; off += sizeof(u_int)) {
247 *(u_int *)((char *)&vmm + off) +=
248 *(u_int *)((char *)&gd->gd_cnt + off);
249 }
250
251 }
252 vmm.v_intr += vmm.v_ipi + vmm.v_timer;
253 return (sysctl_handle_opaque(oidp, &vmm, sizeof(vmm), req));
254}
255
256/*
257 * vcnt() - accumulate statistics from the cnt structure for each cpu
258 *
259 * The vmmeter structure is now per-cpu as well as global. Those
260 * statistics which can be kept on a per-cpu basis (to avoid cache
261 * stalls between cpus) can be moved to the per-cpu vmmeter. Remaining
262 * statistics, such as v_free_reserved, are left in the global
263 * structure.
264 *
265 * (sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req)
266 *
267 * No requirements.
268 */
269static int
270vcnt(SYSCTL_HANDLER_ARGS)
271{
272 int i;
273 int count = 0;
274 int offset = arg2;
275
276 for (i = 0; i < ncpus; ++i) {
277 struct globaldata *gd = globaldata_find(i);
278 count += *(int *)((char *)&gd->gd_cnt + offset);
279 }
280 return(SYSCTL_OUT(req, &count, sizeof(int)));
281}
282
283/*
284 * No requirements.
285 */
286static int
287vcnt_intr(SYSCTL_HANDLER_ARGS)
288{
289 int i;
290 int count = 0;
291
292 for (i = 0; i < ncpus; ++i) {
293 struct globaldata *gd = globaldata_find(i);
294
295 count += gd->gd_cnt.v_intr + gd->gd_cnt.v_ipi +
296 gd->gd_cnt.v_timer;
297 }
298 return(SYSCTL_OUT(req, &count, sizeof(int)));
299}
300
/* Byte offset of a counter within struct vmmeter, passed as arg2 to vcnt() */
#define VMMETEROFF(var)	offsetof(struct vmmeter, var)

/* Aggregate opaque-structure exports */
SYSCTL_PROC(_vm, OID_AUTO, vmtotal, CTLTYPE_OPAQUE|CTLFLAG_RD,
    0, sizeof(struct vmtotal), do_vmtotal, "S,vmtotal",
    "System virtual memory aggregate");
SYSCTL_PROC(_vm, OID_AUTO, vmstats, CTLTYPE_OPAQUE|CTLFLAG_RD,
    0, sizeof(struct vmstats), do_vmstats, "S,vmstats",
    "System virtual memory statistics");
SYSCTL_PROC(_vm, OID_AUTO, vmmeter, CTLTYPE_OPAQUE|CTLFLAG_RD,
    0, sizeof(struct vmmeter), do_vmmeter, "S,vmmeter",
    "System statistics");

/* Container nodes for the individual counter sysctls below */
SYSCTL_NODE(_vm, OID_AUTO, stats, CTLFLAG_RW, 0, "VM meter stats");
SYSCTL_NODE(_vm_stats, OID_AUTO, sys, CTLFLAG_RW, 0, "VM meter sys stats");
SYSCTL_NODE(_vm_stats, OID_AUTO, vm, CTLFLAG_RW, 0, "VM meter vm stats");
SYSCTL_NODE(_vm_stats, OID_AUTO, misc, CTLFLAG_RW, 0, "VM meter misc stats");

/*
 * Per-cpu-summed counters, exported via vcnt() (or vcnt_intr() for the
 * combined interrupt total).  arg2 carries the field offset.
 */
SYSCTL_PROC(_vm_stats_sys, OID_AUTO, v_swtch, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_swtch), vcnt, "IU", "Context switches");
SYSCTL_PROC(_vm_stats_sys, OID_AUTO, v_intrans_coll, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_intrans_coll), vcnt, "IU", "Intransit map collisions (total)");
SYSCTL_PROC(_vm_stats_sys, OID_AUTO, v_intrans_wait, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_intrans_wait), vcnt, "IU", "Intransit map collisions which blocked");
SYSCTL_PROC(_vm_stats_sys, OID_AUTO, v_forwarded_ints, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_forwarded_ints), vcnt, "IU", "Forwarded interrupts due to MP lock");
SYSCTL_PROC(_vm_stats_sys, OID_AUTO, v_forwarded_hits, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_forwarded_hits), vcnt, "IU", "Forwarded hits due to MP lock");
SYSCTL_PROC(_vm_stats_sys, OID_AUTO, v_forwarded_misses, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_forwarded_misses), vcnt, "IU", "Forwarded misses due to MP lock");
SYSCTL_PROC(_vm_stats_sys, OID_AUTO, v_trap, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_trap), vcnt, "IU", "Traps");
SYSCTL_PROC(_vm_stats_sys, OID_AUTO, v_syscall, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_syscall), vcnt, "IU", "Syscalls");
SYSCTL_PROC(_vm_stats_sys, OID_AUTO, v_intr, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_intr), vcnt_intr, "IU", "Hardware interrupts");
SYSCTL_PROC(_vm_stats_sys, OID_AUTO, v_ipi, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_ipi), vcnt, "IU", "Inter-processor interrupts");
SYSCTL_PROC(_vm_stats_sys, OID_AUTO, v_timer, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_timer), vcnt, "IU", "LAPIC timer interrupts");
SYSCTL_PROC(_vm_stats_sys, OID_AUTO, v_soft, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_soft), vcnt, "IU", "Software interrupts");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_vm_faults, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_vm_faults), vcnt, "IU", "VM faults");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_cow_faults, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_cow_faults), vcnt, "IU", "COW faults");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_cow_optim, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_cow_optim), vcnt, "IU", "Optimized COW faults");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_zfod, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_zfod), vcnt, "IU", "Zero fill");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_ozfod, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_ozfod), vcnt, "IU", "Optimized zero fill");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_swapin, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_swapin), vcnt, "IU", "Swapin operations");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_swapout, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_swapout), vcnt, "IU", "Swapout operations");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_swappgsin, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_swappgsin), vcnt, "IU", "Swapin pages");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_swappgsout, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_swappgsout), vcnt, "IU", "Swapout pages");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_vnodein, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_vnodein), vcnt, "IU", "Vnodein operations");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_vnodeout, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_vnodeout), vcnt, "IU", "Vnodeout operations");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_vnodepgsin, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_vnodepgsin), vcnt, "IU", "Vnodein pages");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_vnodepgsout, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_vnodepgsout), vcnt, "IU", "Vnodeout pages");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_intrans, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_intrans), vcnt, "IU", "In transit page blocking");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_reactivated, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_reactivated), vcnt, "IU", "Reactivated pages");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_pdwakeups, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_pdwakeups), vcnt, "IU", "Pagedaemon wakeups");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_ppwakeups, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_ppwakeups), vcnt, "IU", "vm_wait wakeups");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_pdpages, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_pdpages), vcnt, "IU", "Pagedaemon page scans");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_dfree, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_dfree), vcnt, "IU", "Pages freed by daemon");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_pfree, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_pfree), vcnt, "IU", "Pages freed by exiting processes");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_tfree, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_tfree), vcnt, "IU", "Total pages freed");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_forks, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_forks), vcnt, "IU", "Number of fork() calls");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_vforks, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_vforks), vcnt, "IU", "Number of vfork() calls");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_rforks, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_rforks), vcnt, "IU", "Number of rfork() calls");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_kthreads, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_kthreads), vcnt, "IU", "Number of fork() calls by kernel");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_forkpages, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_forkpages), vcnt, "IU", "VM pages affected by fork()");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_vforkpages, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_vforkpages), vcnt, "IU", "VM pages affected by vfork()");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_rforkpages, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_rforkpages), vcnt, "IU", "VM pages affected by rfork()");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_kthreadpages, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_kthreadpages), vcnt, "IU", "VM pages affected by fork() by kernel");

/* Global (non-per-cpu) values exported read-only straight from vmstats */
SYSCTL_UINT(_vm_stats_vm, OID_AUTO,
	v_page_size, CTLFLAG_RD, &vmstats.v_page_size, 0,
	"Page size in bytes");
SYSCTL_UINT(_vm_stats_vm, OID_AUTO,
	v_page_count, CTLFLAG_RD, &vmstats.v_page_count, 0,
	"Total number of pages in system");
SYSCTL_UINT(_vm_stats_vm, OID_AUTO,
	v_free_reserved, CTLFLAG_RD, &vmstats.v_free_reserved, 0,
	"Number of pages reserved for deadlock");
SYSCTL_UINT(_vm_stats_vm, OID_AUTO,
	v_free_target, CTLFLAG_RD, &vmstats.v_free_target, 0,
	"Number of pages desired free");
SYSCTL_UINT(_vm_stats_vm, OID_AUTO,
	v_free_min, CTLFLAG_RD, &vmstats.v_free_min, 0,
	"Minimum number of pages desired free");
SYSCTL_UINT(_vm_stats_vm, OID_AUTO,
	v_free_count, CTLFLAG_RD, &vmstats.v_free_count, 0,
	"Number of pages free");
SYSCTL_UINT(_vm_stats_vm, OID_AUTO,
	v_wire_count, CTLFLAG_RD, &vmstats.v_wire_count, 0,
	"Number of pages wired down");
SYSCTL_UINT(_vm_stats_vm, OID_AUTO,
	v_active_count, CTLFLAG_RD, &vmstats.v_active_count, 0,
	"Number of pages active");
SYSCTL_UINT(_vm_stats_vm, OID_AUTO,
	v_inactive_target, CTLFLAG_RD, &vmstats.v_inactive_target, 0,
	"Number of pages desired inactive");
SYSCTL_UINT(_vm_stats_vm, OID_AUTO,
	v_inactive_count, CTLFLAG_RD, &vmstats.v_inactive_count, 0,
	"Number of pages inactive");
SYSCTL_UINT(_vm_stats_vm, OID_AUTO,
	v_cache_count, CTLFLAG_RD, &vmstats.v_cache_count, 0,
	"Number of pages on buffer cache queue");
SYSCTL_UINT(_vm_stats_vm, OID_AUTO,
	v_cache_min, CTLFLAG_RD, &vmstats.v_cache_min, 0,
	"Min number of pages desired on cache queue");
SYSCTL_UINT(_vm_stats_vm, OID_AUTO,
	v_cache_max, CTLFLAG_RD, &vmstats.v_cache_max, 0,
	"Max number of pages in cached obj");
SYSCTL_UINT(_vm_stats_vm, OID_AUTO,
	v_pageout_free_min, CTLFLAG_RD, &vmstats.v_pageout_free_min, 0,
	"Min number pages reserved for kernel");
SYSCTL_UINT(_vm_stats_vm, OID_AUTO,
	v_interrupt_free_min, CTLFLAG_RD, &vmstats.v_interrupt_free_min, 0,
	"Reserved number of pages for int code");
SYSCTL_INT(_vm_stats_misc, OID_AUTO,
	zero_page_count, CTLFLAG_RD, &vm_page_zero_count, 0,
	"Number of zeroing pages");
448
449/*
450 * No requirements.
451 */
452static int
453do_vmmeter_pcpu(SYSCTL_HANDLER_ARGS)
454{
455 int boffset = offsetof(struct vmmeter, vmmeter_uint_begin);
456 int eoffset = offsetof(struct vmmeter, vmmeter_uint_end);
457 struct globaldata *gd = arg1;
458 struct vmmeter vmm;
459 int off;
460
461 bzero(&vmm, sizeof(vmm));
462 for (off = boffset; off <= eoffset; off += sizeof(u_int)) {
463 *(u_int *)((char *)&vmm + off) +=
464 *(u_int *)((char *)&gd->gd_cnt + off);
465 }
466 vmm.v_intr += vmm.v_ipi + vmm.v_timer;
467 return (sysctl_handle_opaque(oidp, &vmm, sizeof(vmm), req));
468}
469
470/*
471 * Called from the low level boot code only.
472 */
473static void
474vmmeter_init(void *dummy __unused)
475{
476 int i;
477
478 for (i = 0; i < ncpus; ++i) {
479 struct sysctl_ctx_list *ctx;
480 struct sysctl_oid *oid;
481 struct globaldata *gd;
482 char name[32];
483
484 ksnprintf(name, sizeof(name), "cpu%d", i);
485
486 ctx = kmalloc(sizeof(*ctx), M_TEMP, M_WAITOK);
487 sysctl_ctx_init(ctx);
488 oid = SYSCTL_ADD_NODE(ctx, SYSCTL_STATIC_CHILDREN(_vm),
489 OID_AUTO, name, CTLFLAG_RD, 0, "");
490
491 gd = globaldata_find(i);
492 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
493 "vmmeter", CTLTYPE_OPAQUE|CTLFLAG_RD,
494 gd, sizeof(struct vmmeter), do_vmmeter_pcpu,
495 "S,vmmeter", "System per-cpu statistics");
496 }
497}
498SYSINIT(vmmeter, SI_SUB_PSEUDO, SI_ORDER_ANY, vmmeter_init, 0);