Commit | Line | Data |
---|---|---|
984263bc | 1 | /* |
99ad9bc4 MD |
2 | * (MPSAFE) |
3 | * | |
984263bc MD |
4 | * Copyright (c) 1982, 1986, 1989, 1993 |
5 | * The Regents of the University of California. All rights reserved. | |
6 | * | |
7 | * Redistribution and use in source and binary forms, with or without | |
8 | * modification, are permitted provided that the following conditions | |
9 | * are met: | |
10 | * 1. Redistributions of source code must retain the above copyright | |
11 | * notice, this list of conditions and the following disclaimer. | |
12 | * 2. Redistributions in binary form must reproduce the above copyright | |
13 | * notice, this list of conditions and the following disclaimer in the | |
14 | * documentation and/or other materials provided with the distribution. | |
dc71b7ab | 15 | * 3. Neither the name of the University nor the names of its contributors |
984263bc MD |
16 | * may be used to endorse or promote products derived from this software |
17 | * without specific prior written permission. | |
18 | * | |
19 | * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND | |
20 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | |
21 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | |
22 | * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE | |
23 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | |
24 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | |
25 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | |
26 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT | |
27 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY | |
28 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | |
29 | * SUCH DAMAGE. | |
30 | * | |
31 | * @(#)vm_meter.c 8.4 (Berkeley) 1/4/94 | |
32 | * $FreeBSD: src/sys/vm/vm_meter.c,v 1.34.2.7 2002/10/10 19:28:22 dillon Exp $ | |
33 | */ | |
34 | ||
35 | #include <sys/param.h> | |
36 | #include <sys/proc.h> | |
37 | #include <sys/systm.h> | |
38 | #include <sys/kernel.h> | |
e2164e29 | 39 | #include <sys/malloc.h> |
984263bc MD |
40 | #include <sys/resource.h> |
41 | #include <sys/vmmeter.h> | |
77bc82e1 | 42 | #include <sys/kcollect.h> |
984263bc MD |
43 | |
44 | #include <vm/vm.h> | |
45 | #include <vm/vm_page.h> | |
46 | #include <vm/vm_extern.h> | |
47 | #include <vm/vm_param.h> | |
48 | #include <sys/lock.h> | |
49 | #include <vm/pmap.h> | |
50 | #include <vm/vm_map.h> | |
51 | #include <vm/vm_object.h> | |
52 | #include <sys/sysctl.h> | |
53 | ||
/*
 * WARNING: vmstats represents the final say, but individual cpu's may
 *	    accumulate adjustments in gd->gd_vmstats_adj.  These are
 *	    synchronized to the global vmstats in hardclock.
 *
 *	    In addition, most individual cpus check vmstats using a local
 *	    copy of the global vmstats in gd->gd_vmstats.  Hardclock also
 *	    synchronizes the copy.  The pageout code and vm_page_alloc will
 *	    also synchronize their local copies as necessary.
 *
 *	    Other consumers should not expect perfect values.
 */
/* Global VM statistics; given its own cache line to limit false sharing */
__exclusive_cache_line struct vmstats vmstats;

/* Sleep-time threshold used to classify long-sleeping lwps (see MAXSLP) */
static int maxslp = MAXSLP;

/*
 * Paging thresholds exported (read/write) under the vm.* sysctl tree.
 * These point directly into the global vmstats structure.
 */
SYSCTL_ULONG(_vm, VM_V_FREE_MIN, v_free_min,
	CTLFLAG_RW, &vmstats.v_free_min, 0,
	"Minimum number of pages desired free");
SYSCTL_ULONG(_vm, VM_V_PAGING_WAIT, v_paging_wait,
	CTLFLAG_RW, &vmstats.v_paging_wait, 0,
	"Userland slows down allocations");
SYSCTL_ULONG(_vm, VM_V_PAGING_START, v_paging_start,
	CTLFLAG_RW, &vmstats.v_paging_start, 0,
	"Pageout daemon begins running");
SYSCTL_ULONG(_vm, VM_V_PAGING_TARGET1, v_paging_target1,
	CTLFLAG_RW, &vmstats.v_paging_target1, 0,
	"Mid pageout daemon target");
SYSCTL_ULONG(_vm, VM_V_PAGING_TARGET2, v_paging_target2,
	CTLFLAG_RW, &vmstats.v_paging_target2, 0,
	"Final pageout daemon target");
SYSCTL_ULONG(_vm, VM_V_FREE_RESERVED, v_free_reserved,
	CTLFLAG_RW, &vmstats.v_free_reserved, 0,
	"Number of pages reserved for deadlock");
SYSCTL_ULONG(_vm, VM_V_INACTIVE_TARGET, v_inactive_target,
	CTLFLAG_RW, &vmstats.v_inactive_target, 0,
	"Maximum inactive pages during pageout");
SYSCTL_ULONG(_vm, VM_V_PAGEOUT_FREE_MIN, v_pageout_free_min,
	CTLFLAG_RW, &vmstats.v_pageout_free_min, 0,
	"Min number pages reserved for kernel");
SYSCTL_ULONG(_vm, OID_AUTO, v_free_severe,
	CTLFLAG_RW, &vmstats.v_free_severe, 0, "");

/* Load average history, exported read-only */
SYSCTL_STRUCT(_vm, VM_LOADAVG, loadavg,
	CTLFLAG_RD | CTLFLAG_NOLOCK,
	&averunnable, loadavg, "Machine loadaverage history");

static int do_vmtotal_callback(struct proc *p, void *data);
99ad9bc4 MD |
103 | /* |
104 | * No requirements. | |
105 | */ | |
984263bc | 106 | static int |
6bdb32ed | 107 | do_vmtotal(SYSCTL_HANDLER_ARGS) |
984263bc | 108 | { |
8fa76237 | 109 | struct vmtotal total; |
6d538b47 MD |
110 | globaldata_t gd; |
111 | int n; | |
984263bc | 112 | |
2de4f77e | 113 | bzero(&total, sizeof(total)); |
6d538b47 MD |
114 | for (n = 0; n < ncpus; ++n) { |
115 | gd = globaldata_find(n); | |
116 | ||
117 | /* total.t_rq calculated separately */ | |
118 | /* total.t_dw calculated separately */ | |
119 | /* total.t_pw calculated separately */ | |
120 | /* total.t_sl calculated separately */ | |
121 | /* total.t_sw calculated separately */ | |
122 | total.t_vm += gd->gd_vmtotal.t_vm; | |
123 | total.t_avm += gd->gd_vmtotal.t_avm; | |
124 | total.t_rm += gd->gd_vmtotal.t_rm; | |
125 | total.t_arm += gd->gd_vmtotal.t_arm; | |
126 | total.t_vmshr += gd->gd_vmtotal.t_vmshr; | |
127 | total.t_avmshr += gd->gd_vmtotal.t_avmshr; | |
128 | total.t_rmshr += gd->gd_vmtotal.t_rmshr; | |
129 | total.t_armshr += gd->gd_vmtotal.t_armshr; | |
130 | /* total.t_free calculated separately */ | |
8fa76237 MD |
131 | } |
132 | ||
984263bc MD |
133 | /* |
134 | * Calculate process statistics. | |
135 | */ | |
586c4308 | 136 | allproc_scan(do_vmtotal_callback, &total, 0); |
984263bc | 137 | |
984263bc | 138 | /* |
6d538b47 MD |
139 | * Adjust for sysctl return. Add real memory into virtual memory. |
140 | * Set t_free. | |
141 | * | |
142 | * t_rm - Real memory | |
143 | * t_vm - Virtual memory (real + swap) | |
984263bc | 144 | */ |
6d538b47 MD |
145 | total.t_vm += total.t_rm; |
146 | total.t_free = vmstats.v_free_count + vmstats.v_cache_count; | |
2de4f77e | 147 | |
6d538b47 | 148 | return (sysctl_handle_opaque(oidp, &total, sizeof(total), req)); |
984263bc MD |
149 | } |
150 | ||
8fa76237 MD |
151 | static int |
152 | do_vmtotal_callback(struct proc *p, void *data) | |
153 | { | |
154 | struct vmtotal *totalp = data; | |
08f2f1bb | 155 | struct lwp *lp; |
8fa76237 | 156 | |
4643740a | 157 | if (p->p_flags & P_SYSTEM) |
8fa76237 MD |
158 | return(0); |
159 | ||
39b9b6cd | 160 | lwkt_gettoken_shared(&p->p_token); |
a8d3ab53 | 161 | |
c7e98b2f SS |
162 | FOREACH_LWP_IN_PROC(lp, p) { |
163 | switch (lp->lwp_stat) { | |
164 | case LSSTOP: | |
165 | case LSSLEEP: | |
ef866ef7 MD |
166 | if ((lp->lwp_flags & LWP_SINTR) == 0) |
167 | totalp->t_dw++; | |
168 | else if (lp->lwp_slptime < maxslp) | |
169 | totalp->t_sl++; | |
c7e98b2f | 170 | if (lp->lwp_slptime >= maxslp) |
a8d3ab53 | 171 | goto out; |
c7e98b2f | 172 | break; |
c7e98b2f | 173 | case LSRUN: |
ef866ef7 | 174 | totalp->t_rq++; |
c7e98b2f | 175 | if (p->p_stat == SIDL) |
a8d3ab53 | 176 | goto out; |
c7e98b2f | 177 | break; |
c7e98b2f | 178 | default: |
a8d3ab53 | 179 | goto out; |
8fa76237 | 180 | } |
99ad9bc4 | 181 | |
54341a3b MD |
182 | /* |
183 | * Set while in vm_fault() | |
184 | */ | |
4643740a | 185 | if (lp->lwp_flags & LWP_PAGING) |
54341a3b | 186 | totalp->t_pw++; |
8fa76237 | 187 | } |
a8d3ab53 MD |
188 | out: |
189 | lwkt_reltoken(&p->p_token); | |
8fa76237 MD |
190 | return(0); |
191 | } | |
192 | ||
99ad9bc4 MD |
193 | /* |
194 | * No requirements. | |
195 | */ | |
6bdb32ed MD |
196 | static int |
197 | do_vmstats(SYSCTL_HANDLER_ARGS) | |
198 | { | |
199 | struct vmstats vms = vmstats; | |
200 | return (sysctl_handle_opaque(oidp, &vms, sizeof(vms), req)); | |
201 | } | |
202 | ||
99ad9bc4 MD |
203 | /* |
204 | * No requirements. | |
205 | */ | |
6bdb32ed MD |
206 | static int |
207 | do_vmmeter(SYSCTL_HANDLER_ARGS) | |
208 | { | |
209 | int boffset = offsetof(struct vmmeter, vmmeter_uint_begin); | |
210 | int eoffset = offsetof(struct vmmeter, vmmeter_uint_end); | |
211 | struct vmmeter vmm; | |
212 | int i; | |
213 | ||
214 | bzero(&vmm, sizeof(vmm)); | |
215 | for (i = 0; i < ncpus; ++i) { | |
216 | int off; | |
217 | struct globaldata *gd = globaldata_find(i); | |
218 | ||
219 | for (off = boffset; off <= eoffset; off += sizeof(u_int)) { | |
220 | *(u_int *)((char *)&vmm + off) += | |
221 | *(u_int *)((char *)&gd->gd_cnt + off); | |
222 | } | |
bce6845a | 223 | |
6bdb32ed | 224 | } |
b785701b | 225 | vmm.v_intr += vmm.v_ipi + vmm.v_timer; |
6bdb32ed MD |
226 | return (sysctl_handle_opaque(oidp, &vmm, sizeof(vmm), req)); |
227 | } | |
228 | ||
12e4aaff MD |
229 | /* |
230 | * vcnt() - accumulate statistics from the cnt structure for each cpu | |
231 | * | |
232 | * The vmmeter structure is now per-cpu as well as global. Those | |
233 | * statistics which can be kept on a per-cpu basis (to avoid cache | |
234 | * stalls between cpus) can be moved to the per-cpu vmmeter. Remaining | |
235 | * statistics, such as v_free_reserved, are left in the global | |
236 | * structure. | |
237 | * | |
238 | * (sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req) | |
99ad9bc4 MD |
239 | * |
240 | * No requirements. | |
12e4aaff MD |
241 | */ |
242 | static int | |
243 | vcnt(SYSCTL_HANDLER_ARGS) | |
244 | { | |
245 | int i; | |
246 | int count = 0; | |
6bdb32ed | 247 | int offset = arg2; |
12e4aaff MD |
248 | |
249 | for (i = 0; i < ncpus; ++i) { | |
250 | struct globaldata *gd = globaldata_find(i); | |
251 | count += *(int *)((char *)&gd->gd_cnt + offset); | |
252 | } | |
253 | return(SYSCTL_OUT(req, &count, sizeof(int))); | |
254 | } | |
255 | ||
99ad9bc4 MD |
256 | /* |
257 | * No requirements. | |
258 | */ | |
b785701b SZ |
259 | static int |
260 | vcnt_intr(SYSCTL_HANDLER_ARGS) | |
261 | { | |
262 | int i; | |
263 | int count = 0; | |
264 | ||
265 | for (i = 0; i < ncpus; ++i) { | |
266 | struct globaldata *gd = globaldata_find(i); | |
267 | ||
268 | count += gd->gd_cnt.v_intr + gd->gd_cnt.v_ipi + | |
269 | gd->gd_cnt.v_timer; | |
270 | } | |
271 | return(SYSCTL_OUT(req, &count, sizeof(int))); | |
272 | } | |
273 | ||
/* Byte offset of a counter in struct vmmeter, passed to vcnt() via arg2 */
#define VMMETEROFF(var)	offsetof(struct vmmeter, var)

/* Aggregate structures exported as opaque sysctl values */
SYSCTL_PROC(_vm, OID_AUTO, vmtotal, CTLTYPE_OPAQUE|CTLFLAG_RD,
	0, sizeof(struct vmtotal), do_vmtotal, "S,vmtotal",
	"System virtual memory aggregate");
SYSCTL_PROC(_vm, OID_AUTO, vmstats, CTLTYPE_OPAQUE|CTLFLAG_RD,
	0, sizeof(struct vmstats), do_vmstats, "S,vmstats",
	"System virtual memory statistics");
SYSCTL_PROC(_vm, OID_AUTO, vmmeter, CTLTYPE_OPAQUE|CTLFLAG_RD,
	0, sizeof(struct vmmeter), do_vmmeter, "S,vmmeter",
	"System statistics");

/* Sysctl subtrees for the individual counters below */
SYSCTL_NODE(_vm, OID_AUTO, stats, CTLFLAG_RW, 0, "VM meter stats");
SYSCTL_NODE(_vm_stats, OID_AUTO, sys, CTLFLAG_RW, 0, "VM meter sys stats");
SYSCTL_NODE(_vm_stats, OID_AUTO, vm, CTLFLAG_RW, 0, "VM meter vm stats");
SYSCTL_NODE(_vm_stats, OID_AUTO, misc, CTLFLAG_RW, 0, "VM meter misc stats");
/*
 * Individual per-cpu counters, each aggregated on read by vcnt() (or
 * vcnt_intr() for v_intr) using the VMMETEROFF() byte offset in arg2.
 */
SYSCTL_PROC(_vm_stats_sys, OID_AUTO, v_swtch, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_swtch), vcnt, "IU", "Context switches");
SYSCTL_PROC(_vm_stats_sys, OID_AUTO, v_intrans_coll, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_intrans_coll), vcnt, "IU", "Intransit map collisions (total)");
SYSCTL_PROC(_vm_stats_sys, OID_AUTO, v_intrans_wait, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_intrans_wait), vcnt, "IU", "Intransit map collisions which blocked");
SYSCTL_PROC(_vm_stats_sys, OID_AUTO, v_forwarded_ints, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_forwarded_ints), vcnt, "IU", "Forwarded interrupts due to MP lock");
SYSCTL_PROC(_vm_stats_sys, OID_AUTO, v_forwarded_hits, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_forwarded_hits), vcnt, "IU", "Forwarded hits due to MP lock");
SYSCTL_PROC(_vm_stats_sys, OID_AUTO, v_forwarded_misses, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_forwarded_misses), vcnt, "IU", "Forwarded misses due to MP lock");
SYSCTL_PROC(_vm_stats_sys, OID_AUTO, v_trap, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_trap), vcnt, "IU", "Traps");
SYSCTL_PROC(_vm_stats_sys, OID_AUTO, v_syscall, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_syscall), vcnt, "IU", "Syscalls");
/* v_intr uses vcnt_intr, which also folds in IPIs and timer interrupts */
SYSCTL_PROC(_vm_stats_sys, OID_AUTO, v_intr, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_intr), vcnt_intr, "IU", "Hardware interrupts");
SYSCTL_PROC(_vm_stats_sys, OID_AUTO, v_ipi, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_ipi), vcnt, "IU", "Inter-processor interrupts");
SYSCTL_PROC(_vm_stats_sys, OID_AUTO, v_timer, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_timer), vcnt, "IU", "LAPIC timer interrupts");
SYSCTL_PROC(_vm_stats_sys, OID_AUTO, v_soft, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_soft), vcnt, "IU", "Software interrupts");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_vm_faults, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_vm_faults), vcnt, "IU", "VM faults");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_cow_faults, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_cow_faults), vcnt, "IU", "COW faults");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_cow_optim, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_cow_optim), vcnt, "IU", "Optimized COW faults");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_zfod, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_zfod), vcnt, "IU", "Zero fill");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_ozfod, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_ozfod), vcnt, "IU", "Optimized zero fill");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_swapin, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_swapin), vcnt, "IU", "Swapin operations");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_swapout, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_swapout), vcnt, "IU", "Swapout operations");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_swappgsin, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_swappgsin), vcnt, "IU", "Swapin pages");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_swappgsout, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_swappgsout), vcnt, "IU", "Swapout pages");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_vnodein, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_vnodein), vcnt, "IU", "Vnodein operations");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_vnodeout, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_vnodeout), vcnt, "IU", "Vnodeout operations");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_vnodepgsin, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_vnodepgsin), vcnt, "IU", "Vnodein pages");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_vnodepgsout, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_vnodepgsout), vcnt, "IU", "Vnodeout pages");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_intrans, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_intrans), vcnt, "IU", "In transit page blocking");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_reactivated, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_reactivated), vcnt, "IU", "Reactivated pages");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_pdwakeups, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_pdwakeups), vcnt, "IU", "Pagedaemon wakeups");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_ppwakeups, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_ppwakeups), vcnt, "IU", "vm_wait wakeups");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_pdpages, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_pdpages), vcnt, "IU", "Pagedaemon page scans");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_dfree, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_dfree), vcnt, "IU", "Pages freed by daemon");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_pfree, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_pfree), vcnt, "IU", "Pages freed by exiting processes");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_tfree, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_tfree), vcnt, "IU", "Total pages freed");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_forks, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_forks), vcnt, "IU", "Number of fork() calls");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_vforks, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_vforks), vcnt, "IU", "Number of vfork() calls");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_rforks, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_rforks), vcnt, "IU", "Number of rfork() calls");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_kthreads, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_kthreads), vcnt, "IU", "Number of fork() calls by kernel");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_forkpages, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_forkpages), vcnt, "IU", "VM pages affected by fork()");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_vforkpages, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_vforkpages), vcnt, "IU", "VM pages affected by vfork()");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_rforkpages, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_rforkpages), vcnt, "IU", "VM pages affected by rfork()");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_kthreadpages, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_kthreadpages), vcnt, "IU", "VM pages affected by fork() by kernel");
12e4aaff | 372 | |
/*
 * Direct read-only (and a few read-write) exports of global vmstats
 * fields.  These reflect the rolled-up global values, not per-cpu
 * adjustments (see vmstats_rollup()).
 */
SYSCTL_UINT(_vm_stats_vm, OID_AUTO,
	v_page_size, CTLFLAG_RD, &vmstats.v_page_size, 0,
	"Page size in bytes");
SYSCTL_ULONG(_vm_stats_vm, OID_AUTO,
	v_page_count, CTLFLAG_RD, &vmstats.v_page_count, 0,
	"Total number of pages in system");
SYSCTL_ULONG(_vm_stats_vm, OID_AUTO,
	v_free_reserved, CTLFLAG_RD, &vmstats.v_free_reserved, 0,
	"Number of pages reserved for deadlock");
SYSCTL_ULONG(_vm_stats_vm, OID_AUTO,
	v_free_min, CTLFLAG_RD, &vmstats.v_free_min, 0,
	"Minimum number of pages desired free");
SYSCTL_ULONG(_vm_stats_vm, OID_AUTO,
	v_paging_wait, CTLFLAG_RW, &vmstats.v_paging_wait, 0,
	"Userland slows down allocations");
SYSCTL_ULONG(_vm_stats_vm, OID_AUTO,
	v_paging_start, CTLFLAG_RW, &vmstats.v_paging_start, 0,
	"Pageout daemon begins running");
SYSCTL_ULONG(_vm_stats_vm, OID_AUTO,
	v_paging_target1, CTLFLAG_RW, &vmstats.v_paging_target1, 0,
	"Mid pageout daemon target");
SYSCTL_ULONG(_vm_stats_vm, OID_AUTO,
	v_paging_target2, CTLFLAG_RW, &vmstats.v_paging_target2, 0,
	"Final pageout daemon target");
SYSCTL_ULONG(_vm_stats_vm, OID_AUTO,
	v_free_count, CTLFLAG_RD, &vmstats.v_free_count, 0,
	"Number of pages free");
SYSCTL_ULONG(_vm_stats_vm, OID_AUTO,
	v_wire_count, CTLFLAG_RD, &vmstats.v_wire_count, 0,
	"Number of pages wired down");
SYSCTL_ULONG(_vm_stats_vm, OID_AUTO,
	v_active_count, CTLFLAG_RD, &vmstats.v_active_count, 0,
	"Number of pages active");
SYSCTL_ULONG(_vm_stats_vm, OID_AUTO,
	v_inactive_target, CTLFLAG_RD, &vmstats.v_inactive_target, 0,
	"Maximum inactive pages during pageout");
SYSCTL_ULONG(_vm_stats_vm, OID_AUTO,
	v_inactive_count, CTLFLAG_RD, &vmstats.v_inactive_count, 0,
	"Number of pages inactive");
SYSCTL_ULONG(_vm_stats_vm, OID_AUTO,
	v_cache_count, CTLFLAG_RD, &vmstats.v_cache_count, 0,
	"Number of pages on buffer cache queue");
SYSCTL_ULONG(_vm_stats_vm, OID_AUTO,
	v_pageout_free_min, CTLFLAG_RD, &vmstats.v_pageout_free_min, 0,
	"Min number pages reserved for kernel");
SYSCTL_ULONG(_vm_stats_vm, OID_AUTO,
	v_interrupt_free_min, CTLFLAG_RD, &vmstats.v_interrupt_free_min, 0,
	"Reserved number of pages for int code");
015ffe3f | 421 | |
/*
 * Callback for per-cpu vmmeter structure.  Unlike the global structure,
 * we don't have to aggregate anything -- arg1 points at the target
 * cpu's globaldata and its gd_cnt is snapshotted directly.
 *
 * No requirements.
 */
static int
do_vmmeter_pcpu(SYSCTL_HANDLER_ARGS)
{
	struct globaldata *gd = arg1;
	struct vmmeter vmm;

	vmm = gd->gd_cnt;
	/* report IPIs and LAPIC timer interrupts as part of v_intr */
	vmm.v_intr += vmm.v_ipi + vmm.v_timer;
	return (sysctl_handle_opaque(oidp, &vmm, sizeof(vmm), req));
}
438 | ||
77bc82e1 MD |
439 | /* |
440 | * Callback for long-term slow data collection on 10-second interval. | |
441 | * | |
442 | * Return faults, set data for other entries. | |
443 | */ | |
fef9ed17 MD |
444 | #define PTOB(value) ((uint64_t)(value) << PAGE_SHIFT) |
445 | ||
77bc82e1 MD |
446 | static uint64_t |
447 | collect_vmstats_callback(int n) | |
448 | { | |
449 | static struct vmmeter last_vmm; | |
450 | struct vmmeter cur_vmm; | |
451 | const int boffset = offsetof(struct vmmeter, vmmeter_uint_begin); | |
452 | const int eoffset = offsetof(struct vmmeter, vmmeter_uint_end); | |
453 | uint64_t total; | |
454 | ||
455 | /* | |
456 | * The hardclock already rolls up vmstats for us. | |
457 | */ | |
fef9ed17 MD |
458 | kcollect_setvalue(KCOLLECT_MEMFRE, PTOB(vmstats.v_free_count)); |
459 | kcollect_setvalue(KCOLLECT_MEMCAC, PTOB(vmstats.v_cache_count)); | |
460 | kcollect_setvalue(KCOLLECT_MEMINA, PTOB(vmstats.v_inactive_count)); | |
461 | kcollect_setvalue(KCOLLECT_MEMACT, PTOB(vmstats.v_active_count)); | |
462 | kcollect_setvalue(KCOLLECT_MEMWIR, PTOB(vmstats.v_wire_count)); | |
77bc82e1 MD |
463 | |
464 | /* | |
465 | * Collect pcpu statistics for things like faults. | |
466 | */ | |
467 | bzero(&cur_vmm, sizeof(cur_vmm)); | |
468 | for (n = 0; n < ncpus; ++n) { | |
469 | struct globaldata *gd = globaldata_find(n); | |
470 | int off; | |
471 | ||
472 | for (off = boffset; off <= eoffset; off += sizeof(u_int)) { | |
473 | *(u_int *)((char *)&cur_vmm + off) += | |
474 | *(u_int *)((char *)&gd->gd_cnt + off); | |
475 | } | |
476 | ||
477 | } | |
478 | ||
479 | total = cur_vmm.v_cow_faults - last_vmm.v_cow_faults; | |
480 | last_vmm.v_cow_faults = cur_vmm.v_cow_faults; | |
481 | kcollect_setvalue(KCOLLECT_COWFAULT, total); | |
482 | ||
483 | total = cur_vmm.v_zfod - last_vmm.v_zfod; | |
484 | last_vmm.v_zfod = cur_vmm.v_zfod; | |
485 | kcollect_setvalue(KCOLLECT_ZFILL, total); | |
486 | ||
487 | total = cur_vmm.v_syscall - last_vmm.v_syscall; | |
488 | last_vmm.v_syscall = cur_vmm.v_syscall; | |
489 | kcollect_setvalue(KCOLLECT_SYSCALLS, total); | |
490 | ||
491 | total = cur_vmm.v_intr - last_vmm.v_intr; | |
492 | last_vmm.v_intr = cur_vmm.v_intr; | |
493 | kcollect_setvalue(KCOLLECT_INTR, total); | |
494 | ||
495 | total = cur_vmm.v_ipi - last_vmm.v_ipi; | |
496 | last_vmm.v_ipi = cur_vmm.v_ipi; | |
497 | kcollect_setvalue(KCOLLECT_IPI, total); | |
498 | ||
499 | total = cur_vmm.v_timer - last_vmm.v_timer; | |
500 | last_vmm.v_timer = cur_vmm.v_timer; | |
501 | kcollect_setvalue(KCOLLECT_TIMER, total); | |
502 | ||
503 | total = cur_vmm.v_vm_faults - last_vmm.v_vm_faults; | |
504 | last_vmm.v_vm_faults = cur_vmm.v_vm_faults; | |
505 | ||
506 | return total; | |
507 | } | |
508 | ||
/*
 * Called from the low level boot code only.  Creates a vm.cpuN.vmmeter
 * sysctl node for each cpu and registers the kcollect data sources for
 * long-term statistics gathering.
 */
static void
vmmeter_init(void *dummy __unused)
{
	int i;

	for (i = 0; i < ncpus; ++i) {
		struct sysctl_ctx_list *ctx;
		struct sysctl_oid *oid;
		struct globaldata *gd;
		char name[32];

		ksnprintf(name, sizeof(name), "cpu%d", i);

		/*
		 * The context is never freed; these sysctl nodes persist
		 * for the life of the system.
		 */
		ctx = kmalloc(sizeof(*ctx), M_TEMP, M_WAITOK);
		sysctl_ctx_init(ctx);
		oid = SYSCTL_ADD_NODE(ctx, SYSCTL_STATIC_CHILDREN(_vm),
				      OID_AUTO, name, CTLFLAG_RD, 0, "");

		gd = globaldata_find(i);
		SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
				"vmmeter", CTLTYPE_OPAQUE|CTLFLAG_RD,
				gd, sizeof(struct vmmeter), do_vmmeter_pcpu,
				"S,vmmeter", "System per-cpu statistics");
	}

	/*
	 * Only the "fault" entry supplies a callback; on each poll
	 * collect_vmstats_callback() fills in all the other entries
	 * via kcollect_setvalue().
	 */
	kcollect_register(KCOLLECT_VMFAULT, "fault", collect_vmstats_callback,
			  KCOLLECT_SCALE(KCOLLECT_VMFAULT_FORMAT, 0));
	kcollect_register(KCOLLECT_COWFAULT, "cow", NULL,
			  KCOLLECT_SCALE(KCOLLECT_COWFAULT_FORMAT, 0));
	kcollect_register(KCOLLECT_ZFILL, "zfill", NULL,
			  KCOLLECT_SCALE(KCOLLECT_ZFILL_FORMAT, 0));

	/* memory gauges are scaled against total physical memory */
	kcollect_register(KCOLLECT_MEMFRE, "free", NULL,
			  KCOLLECT_SCALE(KCOLLECT_MEMFRE_FORMAT,
					 PTOB(vmstats.v_page_count)));
	kcollect_register(KCOLLECT_MEMCAC, "cache", NULL,
			  KCOLLECT_SCALE(KCOLLECT_MEMCAC_FORMAT,
					 PTOB(vmstats.v_page_count)));
	kcollect_register(KCOLLECT_MEMINA, "inact", NULL,
			  KCOLLECT_SCALE(KCOLLECT_MEMINA_FORMAT,
					 PTOB(vmstats.v_page_count)));
	kcollect_register(KCOLLECT_MEMACT, "act", NULL,
			  KCOLLECT_SCALE(KCOLLECT_MEMACT_FORMAT,
					 PTOB(vmstats.v_page_count)));
	kcollect_register(KCOLLECT_MEMWIR, "wired", NULL,
			  KCOLLECT_SCALE(KCOLLECT_MEMWIR_FORMAT,
					 PTOB(vmstats.v_page_count)));

	kcollect_register(KCOLLECT_SYSCALLS, "syscalls", NULL,
			  KCOLLECT_SCALE(KCOLLECT_SYSCALLS_FORMAT, 0));

	kcollect_register(KCOLLECT_INTR, "intr", NULL,
			  KCOLLECT_SCALE(KCOLLECT_INTR_FORMAT, 0));
	kcollect_register(KCOLLECT_IPI, "ipi", NULL,
			  KCOLLECT_SCALE(KCOLLECT_IPI_FORMAT, 0));
	kcollect_register(KCOLLECT_TIMER, "timer", NULL,
			  KCOLLECT_SCALE(KCOLLECT_TIMER_FORMAT, 0));
}
SYSINIT(vmmeter, SI_SUB_PSEUDO, SI_ORDER_ANY, vmmeter_init, 0);
5ba14d44 MD |
570 | |
571 | /* | |
75979118 MD |
572 | * Rolls up accumulated pcpu adjustments to vmstats counts into the global |
573 | * structure, copy the global structure into our pcpu structure. Critical | |
574 | * path checks will use our pcpu structure. | |
5ba14d44 MD |
575 | * |
576 | * This is somewhat expensive and only called when needed, and by the | |
577 | * hardclock. | |
578 | */ | |
579 | void | |
580 | vmstats_rollup(void) | |
581 | { | |
582 | int cpu; | |
583 | ||
584 | for (cpu = 0; cpu < ncpus; ++cpu) { | |
585 | vmstats_rollup_cpu(globaldata_find(cpu)); | |
586 | } | |
75979118 | 587 | mycpu->gd_vmstats = vmstats; |
5ba14d44 MD |
588 | } |
589 | ||
590 | void | |
591 | vmstats_rollup_cpu(globaldata_t gd) | |
592 | { | |
b7ea2f3f | 593 | long value; |
5ba14d44 | 594 | |
75979118 | 595 | if (gd->gd_vmstats_adj.v_free_count) { |
b7ea2f3f MD |
596 | value = atomic_swap_long(&gd->gd_vmstats_adj.v_free_count, 0); |
597 | atomic_add_long(&vmstats.v_free_count, value); | |
5ba14d44 | 598 | } |
75979118 | 599 | if (gd->gd_vmstats_adj.v_cache_count) { |
b7ea2f3f MD |
600 | value = atomic_swap_long(&gd->gd_vmstats_adj.v_cache_count, 0); |
601 | atomic_add_long(&vmstats.v_cache_count, value); | |
5ba14d44 | 602 | } |
75979118 | 603 | if (gd->gd_vmstats_adj.v_inactive_count) { |
b7ea2f3f MD |
604 | value=atomic_swap_long(&gd->gd_vmstats_adj.v_inactive_count, 0); |
605 | atomic_add_long(&vmstats.v_inactive_count, value); | |
5ba14d44 | 606 | } |
75979118 | 607 | if (gd->gd_vmstats_adj.v_active_count) { |
b7ea2f3f MD |
608 | value = atomic_swap_long(&gd->gd_vmstats_adj.v_active_count, 0); |
609 | atomic_add_long(&vmstats.v_active_count, value); | |
5ba14d44 | 610 | } |
75979118 | 611 | if (gd->gd_vmstats_adj.v_wire_count) { |
b7ea2f3f MD |
612 | value = atomic_swap_long(&gd->gd_vmstats_adj.v_wire_count, 0); |
613 | atomic_add_long(&vmstats.v_wire_count, value); | |
5ba14d44 MD |
614 | } |
615 | } |