/*
 * (MPSAFE)
 *
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)vm_pageout.c	7.4 (Berkeley) 5/7/91
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *	Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
 *	School of Computer Science
 *	Carnegie Mellon University
 *	Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: src/sys/vm/vm_pageout.c,v 1.151.2.15 2002/12/29 18:21:04 dillon Exp $
 * $DragonFly: src/sys/vm/vm_pageout.c,v 1.36 2008/07/01 02:02:56 dillon Exp $
 */

/*
 * The proverbial page-out daemon.
 */

#include "opt_vm.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>

#include <sys/thread2.h>
#include <sys/mplock2.h>
#include <vm/vm_page2.h>

/*
 * System initialization
 */

/* the kernel process "vm_pageout"*/
static int vm_pageout_clean (vm_page_t);
static int vm_pageout_scan (int pass);
static int vm_pageout_free_page_calc (vm_size_t count);
struct thread *pagethread;

#if !defined(NO_SWAPPING)
/* the kernel process "vm_daemon"*/
static void vm_daemon (void);
static struct thread *vmthread;

static struct kproc_desc vm_kp = {
	"vmdaemon",
	vm_daemon,
	&vmthread
};
SYSINIT(vmdaemon, SI_SUB_KTHREAD_VM, SI_ORDER_FIRST, kproc_start, &vm_kp)
#endif


int vm_pages_needed=0;		/* Event on which pageout daemon sleeps */
int vm_pageout_deficit=0;	/* Estimated number of pages deficit */
int vm_pageout_pages_needed=0;	/* flag saying that the pageout daemon needs pages */

#if !defined(NO_SWAPPING)
static int vm_pageout_req_swapout;	/* XXX */
static int vm_daemon_needed;
#endif
static int vm_max_launder = 32;
static int vm_pageout_stats_max=0, vm_pageout_stats_interval = 0;
static int vm_pageout_full_stats_interval = 0;
static int vm_pageout_stats_free_max=0, vm_pageout_algorithm=0;
static int defer_swap_pageouts=0;
static int disable_swap_pageouts=0;

#if defined(NO_SWAPPING)
static int vm_swap_enabled=0;
static int vm_swap_idle_enabled=0;
#else
static int vm_swap_enabled=1;
static int vm_swap_idle_enabled=0;
#endif

SYSCTL_INT(_vm, VM_PAGEOUT_ALGORITHM, pageout_algorithm,
	CTLFLAG_RW, &vm_pageout_algorithm, 0, "LRU page mgmt");

SYSCTL_INT(_vm, OID_AUTO, max_launder,
	CTLFLAG_RW, &vm_max_launder, 0, "Limit dirty flushes in pageout");

SYSCTL_INT(_vm, OID_AUTO, pageout_stats_max,
	CTLFLAG_RW, &vm_pageout_stats_max, 0, "Max pageout stats scan length");

SYSCTL_INT(_vm, OID_AUTO, pageout_full_stats_interval,
	CTLFLAG_RW, &vm_pageout_full_stats_interval, 0, "Interval for full stats scan");

SYSCTL_INT(_vm, OID_AUTO, pageout_stats_interval,
	CTLFLAG_RW, &vm_pageout_stats_interval, 0, "Interval for partial stats scan");

SYSCTL_INT(_vm, OID_AUTO, pageout_stats_free_max,
	CTLFLAG_RW, &vm_pageout_stats_free_max, 0, "Not implemented");

#if defined(NO_SWAPPING)
SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
	CTLFLAG_RD, &vm_swap_enabled, 0, "");
SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
	CTLFLAG_RD, &vm_swap_idle_enabled, 0, "");
#else
SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
	CTLFLAG_RW, &vm_swap_enabled, 0, "Enable entire process swapout");
SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
	CTLFLAG_RW, &vm_swap_idle_enabled, 0, "Allow swapout on idle criteria");
#endif

SYSCTL_INT(_vm, OID_AUTO, defer_swapspace_pageouts,
	CTLFLAG_RW, &defer_swap_pageouts, 0, "Give preference to dirty pages in mem");

SYSCTL_INT(_vm, OID_AUTO, disable_swapspace_pageouts,
	CTLFLAG_RW, &disable_swap_pageouts, 0, "Disallow swapout of dirty pages");

static int pageout_lock_miss;
SYSCTL_INT(_vm, OID_AUTO, pageout_lock_miss,
	CTLFLAG_RD, &pageout_lock_miss, 0, "vget() lock misses during pageout");

int vm_load;
SYSCTL_INT(_vm, OID_AUTO, vm_load,
	CTLFLAG_RD, &vm_load, 0, "load on the VM system");
int vm_load_enable = 1;
SYSCTL_INT(_vm, OID_AUTO, vm_load_enable,
	CTLFLAG_RW, &vm_load_enable, 0, "enable vm_load rate limiting");
#ifdef INVARIANTS
int vm_load_debug;
SYSCTL_INT(_vm, OID_AUTO, vm_load_debug,
	CTLFLAG_RW, &vm_load_debug, 0, "debug vm_load");
#endif

#define VM_PAGEOUT_PAGE_COUNT 16
int vm_pageout_page_count = VM_PAGEOUT_PAGE_COUNT;

int vm_page_max_wired;		/* XXX max # of wired pages system-wide */

#if !defined(NO_SWAPPING)
typedef void freeer_fcn_t (vm_map_t, vm_object_t, vm_pindex_t, int);
static void vm_pageout_map_deactivate_pages (vm_map_t, vm_pindex_t);
static freeer_fcn_t vm_pageout_object_deactivate_pages;
static void vm_req_vmdaemon (void);
#endif
static void vm_pageout_page_stats(void);

/*
 * Update vm_load to slow down faulting processes.
 *
 * SMP races ok.
 * No requirements.
 */
void
vm_fault_ratecheck(void)
{
	if (vm_pages_needed) {
		if (vm_load < 1000)
			++vm_load;
	} else {
		if (vm_load > 0)
			--vm_load;
	}
}

/*
 * vm_pageout_clean:
 *
 * Clean the page and remove it from the laundry.  The page must not be
 * busy on-call.
 *
 * We set the busy bit to cause potential page faults on this page to
 * block.  Note the careful timing, however: the busy bit isn't set until
 * late and we cannot do anything that will mess with the page.
 *
 * The caller must hold vm_token.
 */
static int
vm_pageout_clean(vm_page_t m)
{
	vm_object_t object;
	vm_page_t mc[2*vm_pageout_page_count];
	int pageout_count;
	int ib, is, page_base;
	vm_pindex_t pindex = m->pindex;

	object = m->object;

	/*
	 * It doesn't cost us anything to pageout OBJT_DEFAULT or OBJT_SWAP
	 * with the new swapper, but we could have serious problems paging
	 * out other object types if there is insufficient memory.
	 *
	 * Unfortunately, checking free memory here is far too late, so the
	 * check has been moved up a procedural level.
	 */

	/*
	 * Don't mess with the page if it's busy, held, or special
	 */
	if ((m->hold_count != 0) ||
	    ((m->busy != 0) || (m->flags & (PG_BUSY|PG_UNMANAGED)))) {
		return 0;
	}

	mc[vm_pageout_page_count] = m;
	pageout_count = 1;
	page_base = vm_pageout_page_count;
	ib = 1;
	is = 1;

	/*
	 * Scan object for clusterable pages.
	 *
	 * We can cluster ONLY if: ->> the page is NOT
	 * clean, wired, busy, held, or mapped into a
	 * buffer, and one of the following:
	 * 1) The page is inactive, or a seldom used
	 *    active page.
	 * -or-
	 * 2) we force the issue.
	 *
	 * During heavy mmap/modification loads the pageout
	 * daemon can really fragment the underlying file
	 * due to flushing pages out of order and not trying to
	 * align the clusters (which leaves sporadic out-of-order
	 * holes).  To solve this problem we do the reverse scan
	 * first and attempt to align our cluster, then do a
	 * forward scan if room remains.
	 */

more:
	while (ib && pageout_count < vm_pageout_page_count) {
		vm_page_t p;

		if (ib > pindex) {
			ib = 0;
			break;
		}

		if ((p = vm_page_lookup(object, pindex - ib)) == NULL) {
			ib = 0;
			break;
		}
		if (((p->queue - p->pc) == PQ_CACHE) ||
		    (p->flags & (PG_BUSY|PG_UNMANAGED)) || p->busy) {
			ib = 0;
			break;
		}
		vm_page_test_dirty(p);
		if ((p->dirty & p->valid) == 0 ||
		    p->queue != PQ_INACTIVE ||
		    p->wire_count != 0 ||	/* may be held by buf cache */
		    p->hold_count != 0) {	/* may be undergoing I/O */
			ib = 0;
			break;
		}
		mc[--page_base] = p;
		++pageout_count;
		++ib;
		/*
		 * alignment boundary, stop here and switch directions.  Do
		 * not clear ib.
		 */
		if ((pindex - (ib - 1)) % vm_pageout_page_count == 0)
			break;
	}

	while (pageout_count < vm_pageout_page_count &&
	    pindex + is < object->size) {
		vm_page_t p;

		if ((p = vm_page_lookup(object, pindex + is)) == NULL)
			break;
		if (((p->queue - p->pc) == PQ_CACHE) ||
		    (p->flags & (PG_BUSY|PG_UNMANAGED)) || p->busy) {
			break;
		}
		vm_page_test_dirty(p);
		if ((p->dirty & p->valid) == 0 ||
		    p->queue != PQ_INACTIVE ||
		    p->wire_count != 0 ||	/* may be held by buf cache */
		    p->hold_count != 0) {	/* may be undergoing I/O */
			break;
		}
		mc[page_base + pageout_count] = p;
		++pageout_count;
		++is;
	}

	/*
	 * If we exhausted our forward scan, continue with the reverse scan
	 * when possible, even past a page boundary.  This catches boundary
	 * conditions.
	 */
	if (ib && pageout_count < vm_pageout_page_count)
		goto more;

	/*
	 * we allow reads during pageouts...
	 */
	return vm_pageout_flush(&mc[page_base], pageout_count, 0);
}

/*
 * vm_pageout_flush() - launder the given pages
 *
 *	The given pages are laundered.  Note that we setup for the start of
 *	I/O ( i.e. busy the page ), mark it read-only, and bump the object
 *	reference count all in here rather than in the parent.  If we want
 *	the parent to do more sophisticated things we may have to change
 *	the ordering.
 *
 * The caller must hold vm_token.
 */
int
vm_pageout_flush(vm_page_t *mc, int count, int flags)
{
	vm_object_t object;
	int pageout_status[count];
	int numpagedout = 0;
	int i;

	ASSERT_LWKT_TOKEN_HELD(&vm_token);

	/*
	 * Initiate I/O.  Bump the vm_page_t->busy counter.
	 */
	for (i = 0; i < count; i++) {
		KASSERT(mc[i]->valid == VM_PAGE_BITS_ALL, ("vm_pageout_flush page %p index %d/%d: partially invalid page", mc[i], i, count));
		vm_page_io_start(mc[i]);
	}

	/*
	 * We must make the pages read-only.  This will also force the
	 * modified bit in the related pmaps to be cleared.  The pager
	 * cannot clear the bit for us since the I/O completion code
	 * typically runs from an interrupt.  The act of making the page
	 * read-only handles the case for us.
	 */
	for (i = 0; i < count; i++) {
		vm_page_protect(mc[i], VM_PROT_READ);
	}

	object = mc[0]->object;
	vm_object_pip_add(object, count);

	vm_pager_put_pages(object, mc, count,
	    (flags | ((object == &kernel_object) ? VM_PAGER_PUT_SYNC : 0)),
	    pageout_status);

	for (i = 0; i < count; i++) {
		vm_page_t mt = mc[i];

		switch (pageout_status[i]) {
		case VM_PAGER_OK:
			numpagedout++;
			break;
		case VM_PAGER_PEND:
			numpagedout++;
			break;
		case VM_PAGER_BAD:
			/*
			 * Page outside of range of object.  Right now we
			 * essentially lose the changes by pretending it
			 * worked.
			 */
			pmap_clear_modify(mt);
			vm_page_undirty(mt);
			break;
		case VM_PAGER_ERROR:
		case VM_PAGER_FAIL:
			/*
			 * A page typically cannot be paged out when we
			 * have run out of swap.  We leave the page
			 * marked inactive and will try to page it out
			 * again later.
			 *
			 * Starvation of the active page list is used to
			 * determine when the system is massively memory
			 * starved.
			 */
			break;
		case VM_PAGER_AGAIN:
			break;
		}

		/*
		 * If the operation is still going, leave the page busy to
		 * block all other accesses.  Also, leave the paging in
		 * progress indicator set so that we don't attempt an object
		 * collapse.
		 *
		 * For any pages which have completed synchronously,
		 * deactivate the page if we are under a severe deficit.
		 * Do not try to enter them into the cache, though, they
		 * might still be read-heavy.
		 */
		if (pageout_status[i] != VM_PAGER_PEND) {
			vm_object_pip_wakeup(object);
			vm_page_io_finish(mt);
			if (vm_page_count_severe())
				vm_page_deactivate(mt);
#if 0
			if (!vm_page_count_severe() || !vm_page_try_to_cache(mt))
				vm_page_protect(mt, VM_PROT_READ);
#endif
		}
	}
	return numpagedout;
}

#if !defined(NO_SWAPPING)
/*
 * vm_pageout_object_deactivate_pages
 *
 * deactivate enough pages to satisfy the inactive target
 * requirements or if vm_page_proc_limit is set, then
 * deactivate all of the pages in the object and its
 * backing_objects.
 *
 * The map must be locked.
 * The caller must hold vm_token.
 */
static int vm_pageout_object_deactivate_pages_callback(vm_page_t, void *);

static void
vm_pageout_object_deactivate_pages(vm_map_t map, vm_object_t object,
	vm_pindex_t desired, int map_remove_only)
{
	struct rb_vm_page_scan_info info;
	int remove_mode;

	if (object->type == OBJT_DEVICE || object->type == OBJT_PHYS)
		return;

	while (object) {
		if (pmap_resident_count(vm_map_pmap(map)) <= desired)
			return;
		if (object->paging_in_progress)
			return;

		remove_mode = map_remove_only;
		if (object->shadow_count > 1)
			remove_mode = 1;

		/*
		 * scan the object's entire memory queue.  spl protection is
		 * required to avoid an interrupt unbusy/free race against
		 * our busy check.
		 */
		crit_enter();
		info.limit = remove_mode;
		info.map = map;
		info.desired = desired;
		vm_page_rb_tree_RB_SCAN(&object->rb_memq, NULL,
				vm_pageout_object_deactivate_pages_callback,
				&info
		);
		crit_exit();
		object = object->backing_object;
	}
}

/*
 * The caller must hold vm_token.
 */
static int
vm_pageout_object_deactivate_pages_callback(vm_page_t p, void *data)
{
	struct rb_vm_page_scan_info *info = data;
	int actcount;

	if (pmap_resident_count(vm_map_pmap(info->map)) <= info->desired) {
		return(-1);
	}
	mycpu->gd_cnt.v_pdpages++;
	if (p->wire_count != 0 || p->hold_count != 0 || p->busy != 0 ||
	    (p->flags & (PG_BUSY|PG_UNMANAGED)) ||
	    !pmap_page_exists_quick(vm_map_pmap(info->map), p)) {
		return(0);
	}

	actcount = pmap_ts_referenced(p);
	if (actcount) {
		vm_page_flag_set(p, PG_REFERENCED);
	} else if (p->flags & PG_REFERENCED) {
		actcount = 1;
	}

	if ((p->queue != PQ_ACTIVE) &&
	    (p->flags & PG_REFERENCED)) {
		vm_page_activate(p);
		p->act_count += actcount;
		vm_page_flag_clear(p, PG_REFERENCED);
	} else if (p->queue == PQ_ACTIVE) {
		if ((p->flags & PG_REFERENCED) == 0) {
			p->act_count -= min(p->act_count, ACT_DECLINE);
			if (!info->limit && (vm_pageout_algorithm || (p->act_count == 0))) {
				vm_page_busy(p);
				vm_page_protect(p, VM_PROT_NONE);
				vm_page_wakeup(p);
				vm_page_deactivate(p);
			} else {
				TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE].pl, p, pageq);
				TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE].pl, p, pageq);
			}
		} else {
			vm_page_activate(p);
			vm_page_flag_clear(p, PG_REFERENCED);
			if (p->act_count < (ACT_MAX - ACT_ADVANCE))
				p->act_count += ACT_ADVANCE;
			TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE].pl, p, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE].pl, p, pageq);
		}
	} else if (p->queue == PQ_INACTIVE) {
		vm_page_busy(p);
		vm_page_protect(p, VM_PROT_NONE);
		vm_page_wakeup(p);
	}
	return(0);
}

/*
 * Deactivate some number of pages in a map, try to do it fairly, but
 * that is really hard to do.
 *
 * The caller must hold vm_token.
 */
static void
vm_pageout_map_deactivate_pages(vm_map_t map, vm_pindex_t desired)
{
	vm_map_entry_t tmpe;
	vm_object_t obj, bigobj;
	int nothingwired;

	if (lockmgr(&map->lock, LK_EXCLUSIVE | LK_NOWAIT)) {
		return;
	}

	bigobj = NULL;
	nothingwired = TRUE;

	/*
	 * first, search out the biggest object, and try to free pages from
	 * that.
	 */
	tmpe = map->header.next;
	while (tmpe != &map->header) {
		switch(tmpe->maptype) {
		case VM_MAPTYPE_NORMAL:
		case VM_MAPTYPE_VPAGETABLE:
			obj = tmpe->object.vm_object;
			if ((obj != NULL) && (obj->shadow_count <= 1) &&
				((bigobj == NULL) ||
				 (bigobj->resident_page_count < obj->resident_page_count))) {
				bigobj = obj;
			}
			break;
		default:
			break;
		}
		if (tmpe->wired_count > 0)
			nothingwired = FALSE;
		tmpe = tmpe->next;
	}

	if (bigobj)
		vm_pageout_object_deactivate_pages(map, bigobj, desired, 0);

	/*
	 * Next, hunt around for other pages to deactivate.  We actually
	 * do this search sort of wrong -- .text first is not the best idea.
	 */
	tmpe = map->header.next;
	while (tmpe != &map->header) {
		if (pmap_resident_count(vm_map_pmap(map)) <= desired)
			break;
		switch(tmpe->maptype) {
		case VM_MAPTYPE_NORMAL:
		case VM_MAPTYPE_VPAGETABLE:
			obj = tmpe->object.vm_object;
			if (obj)
				vm_pageout_object_deactivate_pages(map, obj, desired, 0);
			break;
		default:
			break;
		}
		tmpe = tmpe->next;
	};

	/*
	 * Remove all mappings if a process is swapped out; this will free page
	 * table pages.
	 */
	if (desired == 0 && nothingwired)
		pmap_remove(vm_map_pmap(map),
			    VM_MIN_USER_ADDRESS, VM_MAX_USER_ADDRESS);
	vm_map_unlock(map);
}
#endif

/*
 * Don't try to be fancy - being fancy can lead to vnode deadlocks.  We
 * only do it for OBJT_DEFAULT and OBJT_SWAP objects which we know can
 * be trivially freed.
 *
 * The caller must hold vm_token.
 */
static void
vm_pageout_page_free(vm_page_t m)
{
	vm_object_t object = m->object;
	int type = object->type;

	if (type == OBJT_SWAP || type == OBJT_DEFAULT)
		vm_object_reference(object);
	vm_page_busy(m);
	vm_page_protect(m, VM_PROT_NONE);
	vm_page_free(m);
	if (type == OBJT_SWAP || type == OBJT_DEFAULT)
		vm_object_deallocate(object);
}

/*
 * vm_pageout_scan does the dirty work for the pageout daemon.
 */
struct vm_pageout_scan_info {
	struct proc *bigproc;
	vm_offset_t bigsize;
};

static int vm_pageout_scan_callback(struct proc *p, void *data);

/*
 * The caller must hold vm_token.
 */
static int
vm_pageout_scan(int pass)
{
	struct vm_pageout_scan_info info;
	vm_page_t m, next;
	struct vm_page marker;
	struct vnode *vpfailed;		/* warning, allowed to be stale */
	int maxscan, pcount;
	int recycle_count;
	int inactive_shortage, active_shortage;
	int inactive_original_shortage;
	vm_object_t object;
	int actcount;
	int vnodes_skipped = 0;
	int maxlaunder;

	/*
	 * Do whatever cleanup that the pmap code can.
	 */
	pmap_collect();

	/*
	 * Calculate our target for the number of free+cache pages we
	 * want to get to.  This is higher than the number that causes
	 * allocations to stall (severe) in order to provide hysteresis,
	 * and if we don't make it all the way but get to the minimum
	 * we're happy.
	 */
	inactive_shortage = vm_paging_target() + vm_pageout_deficit;
	inactive_original_shortage = inactive_shortage;
	vm_pageout_deficit = 0;

	/*
	 * Initialize our marker
	 */
	bzero(&marker, sizeof(marker));
	marker.flags = PG_BUSY | PG_FICTITIOUS | PG_MARKER;
	marker.queue = PQ_INACTIVE;
	marker.wire_count = 1;

	/*
	 * Start scanning the inactive queue for pages we can move to the
	 * cache or free.  The scan will stop when the target is reached or
	 * we have scanned the entire inactive queue.  Note that m->act_count
	 * is not used to form decisions for the inactive queue, only for the
	 * active queue.
	 *
	 * maxlaunder limits the number of dirty pages we flush per scan.
	 * For most systems a smaller value (16 or 32) is more robust under
	 * extreme memory and disk pressure because any unnecessary writes
	 * to disk can result in extreme performance degradation.  However,
	 * systems with excessive dirty pages (especially when MAP_NOSYNC is
	 * used) will die horribly with limited laundering.  If the pageout
	 * daemon cannot clean enough pages in the first pass, we let it go
	 * all out in succeeding passes.
	 */
	if ((maxlaunder = vm_max_launder) <= 1)
		maxlaunder = 1;
	if (pass)
		maxlaunder = 10000;

	/*
	 * We will generally be in a critical section throughout the
	 * scan, but we can release it temporarily when we are sitting on a
	 * non-busy page without fear.  This is required to prevent an
	 * interrupt from unbusying or freeing a page prior to our busy
	 * check, leaving us on the wrong queue or checking the wrong
	 * page.
	 */
	crit_enter();
rescan0:
	vpfailed = NULL;
	maxscan = vmstats.v_inactive_count;
	for (m = TAILQ_FIRST(&vm_page_queues[PQ_INACTIVE].pl);
	     m != NULL && maxscan-- > 0 && inactive_shortage > 0;
	     m = next
	) {
		mycpu->gd_cnt.v_pdpages++;

		/*
		 * Give interrupts a chance
		 */
		crit_exit();
		crit_enter();

		/*
		 * It's easier for some of the conditions below to just loop
		 * and catch queue changes here rather than check everywhere
		 * else.
		 */
		if (m->queue != PQ_INACTIVE)
			goto rescan0;
		next = TAILQ_NEXT(m, pageq);

		/*
		 * skip marker pages
		 */
		if (m->flags & PG_MARKER)
			continue;

		/*
		 * A held page may be undergoing I/O, so skip it.
		 */
		if (m->hold_count) {
			TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
			++vm_swapcache_inactive_heuristic;
			continue;
		}

		/*
		 * Don't mess with busy pages; keep them at the front of the
		 * queue, they are most likely being paged out.
		 */
		if (m->busy || (m->flags & PG_BUSY)) {
			continue;
		}

		if (m->object->ref_count == 0) {
			/*
			 * If the object is not being used, we ignore previous
			 * references.
			 */
			vm_page_flag_clear(m, PG_REFERENCED);
			pmap_clear_reference(m);

		} else if (((m->flags & PG_REFERENCED) == 0) &&
			    (actcount = pmap_ts_referenced(m))) {
			/*
			 * Otherwise, if the page has been referenced while
			 * in the inactive queue, we bump the "activation
			 * count" upwards, making it less likely that the
			 * page will be added back to the inactive queue
			 * prematurely again.  Here we check the page tables
			 * (or emulated bits, if any), given the upper level
			 * VM system not knowing anything about existing
			 * references.
			 */
			vm_page_activate(m);
			m->act_count += (actcount + ACT_ADVANCE);
			continue;
		}

		/*
		 * If the upper level VM system knows about any page
		 * references, we activate the page.  We also set the
		 * "activation count" higher than normal so that we will less
		 * likely place pages back onto the inactive queue again.
		 */
		if ((m->flags & PG_REFERENCED) != 0) {
			vm_page_flag_clear(m, PG_REFERENCED);
			actcount = pmap_ts_referenced(m);
			vm_page_activate(m);
			m->act_count += (actcount + ACT_ADVANCE + 1);
			continue;
		}

		/*
		 * If the upper level VM system doesn't know anything about
		 * the page being dirty, we have to check for it again.  As
		 * far as the VM code knows, any partially dirty pages are
		 * fully dirty.
		 *
		 * Pages marked PG_WRITEABLE may be mapped into the user
		 * address space of a process running on another cpu.  A
		 * user process (without holding the MP lock) running on
		 * another cpu may be able to touch the page while we are
		 * trying to remove it.  vm_page_cache() will handle this
		 * case for us.
		 */
		if (m->dirty == 0) {
			vm_page_test_dirty(m);
		} else {
			vm_page_dirty(m);
		}

		if (m->valid == 0) {
			/*
			 * Invalid pages can be easily freed
			 */
			vm_pageout_page_free(m);
			mycpu->gd_cnt.v_dfree++;
			--inactive_shortage;
		} else if (m->dirty == 0) {
			/*
			 * Clean pages can be placed onto the cache queue.
			 * This effectively frees them.
			 */
			vm_page_cache(m);
			--inactive_shortage;
		} else if ((m->flags & PG_WINATCFLS) == 0 && pass == 0) {
			/*
			 * Dirty pages need to be paged out, but flushing
			 * a page is extremely expensive versus freeing
			 * a clean page.  Rather than artificially limiting
			 * the number of pages we can flush, we instead give
			 * dirty pages extra priority on the inactive queue
			 * by forcing them to be cycled through the queue
			 * twice before being flushed, after which the
			 * (now clean) page will cycle through once more
			 * before being freed.  This significantly extends
			 * the thrash point for a heavily loaded machine.
			 */
			vm_page_flag_set(m, PG_WINATCFLS);
			TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
			++vm_swapcache_inactive_heuristic;
		} else if (maxlaunder > 0) {
			/*
			 * We always want to try to flush some dirty pages if
			 * we encounter them, to keep the system stable.
			 * Normally this number is small, but under extreme
			 * pressure where there are insufficient clean pages
			 * on the inactive queue, we may have to go all out.
			 */
			int swap_pageouts_ok;
			struct vnode *vp = NULL;

			object = m->object;

			if ((object->type != OBJT_SWAP) && (object->type != OBJT_DEFAULT)) {
				swap_pageouts_ok = 1;
			} else {
				swap_pageouts_ok = !(defer_swap_pageouts || disable_swap_pageouts);
				swap_pageouts_ok |= (!disable_swap_pageouts && defer_swap_pageouts &&
				    vm_page_count_min(0));

			}

			/*
			 * We don't bother paging objects that are "dead".
			 * Those objects are in a "rundown" state.
			 */
			if (!swap_pageouts_ok || (object->flags & OBJ_DEAD)) {
				TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
				TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
				++vm_swapcache_inactive_heuristic;
				continue;
			}

			/*
			 * The object is already known NOT to be dead.  It
			 * is possible for the vget() to block the whole
			 * pageout daemon, but the new low-memory handling
			 * code should prevent it.
			 *
			 * The previous code skipped locked vnodes and, worse,
			 * reordered pages in the queue.  This results in
			 * completely non-deterministic operation because,
			 * quite often, a vm_fault has initiated an I/O and
			 * is holding a locked vnode at just the point where
			 * the pageout daemon is woken up.
			 *
			 * We can't wait forever for the vnode lock, we might
			 * deadlock due to a vn_read() getting stuck in
			 * vm_wait while holding this vnode.  We skip the
			 * vnode if we can't get it in a reasonable amount
			 * of time.
			 *
			 * vpfailed is used to (try to) avoid the case where
			 * a large number of pages are associated with a
			 * locked vnode, which could cause the pageout daemon
			 * to stall for an excessive amount of time.
			 */
			if (object->type == OBJT_VNODE) {
				int flags;

				vp = object->handle;
				flags = LK_EXCLUSIVE | LK_NOOBJ;
				if (vp == vpfailed)
					flags |= LK_NOWAIT;
				else
					flags |= LK_TIMELOCK;
				if (vget(vp, flags) != 0) {
					vpfailed = vp;
					++pageout_lock_miss;
					if (object->flags & OBJ_MIGHTBEDIRTY)
						vnodes_skipped++;
					continue;
				}

				/*
				 * The page might have been moved to another
				 * queue during potential blocking in vget()
				 * above.  The page might have been freed and
				 * reused for another vnode.  The object might
				 * have been reused for another vnode.
				 */
				if (m->queue != PQ_INACTIVE ||
				    m->object != object ||
				    object->handle != vp) {
					if (object->flags & OBJ_MIGHTBEDIRTY)
						vnodes_skipped++;
					vput(vp);
					continue;
				}

				/*
				 * The page may have been busied during the
				 * blocking in vput();  We don't move the
				 * page back onto the end of the queue so that
				 * statistics are more correct if we don't.
				 */
				if (m->busy || (m->flags & PG_BUSY)) {
					vput(vp);
					continue;
				}

				/*
				 * If the page has become held it might
				 * be undergoing I/O, so skip it
				 */
				if (m->hold_count) {
					TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
					TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
					++vm_swapcache_inactive_heuristic;
					if (object->flags & OBJ_MIGHTBEDIRTY)
						vnodes_skipped++;
					vput(vp);
					continue;
				}
			}

			/*
			 * If a page is dirty, then it is either being washed
			 * (but not yet cleaned) or it is still in the
			 * laundry.  If it is still in the laundry, then we
			 * start the cleaning operation.
			 *
			 * This operation may cluster, invalidating the 'next'
			 * pointer.  To prevent an inordinate number of
			 * restarts we use our marker to remember our place.
			 *
			 * decrement inactive_shortage on success to account
			 * for the (future) cleaned page.  Otherwise we
			 * could wind up laundering or cleaning too many
			 * pages.
			 */
			TAILQ_INSERT_AFTER(&vm_page_queues[PQ_INACTIVE].pl, m, &marker, pageq);
			if (vm_pageout_clean(m) != 0) {
				--inactive_shortage;
				--maxlaunder;
			}
			next = TAILQ_NEXT(&marker, pageq);
			TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE].pl, &marker, pageq);
			if (vp != NULL)
				vput(vp);
		}
	}

	/*
	 * We want to move pages from the active queue to the inactive
	 * queue to get the inactive queue to the inactive target.  If
	 * we still have a page shortage from above we try to directly free
	 * clean pages instead of moving them.
	 *
	 * If we do still have a shortage we keep track of the number of
	 * pages we free or cache (recycle_count) as a measure of thrashing
	 * between the active and inactive queues.
	 *
	 * If we were able to completely satisfy the free+cache targets
	 * from the inactive pool we limit the number of pages we move
	 * from the active pool to the inactive pool to 2x the pages we
	 * had removed from the inactive pool (with a minimum of 1/5 the
	 * inactive target).  If we were not able to completely satisfy
	 * the free+cache targets we go for the whole target aggressively.
	 *
	 * NOTE: Both variables can end up negative.
	 * NOTE: We are still in a critical section.
	 */
	active_shortage = vmstats.v_inactive_target - vmstats.v_inactive_count;
	if (inactive_original_shortage < vmstats.v_inactive_target / 10)
		inactive_original_shortage = vmstats.v_inactive_target / 10;
	if (inactive_shortage <= 0 &&
	    active_shortage > inactive_original_shortage * 2) {
		active_shortage = inactive_original_shortage * 2;
	}

	pcount = vmstats.v_active_count;
	recycle_count = 0;
	m = TAILQ_FIRST(&vm_page_queues[PQ_ACTIVE].pl);

	while ((m != NULL) && (pcount-- > 0) &&
	       (inactive_shortage > 0 || active_shortage > 0)
	) {
		/*
		 * Give interrupts a chance.
		 */
		crit_exit();
		crit_enter();

		/*
		 * If the page was ripped out from under us, just stop.
		 */
		if (m->queue != PQ_ACTIVE)
			break;
		next = TAILQ_NEXT(m, pageq);

		/*
		 * Don't deactivate pages that are busy.
		 */
		if ((m->busy != 0) ||
		    (m->flags & PG_BUSY) ||
		    (m->hold_count != 0)) {
			TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
			m = next;
			continue;
		}

		/*
		 * The count for pagedaemon pages is done after checking the
		 * page for eligibility...
		 */
		mycpu->gd_cnt.v_pdpages++;

		/*
		 * Check to see "how much" the page has been used and clear
		 * the tracking access bits.  If the object has no references
		 * don't bother paying the expense.
		 */
		actcount = 0;
		if (m->object->ref_count != 0) {
			if (m->flags & PG_REFERENCED)
				++actcount;
			actcount += pmap_ts_referenced(m);
			if (actcount) {
				m->act_count += ACT_ADVANCE + actcount;
				if (m->act_count > ACT_MAX)
					m->act_count = ACT_MAX;
			}
		}
		vm_page_flag_clear(m, PG_REFERENCED);

		/*
		 * actcount is only valid if the object ref_count is non-zero.
		 */
		if (actcount && m->object->ref_count != 0) {
			TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
		} else {
			m->act_count -= min(m->act_count, ACT_DECLINE);
			if (vm_pageout_algorithm ||
			    m->object->ref_count == 0 ||
			    m->act_count < pass + 1
			) {
				/*
				 * Deactivate the page.  If we had a
				 * shortage from our inactive scan try to
				 * free (cache) the page instead.
				 *
				 * Don't just blindly cache the page if
				 * we do not have a shortage from the
				 * inactive scan, that could lead to
				 * gigabytes being moved.
				 */
				--active_shortage;
				if (inactive_shortage > 0 ||
				    m->object->ref_count == 0) {
					if (inactive_shortage > 0)
						++recycle_count;
					vm_page_busy(m);
					vm_page_protect(m, VM_PROT_NONE);
					vm_page_wakeup(m);
					if (m->dirty == 0 &&
					    inactive_shortage > 0) {
						--inactive_shortage;
						vm_page_cache(m);
					} else {
						vm_page_deactivate(m);
					}
				} else {
					vm_page_deactivate(m);
				}
			} else {
				TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
				TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
			}
		}
		m = next;
	}

	/*
	 * We try to maintain some *really* free pages, this allows interrupt
	 * code to be guaranteed space.  Since both cache and free queues
	 * are considered basically 'free', moving pages from cache to free
	 * does not affect other calculations.
	 *
	 * NOTE: we are still in a critical section.
	 *
	 * Pages moved from PQ_CACHE to totally free are not counted in the
	 * pages_freed counter.
	 */
	while (vmstats.v_free_count < vmstats.v_free_reserved) {
		static int cache_rover = 0;
		m = vm_page_list_find(PQ_CACHE, cache_rover, FALSE);
		if (m == NULL)
			break;
		if ((m->flags & (PG_BUSY|PG_UNMANAGED)) ||
		    m->busy ||
		    m->hold_count ||
		    m->wire_count) {
#ifdef INVARIANTS
			kprintf("Warning: busy page %p found in cache\n", m);
#endif
			vm_page_deactivate(m);
			continue;
		}
		KKASSERT((m->flags & PG_MAPPED) == 0);
		KKASSERT(m->dirty == 0);
		cache_rover = (cache_rover + PQ_PRIME2) & PQ_L2_MASK;
		vm_pageout_page_free(m);
		mycpu->gd_cnt.v_dfree++;
	}

	crit_exit();

#if !defined(NO_SWAPPING)
	/*
	 * Idle process swapout -- run once per second.
	 */
	if (vm_swap_idle_enabled) {
		static long lsec;
		if (time_second != lsec) {
			vm_pageout_req_swapout |= VM_SWAP_IDLE;
			vm_req_vmdaemon();
			lsec = time_second;
		}
	}
#endif

	/*
	 * If we didn't get enough free pages, and we have skipped a vnode
	 * in a writeable object, wakeup the sync daemon.  And kick swapout
	 * if we did not get enough free pages.
	 */
	if (vm_paging_target() > 0) {
		if (vnodes_skipped && vm_page_count_min(0))
			speedup_syncer();
#if !defined(NO_SWAPPING)
		if (vm_swap_enabled && vm_page_count_target()) {
			vm_req_vmdaemon();
			vm_pageout_req_swapout |= VM_SWAP_NORMAL;
		}
#endif
	}

	/*
	 * Handle catastrophic conditions.  Under good conditions we should
	 * be at the target, well beyond our minimum.  If we could not even
	 * reach our minimum the system is under heavy stress.
	 *
	 * Determine whether we have run out of memory.  This occurs when
	 * swap_pager_full is TRUE and the only pages left in the page
	 * queues are dirty.  We will still likely have page shortages.
	 *
	 * - swap_pager_full is set if insufficient swap was
	 *   available to satisfy a requested pageout.
	 *
	 * - the inactive queue is bloated (4 x size of active queue),
	 *   meaning it is unable to get rid of dirty pages.
	 *
	 * - vm_page_count_min() without counting pages recycled from the
	 *   active queue (recycle_count) means we could not recover
	 *   enough pages to meet bare minimum needs.  This test only
	 *   works if the inactive queue is bloated.
	 *
	 * - due to a positive inactive_shortage we shifted the remaining
	 *   dirty pages from the active queue to the inactive queue
	 *   trying to find clean ones to free.
	 */
	if (swap_pager_full && vm_page_count_min(recycle_count))
		kprintf("Warning: system low on memory+swap!\n");
	if (swap_pager_full && vm_page_count_min(recycle_count) &&
	    vmstats.v_inactive_count > vmstats.v_active_count * 4 &&
	    inactive_shortage > 0) {
		/*
		 * Kill something.
		 */
		info.bigproc = NULL;
		info.bigsize = 0;
		allproc_scan(vm_pageout_scan_callback, &info);
		if (info.bigproc != NULL) {
			killproc(info.bigproc, "out of swap space");
			info.bigproc->p_nice = PRIO_MIN;
			info.bigproc->p_usched->resetpriority(
				FIRST_LWP_IN_PROC(info.bigproc));
			wakeup(&vmstats.v_free_count);
			PRELE(info.bigproc);
		}
	}
	return(inactive_shortage);
}

/*
 * The caller must hold vm_token and proc_token.
 */
static int
vm_pageout_scan_callback(struct proc *p, void *data)
{
	struct vm_pageout_scan_info *info = data;
	vm_offset_t size;

	/*
	 * Never kill system processes or init.  If we have configured swap
	 * then try to avoid killing low-numbered pids.
	 */
	if ((p->p_flag & P_SYSTEM) || (p->p_pid == 1) ||
	    ((p->p_pid < 48) && (vm_swap_size != 0))) {
		return (0);
	}

	/*
	 * if the process is in a non-running type state,
	 * don't touch it.
	 */
	if (p->p_stat != SACTIVE && p->p_stat != SSTOP)
		return (0);

	/*
	 * Get the approximate process size.  Note that anonymous pages
	 * with backing swap will be counted twice, but there should not
	 * be too many such pages due to the stress the VM system is
	 * under at this point.
	 */
	size = vmspace_anonymous_count(p->p_vmspace) +
		vmspace_swap_count(p->p_vmspace);

	/*
	 * If this process is bigger than the biggest one,
	 * remember it.
	 */
	if (info->bigsize < size) {
		if (info->bigproc)
			PRELE(info->bigproc);
		PHOLD(p);
		info->bigproc = p;
		info->bigsize = size;
	}
	return(0);
}

984263bc MD |
1349 | /* |
1350 | * This routine tries to maintain the pseudo LRU active queue, | |
1351 | * so that during long periods of time where there is no paging, | |
1352 | * that some statistic accumulation still occurs. This code | |
1353 | * helps the situation where paging just starts to occur. | |
99ad9bc4 MD |
1354 | * |
1355 | * The caller must hold vm_token. | |
984263bc MD |
1356 | */ |
1357 | static void | |
57e43348 | 1358 | vm_pageout_page_stats(void) |
984263bc | 1359 | { |
984263bc MD |
1360 | vm_page_t m,next; |
1361 | int pcount,tpcount; /* Number of pages to check */ | |
1362 | static int fullintervalcount = 0; | |
1363 | int page_shortage; | |
984263bc MD |
1364 | |
1365 | page_shortage = | |
12e4aaff MD |
1366 | (vmstats.v_inactive_target + vmstats.v_cache_max + vmstats.v_free_min) - |
1367 | (vmstats.v_free_count + vmstats.v_inactive_count + vmstats.v_cache_count); | |
984263bc MD |
1368 | |
1369 | if (page_shortage <= 0) | |
1370 | return; | |
1371 | ||
5fd012e0 | 1372 | crit_enter(); |
984263bc | 1373 | |
12e4aaff | 1374 | pcount = vmstats.v_active_count; |
984263bc MD |
1375 | fullintervalcount += vm_pageout_stats_interval; |
1376 | if (fullintervalcount < vm_pageout_full_stats_interval) { | |
12e4aaff | 1377 | tpcount = (vm_pageout_stats_max * vmstats.v_active_count) / vmstats.v_page_count; |
984263bc MD |
1378 | if (pcount > tpcount) |
1379 | pcount = tpcount; | |
1380 | } else { | |
1381 | fullintervalcount = 0; | |
1382 | } | |
1383 | ||
1384 | m = TAILQ_FIRST(&vm_page_queues[PQ_ACTIVE].pl); | |
1385 | while ((m != NULL) && (pcount-- > 0)) { | |
1386 | int actcount; | |
1387 | ||
1388 | if (m->queue != PQ_ACTIVE) { | |
1389 | break; | |
1390 | } | |
1391 | ||
1392 | next = TAILQ_NEXT(m, pageq); | |
1393 | /* | |
1394 | * Don't deactivate pages that are busy. | |
1395 | */ | |
1396 | if ((m->busy != 0) || | |
1397 | (m->flags & PG_BUSY) || | |
1398 | (m->hold_count != 0)) { | |
984263bc MD |
1399 | TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE].pl, m, pageq); |
1400 | TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE].pl, m, pageq); | |
984263bc MD |
1401 | m = next; |
1402 | continue; | |
1403 | } | |
1404 | ||
1405 | actcount = 0; | |
1406 | if (m->flags & PG_REFERENCED) { | |
1407 | vm_page_flag_clear(m, PG_REFERENCED); | |
1408 | actcount += 1; | |
1409 | } | |
1410 | ||
1411 | actcount += pmap_ts_referenced(m); | |
1412 | if (actcount) { | |
1413 | m->act_count += ACT_ADVANCE + actcount; | |
1414 | if (m->act_count > ACT_MAX) | |
1415 | m->act_count = ACT_MAX; | |
984263bc MD |
1416 | TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE].pl, m, pageq); |
1417 | TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE].pl, m, pageq); | |
984263bc MD |
1418 | } else { |
1419 | if (m->act_count == 0) { | |
1420 | /* | |
1421 | * We turn off page access, so that we have | |
1422 | * more accurate RSS stats. We don't do this | |
1423 | * in the normal page deactivation when the | |
1424 | * system is loaded VM wise, because the | |
1425 | * cost of the large number of page protect | |
1426 | * operations would be higher than the value | |
1427 | * of doing the operation. | |
1428 | */ | |
17cde63e | 1429 | vm_page_busy(m); |
984263bc | 1430 | vm_page_protect(m, VM_PROT_NONE); |
17cde63e | 1431 | vm_page_wakeup(m); |
984263bc MD |
1432 | vm_page_deactivate(m); |
1433 | } else { | |
1434 | m->act_count -= min(m->act_count, ACT_DECLINE); | |
984263bc MD |
1435 | TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE].pl, m, pageq); |
1436 | TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE].pl, m, pageq); | |
984263bc MD |
1437 | } |
1438 | } | |
1439 | ||
1440 | m = next; | |
1441 | } | |
5fd012e0 | 1442 | crit_exit(); |
984263bc MD |
1443 | } |
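/*
 * Illustrative sketch (not part of vm_pageout.c): a stand-alone, user-space
 * model of the two calculations above -- the page_shortage test that decides
 * whether the stats pass runs at all, and the act_count aging rule applied to
 * each active page.  The demo_* names and DEMO_ACT_* values are hypothetical
 * stand-ins for the kernel's vmstats fields and ACT_* tunables.
 */
#include <stdio.h>

#define DEMO_ACT_ADVANCE 3		/* assumed stand-in for ACT_ADVANCE */
#define DEMO_ACT_DECLINE 1		/* assumed stand-in for ACT_DECLINE */
#define DEMO_ACT_MAX     64		/* assumed stand-in for ACT_MAX */

struct demo_vmstats {
	int v_inactive_target, v_cache_max, v_free_min;
	int v_free_count, v_inactive_count, v_cache_count;
};

/* Targets minus what is already on hand; a positive result means the
 * stats pass has work to do. */
static int
demo_page_shortage(const struct demo_vmstats *vs)
{
	return (vs->v_inactive_target + vs->v_cache_max + vs->v_free_min) -
	       (vs->v_free_count + vs->v_inactive_count + vs->v_cache_count);
}

/* Referenced pages gain act_count (clamped at the maximum); unreferenced
 * pages decay, and a page that reaches zero becomes a deactivation
 * candidate, as in the loop above. */
static int
demo_age_page(int act_count, int refs)
{
	if (refs) {
		act_count += DEMO_ACT_ADVANCE + refs;
		if (act_count > DEMO_ACT_MAX)
			act_count = DEMO_ACT_MAX;
	} else if (act_count > 0) {
		act_count -= (act_count < DEMO_ACT_DECLINE) ?
		    act_count : DEMO_ACT_DECLINE;
	}
	return act_count;
}

int
main(void)
{
	struct demo_vmstats vs = { 2000, 1000, 500, 1200, 1500, 600 };

	printf("shortage %d\n", demo_page_shortage(&vs));	/* 3500 - 3300 = 200 */
	printf("idle page:  5 -> %d\n", demo_age_page(5, 0));	/* decays to 4 */
	printf("busy page:  5 -> %d\n", demo_age_page(5, 2));	/* grows to 10 */
	return 0;
}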
1444 | ||
99ad9bc4 MD |
1445 | /* |
1446 | * The caller must hold vm_token. | |
1447 | */ | |
984263bc | 1448 | static int |
57e43348 | 1449 | vm_pageout_free_page_calc(vm_size_t count) |
984263bc | 1450 | { |
12e4aaff | 1451 | if (count < vmstats.v_page_count) |
984263bc MD |
1452 | return 0; |
1453 | /* | |
1454 | * free_reserved needs to include enough for the largest swap pager | |
1455 | * structures plus enough for any pv_entry structs when paging. | |
1456 | */ | |
12e4aaff MD |
1457 | if (vmstats.v_page_count > 1024) |
1458 | vmstats.v_free_min = 4 + (vmstats.v_page_count - 1024) / 200; | |
984263bc | 1459 | else |
12e4aaff MD |
1460 | vmstats.v_free_min = 4; |
1461 | vmstats.v_pageout_free_min = (2*MAXBSIZE)/PAGE_SIZE + | |
1462 | vmstats.v_interrupt_free_min; | |
1463 | vmstats.v_free_reserved = vm_pageout_page_count + | |
1464 | vmstats.v_pageout_free_min + (count / 768) + PQ_L2_SIZE; | |
1465 | vmstats.v_free_severe = vmstats.v_free_min / 2; | |
1466 | vmstats.v_free_min += vmstats.v_free_reserved; | |
1467 | vmstats.v_free_severe += vmstats.v_free_reserved; | |
984263bc MD |
1468 | return 1; |
1469 | } | |
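/*
 * Illustrative sketch (not part of vm_pageout.c): the watermark arithmetic
 * from vm_pageout_free_page_calc() reworked as a stand-alone calculation so
 * the relationships are easier to trace.  Every constant below (page size,
 * MAXBSIZE, PQ_L2_SIZE, the pageout burst size) is a hypothetical example
 * value, not the kernel's actual configuration.
 */
#include <stdio.h>

int
main(void)
{
	long page_count = 262144;	/* e.g. 1 GiB of 4 KiB pages */
	long maxbsize = 65536;		/* assumed MAXBSIZE */
	long page_size = 4096;		/* assumed PAGE_SIZE */
	long pq_l2_size = 256;		/* assumed PQ_L2_SIZE */
	long burst = 16;		/* assumed vm_pageout_page_count */
	long intr_free_min = 2;		/* matches v_interrupt_free_min */

	long free_min = (page_count > 1024) ?
	    4 + (page_count - 1024) / 200 : 4;
	long pageout_free_min = (2 * maxbsize) / page_size + intr_free_min;
	long free_reserved = burst + pageout_free_min +
	    page_count / 768 + pq_l2_size;
	long free_severe = free_min / 2 + free_reserved;

	free_min += free_reserved;

	/* With these inputs: free_reserved = 16 + 34 + 341 + 256 = 647,
	 * free_min = 1309 + 647 = 1956, free_severe = 654 + 647 = 1301. */
	printf("free_min %ld  free_reserved %ld  free_severe %ld\n",
	    free_min, free_reserved, free_severe);
	return 0;
}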
1470 | ||
1471 | ||
1472 | /* | |
20479584 | 1473 | * vm_pageout is the high level pageout daemon. |
99ad9bc4 MD |
1474 | * |
1475 | * No requirements. | |
984263bc MD |
1476 | */ |
1477 | static void | |
cd8ab232 | 1478 | vm_pageout_thread(void) |
984263bc MD |
1479 | { |
1480 | int pass; | |
20479584 | 1481 | int inactive_shortage; |
984263bc | 1482 | |
99ad9bc4 MD |
1483 | /* |
1484 | * Permanently hold vm_token. | |
1485 | */ | |
1486 | lwkt_gettoken(&vm_token); | |
1487 | ||
984263bc MD |
1488 | /* |
1489 | * Initialize some paging parameters. | |
1490 | */ | |
4ecf7cc9 | 1491 | curthread->td_flags |= TDF_SYSTHREAD; |
984263bc | 1492 | |
12e4aaff MD |
1493 | vmstats.v_interrupt_free_min = 2; |
1494 | if (vmstats.v_page_count < 2000) | |
984263bc MD |
1495 | vm_pageout_page_count = 8; |
1496 | ||
12e4aaff | 1497 | vm_pageout_free_page_calc(vmstats.v_page_count); |
20479584 | 1498 | |
984263bc MD |
1499 | /* |
1500 | * v_free_target and v_cache_min control pageout hysteresis. Note | |
1501 | * that these are more a measure of the VM cache queue hysteresis | |
1502 | * then the VM free queue. Specifically, v_free_target is the | |
1503 | * high water mark (free+cache pages). | |
1504 | * | |
1505 | * v_free_reserved + v_cache_min (mostly means v_cache_min) is the | |
1506 | * low water mark, while v_free_min is the stop. v_cache_min must | |
1507 | * be big enough to handle memory needs while the pageout daemon | |
1508 | * is signalled and run to free more pages. | |
1509 | */ | |
12e4aaff MD |
1510 | if (vmstats.v_free_count > 6144) |
1511 | vmstats.v_free_target = 4 * vmstats.v_free_min + vmstats.v_free_reserved; | |
984263bc | 1512 | else |
12e4aaff | 1513 | vmstats.v_free_target = 2 * vmstats.v_free_min + vmstats.v_free_reserved; |
984263bc | 1514 | |
0e8bd897 MD |
1515 | /* |
1516 | * NOTE: With the new buffer cache b_act_count we want the default | |
1517 | * inactive target to be a percentage of available memory. | |
1518 | * | |
1519 | * The inactive target essentially determines the minimum | |
1520 | * number of 'temporary' pages capable of caching one-time-use | |
1521 | * files when the VM system is otherwise full of pages | |
1522 | * belonging to multi-time-use files or active program data. | |
51db7ca2 MD |
1523 | * |
1524 | * NOTE: The inactive target is aggressively pursued only if the | |
1525 | * inactive queue becomes too small. If the inactive queue | |
1526 | * is large enough to satisfy page movement to free+cache | |
1527 | * then it is repopulated more slowly from the active queue. | |
e15708fc | 1528 | * This allows a general inactive_target default to be set. |
51db7ca2 MD |
1529 | * |
1530 | * There is an issue here for processes which sit mostly idle | |
1531 | * 'overnight', such as sshd, tcsh, and X. Any movement from | |
1532 | * the active queue will eventually cause such pages to | |
1533 | * recycle eventually causing a lot of paging in the morning. | |
1534 | * To reduce the incidence of this pages cycled out of the | |
1535 | * buffer cache are moved directly to the inactive queue if | |
e15708fc MD |
1536 | * they were only used once or twice. |
1537 | * | |
1538 | * The vfs.vm_cycle_point sysctl can be used to adjust this. | |
1539 | * Increasing the value (up to 64) increases the number of | |
1540 | * buffer recyclements which go directly to the inactive queue. | |
0e8bd897 | 1541 | */ |
12e4aaff MD |
1542 | if (vmstats.v_free_count > 2048) { |
1543 | vmstats.v_cache_min = vmstats.v_free_target; | |
1544 | vmstats.v_cache_max = 2 * vmstats.v_cache_min; | |
984263bc | 1545 | } else { |
12e4aaff MD |
1546 | vmstats.v_cache_min = 0; |
1547 | vmstats.v_cache_max = 0; | |
984263bc | 1548 | } |
e15708fc | 1549 | vmstats.v_inactive_target = vmstats.v_free_count / 4; |
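/*
 * Illustrative worked example (not from the source): with a hypothetical
 * v_free_min of 2000, v_free_reserved of 650 and v_free_count of 100000
 * at boot, the settings above come out to v_free_target = 4*2000 + 650 =
 * 8650, v_cache_min = 8650, v_cache_max = 17300 and v_inactive_target =
 * 25000.  Per the comments above, v_free_target then acts as the
 * free+cache high water mark, v_free_reserved + v_cache_min as the low
 * water mark, and v_free_min as the hard stop.
 */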
984263bc MD |
1550 | |
1551 | /* XXX does not really belong here */ | |
1552 | if (vm_page_max_wired == 0) | |
12e4aaff | 1553 | vm_page_max_wired = vmstats.v_free_count / 3; |
984263bc MD |
1554 | |
1555 | if (vm_pageout_stats_max == 0) | |
12e4aaff | 1556 | vm_pageout_stats_max = vmstats.v_free_target; |
984263bc MD |
1557 | |
1558 | /* | |
1559 | * Set interval in seconds for stats scan. | |
1560 | */ | |
1561 | if (vm_pageout_stats_interval == 0) | |
1562 | vm_pageout_stats_interval = 5; | |
1563 | if (vm_pageout_full_stats_interval == 0) | |
1564 | vm_pageout_full_stats_interval = vm_pageout_stats_interval * 4; | |
1565 | ||
1566 | ||
1567 | /* | |
1568 | * Set maximum free per pass | |
1569 | */ | |
1570 | if (vm_pageout_stats_free_max == 0) | |
1571 | vm_pageout_stats_free_max = 5; | |
1572 | ||
1573 | swap_pager_swap_init(); | |
1574 | pass = 0; | |
20479584 | 1575 | |
984263bc MD |
1576 | /* |
1577 | * The pageout daemon is never done, so loop forever. | |
1578 | */ | |
1579 | while (TRUE) { | |
1580 | int error; | |
984263bc | 1581 | |
12d8aca7 MD |
1582 | /* |
1583 | * Wait for an action request | |
1584 | */ | |
1585 | crit_enter(); | |
20479584 | 1586 | if (vm_pages_needed == 0) { |
984263bc | 1587 | error = tsleep(&vm_pages_needed, |
20479584 MD |
1588 | 0, "psleep", |
1589 | vm_pageout_stats_interval * hz); | |
1590 | if (error && vm_pages_needed == 0) { | |
984263bc MD |
1591 | vm_pageout_page_stats(); |
1592 | continue; | |
1593 | } | |
20479584 | 1594 | vm_pages_needed = 1; |
984263bc | 1595 | } |
12d8aca7 | 1596 | crit_exit(); |
984263bc | 1597 | |
20479584 MD |
1598 | /* |
1599 | * If we have enough free memory, wakeup waiters. | |
12d8aca7 | 1600 | * (This is optional here) |
20479584 MD |
1601 | */ |
1602 | crit_enter(); | |
1603 | if (!vm_page_count_min(0)) | |
1604 | wakeup(&vmstats.v_free_count); | |
1605 | mycpu->gd_cnt.v_pdwakeups++; | |
5fd012e0 | 1606 | crit_exit(); |
20479584 MD |
1607 | |
1608 | /* | |
12d8aca7 MD |
1609 | * Scan for pageout. Try to avoid thrashing the system |
1610 | * with activity. | |
20479584 | 1611 | */ |
12d8aca7 | 1612 | inactive_shortage = vm_pageout_scan(pass); |
20479584 MD |
1613 | if (inactive_shortage > 0) { |
1614 | ++pass; | |
1615 | if (swap_pager_full) { | |
1616 | /* | |
1617 | * Running out of memory, catastrophic back-off | |
1618 | * to one-second intervals. | |
1619 | */ | |
1620 | tsleep(&vm_pages_needed, 0, "pdelay", hz); | |
1621 | } else if (pass < 10 && vm_pages_needed > 1) { | |
1622 | /* | |
1623 | * Normal operation, additional processes | |
1624 | * have already kicked us. Retry immediately. | |
1625 | */ | |
1626 | } else if (pass < 10) { | |
1627 | /* | |
1628 | * Normal operation, fewer processes. Delay | |
1629 | * a bit but allow wakeups. | |
1630 | */ | |
1631 | vm_pages_needed = 0; | |
1632 | tsleep(&vm_pages_needed, 0, "pdelay", hz / 10); | |
1633 | vm_pages_needed = 1; | |
1634 | } else { | |
1635 | /* | |
1636 | * We've taken too many passes, forced delay. | |
1637 | */ | |
1638 | tsleep(&vm_pages_needed, 0, "pdelay", hz / 10); | |
1639 | } | |
1640 | } else { | |
12d8aca7 MD |
1641 | /* |
1642 | * Interlocked wakeup of waiters (non-optional) | |
1643 | */ | |
20479584 | 1644 | pass = 0; |
12d8aca7 MD |
1645 | if (vm_pages_needed && !vm_page_count_min(0)) { |
1646 | wakeup(&vmstats.v_free_count); | |
1647 | vm_pages_needed = 0; | |
1648 | } | |
20479584 | 1649 | } |
984263bc MD |
1650 | } |
1651 | } | |
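/*
 * Illustrative sketch (not part of vm_pageout.c): the main loop's back-off
 * policy reduced to a pure function returning how long the daemon pauses
 * before the next scan, in ticks.  demo_backoff_ticks() and DEMO_HZ are
 * hypothetical names/values; in the real loop the early-pass quiet case
 * additionally clears vm_pages_needed during the nap so a new shortage can
 * wake the daemon immediately.
 */
#include <stdio.h>

#define DEMO_HZ 100			/* assumed ticks per second */

static int
demo_backoff_ticks(int pass, int swap_full, int more_requests)
{
	if (swap_full)
		return DEMO_HZ;		/* catastrophic: back off a full second */
	if (pass < 10 && more_requests)
		return 0;		/* others already kicked us: retry now */
	return DEMO_HZ / 10;		/* otherwise a short 1/10 second pause */
}

int
main(void)
{
	printf("swap full:        %d ticks\n", demo_backoff_ticks(3, 1, 0));
	printf("busy, early pass: %d ticks\n", demo_backoff_ticks(3, 0, 1));
	printf("quiet, late pass: %d ticks\n", demo_backoff_ticks(12, 0, 0));
	return 0;
}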
1652 | ||
cd8ab232 MD |
1653 | static struct kproc_desc page_kp = { |
1654 | "pagedaemon", | |
1655 | vm_pageout_thread, | |
1656 | &pagethread | |
1657 | }; | |
1658 | SYSINIT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, kproc_start, &page_kp) | |
1659 | ||
1660 | ||
20479584 MD |
1661 | /* |
1662 | * Called after allocating a page out of the cache or free queue | |
1663 | * to possibly wake the pagedaemon up to replenish our supply. | |
1664 | * | |
1665 | * We try to generate some hysteresis by waking the pagedaemon up | |
1666 | * when our free+cache pages go below the severe level. The pagedaemon | |
1667 | * tries to get the count back up to at least the minimum, and through | |
1668 | * to the target level if possible. | |
1669 | * | |
1670 | * If the pagedaemon is already active bump vm_pages_needed as a hint | |
1671 | * that there are even more requests pending. | |
99ad9bc4 MD |
1672 | * |
1673 | * SMP races ok? | |
1674 | * No requirements. | |
20479584 | 1675 | */ |
984263bc | 1676 | void |
57e43348 | 1677 | pagedaemon_wakeup(void) |
984263bc | 1678 | { |
20479584 MD |
1679 | if (vm_page_count_severe() && curthread != pagethread) { |
1680 | if (vm_pages_needed == 0) { | |
1681 | vm_pages_needed = 1; | |
1682 | wakeup(&vm_pages_needed); | |
1683 | } else if (vm_page_count_min(0)) { | |
1684 | ++vm_pages_needed; | |
1685 | } | |
984263bc MD |
1686 | } |
1687 | } | |
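/*
 * Illustrative sketch (not part of vm_pageout.c): a user-space model of the
 * wakeup protocol above.  The demo_* names and threshold arguments are
 * hypothetical stand-ins for vm_page_count_severe() and vm_page_count_min();
 * the real routine also ignores calls made by the pagedaemon itself.  The
 * point shown: the first caller past the severe mark posts a single wakeup,
 * and only callers who find the count below the minimum bump the hint.
 */
#include <stdio.h>

static int demo_pages_needed;		/* models vm_pages_needed */

static void
demo_pagedaemon_wakeup(int free_pages, int severe_mark, int min_mark)
{
	if (free_pages >= severe_mark)
		return;			/* plenty left, nothing to do */
	if (demo_pages_needed == 0) {
		demo_pages_needed = 1;	/* first request: wake the daemon */
		printf("wakeup posted\n");
	} else if (free_pages < min_mark) {
		++demo_pages_needed;	/* daemon already awake: add a hint */
	}
}

int
main(void)
{
	demo_pagedaemon_wakeup(900, 1000, 500);	/* severe: posts the wakeup */
	demo_pagedaemon_wakeup(900, 1000, 500);	/* awake, not yet below min */
	demo_pagedaemon_wakeup(400, 1000, 500);	/* below min: hint is bumped */
	printf("pages_needed = %d\n", demo_pages_needed);	/* prints 2 */
	return 0;
}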
1688 | ||
1689 | #if !defined(NO_SWAPPING) | |
99ad9bc4 MD |
1690 | |
1691 | /* | |
1692 | * SMP races ok? | |
1693 | * No requirements. | |
1694 | */ | |
984263bc | 1695 | static void |
57e43348 | 1696 | vm_req_vmdaemon(void) |
984263bc MD |
1697 | { |
1698 | static int lastrun = 0; | |
1699 | ||
1700 | if ((ticks > (lastrun + hz)) || (ticks < lastrun)) { | |
1701 | wakeup(&vm_daemon_needed); | |
1702 | lastrun = ticks; | |
1703 | } | |
1704 | } | |
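/*
 * Illustrative sketch (not part of vm_pageout.c): the once-per-second rate
 * limit used by vm_req_vmdaemon() pulled out as a helper.  The second clause
 * of the test handles the tick counter wrapping around.  demo_should_fire()
 * and DEMO_HZ are hypothetical names/values.
 */
#include <stdio.h>

#define DEMO_HZ 100			/* assumed ticks per second */

static int demo_lastrun;		/* tick count of the last wakeup */

/* Returns nonzero when a new wakeup is allowed at tick count 'now'. */
static int
demo_should_fire(int now)
{
	if (now > demo_lastrun + DEMO_HZ || now < demo_lastrun) {
		demo_lastrun = now;
		return 1;
	}
	return 0;
}

int
main(void)
{
	int a = demo_should_fire(150);	/* 1: first call, more than DEMO_HZ */
	int b = demo_should_fire(200);	/* 0: only 50 ticks after the last */
	int c = demo_should_fire(260);	/* 1: more than DEMO_HZ ticks later */

	printf("%d %d %d\n", a, b, c);
	return 0;
}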
1705 | ||
8fa76237 MD |
1706 | static int vm_daemon_callback(struct proc *p, void *data __unused); |
1707 | ||
99ad9bc4 MD |
1708 | /* |
1709 | * No requirements. | |
1710 | */ | |
984263bc | 1711 | static void |
57e43348 | 1712 | vm_daemon(void) |
984263bc | 1713 | { |
99ad9bc4 MD |
1714 | /* |
1715 | * Permanently hold vm_token. | |
1716 | */ | |
1717 | lwkt_gettoken(&vm_token); | |
1718 | ||
984263bc | 1719 | while (TRUE) { |
377d4740 | 1720 | tsleep(&vm_daemon_needed, 0, "psleep", 0); |
984263bc MD |
1721 | if (vm_pageout_req_swapout) { |
1722 | swapout_procs(vm_pageout_req_swapout); | |
1723 | vm_pageout_req_swapout = 0; | |
1724 | } | |
1725 | /* | |
1726 | * scan the processes for those exceeding their rlimits or |
1727 | * those that are swapped out -- deactivate their pages |
1728 | */ | |
8fa76237 MD |
1729 | allproc_scan(vm_daemon_callback, NULL); |
1730 | } | |
1731 | } | |
984263bc | 1732 | |
99ad9bc4 MD |
1733 | /* |
1734 | * Caller must hold vm_token and proc_token. | |
1735 | */ | |
8fa76237 MD |
1736 | static int |
1737 | vm_daemon_callback(struct proc *p, void *data __unused) | |
1738 | { | |
1739 | vm_pindex_t limit, size; | |
984263bc | 1740 | |
8fa76237 MD |
1741 | /* |
1742 | * if this is a system process or if we have already | |
1743 | * looked at this process, skip it. | |
1744 | */ | |
1745 | if (p->p_flag & (P_SYSTEM | P_WEXIT)) | |
1746 | return (0); | |
984263bc | 1747 | |
8fa76237 MD |
1748 | /* |
1749 | * if the process is in a non-running type state, | |
1750 | * don't touch it. | |
1751 | */ | |
164b8401 | 1752 | if (p->p_stat != SACTIVE && p->p_stat != SSTOP) |
8fa76237 | 1753 | return (0); |
984263bc | 1754 | |
8fa76237 MD |
1755 | /* |
1756 | * get a limit | |
1757 | */ | |
1758 | limit = OFF_TO_IDX(qmin(p->p_rlimit[RLIMIT_RSS].rlim_cur, | |
1759 | p->p_rlimit[RLIMIT_RSS].rlim_max)); | |
1760 | ||
1761 | /* | |
1762 | * let processes that are swapped out really be | |
1763 | * swapped out. Set the limit to nothing to get as | |
1764 | * many pages out to swap as possible. | |
1765 | */ | |
1766 | if (p->p_flag & P_SWAPPEDOUT) | |
1767 | limit = 0; | |
1768 | ||
1769 | size = vmspace_resident_count(p->p_vmspace); | |
1770 | if (limit >= 0 && size >= limit) { | |
1771 | vm_pageout_map_deactivate_pages( | |
1772 | &p->p_vmspace->vm_map, limit); | |
984263bc | 1773 | } |
8fa76237 | 1774 | return (0); |
984263bc | 1775 | } |
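/*
 * Illustrative sketch (not part of vm_pageout.c): the callback's RSS test in
 * isolation.  OFF_TO_IDX() converts a byte offset to a page index, modeled
 * here as a right shift by an assumed 4 KiB page shift; all demo_* names and
 * values are hypothetical.
 */
#include <stdio.h>

#define DEMO_PAGE_SHIFT 12		/* assumed 4 KiB pages */
#define DEMO_OFF_TO_IDX(off)	((unsigned long)(off) >> DEMO_PAGE_SHIFT)

static unsigned long
demo_rss_limit_pages(unsigned long rlim_cur, unsigned long rlim_max,
    int swapped_out)
{
	unsigned long lim = (rlim_cur < rlim_max) ? rlim_cur : rlim_max;

	/* A swapped-out process gets a limit of zero so that as many of
	 * its pages as possible can be pushed out to swap. */
	return swapped_out ? 0 : DEMO_OFF_TO_IDX(lim);
}

int
main(void)
{
	unsigned long limit = demo_rss_limit_pages(64UL << 20, 128UL << 20, 0);
	unsigned long resident = 20000;	/* pages currently resident */

	/* 64 MiB / 4 KiB = 16384 pages, so this example deactivates. */
	printf("limit %lu pages, resident %lu -> %s\n", limit, resident,
	    (resident >= limit) ? "deactivate excess" : "within limit");
	return 0;
}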
8fa76237 | 1776 | |
984263bc | 1777 | #endif |