kernel - Another huge HUGE VM performance improvement for many-cores
[dragonfly.git] / sys / vm / vm_pageout.c
1/*
2 * (MPSAFE)
3 *
4 * Copyright (c) 1991 Regents of the University of California.
5 * All rights reserved.
6 * Copyright (c) 1994 John S. Dyson
7 * All rights reserved.
8 * Copyright (c) 1994 David Greenman
9 * All rights reserved.
10 *
11 * This code is derived from software contributed to Berkeley by
12 * The Mach Operating System project at Carnegie-Mellon University.
13 *
14 * Redistribution and use in source and binary forms, with or without
15 * modification, are permitted provided that the following conditions
16 * are met:
17 * 1. Redistributions of source code must retain the above copyright
18 * notice, this list of conditions and the following disclaimer.
19 * 2. Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in the
21 * documentation and/or other materials provided with the distribution.
22 * 4. Neither the name of the University nor the names of its contributors
23 * may be used to endorse or promote products derived from this software
24 * without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * SUCH DAMAGE.
37 *
38 * from: @(#)vm_pageout.c 7.4 (Berkeley) 5/7/91
39 *
40 *
41 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
42 * All rights reserved.
43 *
44 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
45 *
46 * Permission to use, copy, modify and distribute this software and
47 * its documentation is hereby granted, provided that both the copyright
48 * notice and this permission notice appear in all copies of the
49 * software, derivative works or modified versions, and any portions
50 * thereof, and that both notices appear in supporting documentation.
51 *
52 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
53 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
54 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
55 *
56 * Carnegie Mellon requests users of this software to return to
57 *
58 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
59 * School of Computer Science
60 * Carnegie Mellon University
61 * Pittsburgh PA 15213-3890
62 *
63 * any improvements or extensions that they make and grant Carnegie the
64 * rights to redistribute these changes.
65 *
66 * $FreeBSD: src/sys/vm/vm_pageout.c,v 1.151.2.15 2002/12/29 18:21:04 dillon Exp $
67 */
68
69/*
70 * The proverbial page-out daemon.
71 */
72
73#include "opt_vm.h"
74#include <sys/param.h>
75#include <sys/systm.h>
76#include <sys/kernel.h>
77#include <sys/proc.h>
78#include <sys/kthread.h>
79#include <sys/resourcevar.h>
80#include <sys/signalvar.h>
81#include <sys/vnode.h>
82#include <sys/vmmeter.h>
83#include <sys/sysctl.h>
84
85#include <vm/vm.h>
86#include <vm/vm_param.h>
87#include <sys/lock.h>
88#include <vm/vm_object.h>
89#include <vm/vm_page.h>
90#include <vm/vm_map.h>
91#include <vm/vm_pageout.h>
92#include <vm/vm_pager.h>
93#include <vm/swap_pager.h>
94#include <vm/vm_extern.h>
95
96#include <sys/thread2.h>
97#include <sys/spinlock2.h>
98#include <vm/vm_page2.h>
99
100/*
101 * System initialization
102 */
103
104/* the kernel process "vm_pageout"*/
105static int vm_pageout_clean (vm_page_t);
106static int vm_pageout_free_page_calc (vm_size_t count);
107struct thread *pagethread;
108
109#if !defined(NO_SWAPPING)
110/* the kernel process "vm_daemon"*/
111static void vm_daemon (void);
112static struct thread *vmthread;
113
114static struct kproc_desc vm_kp = {
115 "vmdaemon",
116 vm_daemon,
117 &vmthread
118};
119SYSINIT(vmdaemon, SI_SUB_KTHREAD_VM, SI_ORDER_FIRST, kproc_start, &vm_kp)
120#endif
121
122
123int vm_pages_needed=0; /* Event on which pageout daemon sleeps */
124int vm_pageout_deficit=0; /* Estimated number of pages deficit */
125int vm_pageout_pages_needed=0; /* flag saying that the pageout daemon needs pages */
126
127#if !defined(NO_SWAPPING)
128static int vm_pageout_req_swapout; /* XXX */
129static int vm_daemon_needed;
130#endif
131static int vm_max_launder = 32;
132static int vm_pageout_stats_max=0, vm_pageout_stats_interval = 0;
133static int vm_pageout_full_stats_interval = 0;
134static int vm_pageout_stats_free_max=0, vm_pageout_algorithm=0;
135static int defer_swap_pageouts=0;
136static int disable_swap_pageouts=0;
137
138#if defined(NO_SWAPPING)
139static int vm_swap_enabled=0;
140static int vm_swap_idle_enabled=0;
141#else
142static int vm_swap_enabled=1;
143static int vm_swap_idle_enabled=0;
144#endif
145
146SYSCTL_INT(_vm, VM_PAGEOUT_ALGORITHM, pageout_algorithm,
147 CTLFLAG_RW, &vm_pageout_algorithm, 0, "LRU page mgmt");
148
149SYSCTL_INT(_vm, OID_AUTO, max_launder,
150 CTLFLAG_RW, &vm_max_launder, 0, "Limit dirty flushes in pageout");
151
152SYSCTL_INT(_vm, OID_AUTO, pageout_stats_max,
153 CTLFLAG_RW, &vm_pageout_stats_max, 0, "Max pageout stats scan length");
154
155SYSCTL_INT(_vm, OID_AUTO, pageout_full_stats_interval,
156 CTLFLAG_RW, &vm_pageout_full_stats_interval, 0, "Interval for full stats scan");
157
158SYSCTL_INT(_vm, OID_AUTO, pageout_stats_interval,
159 CTLFLAG_RW, &vm_pageout_stats_interval, 0, "Interval for partial stats scan");
160
161SYSCTL_INT(_vm, OID_AUTO, pageout_stats_free_max,
162 CTLFLAG_RW, &vm_pageout_stats_free_max, 0, "Not implemented");
163
164#if defined(NO_SWAPPING)
165SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
166 CTLFLAG_RD, &vm_swap_enabled, 0, "");
167SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
168 CTLFLAG_RD, &vm_swap_idle_enabled, 0, "");
169#else
170SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
171 CTLFLAG_RW, &vm_swap_enabled, 0, "Enable entire process swapout");
172SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
173 CTLFLAG_RW, &vm_swap_idle_enabled, 0, "Allow swapout on idle criteria");
174#endif
175
176SYSCTL_INT(_vm, OID_AUTO, defer_swapspace_pageouts,
177 CTLFLAG_RW, &defer_swap_pageouts, 0, "Give preference to dirty pages in mem");
178
179SYSCTL_INT(_vm, OID_AUTO, disable_swapspace_pageouts,
180 CTLFLAG_RW, &disable_swap_pageouts, 0, "Disallow swapout of dirty pages");
181
182static int pageout_lock_miss;
183SYSCTL_INT(_vm, OID_AUTO, pageout_lock_miss,
184 CTLFLAG_RD, &pageout_lock_miss, 0, "vget() lock misses during pageout");
185
186int vm_load;
187SYSCTL_INT(_vm, OID_AUTO, vm_load,
188 CTLFLAG_RD, &vm_load, 0, "load on the VM system");
189int vm_load_enable = 1;
190SYSCTL_INT(_vm, OID_AUTO, vm_load_enable,
191 CTLFLAG_RW, &vm_load_enable, 0, "enable vm_load rate limiting");
192#ifdef INVARIANTS
193int vm_load_debug;
194SYSCTL_INT(_vm, OID_AUTO, vm_load_debug,
195 CTLFLAG_RW, &vm_load_debug, 0, "debug vm_load");
196#endif
197
198#define VM_PAGEOUT_PAGE_COUNT 16
199int vm_pageout_page_count = VM_PAGEOUT_PAGE_COUNT;
200
201int vm_page_max_wired; /* XXX max # of wired pages system-wide */
202
203#if !defined(NO_SWAPPING)
204typedef void freeer_fcn_t (vm_map_t, vm_object_t, vm_pindex_t, int);
205static void vm_pageout_map_deactivate_pages (vm_map_t, vm_pindex_t);
206static freeer_fcn_t vm_pageout_object_deactivate_pages;
207static void vm_req_vmdaemon (void);
208#endif
209static void vm_pageout_page_stats(int q);
210
211/*
212 * Update vm_load to slow down faulting processes.
213 *
214 * SMP races ok.
215 * No requirements.
216 */
217void
218vm_fault_ratecheck(void)
219{
220 if (vm_pages_needed) {
221 if (vm_load < 1000)
222 ++vm_load;
223 } else {
224 if (vm_load > 0)
225 --vm_load;
226 }
227}
228
229/*
230 * vm_pageout_clean:
231 *
232 * Clean the page and remove it from the laundry. The page must not be
233 * busy on-call.
234 *
235 * We set the busy bit to cause potential page faults on this page to
236 * block. Note the careful timing, however, the busy bit isn't set till
237 * late and we cannot do anything that will mess with the page.
238 */
239static int
240vm_pageout_clean(vm_page_t m)
241{
242 vm_object_t object;
243 vm_page_t mc[2*vm_pageout_page_count];
244 int pageout_count;
245 int error;
246 int ib, is, page_base;
247 vm_pindex_t pindex = m->pindex;
248
249 object = m->object;
250
251 /*
252 * It doesn't cost us anything to pageout OBJT_DEFAULT or OBJT_SWAP
253 * with the new swapper, but we could have serious problems paging
254 * out other object types if there is insufficient memory.
255 *
256 * Unfortunately, checking free memory here is far too late, so the
257 * check has been moved up a procedural level.
258 */
259
260 /*
261 * Don't mess with the page if it's busy, held, or special
262 *
263 * XXX do we really need to check hold_count here? hold_count
264 * isn't supposed to mess with vm_page ops except prevent the
265 * page from being reused.
266 */
267 if (m->hold_count != 0 || (m->flags & PG_UNMANAGED)) {
268 vm_page_wakeup(m);
269 return 0;
270 }
271
272 mc[vm_pageout_page_count] = m;
273 pageout_count = 1;
274 page_base = vm_pageout_page_count;
275 ib = 1;
276 is = 1;
277
278 /*
279 * Scan object for clusterable pages.
280 *
281 * We can cluster ONLY if: ->> the page is NOT
282 * clean, wired, busy, held, or mapped into a
283 * buffer, and one of the following:
284 * 1) The page is inactive, or a seldom used
285 * active page.
286 * -or-
287 * 2) we force the issue.
288 *
289 * During heavy mmap/modification loads the pageout
290 * daemon can really fragment the underlying file
291 * due to flushing pages out of order and not trying to
292 * align the clusters (which leave sporadic out-of-order
293 * holes). To solve this problem we do the reverse scan
294 * first and attempt to align our cluster, then do a
295 * forward scan if room remains.
296 */
297
298 vm_object_hold(object);
299more:
300 while (ib && pageout_count < vm_pageout_page_count) {
301 vm_page_t p;
302
303 if (ib > pindex) {
304 ib = 0;
305 break;
306 }
307
308 p = vm_page_lookup_busy_try(object, pindex - ib, TRUE, &error);
309 if (error || p == NULL) {
310 ib = 0;
311 break;
312 }
313 if ((p->queue - p->pc) == PQ_CACHE ||
314 (p->flags & PG_UNMANAGED)) {
315 vm_page_wakeup(p);
316 ib = 0;
317 break;
318 }
319 vm_page_test_dirty(p);
320 if ((p->dirty & p->valid) == 0 ||
321 p->queue - p->pc != PQ_INACTIVE ||
322 p->wire_count != 0 || /* may be held by buf cache */
323 p->hold_count != 0) { /* may be undergoing I/O */
324 vm_page_wakeup(p);
325 ib = 0;
326 break;
327 }
328 mc[--page_base] = p;
329 ++pageout_count;
330 ++ib;
331 /*
332 * alignment boundary, stop here and switch directions. Do
333 * not clear ib.
334 */
335 if ((pindex - (ib - 1)) % vm_pageout_page_count == 0)
336 break;
337 }
338
339 while (pageout_count < vm_pageout_page_count &&
340 pindex + is < object->size) {
341 vm_page_t p;
342
343 p = vm_page_lookup_busy_try(object, pindex + is, TRUE, &error);
344 if (error || p == NULL)
345 break;
346 if (((p->queue - p->pc) == PQ_CACHE) ||
347 (p->flags & (PG_BUSY|PG_UNMANAGED)) || p->busy) {
348 vm_page_wakeup(p);
349 break;
350 }
351 vm_page_test_dirty(p);
352 if ((p->dirty & p->valid) == 0 ||
353 p->queue - p->pc != PQ_INACTIVE ||
354 p->wire_count != 0 || /* may be held by buf cache */
355 p->hold_count != 0) { /* may be undergoing I/O */
356 vm_page_wakeup(p);
357 break;
358 }
359 mc[page_base + pageout_count] = p;
360 ++pageout_count;
361 ++is;
362 }
363
364 /*
365 * If we exhausted our forward scan, continue with the reverse scan
366 * when possible, even past a page boundary. This catches boundary
367 * conditions.
368 */
369 if (ib && pageout_count < vm_pageout_page_count)
370 goto more;
371
372 vm_object_drop(object);
373
374 /*
375 * we allow reads during pageouts...
376 */
377 return vm_pageout_flush(&mc[page_base], pageout_count, 0);
378}
379
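/*
 * Editor-added illustrative sketch (not part of the original file): a
 * userspace-style model of how the reverse/forward scans in
 * vm_pageout_clean() above size the mc[] cluster around the target page.
 * The cluster size of 16 stands in for vm_pageout_page_count; the
 * per-page eligibility checks (busy/dirty/wired/held) and the second
 * reverse pass past the alignment boundary are omitted for brevity.
 */
#if 0
static int
vm_pageout_cluster_window_sketch(long pindex, long object_size,
				 long *first_pindex)
{
	const long ncluster = 16;		/* assumed cluster size */
	long lo = pindex - (pindex % ncluster);	/* alignment boundary */
	long p = pindex - 1;			/* reverse scan cursor */
	long q = pindex + 1;			/* forward scan cursor */
	int count = 1;				/* the target page itself */

	/* reverse scan: stop once the alignment boundary page is taken */
	while (p >= lo && count < ncluster) {
		++count;			/* would add page p to mc[] */
		--p;
	}
	/* forward scan: fill any remaining room, bounded by the object */
	while (count < ncluster && q < object_size) {
		++count;			/* would add page q to mc[] */
		++q;
	}
	*first_pindex = p + 1;			/* lowest index in the window */
	return (count);				/* pages that would be flushed */
}
#endif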
380/*
381 * vm_pageout_flush() - launder the given pages
382 *
383 * The given pages are laundered. Note that we setup for the start of
384 * I/O ( i.e. busy the page ), mark it read-only, and bump the object
385 * reference count all in here rather than in the parent. If we want
386 * the parent to do more sophisticated things we may have to change
387 * the ordering.
388 *
389 * The pages in the array must be busied by the caller and will be
390 * unbusied by this function.
391 */
392int
393vm_pageout_flush(vm_page_t *mc, int count, int flags)
394{
395 vm_object_t object;
396 int pageout_status[count];
397 int numpagedout = 0;
398 int i;
399
400 /*
401 * Initiate I/O. Bump the vm_page_t->busy counter.
402 */
403 for (i = 0; i < count; i++) {
404 KASSERT(mc[i]->valid == VM_PAGE_BITS_ALL,
405 ("vm_pageout_flush page %p index %d/%d: partially "
406 "invalid page", mc[i], i, count));
407 vm_page_io_start(mc[i]);
408 }
409
410 /*
411 * We must make the pages read-only. This will also force the
412 * modified bit in the related pmaps to be cleared. The pager
413 * cannot clear the bit for us since the I/O completion code
414 * typically runs from an interrupt. The act of making the page
415 * read-only handles the case for us.
416 *
417 * Then we can unbusy the pages, we still hold a reference by virtue
418 * of our soft-busy.
419 */
420 for (i = 0; i < count; i++) {
421 vm_page_protect(mc[i], VM_PROT_READ);
422 vm_page_wakeup(mc[i]);
423 }
424
425 object = mc[0]->object;
426 vm_object_pip_add(object, count);
427
428 vm_pager_put_pages(object, mc, count,
429 (flags | ((object == &kernel_object) ? VM_PAGER_PUT_SYNC : 0)),
430 pageout_status);
431
432 for (i = 0; i < count; i++) {
433 vm_page_t mt = mc[i];
434
435 switch (pageout_status[i]) {
436 case VM_PAGER_OK:
437 numpagedout++;
438 break;
439 case VM_PAGER_PEND:
440 numpagedout++;
441 break;
442 case VM_PAGER_BAD:
443 /*
444 * Page outside of range of object. Right now we
445 * essentially lose the changes by pretending it
446 * worked.
447 */
448 vm_page_busy_wait(mt, FALSE, "pgbad");
449 pmap_clear_modify(mt);
450 vm_page_undirty(mt);
451 vm_page_wakeup(mt);
452 break;
453 case VM_PAGER_ERROR:
454 case VM_PAGER_FAIL:
455 /*
456 * A page typically cannot be paged out when we
457 * have run out of swap. We leave the page
458 * marked inactive and will try to page it out
459 * again later.
460 *
461 * Starvation of the active page list is used to
462 * determine when the system is massively memory
463 * starved.
464 */
465 break;
466 case VM_PAGER_AGAIN:
467 break;
468 }
469
470 /*
471 * If the operation is still going, leave the page busy to
472 * block all other accesses. Also, leave the paging in
473 * progress indicator set so that we don't attempt an object
474 * collapse.
475 *
476 * For any pages which have completed synchronously,
477 * deactivate the page if we are under a severe deficit.
478 * Do not try to enter them into the cache, though, they
479 * might still be read-heavy.
984263bc
MD
480 */
481 if (pageout_status[i] != VM_PAGER_PEND) {
482 vm_page_busy_wait(mt, FALSE, "pgouw");
483 if (vm_page_count_severe())
484 vm_page_deactivate(mt);
485#if 0
486 if (!vm_page_count_severe() || !vm_page_try_to_cache(mt))
487 vm_page_protect(mt, VM_PROT_READ);
488#endif
489 vm_page_io_finish(mt);
490 vm_page_wakeup(mt);
491 vm_object_pip_wakeup(object);
492 }
493 }
494 return numpagedout;
495}
496
497#if !defined(NO_SWAPPING)
498/*
499 * deactivate enough pages to satisfy the inactive target
500 * requirements or if vm_page_proc_limit is set, then
501 * deactivate all of the pages in the object and its
502 * backing_objects.
984263bc 503 *
504 * The map must be locked.
505 * The caller must hold the vm_object.
506 */
507static int vm_pageout_object_deactivate_pages_callback(vm_page_t, void *);
508
509static void
510vm_pageout_object_deactivate_pages(vm_map_t map, vm_object_t object,
511 vm_pindex_t desired, int map_remove_only)
512{
513 struct rb_vm_page_scan_info info;
514 vm_object_t lobject;
515 vm_object_t tobject;
516 int remove_mode;
517
518 lobject = object;
519
520 while (lobject) {
521 if (pmap_resident_count(vm_map_pmap(map)) <= desired)
522 break;
523 if (lobject->type == OBJT_DEVICE || lobject->type == OBJT_PHYS)
524 break;
525 if (lobject->paging_in_progress)
526 break;
527
528 remove_mode = map_remove_only;
529 if (lobject->shadow_count > 1)
530 remove_mode = 1;
531
532 /*
533 * scan the objects entire memory queue. We hold the
534 * object's token so the scan should not race anything.
535 */
536 info.limit = remove_mode;
537 info.map = map;
538 info.desired = desired;
539 vm_page_rb_tree_RB_SCAN(&lobject->rb_memq, NULL,
540 vm_pageout_object_deactivate_pages_callback,
541 &info
542 );
543 while ((tobject = lobject->backing_object) != NULL) {
544 KKASSERT(tobject != object);
545 vm_object_hold(tobject);
546 if (tobject == lobject->backing_object)
547 break;
548 vm_object_drop(tobject);
549 }
550 if (lobject != object)
551 vm_object_drop(lobject);
552 lobject = tobject;
553 }
554 if (lobject != object)
555 vm_object_drop(lobject);
556}
557
558/*
559 * The caller must hold the vm_object.
560 */
561static int
562vm_pageout_object_deactivate_pages_callback(vm_page_t p, void *data)
563{
564 struct rb_vm_page_scan_info *info = data;
565 int actcount;
566
567 if (pmap_resident_count(vm_map_pmap(info->map)) <= info->desired) {
568 return(-1);
569 }
570 mycpu->gd_cnt.v_pdpages++;
571
572 if (vm_page_busy_try(p, TRUE))
573 return(0);
574 if (p->wire_count || p->hold_count || (p->flags & PG_UNMANAGED)) {
575 vm_page_wakeup(p);
576 return(0);
577 }
578 if (!pmap_page_exists_quick(vm_map_pmap(info->map), p)) {
579 vm_page_wakeup(p);
580 return(0);
581 }
582
583 actcount = pmap_ts_referenced(p);
584 if (actcount) {
585 vm_page_flag_set(p, PG_REFERENCED);
586 } else if (p->flags & PG_REFERENCED) {
587 actcount = 1;
588 }
589
590 vm_page_and_queue_spin_lock(p);
591 if (p->queue - p->pc != PQ_ACTIVE && (p->flags & PG_REFERENCED)) {
592 vm_page_and_queue_spin_unlock(p);
593 vm_page_activate(p);
594 p->act_count += actcount;
595 vm_page_flag_clear(p, PG_REFERENCED);
596 } else if (p->queue - p->pc == PQ_ACTIVE) {
597 if ((p->flags & PG_REFERENCED) == 0) {
598 p->act_count -= min(p->act_count, ACT_DECLINE);
599 if (!info->limit &&
600 (vm_pageout_algorithm || (p->act_count == 0))) {
601 vm_page_and_queue_spin_unlock(p);
602 vm_page_protect(p, VM_PROT_NONE);
603 vm_page_deactivate(p);
604 } else {
605 TAILQ_REMOVE(&vm_page_queues[p->queue].pl,
606 p, pageq);
607 TAILQ_INSERT_TAIL(&vm_page_queues[p->queue].pl,
608 p, pageq);
609 vm_page_and_queue_spin_unlock(p);
610 }
611 } else {
612 vm_page_and_queue_spin_unlock(p);
613 vm_page_activate(p);
614 vm_page_flag_clear(p, PG_REFERENCED);
615
616 vm_page_and_queue_spin_lock(p);
617 if (p->queue - p->pc == PQ_ACTIVE) {
618 if (p->act_count < (ACT_MAX - ACT_ADVANCE))
619 p->act_count += ACT_ADVANCE;
620 TAILQ_REMOVE(&vm_page_queues[p->queue].pl,
621 p, pageq);
622 TAILQ_INSERT_TAIL(&vm_page_queues[p->queue].pl,
623 p, pageq);
624 }
625 vm_page_and_queue_spin_unlock(p);
626 }
627 } else if (p->queue - p->pc == PQ_INACTIVE) {
628 vm_page_and_queue_spin_unlock(p);
629 vm_page_protect(p, VM_PROT_NONE);
630 } else {
631 vm_page_and_queue_spin_unlock(p);
632 }
633 vm_page_wakeup(p);
634 return(0);
635}
636
637/*
638 * Deactivate some number of pages in a map, try to do it fairly, but
639 * that is really hard to do.
640 */
641static void
642vm_pageout_map_deactivate_pages(vm_map_t map, vm_pindex_t desired)
643{
644 vm_map_entry_t tmpe;
645 vm_object_t obj, bigobj;
646 int nothingwired;
647
648 if (lockmgr(&map->lock, LK_EXCLUSIVE | LK_NOWAIT)) {
649 return;
650 }
651
652 bigobj = NULL;
653 nothingwired = TRUE;
654
655 /*
656 * first, search out the biggest object, and try to free pages from
657 * that.
658 */
659 tmpe = map->header.next;
660 while (tmpe != &map->header) {
661 switch(tmpe->maptype) {
662 case VM_MAPTYPE_NORMAL:
663 case VM_MAPTYPE_VPAGETABLE:
664 obj = tmpe->object.vm_object;
665 if ((obj != NULL) && (obj->shadow_count <= 1) &&
666 ((bigobj == NULL) ||
667 (bigobj->resident_page_count < obj->resident_page_count))) {
668 bigobj = obj;
669 }
670 break;
671 default:
672 break;
673 }
674 if (tmpe->wired_count > 0)
675 nothingwired = FALSE;
676 tmpe = tmpe->next;
677 }
678
679 if (bigobj)
680 vm_pageout_object_deactivate_pages(map, bigobj, desired, 0);
681
682 /*
683 * Next, hunt around for other pages to deactivate. We actually
684 * do this search sort of wrong -- .text first is not the best idea.
685 */
686 tmpe = map->header.next;
687 while (tmpe != &map->header) {
688 if (pmap_resident_count(vm_map_pmap(map)) <= desired)
689 break;
690 switch(tmpe->maptype) {
691 case VM_MAPTYPE_NORMAL:
692 case VM_MAPTYPE_VPAGETABLE:
693 obj = tmpe->object.vm_object;
694 if (obj)
695 vm_pageout_object_deactivate_pages(map, obj, desired, 0);
696 break;
697 default:
698 break;
699 }
700 tmpe = tmpe->next;
701 };
702
703 /*
704 * Remove all mappings if a process is swapped out, this will free page
705 * table pages.
706 */
707 if (desired == 0 && nothingwired)
708 pmap_remove(vm_map_pmap(map),
709 VM_MIN_USER_ADDRESS, VM_MAX_USER_ADDRESS);
710 vm_map_unlock(map);
711}
712#endif
713
714/*
715 * Called when the pageout scan wants to free a page. We no longer
716 * try to cycle the vm_object here with a reference & dealloc, which can
717 * cause a non-trivial object collapse in a critical path.
718 *
719 * It is unclear why we cycled the ref_count in the past, perhaps to try
720 * to optimize shadow chain collapses but I don't quite see why it would
721 * be necessary. An OBJ_DEAD object should terminate any and all vm_pages
722 * synchronously and not have to be kicked-start.
723 */
724static void
725vm_pageout_page_free(vm_page_t m)
726{
727 vm_page_protect(m, VM_PROT_NONE);
728 vm_page_free(m);
729}
730
731/*
20479584 732 * vm_pageout_scan does the dirty work for the pageout daemon.
733 */
734struct vm_pageout_scan_info {
735 struct proc *bigproc;
736 vm_offset_t bigsize;
737};
738
739static int vm_pageout_scan_callback(struct proc *p, void *data);
740
741static int
742vm_pageout_scan_inactive(int pass, int q, int inactive_shortage,
743 int *vnodes_skippedp)
744{
745 vm_page_t m;
746 struct vm_page marker;
747 struct vnode *vpfailed; /* warning, allowed to be stale */
748 int maxscan;
749 int delta = 0;
750 vm_object_t object;
751 int actcount;
752 int maxlaunder;
753
754 /*
755 * Start scanning the inactive queue for pages we can move to the
756 * cache or free. The scan will stop when the target is reached or
757 * we have scanned the entire inactive queue. Note that m->act_count
758 * is not used to form decisions for the inactive queue, only for the
759 * active queue.
760 *
761 * maxlaunder limits the number of dirty pages we flush per scan.
762 * For most systems a smaller value (16 or 32) is more robust under
763 * extreme memory and disk pressure because any unnecessary writes
764 * to disk can result in extreme performance degradation. However,
765 * systems with excessive dirty pages (especially when MAP_NOSYNC is
766 * used) will die horribly with limited laundering. If the pageout
767 * daemon cannot clean enough pages in the first pass, we let it go
768 * all out in succeeding passes.
769 */
770 if ((maxlaunder = vm_max_launder) <= 1)
771 maxlaunder = 1;
772 if (pass)
773 maxlaunder = 10000;
774
775 /*
776 * Initialize our marker
777 */
778 bzero(&marker, sizeof(marker));
779 marker.flags = PG_BUSY | PG_FICTITIOUS | PG_MARKER;
780 marker.queue = PQ_INACTIVE + q;
781 marker.pc = q;
782 marker.wire_count = 1;
783
784 /*
785 * Inactive queue scan.
786 *
787 * NOTE: The vm_page must be spinlocked before the queue to avoid
788 * deadlocks, so it is easiest to simply iterate the loop
789 * with the queue unlocked at the top.
06ecca5a 790 */
791 vpfailed = NULL;
792
793 vm_page_queues_spin_lock(PQ_INACTIVE + q);
794 TAILQ_INSERT_HEAD(&vm_page_queues[PQ_INACTIVE + q].pl, &marker, pageq);
795 maxscan = vmstats.v_inactive_count;
796 vm_page_queues_spin_unlock(PQ_INACTIVE + q);
797
798 while ((m = TAILQ_NEXT(&marker, pageq)) != NULL &&
799 maxscan-- > 0 && inactive_shortage - delta > 0)
800 {
801 vm_page_and_queue_spin_lock(m);
802 if (m != TAILQ_NEXT(&marker, pageq)) {
803 vm_page_and_queue_spin_unlock(m);
804 ++maxscan;
805 continue;
806 }
807 KKASSERT(m->queue - m->pc == PQ_INACTIVE);
808 TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE + q].pl,
809 &marker, pageq);
810 TAILQ_INSERT_AFTER(&vm_page_queues[PQ_INACTIVE + q].pl, m,
811 &marker, pageq);
812 mycpu->gd_cnt.v_pdpages++;
813
814 /*
815 * Skip marker pages
816 */
817 if (m->flags & PG_MARKER) {
818 vm_page_and_queue_spin_unlock(m);
819 continue;
820 }
821
822 /*
823 * Try to busy the page. Don't mess with pages which are
824 * already busy or reorder them in the queue.
825 */
826 if (vm_page_busy_try(m, TRUE)) {
827 vm_page_and_queue_spin_unlock(m);
828 continue;
829 }
830 vm_page_and_queue_spin_unlock(m);
831 KKASSERT(m->queue - m->pc == PQ_INACTIVE);
832
833 lwkt_yield();
834
835 /*
836 * The page has been successfully busied and is now no
837 * longer spinlocked. The queue is no longer spinlocked
838 * either.
839 */
840
841 /*
842 * A held page may be undergoing I/O, so skip it.
843 */
844 if (m->hold_count) {
845 vm_page_and_queue_spin_lock(m);
846 if (m->queue - m->pc == PQ_INACTIVE) {
847 TAILQ_REMOVE(
848 &vm_page_queues[PQ_INACTIVE + q].pl,
849 m, pageq);
850 TAILQ_INSERT_TAIL(
851 &vm_page_queues[PQ_INACTIVE + q].pl,
852 m, pageq);
853 }
854 vm_page_and_queue_spin_unlock(m);
855 ++vm_swapcache_inactive_heuristic;
856 vm_page_wakeup(m);
857 continue;
858 }
859
860 if (m->object->ref_count == 0) {
861 /*
862 * If the object is not being used, we ignore previous
863 * references.
864 */
865 vm_page_flag_clear(m, PG_REFERENCED);
866 pmap_clear_reference(m);
b12defdc 867 /* fall through to end */
868 } else if (((m->flags & PG_REFERENCED) == 0) &&
869 (actcount = pmap_ts_referenced(m))) {
870 /*
871 * Otherwise, if the page has been referenced while
872 * in the inactive queue, we bump the "activation
873 * count" upwards, making it less likely that the
874 * page will be added back to the inactive queue
875 * prematurely again. Here we check the page tables
876 * (or emulated bits, if any), given the upper level
877 * VM system not knowing anything about existing
878 * references.
879 */
880 vm_page_activate(m);
881 m->act_count += (actcount + ACT_ADVANCE);
882 vm_page_wakeup(m);
883 continue;
884 }
885
886 /*
887 * (m) is still busied.
888 *
889 * If the upper level VM system knows about any page
890 * references, we activate the page. We also set the
891 * "activation count" higher than normal so that we will less
892 * likely place pages back onto the inactive queue again.
893 */
894 if ((m->flags & PG_REFERENCED) != 0) {
895 vm_page_flag_clear(m, PG_REFERENCED);
896 actcount = pmap_ts_referenced(m);
897 vm_page_activate(m);
898 m->act_count += (actcount + ACT_ADVANCE + 1);
899 vm_page_wakeup(m);
900 continue;
901 }
902
903 /*
904 * If the upper level VM system doesn't know anything about
905 * the page being dirty, we have to check for it again. As
906 * far as the VM code knows, any partially dirty pages are
907 * fully dirty.
908 *
909 * Pages marked PG_WRITEABLE may be mapped into the user
910 * address space of a process running on another cpu. A
911 * user process (without holding the MP lock) running on
912 * another cpu may be able to touch the page while we are
913 * trying to remove it. vm_page_cache() will handle this
914 * case for us.
915 */
916 if (m->dirty == 0) {
917 vm_page_test_dirty(m);
918 } else {
919 vm_page_dirty(m);
920 }
921
922 if (m->valid == 0) {
923 /*
924 * Invalid pages can be easily freed
925 */
926 vm_pageout_page_free(m);
927 mycpu->gd_cnt.v_dfree++;
928 ++delta;
929 } else if (m->dirty == 0) {
930 /*
931 * Clean pages can be placed onto the cache queue.
932 * This effectively frees them.
933 */
934 vm_page_cache(m);
935 ++delta;
936 } else if ((m->flags & PG_WINATCFLS) == 0 && pass == 0) {
937 /*
938 * Dirty pages need to be paged out, but flushing
939 * a page is extremely expensive versus freeing
940 * a clean page. Rather than artificially limiting
941 * the number of pages we can flush, we instead give
942 * dirty pages extra priority on the inactive queue
943 * by forcing them to be cycled through the queue
944 * twice before being flushed, after which the
945 * (now clean) page will cycle through once more
946 * before being freed. This significantly extends
947 * the thrash point for a heavily loaded machine.
948 */
949 vm_page_flag_set(m, PG_WINATCFLS);
950 vm_page_and_queue_spin_lock(m);
951 if (m->queue - m->pc == PQ_INACTIVE) {
952 TAILQ_REMOVE(
953 &vm_page_queues[PQ_INACTIVE + q].pl,
954 m, pageq);
955 TAILQ_INSERT_TAIL(
956 &vm_page_queues[PQ_INACTIVE + q].pl,
957 m, pageq);
958 }
959 vm_page_and_queue_spin_unlock(m);
960 ++vm_swapcache_inactive_heuristic;
961 vm_page_wakeup(m);
962 } else if (maxlaunder > 0) {
963 /*
964 * We always want to try to flush some dirty pages if
965 * we encounter them, to keep the system stable.
966 * Normally this number is small, but under extreme
967 * pressure where there are insufficient clean pages
968 * on the inactive queue, we may have to go all out.
969 */
970 int swap_pageouts_ok;
971 struct vnode *vp = NULL;
972
973 object = m->object;
974
975 if ((object->type != OBJT_SWAP) && (object->type != OBJT_DEFAULT)) {
976 swap_pageouts_ok = 1;
977 } else {
978 swap_pageouts_ok = !(defer_swap_pageouts || disable_swap_pageouts);
979 swap_pageouts_ok |= (!disable_swap_pageouts && defer_swap_pageouts &&
980 vm_page_count_min(0));
981
982 }
983
984 /*
985 * We don't bother paging objects that are "dead".
986 * Those objects are in a "rundown" state.
987 */
988 if (!swap_pageouts_ok || (object->flags & OBJ_DEAD)) {
989 vm_page_and_queue_spin_lock(m);
990 if (m->queue - m->pc == PQ_INACTIVE) {
991 TAILQ_REMOVE(
992 &vm_page_queues[PQ_INACTIVE + q].pl,
993 m, pageq);
994 TAILQ_INSERT_TAIL(
995 &vm_page_queues[PQ_INACTIVE + q].pl,
996 m, pageq);
997 }
998 vm_page_and_queue_spin_unlock(m);
999 ++vm_swapcache_inactive_heuristic;
1000 vm_page_wakeup(m);
1001 continue;
1002 }
1003
1004 /*
1005 * (m) is still busied.
1006 *
1007 * The object is already known NOT to be dead. It
1008 * is possible for the vget() to block the whole
1009 * pageout daemon, but the new low-memory handling
1010 * code should prevent it.
1011 *
1012 * The previous code skipped locked vnodes and, worse,
1013 * reordered pages in the queue. This results in
1014 * completely non-deterministic operation because,
1015 * quite often, a vm_fault has initiated an I/O and
1016 * is holding a locked vnode at just the point where
1017 * the pageout daemon is woken up.
1018 *
1019 * We can't wait forever for the vnode lock, we might
1020 * deadlock due to a vn_read() getting stuck in
1021 * vm_wait while holding this vnode. We skip the
1022 * vnode if we can't get it in a reasonable amount
1023 * of time.
1024 *
1025 * vpfailed is used to (try to) avoid the case where
1026 * a large number of pages are associated with a
1027 * locked vnode, which could cause the pageout daemon
1028 * to stall for an excessive amount of time.
1029 */
1030 if (object->type == OBJT_VNODE) {
1031 int flags;
1032
1033 vp = object->handle;
1034 flags = LK_EXCLUSIVE | LK_NOOBJ;
1035 if (vp == vpfailed)
1036 flags |= LK_NOWAIT;
1037 else
1038 flags |= LK_TIMELOCK;
1039 vm_page_hold(m);
1040 vm_page_wakeup(m);
1041
1042 /*
1043 * We have unbusied (m) temporarily so we can
1044 * acquire the vp lock without deadlocking.
1045 * (m) is held to prevent destruction.
1046 */
1047 if (vget(vp, flags) != 0) {
1048 vpfailed = vp;
1049 ++pageout_lock_miss;
1050 if (object->flags & OBJ_MIGHTBEDIRTY)
1051 ++*vnodes_skippedp;
1052 vm_page_unhold(m);
1053 continue;
1054 }
1055
1056 /*
1057 * The page might have been moved to another
1058 * queue during potential blocking in vget()
1059 * above. The page might have been freed and
1060 * reused for another vnode. The object might
1061 * have been reused for another vnode.
1062 */
1063 if (m->queue - m->pc != PQ_INACTIVE ||
1064 m->object != object ||
1065 object->handle != vp) {
1066 if (object->flags & OBJ_MIGHTBEDIRTY)
1067 ++*vnodes_skippedp;
1068 vput(vp);
1069 vm_page_unhold(m);
1070 continue;
1071 }
1072
1073 /*
1074 * The page may have been busied during the
1075 * blocking in vput(); We don't move the
1076 * page back onto the end of the queue so that
1077 * statistics are more correct if we don't.
1078 */
1079 if (vm_page_busy_try(m, TRUE)) {
1080 vput(vp);
1081 vm_page_unhold(m);
1082 continue;
1083 }
1084 vm_page_unhold(m);
1085
1086 /*
1087 * (m) is busied again
1088 *
1089 * We own the busy bit and remove our hold
1090 * bit. If the page is still held it
1091 * might be undergoing I/O, so skip it.
1092 */
1093 if (m->hold_count) {
1094 vm_page_and_queue_spin_lock(m);
1095 if (m->queue - m->pc == PQ_INACTIVE) {
1096 TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE + q].pl, m, pageq);
1097 TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE + q].pl, m, pageq);
1098 }
1099 vm_page_and_queue_spin_unlock(m);
1100 ++vm_swapcache_inactive_heuristic;
1101 if (object->flags & OBJ_MIGHTBEDIRTY)
1102 ++*vnodes_skippedp;
1103 vm_page_wakeup(m);
1104 vput(vp);
1105 continue;
1106 }
1107 /* (m) is left busied as we fall through */
1108 }
1109
1110 /*
1111 * page is busy and not held here.
1112 *
1113 * If a page is dirty, then it is either being washed
1114 * (but not yet cleaned) or it is still in the
1115 * laundry. If it is still in the laundry, then we
1116 * start the cleaning operation.
1117 *
1118 * decrement inactive_shortage on success to account
1119 * for the (future) cleaned page. Otherwise we
1120 * could wind up laundering or cleaning too many
1121 * pages.
1122 */
1123 if (vm_pageout_clean(m) != 0) {
1124 ++delta;
1125 --maxlaunder;
1126 }
1127 /* clean ate busy, page no longer accessible */
1128 if (vp != NULL)
1129 vput(vp);
1130 } else {
1131 vm_page_wakeup(m);
1132 }
1133 }
1134 vm_page_queues_spin_lock(PQ_INACTIVE + q);
1135 TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE + q].pl, &marker, pageq);
1136 vm_page_queues_spin_unlock(PQ_INACTIVE + q);
1137
1138 return (delta);
1139}
1140
1141static int
1142vm_pageout_scan_active(int pass, int q,
1143 int inactive_shortage, int active_shortage,
1144 int *recycle_countp)
1145{
1146 struct vm_page marker;
1147 vm_page_t m;
1148 int actcount;
1149 int delta = 0;
1150 int pcount;
1151
1152 /*
1153 * We want to move pages from the active queue to the inactive
1154 * queue to get the inactive queue to the inactive target. If
1155 * we still have a page shortage from above we try to directly free
1156 * clean pages instead of moving them.
1157 *
1158 * If we do still have a shortage we keep track of the number of
1159 * pages we free or cache (recycle_count) as a measure of thrashing
1160 * between the active and inactive queues.
1161 *
1162 * If we were able to completely satisfy the free+cache targets
1163 * from the inactive pool we limit the number of pages we move
1164 * from the active pool to the inactive pool to 2x the pages we
1165 * had removed from the inactive pool (with a minimum of 1/5 the
1166 * inactive target). If we were not able to completely satisfy
1167 * the free+cache targets we go for the whole target aggressively.
1168 *
1169 * NOTE: Both variables can end up negative.
1170 * NOTE: We are still in a critical section.
1171 */
1172
1173 bzero(&marker, sizeof(marker));
1174 marker.flags = PG_BUSY | PG_FICTITIOUS | PG_MARKER;
1175 marker.queue = PQ_ACTIVE + q;
1176 marker.pc = q;
1177 marker.wire_count = 1;
1178
1179 vm_page_queues_spin_lock(PQ_ACTIVE + q);
1180 TAILQ_INSERT_HEAD(&vm_page_queues[PQ_ACTIVE + q].pl, &marker, pageq);
1181 vm_page_queues_spin_unlock(PQ_ACTIVE + q);
1182 pcount = vmstats.v_active_count;
1183
1184 while ((m = TAILQ_NEXT(&marker, pageq)) != NULL &&
1185 pcount-- > 0 && (inactive_shortage - delta > 0 ||
1186 active_shortage > 0))
1187 {
1188 vm_page_and_queue_spin_lock(m);
1189 if (m != TAILQ_NEXT(&marker, pageq)) {
1190 vm_page_and_queue_spin_unlock(m);
1191 ++pcount;
1192 continue;
1193 }
1194 KKASSERT(m->queue - m->pc == PQ_ACTIVE);
1195 TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE + q].pl,
1196 &marker, pageq);
1197 TAILQ_INSERT_AFTER(&vm_page_queues[PQ_ACTIVE + q].pl, m,
1198 &marker, pageq);
1199
1200 /*
1201 * Skip marker pages
1202 */
1203 if (m->flags & PG_MARKER) {
1204 vm_page_and_queue_spin_unlock(m);
1205 continue;
1206 }
1207
1208 /*
1209 * Try to busy the page. Don't mess with pages which are
1210 * already busy or reorder them in the queue.
1211 */
1212 if (vm_page_busy_try(m, TRUE)) {
1213 vm_page_and_queue_spin_unlock(m);
1214 continue;
1215 }
1216
1217 /*
1218 * Don't deactivate pages that are held, even if we can
1219 * busy them. (XXX why not?)
1220 */
1221 if (m->hold_count != 0) {
1222 TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE + q].pl,
1223 m, pageq);
1224 TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE + q].pl,
1225 m, pageq);
1226 vm_page_and_queue_spin_unlock(m);
1227 vm_page_wakeup(m);
1228 continue;
1229 }
1230 vm_page_and_queue_spin_unlock(m);
1231 lwkt_yield();
1232
1233 /*
1234 * The page has been successfully busied and the page and
1235 * queue are no longer locked.
1236 */
1237
1238 /*
1239 * The count for pagedaemon pages is done after checking the
1240 * page for eligibility...
1241 */
1242 mycpu->gd_cnt.v_pdpages++;
1243
1244 /*
1245 * Check to see "how much" the page has been used and clear
1246 * the tracking access bits. If the object has no references
1247 * don't bother paying the expense.
1248 */
1249 actcount = 0;
1250 if (m->object->ref_count != 0) {
1251 if (m->flags & PG_REFERENCED)
1252 ++actcount;
1253 actcount += pmap_ts_referenced(m);
1254 if (actcount) {
1255 m->act_count += ACT_ADVANCE + actcount;
1256 if (m->act_count > ACT_MAX)
1257 m->act_count = ACT_MAX;
1258 }
1259 }
1260 vm_page_flag_clear(m, PG_REFERENCED);
1261
1262 /*
1263 * actcount is only valid if the object ref_count is non-zero.
1264 */
1265 if (actcount && m->object->ref_count != 0) {
1266 vm_page_and_queue_spin_lock(m);
1267 if (m->queue - m->pc == PQ_ACTIVE) {
1268 TAILQ_REMOVE(
1269 &vm_page_queues[PQ_ACTIVE + q].pl,
1270 m, pageq);
1271 TAILQ_INSERT_TAIL(
1272 &vm_page_queues[PQ_ACTIVE + q].pl,
1273 m, pageq);
1274 }
1275 vm_page_and_queue_spin_unlock(m);
1276 vm_page_wakeup(m);
1277 } else {
1278 m->act_count -= min(m->act_count, ACT_DECLINE);
1279 if (vm_pageout_algorithm ||
1280 m->object->ref_count == 0 ||
1281 m->act_count < pass + 1
1282 ) {
1283 /*
1284 * Deactivate the page. If we had a
1285 * shortage from our inactive scan try to
1286 * free (cache) the page instead.
1287 *
1288 * Don't just blindly cache the page if
1289 * we do not have a shortage from the
1290 * inactive scan, that could lead to
1291 * gigabytes being moved.
1292 */
1293 --active_shortage;
1294 if (inactive_shortage - delta > 0 ||
1295 m->object->ref_count == 0) {
1296 if (inactive_shortage - delta > 0)
1297 ++*recycle_countp;
1298 vm_page_protect(m, VM_PROT_NONE);
1299 if (m->dirty == 0 &&
1300 inactive_shortage - delta > 0) {
1301 ++delta;
1302 vm_page_cache(m);
1303 } else {
1304 vm_page_deactivate(m);
1305 vm_page_wakeup(m);
1306 }
1307 } else {
1308 vm_page_deactivate(m);
1309 vm_page_wakeup(m);
1310 }
1311 } else {
1312 vm_page_and_queue_spin_lock(m);
1313 if (m->queue - m->pc == PQ_ACTIVE) {
1314 TAILQ_REMOVE(
1315 &vm_page_queues[PQ_ACTIVE + q].pl,
1316 m, pageq);
1317 TAILQ_INSERT_TAIL(
1318 &vm_page_queues[PQ_ACTIVE + q].pl,
1319 m, pageq);
1320 }
1321 vm_page_and_queue_spin_unlock(m);
1322 vm_page_wakeup(m);
1323 }
1324 }
1325 }
1326
1327 /*
1328 * Clean out our local marker.
1329 */
1330 vm_page_queues_spin_lock(PQ_ACTIVE + q);
1331 TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE + q].pl, &marker, pageq);
1332 vm_page_queues_spin_unlock(PQ_ACTIVE + q);
1333
1334 return (delta);
1335}
1336
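/*
 * Editor-added illustrative sketch (not part of the original file): models
 * the act_count aging policy applied by vm_pageout_scan_active() above.
 * The three constants are local stand-ins chosen only for the example;
 * the kernel's real ACT_ADVANCE, ACT_DECLINE and ACT_MAX come from
 * vm_page.h and may differ.
 */
#if 0
static int
vm_pageout_act_count_sketch(int act_count, int referenced_ticks)
{
	const int act_advance = 3;	/* stand-in for ACT_ADVANCE */
	const int act_decline = 1;	/* stand-in for ACT_DECLINE */
	const int act_max = 64;		/* stand-in for ACT_MAX */

	if (referenced_ticks) {
		/* recently used: reward the page, clamp at the maximum */
		act_count += act_advance + referenced_ticks;
		if (act_count > act_max)
			act_count = act_max;
	} else {
		/* idle: age the page toward deactivation */
		act_count -= (act_count < act_decline) ? act_count : act_decline;
	}
	return (act_count);	/* 0 marks a deactivation candidate */
}
#endif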
1337/*
1338 * The number of actually free pages can drop down to v_free_reserved,
1339 * we try to build the free count back above v_free_min. Note that
1340 * vm_paging_needed() also returns TRUE if v_free_count is not at
1341 * least v_free_min so that is the minimum we must build the free
1342 * count to.
1343 *
1344 * We use a slightly higher target to improve hysteresis,
1345 * ((v_free_target + v_free_min) / 2). Since v_free_target
1346 * is usually the same as v_cache_min this maintains about
1347 * half the pages in the free queue as are in the cache queue,
1348 * providing pretty good pipelining for pageout operation.
1349 *
1350 * The system operator can manipulate vm.v_cache_min and
1351 * vm.v_free_target to tune the pageout daemon. Be sure
1352 * to keep vm.v_free_min < vm.v_free_target.
1353 *
1354 * Note that the original paging target is to get at least
1355 * (free_min + cache_min) into (free + cache). The slightly
1356 * higher target will shift additional pages from cache to free
1357 * without affecting the original paging target in order to
1358 * maintain better hysteresis and not have the free count always
1359 * be dead-on v_free_min.
1360 *
1361 * NOTE: we are still in a critical section.
1362 *
1363 * Pages moved from PQ_CACHE to totally free are not counted in the
1364 * pages_freed counter.
1365 */
1366static void
1367vm_pageout_scan_cache(int inactive_shortage,
1368 int vnodes_skipped, int recycle_count)
1369{
1370 struct vm_pageout_scan_info info;
1371 vm_page_t m;
1372
1373 while (vmstats.v_free_count <
1374 (vmstats.v_free_min + vmstats.v_free_target) / 2) {
1375 /*
1376 * This steals some code from vm/vm_page.c
1377 */
1378 static int cache_rover = 0;
1379
1380 m = vm_page_list_find(PQ_CACHE, cache_rover & PQ_L2_MASK, FALSE);
1381 if (m == NULL)
1382 break;
1383 /* page is returned removed from its queue and spinlocked */
1384 if (vm_page_busy_try(m, TRUE)) {
1385 vm_page_deactivate_locked(m);
1386 vm_page_spin_unlock(m);
1387#ifdef INVARIANTS
1388 kprintf("Warning: busy page %p found in cache\n", m);
1389#endif
1390 continue;
1391 }
1392 vm_page_spin_unlock(m);
1393 pagedaemon_wakeup();
1394 lwkt_yield();
1395
1396 /*
1397 * Page has been successfully busied and it and its queue
1398 * is no longer spinlocked.
1399 */
1400 if ((m->flags & PG_UNMANAGED) ||
1401 m->hold_count ||
1402 m->wire_count) {
1403 vm_page_deactivate(m);
1404 vm_page_wakeup(m);
1405 continue;
1406 }
1407 KKASSERT((m->flags & PG_MAPPED) == 0);
1408 KKASSERT(m->dirty == 0);
1409 cache_rover += PQ_PRIME2;
1410 vm_pageout_page_free(m);
1411 mycpu->gd_cnt.v_dfree++;
1412 }
1413
1414#if !defined(NO_SWAPPING)
1415 /*
1416 * Idle process swapout -- run once per second.
1417 */
1418 if (vm_swap_idle_enabled) {
1419 static long lsec;
1420 if (time_second != lsec) {
1421 vm_pageout_req_swapout |= VM_SWAP_IDLE;
1422 vm_req_vmdaemon();
1423 lsec = time_second;
1424 }
1425 }
1426#endif
1427
1428 /*
1429 * If we didn't get enough free pages, and we have skipped a vnode
1430 * in a writeable object, wakeup the sync daemon. And kick swapout
1431 * if we did not get enough free pages.
1432 */
1433 if (vm_paging_target() > 0) {
1434 if (vnodes_skipped && vm_page_count_min(0))
1435 speedup_syncer();
1436#if !defined(NO_SWAPPING)
1437 if (vm_swap_enabled && vm_page_count_target()) {
1438 vm_req_vmdaemon();
1439 vm_pageout_req_swapout |= VM_SWAP_NORMAL;
1440 }
1441#endif
1442 }
1443
1444 /*
1445 * Handle catastrophic conditions. Under good conditions we should
1446 * be at the target, well beyond our minimum. If we could not even
1447 * reach our minimum the system is under heavy stress.
1448 *
1449 * Determine whether we have run out of memory. This occurs when
1450 * swap_pager_full is TRUE and the only pages left in the page
1451 * queues are dirty. We will still likely have page shortages.
1452 *
1453 * - swap_pager_full is set if insufficient swap was
1454 * available to satisfy a requested pageout.
1455 *
1456 * - the inactive queue is bloated (4 x size of active queue),
1457 * meaning it is unable to get rid of dirty pages and.
1458 *
1459 * - vm_page_count_min() without counting pages recycled from the
1460 * active queue (recycle_count) means we could not recover
1461 * enough pages to meet bare minimum needs. This test only
1462 * works if the inactive queue is bloated.
1463 *
1464 * - due to a positive inactive_shortage we shifted the remaining
1465 * dirty pages from the active queue to the inactive queue
1466 * trying to find clean ones to free.
1467 */
1468 if (swap_pager_full && vm_page_count_min(recycle_count))
1469 kprintf("Warning: system low on memory+swap!\n");
1470 if (swap_pager_full && vm_page_count_min(recycle_count) &&
1471 vmstats.v_inactive_count > vmstats.v_active_count * 4 &&
1472 inactive_shortage > 0) {
1473 /*
1474 * Kill something.
1475 */
1476 info.bigproc = NULL;
1477 info.bigsize = 0;
1478 allproc_scan(vm_pageout_scan_callback, &info);
1479 if (info.bigproc != NULL) {
1480 killproc(info.bigproc, "out of swap space");
1481 info.bigproc->p_nice = PRIO_MIN;
1482 info.bigproc->p_usched->resetpriority(
1483 FIRST_LWP_IN_PROC(info.bigproc));
1484 wakeup(&vmstats.v_free_count);
1485 PRELE(info.bigproc);
1486 }
1487 }
1488}
1489
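/*
 * Editor-added illustrative sketch (not part of the original file): the
 * cache-to-free loop at the top of vm_pageout_scan_cache() above runs
 * until v_free_count reaches the midpoint between v_free_min and
 * v_free_target. With the hypothetical values used in the worked example
 * after vm_pageout_free_page_calc() below (v_free_min = 1369 and, on a
 * machine with more than 6144 free pages at init time,
 * v_free_target = 4 * 1369 + 691 = 6167), the loop would build the free
 * count up to (1369 + 6167) / 2 = 3768 pages.
 */
#if 0
static int
vm_pageout_free_hysteresis_sketch(int v_free_min, int v_free_target)
{
	return ((v_free_min + v_free_target) / 2);
}
#endif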
1490/*
1491 * The caller must hold proc_token.
1492 */
1493static int
1494vm_pageout_scan_callback(struct proc *p, void *data)
1495{
1496 struct vm_pageout_scan_info *info = data;
1497 vm_offset_t size;
1498
1499 /*
1500 * Never kill system processes or init. If we have configured swap
1501 * then try to avoid killing low-numbered pids.
1502 */
1503 if ((p->p_flag & P_SYSTEM) || (p->p_pid == 1) ||
1504 ((p->p_pid < 48) && (vm_swap_size != 0))) {
1505 return (0);
1506 }
1507
1508 /*
1509 * if the process is in a non-running type state,
1510 * don't touch it.
1511 */
1512 if (p->p_stat != SACTIVE && p->p_stat != SSTOP)
1513 return (0);
1514
1515 /*
1516 * Get the approximate process size. Note that anonymous pages
1517 * with backing swap will be counted twice, but there should not
1518 * be too many such pages due to the stress the VM system is
1519 * under at this point.
1520 */
1521 size = vmspace_anonymous_count(p->p_vmspace) +
1522 vmspace_swap_count(p->p_vmspace);
1523
1524 /*
1525 * If this process is bigger than the biggest one
1526 * remember it.
1527 */
1528 if (info->bigsize < size) {
1529 if (info->bigproc)
1530 PRELE(info->bigproc);
1531 PHOLD(p);
1532 info->bigproc = p;
1533 info->bigsize = size;
1534 }
1535 lwkt_yield();
1536 return(0);
1537}
1538
1539/*
1540 * This routine tries to maintain the pseudo LRU active queue,
1541 * so that during long periods of time where there is no paging,
1542 * that some statistic accumulation still occurs. This code
1543 * helps the situation where paging just starts to occur.
1544 */
1545static void
1546vm_pageout_page_stats(int q)
1547{
1548 static int fullintervalcount = 0;
1549 struct vm_page marker;
1550 vm_page_t m;
1551 int pcount, tpcount; /* Number of pages to check */
1552 int page_shortage;
1553
1554 page_shortage = (vmstats.v_inactive_target + vmstats.v_cache_max +
1555 vmstats.v_free_min) -
1556 (vmstats.v_free_count + vmstats.v_inactive_count +
1557 vmstats.v_cache_count);
1558
1559 if (page_shortage <= 0)
1560 return;
1561
1562 pcount = vmstats.v_active_count;
1563 fullintervalcount += vm_pageout_stats_interval;
1564 if (fullintervalcount < vm_pageout_full_stats_interval) {
1565 tpcount = (vm_pageout_stats_max * vmstats.v_active_count) /
1566 vmstats.v_page_count;
1567 if (pcount > tpcount)
1568 pcount = tpcount;
1569 } else {
1570 fullintervalcount = 0;
1571 }
1572
1573 bzero(&marker, sizeof(marker));
1574 marker.flags = PG_BUSY | PG_FICTITIOUS | PG_MARKER;
1575 marker.queue = PQ_ACTIVE + q;
1576 marker.pc = q;
1577 marker.wire_count = 1;
1578
1579 vm_page_queues_spin_lock(PQ_ACTIVE + q);
1580 TAILQ_INSERT_HEAD(&vm_page_queues[PQ_ACTIVE + q].pl, &marker, pageq);
1581 vm_page_queues_spin_unlock(PQ_ACTIVE + q);
1582
1583 while ((m = TAILQ_NEXT(&marker, pageq)) != NULL &&
1584 pcount-- > 0)
1585 {
1586 int actcount;
1587
1588 vm_page_and_queue_spin_lock(m);
1589 if (m != TAILQ_NEXT(&marker, pageq)) {
1590 vm_page_and_queue_spin_unlock(m);
1591 ++pcount;
1592 continue;
1593 }
1594 KKASSERT(m->queue - m->pc == PQ_ACTIVE);
1595 TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE + q].pl, &marker, pageq);
1596 TAILQ_INSERT_AFTER(&vm_page_queues[PQ_ACTIVE + q].pl, m,
1597 &marker, pageq);
1598
1599 /*
1600 * Ignore markers
1601 */
1602 if (m->flags & PG_MARKER) {
1603 vm_page_and_queue_spin_unlock(m);
1604 continue;
1605 }
1606
1607 /*
1608 * Ignore pages we can't busy
1609 */
1610 if (vm_page_busy_try(m, TRUE)) {
1611 vm_page_and_queue_spin_unlock(m);
1612 continue;
1613 }
1614 vm_page_and_queue_spin_unlock(m);
1615 KKASSERT(m->queue - m->pc == PQ_ACTIVE);
1616
1617 /*
1618 * We now have a safely busied page, the page and queue
1619 * spinlocks have been released.
1620 *
1621 * Ignore held pages
1622 */
1623 if (m->hold_count) {
1624 vm_page_wakeup(m);
1625 continue;
1626 }
1627
1628 /*
1629 * Calculate activity
1630 */
1631 actcount = 0;
1632 if (m->flags & PG_REFERENCED) {
1633 vm_page_flag_clear(m, PG_REFERENCED);
1634 actcount += 1;
1635 }
1636 actcount += pmap_ts_referenced(m);
1637
1638 /*
1639 * Update act_count and move page to end of queue.
1640 */
1641 if (actcount) {
1642 m->act_count += ACT_ADVANCE + actcount;
1643 if (m->act_count > ACT_MAX)
1644 m->act_count = ACT_MAX;
1645 vm_page_and_queue_spin_lock(m);
1646 if (m->queue - m->pc == PQ_ACTIVE) {
1647 TAILQ_REMOVE(
1648 &vm_page_queues[PQ_ACTIVE + q].pl,
1649 m, pageq);
1650 TAILQ_INSERT_TAIL(
1651 &vm_page_queues[PQ_ACTIVE + q].pl,
1652 m, pageq);
1653 }
1654 vm_page_and_queue_spin_unlock(m);
1655 vm_page_wakeup(m);
1656 continue;
1657 }
1658
1659 if (m->act_count == 0) {
1660 /*
1661 * We turn off page access, so that we have
1662 * more accurate RSS stats. We don't do this
1663 * in the normal page deactivation when the
1664 * system is loaded VM wise, because the
1665 * cost of the large number of page protect
1666 * operations would be higher than the value
1667 * of doing the operation.
1668 *
1669 * We use the marker to save our place so
1670 * we can release the spin lock. both (m)
1671 * and (next) will be invalid.
1672 */
1673 vm_page_protect(m, VM_PROT_NONE);
1674 vm_page_deactivate(m);
1675 } else {
1676 m->act_count -= min(m->act_count, ACT_DECLINE);
1677 vm_page_and_queue_spin_lock(m);
1678 if (m->queue - m->pc == PQ_ACTIVE) {
1679 TAILQ_REMOVE(
1680 &vm_page_queues[PQ_ACTIVE + q].pl,
1681 m, pageq);
1682 TAILQ_INSERT_TAIL(
1683 &vm_page_queues[PQ_ACTIVE + q].pl,
1684 m, pageq);
1685 }
1686 vm_page_and_queue_spin_unlock(m);
1687 }
1688 vm_page_wakeup(m);
1689 }
1690
1691 /*
1692 * Remove our local marker
1693 */
1694 vm_page_queues_spin_lock(PQ_ACTIVE + q);
1695 TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE + q].pl, &marker, pageq);
1696 vm_page_queues_spin_unlock(PQ_ACTIVE + q);
984263bc
MD
1697}
1698
1699static int
57e43348 1700vm_pageout_free_page_calc(vm_size_t count)
984263bc 1701{
12e4aaff 1702 if (count < vmstats.v_page_count)
984263bc
MD
1703 return 0;
1704 /*
1705 * free_reserved needs to include enough for the largest swap pager
1706 * structures plus enough for any pv_entry structs when paging.
0a4d4828
MD
1707 *
1708 * v_free_min normal allocations
1709 * v_free_reserved system allocations
1710 * v_pageout_free_min allocations by pageout daemon
1711 * v_interrupt_free_min low level allocations (e.g. swap structures)
984263bc 1712 */
12e4aaff 1713 if (vmstats.v_page_count > 1024)
0a4d4828 1714 vmstats.v_free_min = 64 + (vmstats.v_page_count - 1024) / 200;
984263bc 1715 else
0a4d4828
MD
1716 vmstats.v_free_min = 64;
1717 vmstats.v_free_reserved = vmstats.v_free_min * 4 / 8 + 7;
1718 vmstats.v_free_severe = vmstats.v_free_min * 4 / 8 + 0;
1719 vmstats.v_pageout_free_min = vmstats.v_free_min * 2 / 8 + 7;
1720 vmstats.v_interrupt_free_min = vmstats.v_free_min * 1 / 8 + 7;
1721
984263bc
MD
1722 return 1;
1723}
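/*
 * Illustrative example (hypothetical numbers, assuming 4KB pages):
 * on a machine with 1048576 pages (~4GB) the calculation above
 * yields approximately:
 *
 *	v_free_min		64 + 1047552/200 = 5301 pages (~20MB)
 *	v_free_reserved		5301*4/8 + 7	 = 2657 pages
 *	v_free_severe		5301*4/8	 = 2650 pages
 *	v_pageout_free_min	5301*2/8 + 7	 = 1332 pages
 *	v_interrupt_free_min	5301*1/8 + 7	 = 669 pages
 */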
1724
1725
1726/*
20479584 1727 * vm_pageout is the high level pageout daemon.
99ad9bc4
MD
1728 *
1729 * No requirements.
984263bc
MD
1730 */
1731static void
cd8ab232 1732vm_pageout_thread(void)
984263bc
MD
1733{
1734 int pass;
027193eb 1735 int q;
984263bc
MD
1736
1737 /*
1738 * Initialize some paging parameters.
1739 */
4ecf7cc9 1740 curthread->td_flags |= TDF_SYSTHREAD;
984263bc 1741
12e4aaff 1742 if (vmstats.v_page_count < 2000)
984263bc
MD
1743 vm_pageout_page_count = 8;
1744
12e4aaff 1745 vm_pageout_free_page_calc(vmstats.v_page_count);
20479584 1746
984263bc
MD
1747 /*
1748 * v_free_target and v_cache_min control pageout hysteresis. Note
1749 * that these are more a measure of the VM cache queue hysteresis
1750 * than the VM free queue. Specifically, v_free_target is the
1751 * high water mark (free+cache pages).
1752 *
1753 * v_free_reserved + v_cache_min (mostly means v_cache_min) is the
1754 * low water mark, while v_free_min is the stop. v_cache_min must
1755 * be big enough to handle memory needs while the pageout daemon
1756 * is signalled and run to free more pages.
1757 */
12e4aaff
MD
1758 if (vmstats.v_free_count > 6144)
1759 vmstats.v_free_target = 4 * vmstats.v_free_min + vmstats.v_free_reserved;
984263bc 1760 else
12e4aaff 1761 vmstats.v_free_target = 2 * vmstats.v_free_min + vmstats.v_free_reserved;
984263bc 1762
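/*
 * Illustrative continuation of the example above: with
 * v_free_min = 5301 and v_free_reserved = 2657, and most memory
 * free at boot (v_free_count > 6144), the high water mark becomes
 * v_free_target = 4*5301 + 2657 = 23861 pages (~93MB of free+cache).
 */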
0e8bd897
MD
1763 /*
1764 * NOTE: With the new buffer cache b_act_count we want the default
1765 * inactive target to be a percentage of available memory.
1766 *
1767 * The inactive target essentially determines the minimum
1768 * number of 'temporary' pages capable of caching one-time-use
1769 * files when the VM system is otherwise full of pages
1770 * belonging to multi-time-use files or active program data.
51db7ca2
MD
1771 *
1772 * NOTE: The inactive target is aggressively pursued only if the
1773 * inactive queue becomes too small. If the inactive queue
1774 * is large enough to satisfy page movement to free+cache
1775 * then it is repopulated more slowly from the active queue.
e15708fc 1776 * This allows a general inactive_target default to be set.
51db7ca2
MD
1777 *
1778 * There is an issue here for processes which sit mostly idle
1779 * 'overnight', such as sshd, tcsh, and X. Any movement from
1780 * the active queue will eventually cause such pages to
1781 * be recycled, causing a lot of paging in the morning.
1782 * To reduce the incidence of this, pages cycled out of the
1783 * buffer cache are moved directly to the inactive queue if
e15708fc
MD
1784 * they were only used once or twice.
1785 *
1786 * The vfs.vm_cycle_point sysctl can be used to adjust this.
1787 * Increasing the value (up to 64) increases the number of
1788 * buffer recyclements which go directly to the inactive queue.
0e8bd897 1789 */
12e4aaff
MD
1790 if (vmstats.v_free_count > 2048) {
1791 vmstats.v_cache_min = vmstats.v_free_target;
1792 vmstats.v_cache_max = 2 * vmstats.v_cache_min;
984263bc 1793 } else {
12e4aaff
MD
1794 vmstats.v_cache_min = 0;
1795 vmstats.v_cache_max = 0;
984263bc 1796 }
e15708fc 1797 vmstats.v_inactive_target = vmstats.v_free_count / 4;
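/*
 * Illustrative example (hypothetical boot-time numbers): with
 * roughly 1000000 pages free at boot and the v_free_target of
 * 23861 pages from the example above, this sets
 * v_cache_min = 23861, v_cache_max = 47722, and
 * v_inactive_target = 1000000/4 = 250000 pages (~1GB).
 */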
984263bc
MD
1798
1799 /* XXX does not really belong here */
1800 if (vm_page_max_wired == 0)
12e4aaff 1801 vm_page_max_wired = vmstats.v_free_count / 3;
984263bc
MD
1802
1803 if (vm_pageout_stats_max == 0)
12e4aaff 1804 vm_pageout_stats_max = vmstats.v_free_target;
984263bc
MD
1805
1806 /*
1807 * Set interval in seconds for stats scan.
1808 */
1809 if (vm_pageout_stats_interval == 0)
1810 vm_pageout_stats_interval = 5;
1811 if (vm_pageout_full_stats_interval == 0)
1812 vm_pageout_full_stats_interval = vm_pageout_stats_interval * 4;
1813
1814
1815 /*
1816 * Set maximum free per pass
1817 */
1818 if (vm_pageout_stats_free_max == 0)
1819 vm_pageout_stats_free_max = 5;
1820
1821 swap_pager_swap_init();
1822 pass = 0;
20479584 1823
984263bc
MD
1824 /*
1825 * The pageout daemon is never done, so loop forever.
1826 */
1827 while (TRUE) {
1828 int error;
027193eb
MD
1829 int delta1;
1830 int delta2;
1831 int inactive_shortage;
1832 int active_shortage;
1833 int vnodes_skipped = 0;
1834 int recycle_count = 0;
1835 int tmp;
984263bc 1836
12d8aca7 1837 /*
1bfac262
MD
1838 * Wait for an action request. If we time out, check to
1839 * see if paging is needed (in case the normal wakeup
1840 * code raced us).
12d8aca7 1841 */
20479584 1842 if (vm_pages_needed == 0) {
984263bc 1843 error = tsleep(&vm_pages_needed,
20479584
MD
1844 0, "psleep",
1845 vm_pageout_stats_interval * hz);
1bfac262
MD
1846 if (error &&
1847 vm_paging_needed() == 0 &&
1848 vm_pages_needed == 0) {
027193eb
MD
1849 for (q = 0; q < PQ_MAXL2_SIZE; ++q)
1850 vm_pageout_page_stats(q);
984263bc
MD
1851 continue;
1852 }
20479584 1853 vm_pages_needed = 1;
984263bc
MD
1854 }
1855
20479584 1856 mycpu->gd_cnt.v_pdwakeups++;
20479584 1857
027193eb
MD
1858 /*
1859 * Do whatever cleanup that the pmap code can.
1860 */
1861 pmap_collect();
1862
20479584 1863 /*
12d8aca7
MD
1864 * Scan for pageout. Try to avoid thrashing the system
1865 * with activity.
027193eb
MD
1866 *
1867 * Calculate our target for the number of free+cache pages we
1868 * want to get to. This is higher than the number that causes
1869 * allocations to stall (severe) in order to provide hysteresis,
1870 * and if we don't make it all the way but get to the minimum
1871 * we're happy.
1872 */
1873 inactive_shortage = vm_paging_target() + vm_pageout_deficit;
1874 vm_pageout_deficit = 0;
1875 delta1 = 0;
1876 for (q = 0; q < PQ_MAXL2_SIZE; ++q) {
1877 delta1 += vm_pageout_scan_inactive(
1878 pass, q,
1879 inactive_shortage / PQ_MAXL2_SIZE + 1,
1880 &vnodes_skipped);
1881 }
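/*
 * Illustrative note: the shortage is split evenly across the page
 * queue sub-queues.  If, say, PQ_MAXL2_SIZE were 256 (the actual
 * value depends on the kernel configuration) and inactive_shortage
 * were 1000 pages, each sub-queue would be asked to free
 * 1000/256 + 1 = 4 pages, so the aggregate request (1024) slightly
 * overshoots the target due to the +1 rounding.
 */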
1882
1883 /*
1884 * Figure out how many active pages we must deactivate. If
1885 * we were able to reach our target with just the inactive
1886 * scan above, we limit the number of active pages we
1887 * deactivate to reduce unnecessary work.
1888 */
1889 active_shortage = vmstats.v_inactive_target -
1890 vmstats.v_inactive_count;
1891
1892 tmp = inactive_shortage;
1893 if (tmp < vmstats.v_inactive_target / 10)
1894 tmp = vmstats.v_inactive_target / 10;
1895 inactive_shortage -= delta1;
1896 if (inactive_shortage <= 0 && active_shortage > tmp * 2)
1897 active_shortage = tmp * 2;
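/*
 * Illustrative note: if the inactive scan above already covered the
 * shortage (inactive_shortage <= 0), the active scan is clamped to
 * twice the original request, but never below twice 10% of the
 * inactive target.  With the example inactive target of 250000
 * pages above, the clamp would never fall below 2*25000 = 50000
 * pages per pass.
 */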
1898
1899 delta2 = 0;
1900 for (q = 0; q < PQ_MAXL2_SIZE; ++q) {
1901 delta2 += vm_pageout_scan_active(
1902 pass, q,
1903 inactive_shortage / PQ_MAXL2_SIZE + 1,
1904 active_shortage / PQ_MAXL2_SIZE + 1,
1905 &recycle_count);
1906 }
1907
1908 /*
1909 * Finally, free enough cache pages to meet our free page
1910 * requirement and take more drastic measures if we are
1911 * still in trouble.
1912 */
1913 inactive_shortage -= delta2;
1914 vm_pageout_scan_cache(inactive_shortage, vnodes_skipped,
1915 recycle_count);
1916
1917 /*
1918 * Wait for more work.
20479584
MD
1919 */
1920 if (inactive_shortage > 0) {
1921 ++pass;
1922 if (swap_pager_full) {
1923 /*
1924 * Running out of memory, catastrophic back-off
1925 * to one-second intervals.
1926 */
1927 tsleep(&vm_pages_needed, 0, "pdelay", hz);
1928 } else if (pass < 10 && vm_pages_needed > 1) {
1929 /*
1930 * Normal operation, additional processes
1931 * have already kicked us. Retry immediately.
1932 */
1933 } else if (pass < 10) {
1934 /*
1935 * Normal operation, fewer processes. Delay
1936 * a bit but allow wakeups.
1937 */
1938 vm_pages_needed = 0;
1939 tsleep(&vm_pages_needed, 0, "pdelay", hz / 10);
1940 vm_pages_needed = 1;
1941 } else {
1942 /*
1943 * We've taken too many passes, forced delay.
1944 */
1945 tsleep(&vm_pages_needed, 0, "pdelay", hz / 10);
1946 }
1947 } else {
12d8aca7
MD
1948 /*
1949 * Interlocked wakeup of waiters (non-optional)
1950 */
20479584 1951 pass = 0;
12d8aca7
MD
1952 if (vm_pages_needed && !vm_page_count_min(0)) {
1953 wakeup(&vmstats.v_free_count);
1954 vm_pages_needed = 0;
1955 }
20479584 1956 }
984263bc
MD
1957 }
1958}
1959
cd8ab232
MD
1960static struct kproc_desc page_kp = {
1961 "pagedaemon",
1962 vm_pageout_thread,
1963 &pagethread
1964};
1965SYSINIT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, kproc_start, &page_kp)
1966
1967
20479584
MD
1968/*
1969 * Called after allocating a page out of the cache or free queue
1970 * to possibly wake the pagedaemon up to replenish our supply.
1971 *
1972 * We try to generate some hysteresis by waking the pagedaemon up
1bfac262
MD
1973 * when our free+cache pages go below the free_min+cache_min level.
1974 * The pagedaemon tries to get the count back up to at least the
1975 * minimum, and through to the target level if possible.
20479584
MD
1976 *
1977 * If the pagedaemon is already active bump vm_pages_needed as a hint
1978 * that there are even more requests pending.
99ad9bc4
MD
1979 *
1980 * SMP races ok?
1981 * No requirements.
20479584 1982 */
984263bc 1983void
57e43348 1984pagedaemon_wakeup(void)
984263bc 1985{
1bfac262 1986 if (vm_paging_needed() && curthread != pagethread) {
20479584 1987 if (vm_pages_needed == 0) {
1bfac262 1988 vm_pages_needed = 1; /* SMP race ok */
20479584
MD
1989 wakeup(&vm_pages_needed);
1990 } else if (vm_page_count_min(0)) {
1bfac262 1991 ++vm_pages_needed; /* SMP race ok */
20479584 1992 }
984263bc
MD
1993 }
1994}
1995
1996#if !defined(NO_SWAPPING)
99ad9bc4
MD
1997
1998/*
1999 * SMP races ok?
2000 * No requirements.
2001 */
984263bc 2002static void
57e43348 2003vm_req_vmdaemon(void)
984263bc
MD
2004{
2005 static int lastrun = 0;
2006
2007 if ((ticks > (lastrun + hz)) || (ticks < lastrun)) {
2008 wakeup(&vm_daemon_needed);
2009 lastrun = ticks;
2010 }
2011}
2012
8fa76237
MD
2013static int vm_daemon_callback(struct proc *p, void *data __unused);
2014
99ad9bc4
MD
2015/*
2016 * No requirements.
2017 */
984263bc 2018static void
57e43348 2019vm_daemon(void)
984263bc 2020{
99ad9bc4 2021 /*
b12defdc 2022 * XXX vm_daemon_needed specific token?
99ad9bc4 2023 */
984263bc 2024 while (TRUE) {
377d4740 2025 tsleep(&vm_daemon_needed, 0, "psleep", 0);
984263bc
MD
2026 if (vm_pageout_req_swapout) {
2027 swapout_procs(vm_pageout_req_swapout);
2028 vm_pageout_req_swapout = 0;
2029 }
2030 /*
2031 * Scan processes that exceed their RSS rlimit or that are
2032 * swapped out, and deactivate their pages.
2033 */
8fa76237
MD
2034 allproc_scan(vm_daemon_callback, NULL);
2035 }
2036}
984263bc 2037
99ad9bc4 2038/*
b12defdc 2039 * Caller must hold proc_token.
99ad9bc4 2040 */
8fa76237
MD
2041static int
2042vm_daemon_callback(struct proc *p, void *data __unused)
2043{
2044 vm_pindex_t limit, size;
984263bc 2045
8fa76237
MD
2046 /*
2047 * if this is a system process or if we have already
2048 * looked at this process, skip it.
2049 */
2050 if (p->p_flag & (P_SYSTEM | P_WEXIT))
2051 return (0);
984263bc 2052
8fa76237
MD
2053 /*
2054 * if the process is in a non-running type state,
2055 * don't touch it.
2056 */
164b8401 2057 if (p->p_stat != SACTIVE && p->p_stat != SSTOP)
8fa76237 2058 return (0);
984263bc 2059
8fa76237
MD
2060 /*
2061 * get a limit
2062 */
2063 limit = OFF_TO_IDX(qmin(p->p_rlimit[RLIMIT_RSS].rlim_cur,
2064 p->p_rlimit[RLIMIT_RSS].rlim_max));
2065
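	/*
	 * Illustrative note: OFF_TO_IDX converts the byte limit to a
	 * page count, e.g. an RLIMIT_RSS of 64MB corresponds to
	 * 16384 pages assuming 4KB pages.
	 */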
2066 /*
2067 * let processes that are swapped out really be
2068 * swapped out. Set the limit to nothing to get as
2069 * many pages out to swap as possible.
2070 */
2071 if (p->p_flag & P_SWAPPEDOUT)
2072 limit = 0;
2073
b12defdc 2074 lwkt_gettoken(&p->p_vmspace->vm_map.token);
8fa76237
MD
2075 size = vmspace_resident_count(p->p_vmspace);
2076 if (limit >= 0 && size >= limit) {
b12defdc 2077 vm_pageout_map_deactivate_pages(&p->p_vmspace->vm_map, limit);
984263bc 2078 }
b12defdc 2079 lwkt_reltoken(&p->p_vmspace->vm_map.token);
8fa76237 2080 return (0);
984263bc 2081}
8fa76237 2082
984263bc 2083#endif