kernel - Performance improvements during heavy memory/IO use
[dragonfly.git] / sys / vm / vm_pageout.c
1 /*
2 * (MPSAFE)
3 *
4 * Copyright (c) 1991 Regents of the University of California.
5 * All rights reserved.
6 * Copyright (c) 1994 John S. Dyson
7 * All rights reserved.
8 * Copyright (c) 1994 David Greenman
9 * All rights reserved.
10 *
11 * This code is derived from software contributed to Berkeley by
12 * The Mach Operating System project at Carnegie-Mellon University.
13 *
14 * Redistribution and use in source and binary forms, with or without
15 * modification, are permitted provided that the following conditions
16 * are met:
17 * 1. Redistributions of source code must retain the above copyright
18 * notice, this list of conditions and the following disclaimer.
19 * 2. Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in the
21 * documentation and/or other materials provided with the distribution.
22 * 4. Neither the name of the University nor the names of its contributors
23 * may be used to endorse or promote products derived from this software
24 * without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * SUCH DAMAGE.
37 *
38 * from: @(#)vm_pageout.c 7.4 (Berkeley) 5/7/91
39 *
40 *
41 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
42 * All rights reserved.
43 *
44 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
45 *
46 * Permission to use, copy, modify and distribute this software and
47 * its documentation is hereby granted, provided that both the copyright
48 * notice and this permission notice appear in all copies of the
49 * software, derivative works or modified versions, and any portions
50 * thereof, and that both notices appear in supporting documentation.
51 *
52 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
53 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
54 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
55 *
56 * Carnegie Mellon requests users of this software to return to
57 *
58 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
59 * School of Computer Science
60 * Carnegie Mellon University
61 * Pittsburgh PA 15213-3890
62 *
63 * any improvements or extensions that they make and grant Carnegie the
64 * rights to redistribute these changes.
65 *
66 * $FreeBSD: src/sys/vm/vm_pageout.c,v 1.151.2.15 2002/12/29 18:21:04 dillon Exp $
67 */
68
69/*
70 * The proverbial page-out daemon.
71 */
72
73#include "opt_vm.h"
74#include <sys/param.h>
75#include <sys/systm.h>
76#include <sys/kernel.h>
77#include <sys/proc.h>
78#include <sys/kthread.h>
79#include <sys/resourcevar.h>
80#include <sys/signalvar.h>
81#include <sys/vnode.h>
82#include <sys/vmmeter.h>
83#include <sys/sysctl.h>
84
85#include <vm/vm.h>
86#include <vm/vm_param.h>
87#include <sys/lock.h>
88#include <vm/vm_object.h>
89#include <vm/vm_page.h>
90#include <vm/vm_map.h>
91#include <vm/vm_pageout.h>
92#include <vm/vm_pager.h>
93#include <vm/swap_pager.h>
94#include <vm/vm_extern.h>
95
96#include <sys/thread2.h>
97#include <sys/spinlock2.h>
98#include <vm/vm_page2.h>
99
100/*
101 * System initialization
102 */
103
104/* the kernel process "vm_pageout"*/
105static int vm_pageout_clean (vm_page_t);
106static int vm_pageout_free_page_calc (vm_size_t count);
107struct thread *pagethread;
108
109#if !defined(NO_SWAPPING)
110/* the kernel process "vm_daemon"*/
111static void vm_daemon (void);
112static struct thread *vmthread;
113
114static struct kproc_desc vm_kp = {
115	"vmdaemon",
116	vm_daemon,
117	&vmthread
118};
119SYSINIT(vmdaemon, SI_SUB_KTHREAD_VM, SI_ORDER_FIRST, kproc_start, &vm_kp)
120#endif
121
122
123int vm_pages_needed=0; /* Event on which pageout daemon sleeps */
124int vm_pageout_deficit=0; /* Estimated number of pages deficit */
125int vm_pageout_pages_needed=0; /* flag saying that the pageout daemon needs pages */
126
127#if !defined(NO_SWAPPING)
128static int vm_pageout_req_swapout; /* XXX */
129static int vm_daemon_needed;
130#endif
131static int vm_max_launder = 32;
132static int vm_pageout_stats_max=0, vm_pageout_stats_interval = 0;
133static int vm_pageout_full_stats_interval = 0;
134static int vm_pageout_stats_free_max=0, vm_pageout_algorithm=0;
135static int defer_swap_pageouts=0;
136static int disable_swap_pageouts=0;
137
138#if defined(NO_SWAPPING)
139static int vm_swap_enabled=0;
140static int vm_swap_idle_enabled=0;
141#else
142static int vm_swap_enabled=1;
143static int vm_swap_idle_enabled=0;
144#endif
145
146SYSCTL_INT(_vm, VM_PAGEOUT_ALGORITHM, pageout_algorithm,
147 CTLFLAG_RW, &vm_pageout_algorithm, 0, "LRU page mgmt");
148
149SYSCTL_INT(_vm, OID_AUTO, max_launder,
150 CTLFLAG_RW, &vm_max_launder, 0, "Limit dirty flushes in pageout");
151
152SYSCTL_INT(_vm, OID_AUTO, pageout_stats_max,
153 CTLFLAG_RW, &vm_pageout_stats_max, 0, "Max pageout stats scan length");
154
155SYSCTL_INT(_vm, OID_AUTO, pageout_full_stats_interval,
156 CTLFLAG_RW, &vm_pageout_full_stats_interval, 0, "Interval for full stats scan");
157
158SYSCTL_INT(_vm, OID_AUTO, pageout_stats_interval,
159 CTLFLAG_RW, &vm_pageout_stats_interval, 0, "Interval for partial stats scan");
160
161SYSCTL_INT(_vm, OID_AUTO, pageout_stats_free_max,
162 CTLFLAG_RW, &vm_pageout_stats_free_max, 0, "Not implemented");
163
164#if defined(NO_SWAPPING)
165SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
166 CTLFLAG_RD, &vm_swap_enabled, 0, "");
167SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
168 CTLFLAG_RD, &vm_swap_idle_enabled, 0, "");
169#else
170SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
171 CTLFLAG_RW, &vm_swap_enabled, 0, "Enable entire process swapout");
172SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
173 CTLFLAG_RW, &vm_swap_idle_enabled, 0, "Allow swapout on idle criteria");
174#endif
175
176SYSCTL_INT(_vm, OID_AUTO, defer_swapspace_pageouts,
177 CTLFLAG_RW, &defer_swap_pageouts, 0, "Give preference to dirty pages in mem");
178
179SYSCTL_INT(_vm, OID_AUTO, disable_swapspace_pageouts,
180 CTLFLAG_RW, &disable_swap_pageouts, 0, "Disallow swapout of dirty pages");
181
182static int pageout_lock_miss;
183SYSCTL_INT(_vm, OID_AUTO, pageout_lock_miss,
184 CTLFLAG_RD, &pageout_lock_miss, 0, "vget() lock misses during pageout");
185
186#define VM_PAGEOUT_PAGE_COUNT 16
187int vm_pageout_page_count = VM_PAGEOUT_PAGE_COUNT;
188
189int vm_page_max_wired; /* XXX max # of wired pages system-wide */
190
191#if !defined(NO_SWAPPING)
192typedef void freeer_fcn_t (vm_map_t, vm_object_t, vm_pindex_t, int);
193static void vm_pageout_map_deactivate_pages (vm_map_t, vm_pindex_t);
194static freeer_fcn_t vm_pageout_object_deactivate_pages;
195static void vm_req_vmdaemon (void);
196#endif
197static void vm_pageout_page_stats(int q);
198
199/*
200 * vm_pageout_clean:
201 *
202 *	Clean the page and remove it from the laundry. The page must not be
203 *	busy on-call.
204 *
205 * We set the busy bit to cause potential page faults on this page to
206 * block. Note the careful timing, however, the busy bit isn't set till
207 * late and we cannot do anything that will mess with the page.
208 */
209static int
210vm_pageout_clean(vm_page_t m)
211{
212	vm_object_t object;
213	vm_page_t mc[2*vm_pageout_page_count];
214	int pageout_count;
215	int error;
216 int ib, is, page_base;
217 vm_pindex_t pindex = m->pindex;
218
219 object = m->object;
220
221 /*
222 * It doesn't cost us anything to pageout OBJT_DEFAULT or OBJT_SWAP
223 * with the new swapper, but we could have serious problems paging
224 * out other object types if there is insufficient memory.
225 *
226 * Unfortunately, checking free memory here is far too late, so the
227 * check has been moved up a procedural level.
228 */
229
230 /*
231 * Don't mess with the page if it's busy, held, or special
232	 *
233	 * XXX do we really need to check hold_count here?  hold_count
234	 * isn't supposed to mess with vm_page ops except to prevent the
235	 * page from being reused.
236	 */
237	if (m->hold_count != 0 || (m->flags & PG_UNMANAGED)) {
238		vm_page_wakeup(m);
239 return 0;
240 }
241
242 mc[vm_pageout_page_count] = m;
243 pageout_count = 1;
244 page_base = vm_pageout_page_count;
245 ib = 1;
246 is = 1;
247
248 /*
249 * Scan object for clusterable pages.
250 *
251 * We can cluster ONLY if: ->> the page is NOT
252 * clean, wired, busy, held, or mapped into a
253 * buffer, and one of the following:
254 * 1) The page is inactive, or a seldom used
255 * active page.
256 * -or-
257 * 2) we force the issue.
258 *
259 * During heavy mmap/modification loads the pageout
260 * daemon can really fragment the underlying file
261 * due to flushing pages out of order and not trying to
262 * align the clusters (which leaves sporadic out-of-order
263 * holes). To solve this problem we do the reverse scan
264 * first and attempt to align our cluster, then do a
265 * forward scan if room remains.
266 */
267
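	/*
	 * Illustrative note (editor's sketch, not part of the original
	 * source): the cluster is assembled in the middle of the mc[]
	 * array, which is sized 2*vm_pageout_page_count.  The reverse scan
	 * fills slots downward from page_base and the forward scan fills
	 * slots upward from page_base + pageout_count, e.g. with
	 * vm_pageout_page_count == 16:
	 *
	 *	mc[0] .. mc[15]   mc[16]   mc[17] .. mc[31]
	 *	 ^reverse scan     ^m       ^forward scan
	 *
	 * so the flush below always hands the pager a contiguous run of
	 * array slots starting at mc[page_base].
	 */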
268	vm_object_hold(object);
269more:
270 while (ib && pageout_count < vm_pageout_page_count) {
271 vm_page_t p;
272
273 if (ib > pindex) {
274 ib = 0;
275 break;
276 }
277
b12defdc
MD
278 p = vm_page_lookup_busy_try(object, pindex - ib, TRUE, &error);
279 if (error || p == NULL) {
984263bc
MD
280 ib = 0;
281 break;
282 }
b12defdc
MD
283 if ((p->queue - p->pc) == PQ_CACHE ||
284 (p->flags & PG_UNMANAGED)) {
285 vm_page_wakeup(p);
984263bc
MD
286 ib = 0;
287 break;
288 }
289 vm_page_test_dirty(p);
290 if ((p->dirty & p->valid) == 0 ||
027193eb 291 p->queue - p->pc != PQ_INACTIVE ||
984263bc
MD
292 p->wire_count != 0 || /* may be held by buf cache */
293 p->hold_count != 0) { /* may be undergoing I/O */
b12defdc 294 vm_page_wakeup(p);
984263bc
MD
295 ib = 0;
296 break;
297 }
298 mc[--page_base] = p;
299 ++pageout_count;
300 ++ib;
301 /*
302		 * alignment boundary, stop here and switch directions. Do
303 * not clear ib.
304 */
305 if ((pindex - (ib - 1)) % vm_pageout_page_count == 0)
306 break;
307 }
308
309 while (pageout_count < vm_pageout_page_count &&
310 pindex + is < object->size) {
311 vm_page_t p;
312
b12defdc
MD
313 p = vm_page_lookup_busy_try(object, pindex + is, TRUE, &error);
314 if (error || p == NULL)
984263bc
MD
315 break;
316 if (((p->queue - p->pc) == PQ_CACHE) ||
317 (p->flags & (PG_BUSY|PG_UNMANAGED)) || p->busy) {
b12defdc 318 vm_page_wakeup(p);
984263bc
MD
319 break;
320 }
321 vm_page_test_dirty(p);
322 if ((p->dirty & p->valid) == 0 ||
027193eb 323 p->queue - p->pc != PQ_INACTIVE ||
984263bc
MD
324 p->wire_count != 0 || /* may be held by buf cache */
325 p->hold_count != 0) { /* may be undergoing I/O */
b12defdc 326 vm_page_wakeup(p);
984263bc
MD
327 break;
328 }
329 mc[page_base + pageout_count] = p;
330 ++pageout_count;
331 ++is;
332 }
333
334 /*
335 * If we exhausted our forward scan, continue with the reverse scan
336	 * when possible, even past a page boundary. This catches boundary
337 * conditions.
338 */
339 if (ib && pageout_count < vm_pageout_page_count)
340 goto more;
341
398c240d
VS
342 vm_object_drop(object);
343
984263bc
MD
344 /*
345 * we allow reads during pageouts...
346 */
347 return vm_pageout_flush(&mc[page_base], pageout_count, 0);
348}
349
350/*
351 * vm_pageout_flush() - launder the given pages
352 *
353 * The given pages are laundered. Note that we set up for the start of
354 * I/O ( i.e. busy the page ), mark it read-only, and bump the object
355 * reference count all in here rather than in the parent. If we want
356 * the parent to do more sophisticated things we may have to change
357 * the ordering.
358 *
359 * The pages in the array must be busied by the caller and will be
360 * unbusied by this function.
361 */
984263bc 362int
57e43348 363vm_pageout_flush(vm_page_t *mc, int count, int flags)
984263bc 364{
5f910b2f 365 vm_object_t object;
984263bc
MD
366 int pageout_status[count];
367 int numpagedout = 0;
368 int i;
369
370 /*
17cde63e
MD
371 * Initiate I/O. Bump the vm_page_t->busy counter.
372 */
373 for (i = 0; i < count; i++) {
b12defdc
MD
374 KASSERT(mc[i]->valid == VM_PAGE_BITS_ALL,
375 ("vm_pageout_flush page %p index %d/%d: partially "
376 "invalid page", mc[i], i, count));
17cde63e
MD
377 vm_page_io_start(mc[i]);
378 }
379
380 /*
4530a3aa
MD
381 * We must make the pages read-only. This will also force the
382 * modified bit in the related pmaps to be cleared. The pager
383 * cannot clear the bit for us since the I/O completion code
384 * typically runs from an interrupt. The act of making the page
385 * read-only handles the case for us.
b12defdc
MD
386 *
387 * Then we can unbusy the pages, we still hold a reference by virtue
388 * of our soft-busy.
984263bc 389 */
984263bc 390 for (i = 0; i < count; i++) {
984263bc 391 vm_page_protect(mc[i], VM_PROT_READ);
b12defdc 392 vm_page_wakeup(mc[i]);
984263bc
MD
393 }
394
395 object = mc[0]->object;
396 vm_object_pip_add(object, count);
397
398 vm_pager_put_pages(object, mc, count,
c439ad8f 399 (flags | ((object == &kernel_object) ? VM_PAGER_PUT_SYNC : 0)),
984263bc
MD
400 pageout_status);
401
402 for (i = 0; i < count; i++) {
403 vm_page_t mt = mc[i];
404
405 switch (pageout_status[i]) {
406 case VM_PAGER_OK:
407 numpagedout++;
408 break;
409 case VM_PAGER_PEND:
410 numpagedout++;
411 break;
412 case VM_PAGER_BAD:
413 /*
414 * Page outside of range of object. Right now we
415 * essentially lose the changes by pretending it
416 * worked.
417 */
b12defdc 418 vm_page_busy_wait(mt, FALSE, "pgbad");
984263bc
MD
419 pmap_clear_modify(mt);
420 vm_page_undirty(mt);
b12defdc 421 vm_page_wakeup(mt);
984263bc
MD
422 break;
423 case VM_PAGER_ERROR:
424 case VM_PAGER_FAIL:
425 /*
c84c24da
MD
426 * A page typically cannot be paged out when we
427 * have run out of swap. We leave the page
428 * marked inactive and will try to page it out
429 * again later.
430 *
431 * Starvation of the active page list is used to
432 * determine when the system is massively memory
433 * starved.
984263bc 434 */
984263bc
MD
435 break;
436 case VM_PAGER_AGAIN:
437 break;
438 }
439
440 /*
441 * If the operation is still going, leave the page busy to
442 * block all other accesses. Also, leave the paging in
443 * progress indicator set so that we don't attempt an object
444 * collapse.
93afe6be
MD
445 *
446 * For any pages which have completed synchronously,
447 * deactivate the page if we are under a severe deficit.
448 * Do not try to enter them into the cache, though, they
449 * might still be read-heavy.
984263bc
MD
450 */
451 if (pageout_status[i] != VM_PAGER_PEND) {
b12defdc 452 vm_page_busy_wait(mt, FALSE, "pgouw");
93afe6be
MD
453 if (vm_page_count_severe())
454 vm_page_deactivate(mt);
455#if 0
984263bc
MD
456 if (!vm_page_count_severe() || !vm_page_try_to_cache(mt))
457 vm_page_protect(mt, VM_PROT_READ);
93afe6be 458#endif
a491077e 459 vm_page_io_finish(mt);
b12defdc 460 vm_page_wakeup(mt);
a491077e 461 vm_object_pip_wakeup(object);
984263bc
MD
462 }
463 }
464 return numpagedout;
465}
466
467#if !defined(NO_SWAPPING)
468/*
469 * deactivate enough pages to satisfy the inactive target
470 * requirements or if vm_page_proc_limit is set, then
471 * deactivate all of the pages in the object and its
472 * backing_objects.
473 *
474 * The map must be locked.
475 * The caller must hold the vm_object.
476 */
1f804340
MD
477static int vm_pageout_object_deactivate_pages_callback(vm_page_t, void *);
478
984263bc 479static void
57e43348 480vm_pageout_object_deactivate_pages(vm_map_t map, vm_object_t object,
99ad9bc4 481 vm_pindex_t desired, int map_remove_only)
984263bc 482{
1f804340 483 struct rb_vm_page_scan_info info;
b12defdc
MD
484 vm_object_t lobject;
485 vm_object_t tobject;
984263bc 486 int remove_mode;
984263bc 487
05b9db80 488 ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
b12defdc 489 lobject = object;
398c240d 490
b12defdc
MD
491 while (lobject) {
492 if (pmap_resident_count(vm_map_pmap(map)) <= desired)
493 break;
494 if (lobject->type == OBJT_DEVICE || lobject->type == OBJT_PHYS)
495 break;
496 if (lobject->paging_in_progress)
497 break;
984263bc
MD
498
499 remove_mode = map_remove_only;
b12defdc 500 if (lobject->shadow_count > 1)
984263bc 501 remove_mode = 1;
06ecca5a
MD
502
503 /*
a5fc46c9
MD
504 * scan the objects entire memory queue. We hold the
505 * object's token so the scan should not race anything.
06ecca5a 506 */
1f804340
MD
507 info.limit = remove_mode;
508 info.map = map;
509 info.desired = desired;
b12defdc 510 vm_page_rb_tree_RB_SCAN(&lobject->rb_memq, NULL,
1f804340
MD
511 vm_pageout_object_deactivate_pages_callback,
512 &info
513 );
b12defdc
MD
514 while ((tobject = lobject->backing_object) != NULL) {
515 KKASSERT(tobject != object);
516 vm_object_hold(tobject);
517 if (tobject == lobject->backing_object)
518 break;
519 vm_object_drop(tobject);
520 }
05b9db80
MD
521 if (lobject != object) {
522 vm_object_lock_swap();
b12defdc 523 vm_object_drop(lobject);
05b9db80 524 }
b12defdc 525 lobject = tobject;
1f804340 526 }
b12defdc
MD
527 if (lobject != object)
528 vm_object_drop(lobject);
1f804340 529}
99ad9bc4
MD
530
531/*
398c240d 532 * The caller must hold the vm_object.
99ad9bc4 533 */
1f804340
MD
534static int
535vm_pageout_object_deactivate_pages_callback(vm_page_t p, void *data)
536{
537 struct rb_vm_page_scan_info *info = data;
538 int actcount;
984263bc 539
1f804340
MD
540 if (pmap_resident_count(vm_map_pmap(info->map)) <= info->desired) {
541 return(-1);
542 }
543 mycpu->gd_cnt.v_pdpages++;
b12defdc
MD
544
545 if (vm_page_busy_try(p, TRUE))
546 return(0);
547 if (p->wire_count || p->hold_count || (p->flags & PG_UNMANAGED)) {
548 vm_page_wakeup(p);
549 return(0);
550 }
551 if (!pmap_page_exists_quick(vm_map_pmap(info->map), p)) {
552 vm_page_wakeup(p);
1f804340
MD
553 return(0);
554 }
984263bc 555
1f804340
MD
556 actcount = pmap_ts_referenced(p);
557 if (actcount) {
558 vm_page_flag_set(p, PG_REFERENCED);
559 } else if (p->flags & PG_REFERENCED) {
560 actcount = 1;
561 }
562
b12defdc 563 vm_page_and_queue_spin_lock(p);
027193eb 564 if (p->queue - p->pc != PQ_ACTIVE && (p->flags & PG_REFERENCED)) {
b12defdc 565 vm_page_and_queue_spin_unlock(p);
1f804340
MD
566 vm_page_activate(p);
567 p->act_count += actcount;
568 vm_page_flag_clear(p, PG_REFERENCED);
027193eb 569 } else if (p->queue - p->pc == PQ_ACTIVE) {
1f804340
MD
570 if ((p->flags & PG_REFERENCED) == 0) {
571 p->act_count -= min(p->act_count, ACT_DECLINE);
b12defdc
MD
572 if (!info->limit &&
573 (vm_pageout_algorithm || (p->act_count == 0))) {
574 vm_page_and_queue_spin_unlock(p);
984263bc 575 vm_page_protect(p, VM_PROT_NONE);
1f804340
MD
576 vm_page_deactivate(p);
577 } else {
027193eb
MD
578 TAILQ_REMOVE(&vm_page_queues[p->queue].pl,
579 p, pageq);
580 TAILQ_INSERT_TAIL(&vm_page_queues[p->queue].pl,
581 p, pageq);
b12defdc 582 vm_page_and_queue_spin_unlock(p);
984263bc 583 }
1f804340 584 } else {
b12defdc 585 vm_page_and_queue_spin_unlock(p);
1f804340
MD
586 vm_page_activate(p);
587 vm_page_flag_clear(p, PG_REFERENCED);
b12defdc
MD
588
589 vm_page_and_queue_spin_lock(p);
027193eb 590 if (p->queue - p->pc == PQ_ACTIVE) {
b12defdc
MD
591 if (p->act_count < (ACT_MAX - ACT_ADVANCE))
592 p->act_count += ACT_ADVANCE;
027193eb
MD
593 TAILQ_REMOVE(&vm_page_queues[p->queue].pl,
594 p, pageq);
595 TAILQ_INSERT_TAIL(&vm_page_queues[p->queue].pl,
596 p, pageq);
b12defdc
MD
597 }
598 vm_page_and_queue_spin_unlock(p);
984263bc 599 }
027193eb 600 } else if (p->queue - p->pc == PQ_INACTIVE) {
b12defdc 601 vm_page_and_queue_spin_unlock(p);
1f804340 602 vm_page_protect(p, VM_PROT_NONE);
b12defdc
MD
603 } else {
604 vm_page_and_queue_spin_unlock(p);
984263bc 605 }
b12defdc 606 vm_page_wakeup(p);
1f804340 607 return(0);
984263bc
MD
608}
609
610/*
611 * Deactivate some number of pages in a map, try to do it fairly, but
612 * that is really hard to do.
613 */
614static void
57e43348 615vm_pageout_map_deactivate_pages(vm_map_t map, vm_pindex_t desired)
984263bc
MD
616{
617 vm_map_entry_t tmpe;
618 vm_object_t obj, bigobj;
619 int nothingwired;
620
df4f70a6 621 if (lockmgr(&map->lock, LK_EXCLUSIVE | LK_NOWAIT)) {
984263bc
MD
622 return;
623 }
624
625 bigobj = NULL;
626 nothingwired = TRUE;
627
628 /*
629 * first, search out the biggest object, and try to free pages from
630 * that.
631 */
632 tmpe = map->header.next;
633 while (tmpe != &map->header) {
1b874851
MD
634 switch(tmpe->maptype) {
635 case VM_MAPTYPE_NORMAL:
636 case VM_MAPTYPE_VPAGETABLE:
984263bc
MD
637 obj = tmpe->object.vm_object;
638 if ((obj != NULL) && (obj->shadow_count <= 1) &&
639 ((bigobj == NULL) ||
640 (bigobj->resident_page_count < obj->resident_page_count))) {
641 bigobj = obj;
642 }
1b874851
MD
643 break;
644 default:
645 break;
984263bc
MD
646 }
647 if (tmpe->wired_count > 0)
648 nothingwired = FALSE;
649 tmpe = tmpe->next;
650 }
651
05b9db80
MD
652 if (bigobj) {
653 vm_object_hold(bigobj);
984263bc 654 vm_pageout_object_deactivate_pages(map, bigobj, desired, 0);
05b9db80
MD
655 vm_object_drop(bigobj);
656 }
984263bc
MD
657
658 /*
659 * Next, hunt around for other pages to deactivate. We actually
660 * do this search sort of wrong -- .text first is not the best idea.
661 */
662 tmpe = map->header.next;
663 while (tmpe != &map->header) {
664 if (pmap_resident_count(vm_map_pmap(map)) <= desired)
665 break;
1b874851
MD
666 switch(tmpe->maptype) {
667 case VM_MAPTYPE_NORMAL:
668 case VM_MAPTYPE_VPAGETABLE:
984263bc 669 obj = tmpe->object.vm_object;
05b9db80
MD
670 if (obj) {
671 vm_object_hold(obj);
984263bc 672 vm_pageout_object_deactivate_pages(map, obj, desired, 0);
05b9db80
MD
673 vm_object_drop(obj);
674 }
1b874851
MD
675 break;
676 default:
677 break;
984263bc
MD
678 }
679 tmpe = tmpe->next;
680 };
681
682 /*
683 * Remove all mappings if a process is swapped out, this will free page
684 * table pages.
685 */
686 if (desired == 0 && nothingwired)
687 pmap_remove(vm_map_pmap(map),
88181b08 688 VM_MIN_USER_ADDRESS, VM_MAX_USER_ADDRESS);
984263bc 689 vm_map_unlock(map);
984263bc
MD
690}
691#endif
692
693/*
694 * Called when the pageout scan wants to free a page. We no longer
695 * try to cycle the vm_object here with a reference & dealloc, which can
696 * cause a non-trivial object collapse in a critical path.
697 *
698 * It is unclear why we cycled the ref_count in the past, perhaps to try
699 * to optimize shadow chain collapses but I don't quite see why it would
700 * be necessary. An OBJ_DEAD object should terminate any and all vm_pages
701 * synchronously and not have to be kick-started.
702 */
703static void
704vm_pageout_page_free(vm_page_t m)
705{
706	vm_page_protect(m, VM_PROT_NONE);
707	vm_page_free(m);
708}
709
710/*
711 * vm_pageout_scan does the dirty work for the pageout daemon.
712 */
8fa76237
MD
713struct vm_pageout_scan_info {
714 struct proc *bigproc;
715 vm_offset_t bigsize;
716};
717
718static int vm_pageout_scan_callback(struct proc *p, void *data);
719
20479584 720static int
027193eb
MD
721vm_pageout_scan_inactive(int pass, int q, int inactive_shortage,
722 int *vnodes_skippedp)
984263bc 723{
b12defdc 724 vm_page_t m;
984263bc 725 struct vm_page marker;
5d6a945b 726 struct vnode *vpfailed; /* warning, allowed to be stale */
027193eb
MD
727 int maxscan;
728 int delta = 0;
984263bc
MD
729 vm_object_t object;
730 int actcount;
984263bc 731 int maxlaunder;
984263bc 732
984263bc
MD
733 /*
734 * Start scanning the inactive queue for pages we can move to the
735 * cache or free. The scan will stop when the target is reached or
736 * we have scanned the entire inactive queue. Note that m->act_count
737 * is not used to form decisions for the inactive queue, only for the
738 * active queue.
739 *
740 * maxlaunder limits the number of dirty pages we flush per scan.
741 * For most systems a smaller value (16 or 32) is more robust under
742 * extreme memory and disk pressure because any unnecessary writes
743	 * to disk can result in extreme performance degradation. However,
744 * systems with excessive dirty pages (especially when MAP_NOSYNC is
745 * used) will die horribly with limited laundering. If the pageout
746 * daemon cannot clean enough pages in the first pass, we let it go
747 * all out in succeeding passes.
748 */
749 if ((maxlaunder = vm_max_launder) <= 1)
750 maxlaunder = 1;
751 if (pass)
752 maxlaunder = 10000;
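	/*
	 * Usage note (editor's addition, illustrative only): the first-pass
	 * launder limit above is the vm.max_launder sysctl declared earlier
	 * in this file, so an administrator can trade write burstiness for
	 * faster dirty-page recovery with something like:
	 *
	 *	sysctl vm.max_launder=64
	 *
	 * Subsequent passes ignore the limit (maxlaunder = 10000) so the
	 * daemon can still make progress under severe dirty-page pressure.
	 */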
753
06ecca5a 754 /*
b12defdc
MD
755 * Initialize our marker
756 */
757 bzero(&marker, sizeof(marker));
758 marker.flags = PG_BUSY | PG_FICTITIOUS | PG_MARKER;
027193eb
MD
759 marker.queue = PQ_INACTIVE + q;
760 marker.pc = q;
b12defdc
MD
761 marker.wire_count = 1;
762
763 /*
764 * Inactive queue scan.
765 *
766 * NOTE: The vm_page must be spinlocked before the queue to avoid
767 * deadlocks, so it is easiest to simply iterate the loop
768 * with the queue unlocked at the top.
06ecca5a 769 */
5d6a945b 770 vpfailed = NULL;
b12defdc 771
027193eb
MD
772 vm_page_queues_spin_lock(PQ_INACTIVE + q);
773 TAILQ_INSERT_HEAD(&vm_page_queues[PQ_INACTIVE + q].pl, &marker, pageq);
12e4aaff 774 maxscan = vmstats.v_inactive_count;
027193eb 775 vm_page_queues_spin_unlock(PQ_INACTIVE + q);
b12defdc
MD
776
777 while ((m = TAILQ_NEXT(&marker, pageq)) != NULL &&
027193eb 778 maxscan-- > 0 && inactive_shortage - delta > 0)
b12defdc
MD
779 {
780 vm_page_and_queue_spin_lock(m);
781 if (m != TAILQ_NEXT(&marker, pageq)) {
782 vm_page_and_queue_spin_unlock(m);
783 ++maxscan;
784 continue;
785 }
027193eb
MD
786 KKASSERT(m->queue - m->pc == PQ_INACTIVE);
787 TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE + q].pl,
b12defdc 788 &marker, pageq);
027193eb 789 TAILQ_INSERT_AFTER(&vm_page_queues[PQ_INACTIVE + q].pl, m,
b12defdc 790 &marker, pageq);
12e4aaff 791 mycpu->gd_cnt.v_pdpages++;
984263bc 792
06ecca5a 793 /*
b12defdc 794 * Skip marker pages
06ecca5a 795 */
b12defdc
MD
796 if (m->flags & PG_MARKER) {
797 vm_page_and_queue_spin_unlock(m);
798 continue;
799 }
984263bc
MD
800
801 /*
b12defdc
MD
802 * Try to busy the page. Don't mess with pages which are
803 * already busy or reorder them in the queue.
984263bc 804 */
b12defdc
MD
805 if (vm_page_busy_try(m, TRUE)) {
806 vm_page_and_queue_spin_unlock(m);
984263bc 807 continue;
b12defdc
MD
808 }
809 vm_page_and_queue_spin_unlock(m);
027193eb 810 KKASSERT(m->queue - m->pc == PQ_INACTIVE);
984263bc 811
d2d8515b
MD
812 lwkt_yield();
813
984263bc 814 /*
b12defdc
MD
815 * The page has been successfully busied and is now no
816 * longer spinlocked. The queue is no longer spinlocked
817 * either.
984263bc 818 */
06ecca5a 819
90244566
MD
820 /*
821 * It is possible for a page to be busied ad-hoc (e.g. the
822 * pmap_collect() code) and wired and race against the
823 * allocation of a new page. vm_page_alloc() may be forced
824 * to deactivate the wired page in which case it winds up
825 * on the inactive queue and must be handled here. We
826 * correct the problem simply by unqueuing the page.
827 */
828 if (m->wire_count) {
829 vm_page_unqueue_nowakeup(m);
830 vm_page_wakeup(m);
831 kprintf("WARNING: pagedaemon: wired page on "
832 "inactive queue %p\n", m);
833 continue;
834 }
835
984263bc 836 /*
b12defdc 837 * A held page may be undergoing I/O, so skip it.
984263bc 838 */
b12defdc
MD
839 if (m->hold_count) {
840 vm_page_and_queue_spin_lock(m);
027193eb
MD
841 if (m->queue - m->pc == PQ_INACTIVE) {
842 TAILQ_REMOVE(
843 &vm_page_queues[PQ_INACTIVE + q].pl,
844 m, pageq);
845 TAILQ_INSERT_TAIL(
846 &vm_page_queues[PQ_INACTIVE + q].pl,
847 m, pageq);
b12defdc
MD
848 }
849 vm_page_and_queue_spin_unlock(m);
850 ++vm_swapcache_inactive_heuristic;
851 vm_page_wakeup(m);
984263bc
MD
852 continue;
853 }
854
984263bc 855 if (m->object->ref_count == 0) {
06ecca5a
MD
856 /*
857 * If the object is not being used, we ignore previous
858 * references.
859 */
984263bc
MD
860 vm_page_flag_clear(m, PG_REFERENCED);
861 pmap_clear_reference(m);
b12defdc 862 /* fall through to end */
984263bc 863 } else if (((m->flags & PG_REFERENCED) == 0) &&
06ecca5a
MD
864 (actcount = pmap_ts_referenced(m))) {
865 /*
866 * Otherwise, if the page has been referenced while
867 * in the inactive queue, we bump the "activation
868 * count" upwards, making it less likely that the
869 * page will be added back to the inactive queue
870 * prematurely again. Here we check the page tables
871 * (or emulated bits, if any), given the upper level
872 * VM system not knowing anything about existing
873 * references.
874 */
984263bc
MD
875 vm_page_activate(m);
876 m->act_count += (actcount + ACT_ADVANCE);
b12defdc 877 vm_page_wakeup(m);
984263bc
MD
878 continue;
879 }
880
881 /*
b12defdc
MD
882 * (m) is still busied.
883 *
984263bc
MD
884 * If the upper level VM system knows about any page
885 * references, we activate the page. We also set the
886 * "activation count" higher than normal so that we will less
887 * likely place pages back onto the inactive queue again.
888 */
889 if ((m->flags & PG_REFERENCED) != 0) {
890 vm_page_flag_clear(m, PG_REFERENCED);
891 actcount = pmap_ts_referenced(m);
892 vm_page_activate(m);
893 m->act_count += (actcount + ACT_ADVANCE + 1);
b12defdc 894 vm_page_wakeup(m);
984263bc
MD
895 continue;
896 }
897
898 /*
899 * If the upper level VM system doesn't know anything about
900 * the page being dirty, we have to check for it again. As
901 * far as the VM code knows, any partially dirty pages are
902 * fully dirty.
41a01a4d
MD
903 *
904 * Pages marked PG_WRITEABLE may be mapped into the user
905 * address space of a process running on another cpu. A
906 * user process (without holding the MP lock) running on
907 * another cpu may be able to touch the page while we are
17cde63e
MD
908 * trying to remove it. vm_page_cache() will handle this
909 * case for us.
984263bc
MD
910 */
911 if (m->dirty == 0) {
912 vm_page_test_dirty(m);
913 } else {
914 vm_page_dirty(m);
915 }
916
984263bc 917 if (m->valid == 0) {
41a01a4d
MD
918 /*
919 * Invalid pages can be easily freed
920 */
984263bc 921 vm_pageout_page_free(m);
12e4aaff 922 mycpu->gd_cnt.v_dfree++;
027193eb 923 ++delta;
984263bc
MD
924 } else if (m->dirty == 0) {
925 /*
41a01a4d
MD
926 * Clean pages can be placed onto the cache queue.
927 * This effectively frees them.
984263bc
MD
928 */
929 vm_page_cache(m);
027193eb 930 ++delta;
984263bc
MD
931 } else if ((m->flags & PG_WINATCFLS) == 0 && pass == 0) {
932 /*
933 * Dirty pages need to be paged out, but flushing
934			 * a page is extremely expensive versus freeing
935			 * a clean page. Rather than artificially limiting
936 * the number of pages we can flush, we instead give
937 * dirty pages extra priority on the inactive queue
938 * by forcing them to be cycled through the queue
939 * twice before being flushed, after which the
940 * (now clean) page will cycle through once more
941 * before being freed. This significantly extends
942 * the thrash point for a heavily loaded machine.
943 */
984263bc 944 vm_page_flag_set(m, PG_WINATCFLS);
b12defdc 945 vm_page_and_queue_spin_lock(m);
027193eb
MD
946 if (m->queue - m->pc == PQ_INACTIVE) {
947 TAILQ_REMOVE(
948 &vm_page_queues[PQ_INACTIVE + q].pl,
949 m, pageq);
950 TAILQ_INSERT_TAIL(
951 &vm_page_queues[PQ_INACTIVE + q].pl,
952 m, pageq);
b12defdc
MD
953 }
954 vm_page_and_queue_spin_unlock(m);
e527fb6b 955 ++vm_swapcache_inactive_heuristic;
b12defdc 956 vm_page_wakeup(m);
984263bc
MD
957 } else if (maxlaunder > 0) {
958 /*
959 * We always want to try to flush some dirty pages if
960 * we encounter them, to keep the system stable.
961 * Normally this number is small, but under extreme
962 * pressure where there are insufficient clean pages
963 * on the inactive queue, we may have to go all out.
964 */
965 int swap_pageouts_ok;
966 struct vnode *vp = NULL;
967
968 object = m->object;
969
970 if ((object->type != OBJT_SWAP) && (object->type != OBJT_DEFAULT)) {
971 swap_pageouts_ok = 1;
972 } else {
973 swap_pageouts_ok = !(defer_swap_pageouts || disable_swap_pageouts);
974 swap_pageouts_ok |= (!disable_swap_pageouts && defer_swap_pageouts &&
20479584 975 vm_page_count_min(0));
984263bc
MD
976
977 }
978
979 /*
980 * We don't bother paging objects that are "dead".
981 * Those objects are in a "rundown" state.
982 */
983 if (!swap_pageouts_ok || (object->flags & OBJ_DEAD)) {
b12defdc 984 vm_page_and_queue_spin_lock(m);
027193eb
MD
985 if (m->queue - m->pc == PQ_INACTIVE) {
986 TAILQ_REMOVE(
987 &vm_page_queues[PQ_INACTIVE + q].pl,
988 m, pageq);
989 TAILQ_INSERT_TAIL(
990 &vm_page_queues[PQ_INACTIVE + q].pl,
991 m, pageq);
b12defdc
MD
992 }
993 vm_page_and_queue_spin_unlock(m);
e527fb6b 994 ++vm_swapcache_inactive_heuristic;
b12defdc 995 vm_page_wakeup(m);
984263bc
MD
996 continue;
997 }
998
999 /*
b12defdc
MD
1000 * (m) is still busied.
1001 *
984263bc
MD
1002 * The object is already known NOT to be dead. It
1003 * is possible for the vget() to block the whole
1004 * pageout daemon, but the new low-memory handling
1005 * code should prevent it.
1006 *
1007 * The previous code skipped locked vnodes and, worse,
1008 * reordered pages in the queue. This results in
1009 * completely non-deterministic operation because,
1010 * quite often, a vm_fault has initiated an I/O and
1011 * is holding a locked vnode at just the point where
1012 * the pageout daemon is woken up.
1013 *
1014 * We can't wait forever for the vnode lock, we might
1015 * deadlock due to a vn_read() getting stuck in
1016 * vm_wait while holding this vnode. We skip the
1017 * vnode if we can't get it in a reasonable amount
1018 * of time.
5d6a945b
MD
1019 *
1020 * vpfailed is used to (try to) avoid the case where
1021 * a large number of pages are associated with a
1022 * locked vnode, which could cause the pageout daemon
1023 * to stall for an excessive amount of time.
984263bc 1024 */
984263bc 1025 if (object->type == OBJT_VNODE) {
5d6a945b 1026 int flags;
984263bc 1027
5d6a945b
MD
1028 vp = object->handle;
1029 flags = LK_EXCLUSIVE | LK_NOOBJ;
1030 if (vp == vpfailed)
1031 flags |= LK_NOWAIT;
1032 else
1033 flags |= LK_TIMELOCK;
b12defdc
MD
1034 vm_page_hold(m);
1035 vm_page_wakeup(m);
1036
1037 /*
1038 * We have unbusied (m) temporarily so we can
1039 * acquire the vp lock without deadlocking.
1040 * (m) is held to prevent destruction.
1041 */
5d6a945b
MD
1042 if (vget(vp, flags) != 0) {
1043 vpfailed = vp;
984263bc
MD
1044 ++pageout_lock_miss;
1045 if (object->flags & OBJ_MIGHTBEDIRTY)
027193eb 1046 ++*vnodes_skippedp;
b12defdc 1047 vm_page_unhold(m);
984263bc
MD
1048 continue;
1049 }
1050
1051 /*
1052 * The page might have been moved to another
1053 * queue during potential blocking in vget()
1054 * above. The page might have been freed and
1055 * reused for another vnode. The object might
1056 * have been reused for another vnode.
1057 */
027193eb 1058 if (m->queue - m->pc != PQ_INACTIVE ||
984263bc
MD
1059 m->object != object ||
1060 object->handle != vp) {
1061 if (object->flags & OBJ_MIGHTBEDIRTY)
027193eb 1062 ++*vnodes_skippedp;
984263bc 1063 vput(vp);
b12defdc 1064 vm_page_unhold(m);
984263bc
MD
1065 continue;
1066 }
1067
1068 /*
1069 * The page may have been busied during the
1070 * blocking in vput(); We don't move the
1071 * page back onto the end of the queue so that
1072 * statistics are more correct if we don't.
1073 */
b12defdc 1074 if (vm_page_busy_try(m, TRUE)) {
984263bc 1075 vput(vp);
b12defdc 1076 vm_page_unhold(m);
984263bc
MD
1077 continue;
1078 }
b12defdc 1079 vm_page_unhold(m);
984263bc
MD
1080
1081 /*
b12defdc
MD
1082 * (m) is busied again
1083 *
1084 * We own the busy bit and remove our hold
1085 * bit. If the page is still held it
1086 * might be undergoing I/O, so skip it.
984263bc
MD
1087 */
1088 if (m->hold_count) {
b12defdc 1089 vm_page_and_queue_spin_lock(m);
027193eb
MD
1090 if (m->queue - m->pc == PQ_INACTIVE) {
1091 TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE + q].pl, m, pageq);
1092 TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE + q].pl, m, pageq);
b12defdc
MD
1093 }
1094 vm_page_and_queue_spin_unlock(m);
e527fb6b 1095 ++vm_swapcache_inactive_heuristic;
984263bc 1096 if (object->flags & OBJ_MIGHTBEDIRTY)
027193eb 1097 ++*vnodes_skippedp;
b12defdc 1098 vm_page_wakeup(m);
984263bc
MD
1099 vput(vp);
1100 continue;
1101 }
b12defdc 1102 /* (m) is left busied as we fall through */
984263bc
MD
1103 }
1104
1105 /*
b12defdc
MD
1106 * page is busy and not held here.
1107 *
984263bc
MD
1108 * If a page is dirty, then it is either being washed
1109 * (but not yet cleaned) or it is still in the
1110 * laundry. If it is still in the laundry, then we
1111 * start the cleaning operation.
1112 *
20479584
MD
1113 * decrement inactive_shortage on success to account
1114 * for the (future) cleaned page. Otherwise we
1115 * could wind up laundering or cleaning too many
1116 * pages.
984263bc 1117 */
984263bc 1118 if (vm_pageout_clean(m) != 0) {
027193eb 1119 ++delta;
984263bc 1120 --maxlaunder;
c84c24da 1121 }
b12defdc 1122 /* clean ate busy, page no longer accessible */
984263bc
MD
1123 if (vp != NULL)
1124 vput(vp);
b12defdc
MD
1125 } else {
1126 vm_page_wakeup(m);
984263bc
MD
1127 }
1128 }
027193eb
MD
1129 vm_page_queues_spin_lock(PQ_INACTIVE + q);
1130 TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE + q].pl, &marker, pageq);
1131 vm_page_queues_spin_unlock(PQ_INACTIVE + q);
1132
1133 return (delta);
1134}
1135
1136static int
1137vm_pageout_scan_active(int pass, int q,
1138 int inactive_shortage, int active_shortage,
1139 int *recycle_countp)
1140{
1141 struct vm_page marker;
1142 vm_page_t m;
1143 int actcount;
1144 int delta = 0;
1145 int pcount;
984263bc
MD
1146
1147 /*
1148	 * We want to move pages from the active queue to the inactive
1149	 * queue to get the inactive queue to the inactive target. If
1150	 * we still have a page shortage from above we try to directly free
1151	 * clean pages instead of moving them.
1152	 *
1153	 * If we do still have a shortage we keep track of the number of
1154	 * pages we free or cache (recycle_count) as a measure of thrashing
1155	 * between the active and inactive queues.
1156	 *
1157	 * If we were able to completely satisfy the free+cache targets
1158	 * from the inactive pool we limit the number of pages we move
1159	 * from the active pool to the inactive pool to 2x the pages we
1160	 * had removed from the inactive pool (with a minimum of 1/5 the
1161	 * inactive target). If we were not able to completely satisfy
1162	 * the free+cache targets we go for the whole target aggressively.
1163	 *
1164	 * NOTE: Both variables can end up negative.
1165	 * NOTE: We are still in a critical section.
1166	 */
20479584 1167
027193eb
MD
1168 bzero(&marker, sizeof(marker));
1169 marker.flags = PG_BUSY | PG_FICTITIOUS | PG_MARKER;
1170 marker.queue = PQ_ACTIVE + q;
1171 marker.pc = q;
1172 marker.wire_count = 1;
b12defdc 1173
027193eb
MD
1174 vm_page_queues_spin_lock(PQ_ACTIVE + q);
1175 TAILQ_INSERT_HEAD(&vm_page_queues[PQ_ACTIVE + q].pl, &marker, pageq);
1176 vm_page_queues_spin_unlock(PQ_ACTIVE + q);
b12defdc
MD
1177 pcount = vmstats.v_active_count;
1178
1179 while ((m = TAILQ_NEXT(&marker, pageq)) != NULL &&
027193eb
MD
1180 pcount-- > 0 && (inactive_shortage - delta > 0 ||
1181 active_shortage > 0))
b12defdc
MD
1182 {
1183 vm_page_and_queue_spin_lock(m);
1184 if (m != TAILQ_NEXT(&marker, pageq)) {
1185 vm_page_and_queue_spin_unlock(m);
1186 ++pcount;
1187 continue;
1188 }
027193eb
MD
1189 KKASSERT(m->queue - m->pc == PQ_ACTIVE);
1190 TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE + q].pl,
b12defdc 1191 &marker, pageq);
027193eb 1192 TAILQ_INSERT_AFTER(&vm_page_queues[PQ_ACTIVE + q].pl, m,
b12defdc 1193 &marker, pageq);
984263bc 1194
984263bc 1195 /*
b12defdc 1196 * Skip marker pages
984263bc 1197 */
b12defdc
MD
1198 if (m->flags & PG_MARKER) {
1199 vm_page_and_queue_spin_unlock(m);
1200 continue;
1201 }
06ecca5a 1202
984263bc 1203 /*
b12defdc
MD
1204 * Try to busy the page. Don't mess with pages which are
1205 * already busy or reorder them in the queue.
984263bc 1206 */
b12defdc
MD
1207 if (vm_page_busy_try(m, TRUE)) {
1208 vm_page_and_queue_spin_unlock(m);
984263bc
MD
1209 continue;
1210 }
1211
b12defdc
MD
1212 /*
1213 * Don't deactivate pages that are held, even if we can
1214 * busy them. (XXX why not?)
1215 */
1216 if (m->hold_count != 0) {
027193eb 1217 TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE + q].pl,
b12defdc 1218 m, pageq);
027193eb 1219 TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE + q].pl,
b12defdc
MD
1220 m, pageq);
1221 vm_page_and_queue_spin_unlock(m);
1222 vm_page_wakeup(m);
1223 continue;
1224 }
1225 vm_page_and_queue_spin_unlock(m);
d2d8515b 1226 lwkt_yield();
b12defdc
MD
1227
1228 /*
1229 * The page has been successfully busied and the page and
1230 * queue are no longer locked.
1231 */
1232
984263bc
MD
1233 /*
1234 * The count for pagedaemon pages is done after checking the
1235 * page for eligibility...
1236 */
12e4aaff 1237 mycpu->gd_cnt.v_pdpages++;
984263bc
MD
1238
1239 /*
20479584
MD
1240 * Check to see "how much" the page has been used and clear
1241 * the tracking access bits. If the object has no references
1242 * don't bother paying the expense.
984263bc
MD
1243 */
1244 actcount = 0;
1245 if (m->object->ref_count != 0) {
20479584
MD
1246 if (m->flags & PG_REFERENCED)
1247 ++actcount;
984263bc
MD
1248 actcount += pmap_ts_referenced(m);
1249 if (actcount) {
1250 m->act_count += ACT_ADVANCE + actcount;
1251 if (m->act_count > ACT_MAX)
1252 m->act_count = ACT_MAX;
1253 }
1254 }
984263bc
MD
1255 vm_page_flag_clear(m, PG_REFERENCED);
1256
1257 /*
20479584 1258 * actcount is only valid if the object ref_count is non-zero.
984263bc 1259 */
20479584 1260 if (actcount && m->object->ref_count != 0) {
b12defdc 1261 vm_page_and_queue_spin_lock(m);
027193eb
MD
1262 if (m->queue - m->pc == PQ_ACTIVE) {
1263 TAILQ_REMOVE(
1264 &vm_page_queues[PQ_ACTIVE + q].pl,
1265 m, pageq);
1266 TAILQ_INSERT_TAIL(
1267 &vm_page_queues[PQ_ACTIVE + q].pl,
1268 m, pageq);
b12defdc
MD
1269 }
1270 vm_page_and_queue_spin_unlock(m);
1271 vm_page_wakeup(m);
984263bc
MD
1272 } else {
1273 m->act_count -= min(m->act_count, ACT_DECLINE);
1274 if (vm_pageout_algorithm ||
1275 m->object->ref_count == 0 ||
20479584
MD
1276 m->act_count < pass + 1
1277 ) {
1278 /*
1279 * Deactivate the page. If we had a
1280 * shortage from our inactive scan try to
1281 * free (cache) the page instead.
e6e9a0c3
MD
1282 *
1283 * Don't just blindly cache the page if
1284 * we do not have a shortage from the
1285 * inactive scan, that could lead to
1286 * gigabytes being moved.
20479584
MD
1287 */
1288 --active_shortage;
027193eb 1289 if (inactive_shortage - delta > 0 ||
20479584 1290 m->object->ref_count == 0) {
027193eb
MD
1291 if (inactive_shortage - delta > 0)
1292 ++*recycle_countp;
984263bc 1293 vm_page_protect(m, VM_PROT_NONE);
e6e9a0c3 1294 if (m->dirty == 0 &&
027193eb
MD
1295 inactive_shortage - delta > 0) {
1296 ++delta;
984263bc 1297 vm_page_cache(m);
c84c24da 1298 } else {
984263bc 1299 vm_page_deactivate(m);
a491077e 1300 vm_page_wakeup(m);
c84c24da 1301 }
984263bc
MD
1302 } else {
1303 vm_page_deactivate(m);
b12defdc 1304 vm_page_wakeup(m);
984263bc
MD
1305 }
1306 } else {
b12defdc 1307 vm_page_and_queue_spin_lock(m);
027193eb 1308 if (m->queue - m->pc == PQ_ACTIVE) {
b12defdc 1309 TAILQ_REMOVE(
027193eb
MD
1310 &vm_page_queues[PQ_ACTIVE + q].pl,
1311 m, pageq);
b12defdc 1312 TAILQ_INSERT_TAIL(
027193eb
MD
1313 &vm_page_queues[PQ_ACTIVE + q].pl,
1314 m, pageq);
b12defdc
MD
1315 }
1316 vm_page_and_queue_spin_unlock(m);
1317 vm_page_wakeup(m);
984263bc
MD
1318 }
1319 }
984263bc
MD
1320 }
1321
b12defdc
MD
1322 /*
1323 * Clean out our local marker.
1324 */
027193eb
MD
1325 vm_page_queues_spin_lock(PQ_ACTIVE + q);
1326 TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE + q].pl, &marker, pageq);
1327 vm_page_queues_spin_unlock(PQ_ACTIVE + q);
1328
1329 return (delta);
1330}
1331
1332/*
1333 * The number of actually free pages can drop down to v_free_reserved,
1334 * we try to build the free count back above v_free_min. Note that
1335 * vm_paging_needed() also returns TRUE if v_free_count is not at
1336 * least v_free_min so that is the minimum we must build the free
1337 * count to.
1338 *
1339 * We use a slightly higher target to improve hysteresis,
1340 * ((v_free_target + v_free_min) / 2). Since v_free_target
1341 * is usually the same as v_cache_min this maintains about
1342 * half the pages in the free queue as are in the cache queue,
1343 * providing pretty good pipelining for pageout operation.
1344 *
1345 * The system operator can manipulate vm.v_cache_min and
1346 * vm.v_free_target to tune the pageout daemon. Be sure
1347 * to keep vm.v_free_min < vm.v_free_target.
1348 *
1349 * Note that the original paging target is to get at least
1350 * (free_min + cache_min) into (free + cache). The slightly
1351 * higher target will shift additional pages from cache to free
1352 * without affecting the original paging target in order to
1353 * maintain better hysteresis and not have the free count always
1354 * be dead-on v_free_min.
1355 *
1356 * NOTE: we are still in a critical section.
1357 *
1358 * Pages moved from PQ_CACHE to totally free are not counted in the
1359 * pages_freed counter.
1360 */
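/*
 * Editor's illustrative example (hypothetical numbers, not from the
 * original commit): if v_free_min were 1300 pages and v_free_target were
 * 5200 pages, the loop below would keep pulling pages out of PQ_CACHE
 * until v_free_count reached (5200 + 1300) / 2 = 3250 pages, i.e. it
 * rebuilds the free list roughly half way to the target rather than
 * stopping dead-on v_free_min.
 */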
1361static void
1362vm_pageout_scan_cache(int inactive_shortage,
1363 int vnodes_skipped, int recycle_count)
1364{
1365 struct vm_pageout_scan_info info;
1366 vm_page_t m;
b12defdc 1367
cd3c66bd
MD
1368 while (vmstats.v_free_count <
1369 (vmstats.v_free_min + vmstats.v_free_target) / 2) {
1370 /*
b12defdc 1371 * This steals some code from vm/vm_page.c
cd3c66bd 1372 */
984263bc 1373 static int cache_rover = 0;
b12defdc
MD
1374
1375 m = vm_page_list_find(PQ_CACHE, cache_rover & PQ_L2_MASK, FALSE);
20479584 1376 if (m == NULL)
984263bc 1377 break;
b12defdc
MD
1378 /* page is returned removed from its queue and spinlocked */
1379 if (vm_page_busy_try(m, TRUE)) {
1380 vm_page_deactivate_locked(m);
1381 vm_page_spin_unlock(m);
984263bc 1382#ifdef INVARIANTS
086c1d7e 1383 kprintf("Warning: busy page %p found in cache\n", m);
984263bc 1384#endif
b12defdc
MD
1385 continue;
1386 }
1387 vm_page_spin_unlock(m);
1388 pagedaemon_wakeup();
d2d8515b 1389 lwkt_yield();
b12defdc
MD
1390
1391 /*
1392 * Page has been successfully busied and it and its queue
1393 * is no longer spinlocked.
1394 */
1395 if ((m->flags & PG_UNMANAGED) ||
1396 m->hold_count ||
1397 m->wire_count) {
984263bc 1398 vm_page_deactivate(m);
b12defdc 1399 vm_page_wakeup(m);
984263bc
MD
1400 continue;
1401 }
17cde63e
MD
1402 KKASSERT((m->flags & PG_MAPPED) == 0);
1403 KKASSERT(m->dirty == 0);
b12defdc 1404 cache_rover += PQ_PRIME2;
984263bc 1405 vm_pageout_page_free(m);
12e4aaff 1406 mycpu->gd_cnt.v_dfree++;
984263bc 1407 }
06ecca5a 1408
984263bc
MD
1409#if !defined(NO_SWAPPING)
1410 /*
1411 * Idle process swapout -- run once per second.
1412 */
1413 if (vm_swap_idle_enabled) {
1414 static long lsec;
1415 if (time_second != lsec) {
1416 vm_pageout_req_swapout |= VM_SWAP_IDLE;
1417 vm_req_vmdaemon();
1418 lsec = time_second;
1419 }
1420 }
1421#endif
1422
1423 /*
1424 * If we didn't get enough free pages, and we have skipped a vnode
1425 * in a writeable object, wakeup the sync daemon. And kick swapout
1426 * if we did not get enough free pages.
1427 */
1428 if (vm_paging_target() > 0) {
20479584 1429 if (vnodes_skipped && vm_page_count_min(0))
418ff780 1430 speedup_syncer();
984263bc
MD
1431#if !defined(NO_SWAPPING)
1432 if (vm_swap_enabled && vm_page_count_target()) {
1433 vm_req_vmdaemon();
1434 vm_pageout_req_swapout |= VM_SWAP_NORMAL;
1435 }
1436#endif
1437 }
1438
1439 /*
1440	 * Handle catastrophic conditions. Under good conditions we should
1441	 * be at the target, well beyond our minimum. If we could not even
1442	 * reach our minimum the system is under heavy stress.
1443	 *
1444	 * Determine whether we have run out of memory. This occurs when
1445	 * swap_pager_full is TRUE and the only pages left in the page
1446	 * queues are dirty. We will still likely have page shortages.
1447	 *
1448	 * - swap_pager_full is set if insufficient swap was
1449	 *	 available to satisfy a requested pageout.
1450	 *
1451	 * - the inactive queue is bloated (4 x size of active queue),
1452	 *	 meaning it is unable to get rid of dirty pages.
1453	 *
1454	 * - vm_page_count_min() without counting pages recycled from the
1455	 *	 active queue (recycle_count) means we could not recover
1456	 *	 enough pages to meet bare minimum needs. This test only
1457	 *	 works if the inactive queue is bloated.
1458	 *
1459	 * - due to a positive inactive_shortage we shifted the remaining
1460	 *	 dirty pages from the active queue to the inactive queue
1461	 *	 trying to find clean ones to free.
1462	 */
20479584 1463 if (swap_pager_full && vm_page_count_min(recycle_count))
c84c24da 1464 kprintf("Warning: system low on memory+swap!\n");
20479584
MD
1465 if (swap_pager_full && vm_page_count_min(recycle_count) &&
1466 vmstats.v_inactive_count > vmstats.v_active_count * 4 &&
1467 inactive_shortage > 0) {
1468 /*
1469 * Kill something.
1470 */
8fa76237
MD
1471 info.bigproc = NULL;
1472 info.bigsize = 0;
1473 allproc_scan(vm_pageout_scan_callback, &info);
1474 if (info.bigproc != NULL) {
1475 killproc(info.bigproc, "out of swap space");
1476 info.bigproc->p_nice = PRIO_MIN;
08f2f1bb
SS
1477 info.bigproc->p_usched->resetpriority(
1478 FIRST_LWP_IN_PROC(info.bigproc));
12e4aaff 1479 wakeup(&vmstats.v_free_count);
8fa76237 1480 PRELE(info.bigproc);
984263bc
MD
1481 }
1482 }
1483}
1484
99ad9bc4 1485/*
b12defdc 1486 * The caller must hold proc_token.
99ad9bc4 1487 */
8fa76237
MD
1488static int
1489vm_pageout_scan_callback(struct proc *p, void *data)
1490{
1491 struct vm_pageout_scan_info *info = data;
1492 vm_offset_t size;
1493
1494 /*
20479584
MD
1495 * Never kill system processes or init. If we have configured swap
1496 * then try to avoid killing low-numbered pids.
8fa76237
MD
1497 */
1498 if ((p->p_flag & P_SYSTEM) || (p->p_pid == 1) ||
1499 ((p->p_pid < 48) && (vm_swap_size != 0))) {
1500 return (0);
1501 }
1502
1503 /*
1504 * if the process is in a non-running type state,
1505 * don't touch it.
1506 */
20479584 1507 if (p->p_stat != SACTIVE && p->p_stat != SSTOP)
8fa76237 1508 return (0);
8fa76237
MD
1509
1510 /*
20479584
MD
1511 * Get the approximate process size. Note that anonymous pages
1512 * with backing swap will be counted twice, but there should not
1513 * be too many such pages due to the stress the VM system is
1514 * under at this point.
8fa76237 1515 */
20479584 1516 size = vmspace_anonymous_count(p->p_vmspace) +
8fa76237
MD
1517 vmspace_swap_count(p->p_vmspace);
1518
1519 /*
1520	 * If this process is bigger than the biggest one
1521 * remember it.
1522 */
20479584 1523 if (info->bigsize < size) {
8fa76237
MD
1524 if (info->bigproc)
1525 PRELE(info->bigproc);
1526 PHOLD(p);
1527 info->bigproc = p;
1528 info->bigsize = size;
1529 }
d2d8515b 1530 lwkt_yield();
8fa76237
MD
1531 return(0);
1532}
1533
984263bc
MD
1534/*
1535 * This routine tries to maintain the pseudo LRU active queue,
1536 * so that during long periods of time where there is no paging,
1537 * that some statistic accumulation still occurs. This code
1538 * helps the situation where paging just starts to occur.
1539 */
1540static void
027193eb 1541vm_pageout_page_stats(int q)
984263bc 1542{
984263bc 1543 static int fullintervalcount = 0;
b12defdc
MD
1544 struct vm_page marker;
1545 vm_page_t m;
1546 int pcount, tpcount; /* Number of pages to check */
984263bc 1547 int page_shortage;
984263bc 1548
b12defdc
MD
1549 page_shortage = (vmstats.v_inactive_target + vmstats.v_cache_max +
1550 vmstats.v_free_min) -
1551 (vmstats.v_free_count + vmstats.v_inactive_count +
1552 vmstats.v_cache_count);
984263bc
MD
1553
1554 if (page_shortage <= 0)
1555 return;
1556
12e4aaff 1557 pcount = vmstats.v_active_count;
984263bc
MD
1558 fullintervalcount += vm_pageout_stats_interval;
1559 if (fullintervalcount < vm_pageout_full_stats_interval) {
b12defdc
MD
1560 tpcount = (vm_pageout_stats_max * vmstats.v_active_count) /
1561 vmstats.v_page_count;
984263bc
MD
1562 if (pcount > tpcount)
1563 pcount = tpcount;
1564 } else {
1565 fullintervalcount = 0;
1566 }
1567
b12defdc
MD
1568 bzero(&marker, sizeof(marker));
1569 marker.flags = PG_BUSY | PG_FICTITIOUS | PG_MARKER;
027193eb
MD
1570 marker.queue = PQ_ACTIVE + q;
1571 marker.pc = q;
b12defdc
MD
1572 marker.wire_count = 1;
1573
027193eb
MD
1574 vm_page_queues_spin_lock(PQ_ACTIVE + q);
1575 TAILQ_INSERT_HEAD(&vm_page_queues[PQ_ACTIVE + q].pl, &marker, pageq);
1576 vm_page_queues_spin_unlock(PQ_ACTIVE + q);
b12defdc
MD
1577
	while ((m = TAILQ_NEXT(&marker, pageq)) != NULL &&
	       pcount-- > 0)
	{
		int actcount;

		vm_page_and_queue_spin_lock(m);
		if (m != TAILQ_NEXT(&marker, pageq)) {
			vm_page_and_queue_spin_unlock(m);
			++pcount;
			continue;
		}
		KKASSERT(m->queue - m->pc == PQ_ACTIVE);
		TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE + q].pl, &marker, pageq);
		TAILQ_INSERT_AFTER(&vm_page_queues[PQ_ACTIVE + q].pl, m,
				   &marker, pageq);

		/*
		 * Ignore markers
		 */
		if (m->flags & PG_MARKER) {
			vm_page_and_queue_spin_unlock(m);
			continue;
		}

		/*
		 * Ignore pages we can't busy
		 */
		if (vm_page_busy_try(m, TRUE)) {
			vm_page_and_queue_spin_unlock(m);
			continue;
		}
		vm_page_and_queue_spin_unlock(m);
		KKASSERT(m->queue - m->pc == PQ_ACTIVE);

		/*
		 * We now have a safely busied page, the page and queue
		 * spinlocks have been released.
		 *
		 * Ignore held pages
		 */
		if (m->hold_count) {
			vm_page_wakeup(m);
			continue;
		}

		/*
		 * Calculate activity
		 */
		actcount = 0;
		if (m->flags & PG_REFERENCED) {
			vm_page_flag_clear(m, PG_REFERENCED);
			actcount += 1;
		}
		actcount += pmap_ts_referenced(m);

		/*
		 * Update act_count and move page to end of queue.
		 */
		if (actcount) {
			m->act_count += ACT_ADVANCE + actcount;
			if (m->act_count > ACT_MAX)
				m->act_count = ACT_MAX;
			vm_page_and_queue_spin_lock(m);
			if (m->queue - m->pc == PQ_ACTIVE) {
				TAILQ_REMOVE(
					&vm_page_queues[PQ_ACTIVE + q].pl,
					m, pageq);
				TAILQ_INSERT_TAIL(
					&vm_page_queues[PQ_ACTIVE + q].pl,
					m, pageq);
			}
			vm_page_and_queue_spin_unlock(m);
			vm_page_wakeup(m);
			continue;
		}

		if (m->act_count == 0) {
			/*
			 * We turn off page access so that we have more
			 * accurate RSS stats.  We don't do this in the
			 * normal page deactivation when the system is
			 * under VM load, because the cost of the large
			 * number of page protect operations would be
			 * higher than the value of doing the operation.
			 *
			 * We use the marker to save our place so we can
			 * release the spin lock.  Both (m) and (next)
			 * will be invalid.
			 */
			vm_page_protect(m, VM_PROT_NONE);
			vm_page_deactivate(m);
		} else {
			m->act_count -= min(m->act_count, ACT_DECLINE);
			vm_page_and_queue_spin_lock(m);
			if (m->queue - m->pc == PQ_ACTIVE) {
				TAILQ_REMOVE(
					&vm_page_queues[PQ_ACTIVE + q].pl,
					m, pageq);
				TAILQ_INSERT_TAIL(
					&vm_page_queues[PQ_ACTIVE + q].pl,
					m, pageq);
			}
			vm_page_and_queue_spin_unlock(m);
		}
		vm_page_wakeup(m);
	}

	/*
	 * Remove our local marker
	 */
	vm_page_queues_spin_lock(PQ_ACTIVE + q);
	TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE + q].pl, &marker, pageq);
	vm_page_queues_spin_unlock(PQ_ACTIVE + q);
}

static int
vm_pageout_free_page_calc(vm_size_t count)
{
	if (count < vmstats.v_page_count)
		return 0;
	/*
	 * free_reserved needs to include enough for the largest swap pager
	 * structures plus enough for any pv_entry structs when paging.
	 *
	 * v_free_min		normal allocations
	 * v_free_reserved	system allocations
	 * v_pageout_free_min	allocations by the pageout daemon
	 * v_interrupt_free_min	low level allocations (e.g. swap structures)
	 */
	if (vmstats.v_page_count > 1024)
		vmstats.v_free_min = 64 + (vmstats.v_page_count - 1024) / 200;
	else
		vmstats.v_free_min = 64;
	vmstats.v_free_reserved = vmstats.v_free_min * 4 / 8 + 7;
	vmstats.v_free_severe = vmstats.v_free_min * 4 / 8 + 0;
	vmstats.v_pageout_free_min = vmstats.v_free_min * 2 / 8 + 7;
	vmstats.v_interrupt_free_min = vmstats.v_free_min * 1 / 8 + 7;

	return 1;
}
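
/*
 * Illustrative sketch only (not compiled): rough values the formulas
 * above produce for a hypothetical machine with 262144 4K pages (1GB
 * of RAM).  The helper name below is made up for the example.
 */
#if 0
static void
vm_pageout_free_page_calc_example(void)
{
	vm_size_t npages = 262144;		/* hypothetical 1GB machine */
	vm_size_t free_min;

	free_min = 64 + (npages - 1024) / 200;	/* 1369 pages, ~5.3MB */
	kprintf("v_free_min           %lu\n", (u_long)free_min);
	kprintf("v_free_reserved      %lu\n", (u_long)(free_min * 4 / 8 + 7));
	kprintf("v_free_severe        %lu\n", (u_long)(free_min * 4 / 8));
	kprintf("v_pageout_free_min   %lu\n", (u_long)(free_min * 2 / 8 + 7));
	kprintf("v_interrupt_free_min %lu\n", (u_long)(free_min * 1 / 8 + 7));
}
#endif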


/*
 * vm_pageout_thread() is the high level pageout daemon.
 *
 * No requirements.
 */
static void
vm_pageout_thread(void)
{
	int pass;
	int q;

	/*
	 * Initialize some paging parameters.
	 */
	curthread->td_flags |= TDF_SYSTHREAD;

	if (vmstats.v_page_count < 2000)
		vm_pageout_page_count = 8;

	vm_pageout_free_page_calc(vmstats.v_page_count);

	/*
	 * v_free_target and v_cache_min control pageout hysteresis.  Note
	 * that these are more a measure of the VM cache queue hysteresis
	 * than of the VM free queue.  Specifically, v_free_target is the
	 * high water mark (free+cache pages).
	 *
	 * v_free_reserved + v_cache_min (mostly means v_cache_min) is the
	 * low water mark, while v_free_min is the stop.  v_cache_min must
	 * be big enough to handle memory needs while the pageout daemon
	 * is signalled and run to free more pages.
	 */
	if (vmstats.v_free_count > 6144)
		vmstats.v_free_target = 4 * vmstats.v_free_min + vmstats.v_free_reserved;
	else
		vmstats.v_free_target = 2 * vmstats.v_free_min + vmstats.v_free_reserved;

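	/*
	 * Example figures only: for the hypothetical 1GB machine used in
	 * the sketch after vm_pageout_free_page_calc() (v_free_min about
	 * 1369 pages), the large-memory case above yields a free+cache
	 * target of roughly 4 * 1369 + 691 = 6167 pages, i.e. about 24MB.
	 */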
	/*
	 * NOTE: With the new buffer cache b_act_count we want the default
	 *	 inactive target to be a percentage of available memory.
	 *
	 *	 The inactive target essentially determines the minimum
	 *	 number of 'temporary' pages capable of caching one-time-use
	 *	 files when the VM system is otherwise full of pages
	 *	 belonging to multi-time-use files or active program data.
	 *
	 * NOTE: The inactive target is aggressively pursued only if the
	 *	 inactive queue becomes too small.  If the inactive queue
	 *	 is large enough to satisfy page movement to free+cache
	 *	 then it is repopulated more slowly from the active queue.
	 *	 This allows a general inactive_target default to be set.
	 *
	 *	 There is an issue here for processes which sit mostly idle
	 *	 'overnight', such as sshd, tcsh, and X.  Any movement from
	 *	 the active queue will eventually cause such pages to
	 *	 recycle, eventually causing a lot of paging in the morning.
	 *	 To reduce the incidence of this, pages cycled out of the
	 *	 buffer cache are moved directly to the inactive queue if
	 *	 they were only used once or twice.
	 *
	 *	 The vfs.vm_cycle_point sysctl can be used to adjust this.
	 *	 Increasing the value (up to 64) increases the number of
	 *	 buffer recyclements which go directly to the inactive queue.
	 */
	if (vmstats.v_free_count > 2048) {
		vmstats.v_cache_min = vmstats.v_free_target;
		vmstats.v_cache_max = 2 * vmstats.v_cache_min;
	} else {
		vmstats.v_cache_min = 0;
		vmstats.v_cache_max = 0;
	}
	vmstats.v_inactive_target = vmstats.v_free_count / 4;

	/* XXX does not really belong here */
	if (vm_page_max_wired == 0)
		vm_page_max_wired = vmstats.v_free_count / 3;

	if (vm_pageout_stats_max == 0)
		vm_pageout_stats_max = vmstats.v_free_target;

	/*
	 * Set interval in seconds for stats scan.
	 */
	if (vm_pageout_stats_interval == 0)
		vm_pageout_stats_interval = 5;
	if (vm_pageout_full_stats_interval == 0)
		vm_pageout_full_stats_interval = vm_pageout_stats_interval * 4;


	/*
	 * Set maximum free per pass
	 */
	if (vm_pageout_stats_free_max == 0)
		vm_pageout_stats_free_max = 5;

	swap_pager_swap_init();
	pass = 0;

	/*
	 * The pageout daemon is never done, so loop forever.
	 */
	while (TRUE) {
		int error;
		int delta1;
		int delta2;
		int inactive_shortage;
		int active_shortage;
		int vnodes_skipped = 0;
		int recycle_count = 0;
		int tmp;

		/*
		 * Wait for an action request.  If we timeout check to
		 * see if paging is needed (in case the normal wakeup
		 * code raced us).
		 */
		if (vm_pages_needed == 0) {
			error = tsleep(&vm_pages_needed,
				       0, "psleep",
				       vm_pageout_stats_interval * hz);
			if (error &&
			    vm_paging_needed() == 0 &&
			    vm_pages_needed == 0) {
				for (q = 0; q < PQ_MAXL2_SIZE; ++q)
					vm_pageout_page_stats(q);
				continue;
			}
			vm_pages_needed = 1;
		}

		mycpu->gd_cnt.v_pdwakeups++;

		/*
		 * Do whatever cleanup that the pmap code can.
		 */
		pmap_collect();

		/*
		 * Scan for pageout.  Try to avoid thrashing the system
		 * with activity.
		 *
		 * Calculate our target for the number of free+cache pages
		 * we want to get to.  This is higher than the number that
		 * causes allocations to stall (severe) in order to provide
		 * hysteresis, and if we don't make it all the way but get
		 * to the minimum we're happy.
		 */
		inactive_shortage = vm_paging_target() + vm_pageout_deficit;
		vm_pageout_deficit = 0;
		delta1 = 0;
		for (q = 0; q < PQ_MAXL2_SIZE; ++q) {
			delta1 += vm_pageout_scan_inactive(
					pass, q,
					inactive_shortage / PQ_MAXL2_SIZE + 1,
					&vnodes_skipped);
		}
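		/*
		 * The global shortage is split evenly across the
		 * PQ_MAXL2_SIZE sub-queues; the +1 rounds each per-queue
		 * target up so that the per-queue scans together cover at
		 * least the full shortage.
		 */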

		/*
		 * Figure out how many active pages we must deactivate.  If
		 * we were able to reach our target with just the inactive
		 * scan above we limit the number of active pages we
		 * deactivate to reduce unnecessary work.
		 */
		active_shortage = vmstats.v_inactive_target -
				  vmstats.v_inactive_count;

		/*
		 * If we were unable to free sufficient inactive pages to
		 * satisfy the free/cache queue requirements then simply
		 * reaching the inactive target may not be good enough.
		 * Try to deactivate pages in excess of the target based
		 * on the shortfall.
		 *
		 * However, to prevent thrashing the VM system, do not
		 * deactivate more than an additional 1/10 of the inactive
		 * target's worth of active pages.
		 */
		if (delta1 < inactive_shortage) {
			tmp = (inactive_shortage - delta1) * 2;
			if (tmp > vmstats.v_inactive_target / 10)
				tmp = vmstats.v_inactive_target / 10;
			active_shortage += tmp;
		}

		delta2 = 0;
		for (q = 0; q < PQ_MAXL2_SIZE; ++q) {
			delta2 += vm_pageout_scan_active(
					pass, q,
					inactive_shortage / PQ_MAXL2_SIZE + 1,
					active_shortage / PQ_MAXL2_SIZE + 1,
					&recycle_count);
		}

		/*
		 * Finally free enough cache pages to meet our free page
		 * requirement and take more drastic measures if we are
		 * still in trouble.
		 */
		inactive_shortage -= delta2;
		vm_pageout_scan_cache(inactive_shortage, vnodes_skipped,
				      recycle_count);

		/*
		 * Wait for more work.
		 */
		if (inactive_shortage > 0) {
			++pass;
			if (swap_pager_full) {
				/*
				 * Running out of memory, catastrophic
				 * back-off to one-second intervals.
				 */
				tsleep(&vm_pages_needed, 0, "pdelay", hz);
			} else if (pass < 10 && vm_pages_needed > 1) {
				/*
				 * Normal operation, additional processes
				 * have already kicked us.  Retry immediately.
				 */
			} else if (pass < 10) {
				/*
				 * Normal operation, fewer processes.  Delay
				 * a bit but allow wakeups.
				 */
				vm_pages_needed = 0;
				tsleep(&vm_pages_needed, 0, "pdelay", hz / 10);
				vm_pages_needed = 1;
			} else {
				/*
				 * We've taken too many passes, forced delay.
				 */
				tsleep(&vm_pages_needed, 0, "pdelay", hz / 10);
			}
		} else {
			/*
			 * Interlocked wakeup of waiters (non-optional)
			 */
			pass = 0;
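			/*
			 * Page allocators blocked waiting for memory sleep
			 * on &vmstats.v_free_count.  Only wake them when we
			 * are no longer below the minimum, so they do not
			 * simply block again right away.
			 */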
			if (vm_pages_needed && !vm_page_count_min(0)) {
				wakeup(&vmstats.v_free_count);
				vm_pages_needed = 0;
			}
		}
	}
}

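/*
 * Register the pagedaemon kernel thread.  kproc_start() is invoked via
 * SYSINIT during the SI_SUB_KTHREAD_PAGE stage and creates the thread
 * running vm_pageout_thread(), storing its thread pointer in pagethread.
 */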
static struct kproc_desc page_kp = {
	"pagedaemon",
	vm_pageout_thread,
	&pagethread
};
SYSINIT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, kproc_start, &page_kp)


/*
 * Called after allocating a page out of the cache or free queue
 * to possibly wake the pagedaemon up to replenish our supply.
 *
 * We try to generate some hysteresis by waking the pagedaemon up
 * when our free+cache pages go below the free_min+cache_min level.
 * The pagedaemon tries to get the count back up to at least the
 * minimum, and through to the target level if possible.
 *
 * If the pagedaemon is already active bump vm_pages_needed as a hint
 * that there are even more requests pending.
 *
 * SMP races ok?
 * No requirements.
 */
void
pagedaemon_wakeup(void)
{
	if (vm_paging_needed() && curthread != pagethread) {
		if (vm_pages_needed == 0) {
			vm_pages_needed = 1;	/* SMP race ok */
			wakeup(&vm_pages_needed);
		} else if (vm_page_count_min(0)) {
			++vm_pages_needed;	/* SMP race ok */
		}
	}
}

#if !defined(NO_SWAPPING)

/*
 * SMP races ok?
 * No requirements.
 */
static void
vm_req_vmdaemon(void)
{
	static int lastrun = 0;
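
	/*
	 * Rate-limit wakeups of the vmdaemon to roughly one per second;
	 * the (ticks < lastrun) test also handles the tick counter
	 * wrapping around.
	 */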
	if ((ticks > (lastrun + hz)) || (ticks < lastrun)) {
		wakeup(&vm_daemon_needed);
		lastrun = ticks;
	}
}

static int vm_daemon_callback(struct proc *p, void *data __unused);

/*
 * No requirements.
 */
static void
vm_daemon(void)
{
	/*
	 * XXX vm_daemon_needed specific token?
	 */
	while (TRUE) {
		tsleep(&vm_daemon_needed, 0, "psleep", 0);
		if (vm_pageout_req_swapout) {
			swapout_procs(vm_pageout_req_swapout);
			vm_pageout_req_swapout = 0;
		}
		/*
		 * Scan the processes for those exceeding their rlimits or
		 * that are swapped out, and deactivate their pages.
		 */
		allproc_scan(vm_daemon_callback, NULL);
	}
}

/*
 * Caller must hold proc_token.
 */
static int
vm_daemon_callback(struct proc *p, void *data __unused)
{
	vm_pindex_t limit, size;

	/*
	 * If this is a system process or the process is exiting, skip it.
	 */
	if (p->p_flag & (P_SYSTEM | P_WEXIT))
		return (0);

	/*
	 * If the process is in a non-running type state, don't touch it.
	 */
	if (p->p_stat != SACTIVE && p->p_stat != SSTOP)
		return (0);

	/*
	 * Get the RSS limit.  The rlimit is specified in bytes,
	 * OFF_TO_IDX() converts it to a page count.
	 */
	limit = OFF_TO_IDX(qmin(p->p_rlimit[RLIMIT_RSS].rlim_cur,
				p->p_rlimit[RLIMIT_RSS].rlim_max));

	/*
	 * Let processes that are swapped out really be swapped out.
	 * Setting the limit to nothing gets as many pages out to swap
	 * as possible.
	 */
	if (p->p_flag & P_SWAPPEDOUT)
		limit = 0;

	lwkt_gettoken(&p->p_vmspace->vm_map.token);
	size = vmspace_resident_count(p->p_vmspace);
	if (limit >= 0 && size >= limit) {
		vm_pageout_map_deactivate_pages(&p->p_vmspace->vm_map, limit);
	}
	lwkt_reltoken(&p->p_vmspace->vm_map.token);
	return (0);
}

#endif