[dragonfly.git] / sys / vm / vm_pageout.c
984263bc
MD
1/*
2 * Copyright (c) 1991 Regents of the University of California.
3 * All rights reserved.
4 * Copyright (c) 1994 John S. Dyson
5 * All rights reserved.
6 * Copyright (c) 1994 David Greenman
7 * All rights reserved.
8 *
9 * This code is derived from software contributed to Berkeley by
10 * The Mach Operating System project at Carnegie-Mellon University.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
dc71b7ab 20 * 3. Neither the name of the University nor the names of its contributors
984263bc
MD
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 * from: @(#)vm_pageout.c 7.4 (Berkeley) 5/7/91
37 *
38 *
39 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
40 * All rights reserved.
41 *
42 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
43 *
44 * Permission to use, copy, modify and distribute this software and
45 * its documentation is hereby granted, provided that both the copyright
46 * notice and this permission notice appear in all copies of the
47 * software, derivative works or modified versions, and any portions
48 * thereof, and that both notices appear in supporting documentation.
49 *
50 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
51 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
52 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
53 *
54 * Carnegie Mellon requests users of this software to return to
55 *
56 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
57 * School of Computer Science
58 * Carnegie Mellon University
59 * Pittsburgh PA 15213-3890
60 *
61 * any improvements or extensions that they make and grant Carnegie the
62 * rights to redistribute these changes.
63 *
64 * $FreeBSD: src/sys/vm/vm_pageout.c,v 1.151.2.15 2002/12/29 18:21:04 dillon Exp $
65 */
66
67/*
68 * The proverbial page-out daemon.
69 */
70
71#include "opt_vm.h"
72#include <sys/param.h>
73#include <sys/systm.h>
74#include <sys/kernel.h>
75#include <sys/proc.h>
76#include <sys/kthread.h>
77#include <sys/resourcevar.h>
78#include <sys/signalvar.h>
79#include <sys/vnode.h>
80#include <sys/vmmeter.h>
32c821cf 81#include <sys/conf.h>
984263bc
MD
82#include <sys/sysctl.h>
83
84#include <vm/vm.h>
85#include <vm/vm_param.h>
86#include <sys/lock.h>
87#include <vm/vm_object.h>
88#include <vm/vm_page.h>
89#include <vm/vm_map.h>
90#include <vm/vm_pageout.h>
91#include <vm/vm_pager.h>
92#include <vm/swap_pager.h>
93#include <vm/vm_extern.h>
5fd012e0
MD
94
95#include <sys/thread2.h>
b12defdc 96#include <sys/spinlock2.h>
12e4aaff 97#include <vm/vm_page2.h>
984263bc
MD
98
99/*
100 * System initialization
101 */
102
103/* the kernel process "vm_pageout"*/
b7ea2f3f
MD
104static int vm_pageout_page(vm_page_t m, long *max_launderp,
105 long *vnodes_skippedp, struct vnode **vpfailedp,
534ee349
MD
106 int pass, int vmflush_flags);
107static int vm_pageout_clean_helper (vm_page_t, int);
1388df65 108static int vm_pageout_free_page_calc (vm_size_t count);
534ee349 109static void vm_pageout_page_free(vm_page_t m) ;
32c821cf 110struct thread *emergpager;
bc6dffab 111struct thread *pagethread;
c3feb36a 112static int sequence_emerg_pager;
984263bc 113
984263bc
MD
114#if !defined(NO_SWAPPING)
115/* the kernel process "vm_daemon"*/
1388df65 116static void vm_daemon (void);
bc6dffab 117static struct thread *vmthread;
984263bc
MD
118
119static struct kproc_desc vm_kp = {
120 "vmdaemon",
121 vm_daemon,
bc6dffab 122 &vmthread
984263bc 123};
f3f3eadb 124SYSINIT(vmdaemon, SI_SUB_KTHREAD_VM, SI_ORDER_FIRST, kproc_start, &vm_kp);
984263bc
MD
125#endif
126
534ee349
MD
127int vm_pages_needed = 0; /* Event on which pageout daemon sleeps */
128int vm_pageout_deficit = 0; /* Estimated number of pages deficit */
129int vm_pageout_pages_needed = 0;/* pageout daemon needs pages */
77d1fb91 130int vm_page_free_hysteresis = 16;
c3feb36a 131static int vm_pagedaemon_time;
984263bc
MD
132
133#if !defined(NO_SWAPPING)
534ee349 134static int vm_pageout_req_swapout;
984263bc
MD
135static int vm_daemon_needed;
136#endif
79ce07d0 137static int vm_max_launder = 4096;
ead23175 138static int vm_emerg_launder = 100;
984263bc
MD
139static int vm_pageout_stats_max=0, vm_pageout_stats_interval = 0;
140static int vm_pageout_full_stats_interval = 0;
141static int vm_pageout_stats_free_max=0, vm_pageout_algorithm=0;
142static int defer_swap_pageouts=0;
143static int disable_swap_pageouts=0;
38587548
MD
144static u_int vm_anonmem_decline = ACT_DECLINE;
145static u_int vm_filemem_decline = ACT_DECLINE * 2;
984263bc
MD
146
147#if defined(NO_SWAPPING)
148static int vm_swap_enabled=0;
149static int vm_swap_idle_enabled=0;
150#else
151static int vm_swap_enabled=1;
152static int vm_swap_idle_enabled=0;
153#endif
486b807a 154int vm_pageout_memuse_mode=1; /* 0-disable, 1-passive, 2-active swp*/
984263bc 155
38587548
MD
156SYSCTL_UINT(_vm, VM_PAGEOUT_ALGORITHM, anonmem_decline,
157 CTLFLAG_RW, &vm_anonmem_decline, 0, "active->inactive anon memory");
158
159SYSCTL_INT(_vm, VM_PAGEOUT_ALGORITHM, filemem_decline,
160 CTLFLAG_RW, &vm_filemem_decline, 0, "active->inactive file cache");
984263bc 161
77d1fb91
MD
162SYSCTL_INT(_vm, OID_AUTO, page_free_hysteresis,
163 CTLFLAG_RW, &vm_page_free_hysteresis, 0,
164 "Free more pages than the minimum required");
165
984263bc
MD
166SYSCTL_INT(_vm, OID_AUTO, max_launder,
167 CTLFLAG_RW, &vm_max_launder, 0, "Limit dirty flushes in pageout");
ead23175
MD
168SYSCTL_INT(_vm, OID_AUTO, emerg_launder,
169 CTLFLAG_RW, &vm_emerg_launder, 0, "Emergency pager minimum");
984263bc
MD
170
171SYSCTL_INT(_vm, OID_AUTO, pageout_stats_max,
172 CTLFLAG_RW, &vm_pageout_stats_max, 0, "Max pageout stats scan length");
173
174SYSCTL_INT(_vm, OID_AUTO, pageout_full_stats_interval,
175 CTLFLAG_RW, &vm_pageout_full_stats_interval, 0, "Interval for full stats scan");
176
177SYSCTL_INT(_vm, OID_AUTO, pageout_stats_interval,
178 CTLFLAG_RW, &vm_pageout_stats_interval, 0, "Interval for partial stats scan");
179
180SYSCTL_INT(_vm, OID_AUTO, pageout_stats_free_max,
181 CTLFLAG_RW, &vm_pageout_stats_free_max, 0, "Not implemented");
486b807a
MD
182SYSCTL_INT(_vm, OID_AUTO, pageout_memuse_mode,
183 CTLFLAG_RW, &vm_pageout_memuse_mode, 0, "memoryuse resource mode");
984263bc
MD
184
185#if defined(NO_SWAPPING)
186SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
187 CTLFLAG_RD, &vm_swap_enabled, 0, "");
188SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
189 CTLFLAG_RD, &vm_swap_idle_enabled, 0, "");
190#else
191SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
192 CTLFLAG_RW, &vm_swap_enabled, 0, "Enable entire process swapout");
193SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
194 CTLFLAG_RW, &vm_swap_idle_enabled, 0, "Allow swapout on idle criteria");
195#endif
196
197SYSCTL_INT(_vm, OID_AUTO, defer_swapspace_pageouts,
198 CTLFLAG_RW, &defer_swap_pageouts, 0, "Give preference to dirty pages in mem");
199
200SYSCTL_INT(_vm, OID_AUTO, disable_swapspace_pageouts,
201 CTLFLAG_RW, &disable_swap_pageouts, 0, "Disallow swapout of dirty pages");
202
203static int pageout_lock_miss;
204SYSCTL_INT(_vm, OID_AUTO, pageout_lock_miss,
205 CTLFLAG_RD, &pageout_lock_miss, 0, "vget() lock misses during pageout");
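/*
 * Example usage (values are illustrative, not defaults or recommendations):
 *
 *	sysctl vm.max_launder			# show the dirty-flush limit
 *	sysctl vm.max_launder=256		# raise the per-scan flush limit
 *	sysctl vm.pageout_memuse_mode=2		# active RSS enforcement (see above)
 */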
206
984263bc
MD
207int vm_page_max_wired; /* XXX max # of wired pages system-wide */
208
209#if !defined(NO_SWAPPING)
1388df65 210static void vm_req_vmdaemon (void);
984263bc 211#endif
027193eb 212static void vm_pageout_page_stats(int q);
984263bc 213
9cd626ca
MD
214/*
215 * Calculate approximately how many pages on each queue to try to
216 * clean. An exact calculation creates an edge condition when the
217 * queues are unbalanced so add significant slop. The queue scans
218 * will stop early when targets are reached and will start where they
219 * left off on the next pass.
1f79526e
MD
220 *
221 * We need to be generous here because there are all sorts of loading
222 * conditions that can cause edge cases if we try to average over all queues.
223 * In particular, storage subsystems have become so fast that paging
224 * activity can become quite frantic. Eventually we will probably need
225 * two paging threads, one for dirty pages and one for clean, to deal
226 * with the bandwidth requirements.
227 *
228 * So what we do is calculate a value that can be satisfied nominally by
229 * only having to scan half the queues.
9cd626ca 230 */
b7ea2f3f
MD
231static __inline long
232PQAVERAGE(long n)
51c99c61 233{
b7ea2f3f 234 long avg;
9cd626ca
MD
235
236 if (n >= 0) {
1f79526e 237 avg = ((n + (PQ_L2_SIZE - 1)) / (PQ_L2_SIZE / 2) + 1);
9cd626ca 238 } else {
1f79526e 239 avg = ((n - (PQ_L2_SIZE - 1)) / (PQ_L2_SIZE / 2) - 1);
9cd626ca
MD
240 }
241 return avg;
51c99c61
MD
242}
243
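/*
 * Worked example (PQ_L2_SIZE of 32 assumed purely for illustration):
 * a shortage of n = 100 pages gives PQAVERAGE(100) = ((100 + 31) / 16) + 1 = 9,
 * so scanning half of the 32 queues at ~9 pages each covers roughly 144
 * pages, comfortably satisfying the 100-page shortage as described above.
 */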
984263bc 244/*
534ee349 245 * vm_pageout_clean_helper:
984263bc 246 *
95270b7e
MD
247 * Clean the page and remove it from the laundry. The page must be busied
248 * by the caller and will be disposed of (put away, flushed) by this routine.
984263bc 249 */
984263bc 250static int
534ee349 251vm_pageout_clean_helper(vm_page_t m, int vmflush_flags)
984263bc 252{
5f910b2f 253 vm_object_t object;
79ce07d0 254 vm_page_t mc[BLIST_MAX_ALLOC];
b12defdc 255 int error;
984263bc
MD
256 int ib, is, page_base;
257 vm_pindex_t pindex = m->pindex;
258
259 object = m->object;
260
261 /*
95270b7e 262 * Don't mess with the page if it's held or special.
b12defdc
MD
263 *
264 * XXX do we really need to check hold_count here? hold_count
265 * isn't supposed to mess with vm_page ops except prevent the
266 * page from being reused.
984263bc 267 */
b12defdc
MD
268 if (m->hold_count != 0 || (m->flags & PG_UNMANAGED)) {
269 vm_page_wakeup(m);
984263bc
MD
270 return 0;
271 }
272
79ce07d0
MD
273 /*
274 * Place page in cluster. Align cluster for optimal swap space
275 * allocation (whether it is swap or not). This is typically ~16-32
276 * pages, which also tends to align the cluster to multiples of the
277 * filesystem block size if backed by a filesystem.
278 */
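/*
 * Illustration (window size assumed for the example): if BLIST_MAX_ALLOC
 * were 32, a page at pindex 70 would get page_base = 70 % 32 = 6, so mc[]
 * would represent the aligned window of pindices 64..95; the loops below
 * then try to fill that window backwards from slot ib = 5 and forwards
 * from slot is = 7.
 */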
279 page_base = pindex % BLIST_MAX_ALLOC;
280 mc[page_base] = m;
281 ib = page_base - 1;
282 is = page_base + 1;
984263bc
MD
283
284 /*
285 * Scan object for clusterable pages.
286 *
287 * We can cluster ONLY if: ->> the page is NOT
288 * clean, wired, busy, held, or mapped into a
289 * buffer, and one of the following:
290 * 1) The page is inactive, or a seldom used
291 * active page.
292 * -or-
293 * 2) we force the issue.
294 *
295 * During heavy mmap/modification loads the pageout
296 * daemon can really fragment the underlying file
297 * due to flushing pages out of order and not trying to
298 * align the clusters (which leaves sporadic out-of-order
299 * holes). To solve this problem we do the reverse scan
300 * first and attempt to align our cluster, then do a
301 * forward scan if room remains.
302 */
398c240d 303 vm_object_hold(object);
534ee349 304
79ce07d0 305 while (ib >= 0) {
984263bc
MD
306 vm_page_t p;
307
79ce07d0
MD
308 p = vm_page_lookup_busy_try(object, pindex - page_base + ib,
309 TRUE, &error);
310 if (error || p == NULL)
984263bc 311 break;
b12defdc
MD
312 if ((p->queue - p->pc) == PQ_CACHE ||
313 (p->flags & PG_UNMANAGED)) {
314 vm_page_wakeup(p);
984263bc
MD
315 break;
316 }
317 vm_page_test_dirty(p);
9bf025db
MD
318 if (((p->dirty & p->valid) == 0 &&
319 (p->flags & PG_NEED_COMMIT) == 0) ||
984263bc
MD
320 p->wire_count != 0 || /* may be held by buf cache */
321 p->hold_count != 0) { /* may be undergoing I/O */
b12defdc 322 vm_page_wakeup(p);
984263bc
MD
323 break;
324 }
534ee349
MD
325 if (p->queue - p->pc != PQ_INACTIVE) {
326 if (p->queue - p->pc != PQ_ACTIVE ||
327 (vmflush_flags & VM_PAGER_ALLOW_ACTIVE) == 0) {
328 vm_page_wakeup(p);
329 break;
330 }
331 }
332
333 /*
334 * Try to maintain page groupings in the cluster.
335 */
336 if (m->flags & PG_WINATCFLS)
337 vm_page_flag_set(p, PG_WINATCFLS);
338 else
339 vm_page_flag_clear(p, PG_WINATCFLS);
340 p->act_count = m->act_count;
341
79ce07d0
MD
342 mc[ib] = p;
343 --ib;
984263bc 344 }
79ce07d0 345 ++ib; /* fixup */
984263bc 346
79ce07d0
MD
347 while (is < BLIST_MAX_ALLOC &&
348 pindex - page_base + is < object->size) {
984263bc
MD
349 vm_page_t p;
350
79ce07d0
MD
351 p = vm_page_lookup_busy_try(object, pindex - page_base + is,
352 TRUE, &error);
b12defdc 353 if (error || p == NULL)
984263bc
MD
354 break;
355 if (((p->queue - p->pc) == PQ_CACHE) ||
534ee349 356 (p->flags & PG_UNMANAGED)) {
b12defdc 357 vm_page_wakeup(p);
984263bc
MD
358 break;
359 }
360 vm_page_test_dirty(p);
9bf025db
MD
361 if (((p->dirty & p->valid) == 0 &&
362 (p->flags & PG_NEED_COMMIT) == 0) ||
984263bc
MD
363 p->wire_count != 0 || /* may be held by buf cache */
364 p->hold_count != 0) { /* may be undergoing I/O */
b12defdc 365 vm_page_wakeup(p);
984263bc
MD
366 break;
367 }
534ee349
MD
368 if (p->queue - p->pc != PQ_INACTIVE) {
369 if (p->queue - p->pc != PQ_ACTIVE ||
370 (vmflush_flags & VM_PAGER_ALLOW_ACTIVE) == 0) {
371 vm_page_wakeup(p);
372 break;
373 }
374 }
375
376 /*
377 * Try to maintain page groupings in the cluster.
378 */
379 if (m->flags & PG_WINATCFLS)
380 vm_page_flag_set(p, PG_WINATCFLS);
381 else
382 vm_page_flag_clear(p, PG_WINATCFLS);
383 p->act_count = m->act_count;
384
79ce07d0 385 mc[is] = p;
984263bc
MD
386 ++is;
387 }
388
398c240d
VS
389 vm_object_drop(object);
390
984263bc
MD
391 /*
392 * we allow reads during pageouts...
393 */
534ee349 394 return vm_pageout_flush(&mc[ib], is - ib, vmflush_flags);
984263bc
MD
395}
396
397/*
398 * vm_pageout_flush() - launder the given pages
399 *
400 * The given pages are laundered. Note that we setup for the start of
401 * I/O ( i.e. busy the page ), mark it read-only, and bump the object
402 * reference count all in here rather than in the parent. If we want
403 * the parent to do more sophisticated things we may have to change
404 * the ordering.
99ad9bc4 405 *
b12defdc
MD
406 * The pages in the array must be busied by the caller and will be
407 * unbusied by this function.
984263bc 408 */
984263bc 409int
534ee349 410vm_pageout_flush(vm_page_t *mc, int count, int vmflush_flags)
984263bc 411{
5f910b2f 412 vm_object_t object;
984263bc
MD
413 int pageout_status[count];
414 int numpagedout = 0;
415 int i;
416
417 /*
17cde63e
MD
418 * Initiate I/O. Bump the vm_page_t->busy counter.
419 */
420 for (i = 0; i < count; i++) {
b12defdc
MD
421 KASSERT(mc[i]->valid == VM_PAGE_BITS_ALL,
422 ("vm_pageout_flush page %p index %d/%d: partially "
423 "invalid page", mc[i], i, count));
17cde63e
MD
424 vm_page_io_start(mc[i]);
425 }
426
427 /*
4530a3aa
MD
428 * We must make the pages read-only. This will also force the
429 * modified bit in the related pmaps to be cleared. The pager
430 * cannot clear the bit for us since the I/O completion code
431 * typically runs from an interrupt. The act of making the page
432 * read-only handles the case for us.
b12defdc
MD
433 *
434 * Then we can unbusy the pages, we still hold a reference by virtue
435 * of our soft-busy.
984263bc 436 */
984263bc 437 for (i = 0; i < count; i++) {
534ee349
MD
438 if (vmflush_flags & VM_PAGER_TRY_TO_CACHE)
439 vm_page_protect(mc[i], VM_PROT_NONE);
440 else
441 vm_page_protect(mc[i], VM_PROT_READ);
b12defdc 442 vm_page_wakeup(mc[i]);
984263bc
MD
443 }
444
445 object = mc[0]->object;
446 vm_object_pip_add(object, count);
447
448 vm_pager_put_pages(object, mc, count,
95270b7e
MD
449 (vmflush_flags |
450 ((object == &kernel_object) ?
451 VM_PAGER_PUT_SYNC : 0)),
452 pageout_status);
984263bc
MD
453
454 for (i = 0; i < count; i++) {
455 vm_page_t mt = mc[i];
456
457 switch (pageout_status[i]) {
458 case VM_PAGER_OK:
459 numpagedout++;
460 break;
461 case VM_PAGER_PEND:
462 numpagedout++;
463 break;
464 case VM_PAGER_BAD:
465 /*
466 * Page outside of range of object. Right now we
467 * essentially lose the changes by pretending it
468 * worked.
469 */
b12defdc 470 vm_page_busy_wait(mt, FALSE, "pgbad");
984263bc
MD
471 pmap_clear_modify(mt);
472 vm_page_undirty(mt);
b12defdc 473 vm_page_wakeup(mt);
984263bc
MD
474 break;
475 case VM_PAGER_ERROR:
476 case VM_PAGER_FAIL:
477 /*
c84c24da
MD
478 * A page typically cannot be paged out when we
479 * have run out of swap. We leave the page
480 * marked inactive and will try to page it out
481 * again later.
482 *
483 * Starvation of the active page list is used to
484 * determine when the system is massively memory
485 * starved.
984263bc 486 */
984263bc
MD
487 break;
488 case VM_PAGER_AGAIN:
489 break;
490 }
491
492 /*
534ee349
MD
493 * If not PENDing this was a synchronous operation and we
494 * clean up after the I/O. If it is PENDing the mess is
495 * cleaned up asynchronously.
496 *
497 * Also nominally act on the caller's wishes if the caller
498 * wants to try to really clean (cache or free) the page.
93afe6be 499 *
534ee349
MD
500 * Also nominally deactivate the page if the system is
501 * memory-stressed.
984263bc
MD
502 */
503 if (pageout_status[i] != VM_PAGER_PEND) {
b12defdc 504 vm_page_busy_wait(mt, FALSE, "pgouw");
a491077e 505 vm_page_io_finish(mt);
534ee349
MD
506 if (vmflush_flags & VM_PAGER_TRY_TO_CACHE) {
507 vm_page_try_to_cache(mt);
508 } else if (vm_page_count_severe()) {
509 vm_page_deactivate(mt);
510 vm_page_wakeup(mt);
511 } else {
512 vm_page_wakeup(mt);
513 }
a491077e 514 vm_object_pip_wakeup(object);
984263bc
MD
515 }
516 }
517 return numpagedout;
518}
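/*
 * Typical call path (sketch): vm_pageout_clean_helper() above gathers a
 * cluster of busied pages into mc[] and issues
 *
 *	vm_pageout_flush(&mc[ib], is - ib, vmflush_flags);
 *
 * The return value counts pages accepted for I/O (VM_PAGER_OK or
 * VM_PAGER_PEND), which callers credit against their laundering targets.
 */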
519
520#if !defined(NO_SWAPPING)
534ee349 521
984263bc 522/*
a7a03a5f
MD
523 * Callback function, page busied for us. We must dispose of the busy
524 * condition. Any related pmap pages may be held but will not be locked.
534ee349
MD
525 */
526static
527int
a7a03a5f
MD
528vm_pageout_mdp_callback(struct pmap_pgscan_info *info, vm_offset_t va,
529 vm_page_t p)
1f804340 530{
1f804340 531 int actcount;
534ee349 532 int cleanit = 0;
984263bc 533
534ee349
MD
534 /*
535 * Basic tests - There should never be a marker, and we can stop
536 * once the RSS is below the required level.
537 */
538 KKASSERT((p->flags & PG_MARKER) == 0);
a7a03a5f
MD
539 if (pmap_resident_tlnw_count(info->pmap) <= info->limit) {
540 vm_page_wakeup(p);
1f804340
MD
541 return(-1);
542 }
534ee349 543
1f804340 544 mycpu->gd_cnt.v_pdpages++;
b12defdc 545
534ee349
MD
546 if (p->wire_count || p->hold_count || (p->flags & PG_UNMANAGED)) {
547 vm_page_wakeup(p);
548 goto done;
549 }
984263bc 550
a7a03a5f
MD
551 ++info->actioncount;
552
553 /*
554 * Check if the page has been referenced recently. If it has,
555 * activate it and skip.
556 */
1f804340
MD
557 actcount = pmap_ts_referenced(p);
558 if (actcount) {
559 vm_page_flag_set(p, PG_REFERENCED);
560 } else if (p->flags & PG_REFERENCED) {
561 actcount = 1;
562 }
563
a7a03a5f
MD
564 if (actcount) {
565 if (p->queue - p->pc != PQ_ACTIVE) {
566 vm_page_and_queue_spin_lock(p);
567 if (p->queue - p->pc != PQ_ACTIVE) {
b12defdc 568 vm_page_and_queue_spin_unlock(p);
a7a03a5f 569 vm_page_activate(p);
1f804340 570 } else {
b12defdc 571 vm_page_and_queue_spin_unlock(p);
984263bc 572 }
1f804340 573 } else {
a7a03a5f
MD
574 p->act_count += actcount;
575 if (p->act_count > ACT_MAX)
576 p->act_count = ACT_MAX;
577 }
578 vm_page_flag_clear(p, PG_REFERENCED);
579 vm_page_wakeup(p);
580 goto done;
581 }
b12defdc 582
a7a03a5f
MD
583 /*
584 * Remove the page from this particular pmap. Once we do this, our
585 * pmap scans will not see it again (unless it gets faulted in), so
586 * we must actively dispose of or deal with the page.
587 */
588 pmap_remove_specific(info->pmap, p);
589
590 /*
591 * If the page is not mapped to another process (i.e. as would be
592 * typical if this were a shared page from a library) then deactivate
593 * the page and clean it in two passes only.
594 *
595 * If the page hasn't been referenced since the last check, remove it
596 * from the pmap. If it is no longer mapped, deactivate it
597 * immediately, accelerating the normal decline.
598 *
599 * Once the page has been removed from the pmap the RSS code no
600 * longer tracks it so we have to make sure that it is staged for
601 * potential flush action.
602 */
603 if ((p->flags & PG_MAPPED) == 0) {
604 if (p->queue - p->pc == PQ_ACTIVE) {
605 vm_page_deactivate(p);
984263bc 606 }
a7a03a5f 607 if (p->queue - p->pc == PQ_INACTIVE) {
534ee349
MD
608 cleanit = 1;
609 }
984263bc 610 }
534ee349
MD
611
612 /*
613 * Ok, try to fully clean the page and any nearby pages such that at
614 * least the requested page is freed or moved to the cache queue.
615 *
616 * We usually do this synchronously to allow us to get the page into
617 * the CACHE queue quickly, which will prevent memory exhaustion if
618 * a process with a memoryuse limit is running away. However, the
619 * sysadmin may desire to set vm.swap_user_async which relaxes this
620 * and improves write performance.
621 */
622 if (cleanit) {
b7ea2f3f
MD
623 long max_launder = 0x7FFF;
624 long vnodes_skipped = 0;
534ee349
MD
625 int vmflush_flags;
626 struct vnode *vpfailed = NULL;
627
a7a03a5f 628 info->offset = va;
534ee349 629
486b807a 630 if (vm_pageout_memuse_mode >= 2) {
a7a03a5f
MD
631 vmflush_flags = VM_PAGER_TRY_TO_CACHE |
632 VM_PAGER_ALLOW_ACTIVE;
633 if (swap_user_async == 0)
634 vmflush_flags |= VM_PAGER_PUT_SYNC;
486b807a 635 vm_page_flag_set(p, PG_WINATCFLS);
a7a03a5f
MD
636 info->cleancount +=
637 vm_pageout_page(p, &max_launder,
638 &vnodes_skipped,
639 &vpfailed, 1, vmflush_flags);
486b807a 640 } else {
486b807a 641 vm_page_wakeup(p);
a7a03a5f 642 ++info->cleancount;
486b807a 643 }
534ee349
MD
644 } else {
645 vm_page_wakeup(p);
646 }
07540d37
MD
647
648 /*
649 * Must be at end to avoid SMP races.
650 */
534ee349
MD
651done:
652 lwkt_user_yield();
a7a03a5f 653 return 0;
984263bc
MD
654}
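/*
 * Callback contract in brief: returning -1 tells the pmap scan it can
 * stop (the pmap's resident count has dropped to info->limit), returning
 * 0 continues it. Each invocation either re-activates a recently
 * referenced page, or unmaps it and, once the page is no longer mapped
 * anywhere, stages it for cleaning via vm_pageout_page().
 */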
655
656/*
534ee349 657 * Deactivate some number of pages in a map due to set RLIMIT_RSS limits,
a7a03a5f
MD
658 * which is relatively difficult to do. We try to keep track of where we
659 * left off last time to reduce scan overhead.
486b807a
MD
660 *
661 * Called when vm_pageout_memuse_mode is >= 1.
984263bc 662 */
534ee349
MD
663void
664vm_pageout_map_deactivate_pages(vm_map_t map, vm_pindex_t limit)
984263bc 665{
a7a03a5f
MD
666 vm_offset_t pgout_offset;
667 struct pmap_pgscan_info info;
534ee349 668 int retries = 3;
984263bc 669
534ee349
MD
670 pgout_offset = map->pgout_offset;
671again:
a7a03a5f
MD
672#if 0
673 kprintf("%016jx ", pgout_offset);
674#endif
675 if (pgout_offset < VM_MIN_USER_ADDRESS)
676 pgout_offset = VM_MIN_USER_ADDRESS;
677 if (pgout_offset >= VM_MAX_USER_ADDRESS)
534ee349 678 pgout_offset = 0;
a7a03a5f
MD
679 info.pmap = vm_map_pmap(map);
680 info.limit = limit;
681 info.beg_addr = pgout_offset;
682 info.end_addr = VM_MAX_USER_ADDRESS;
683 info.callback = vm_pageout_mdp_callback;
684 info.cleancount = 0;
685 info.actioncount = 0;
686 info.busycount = 0;
687
688 pmap_pgscan(&info);
689 pgout_offset = info.offset;
690#if 0
691 kprintf("%016jx %08lx %08lx\n", pgout_offset,
692 info.cleancount, info.actioncount);
693#endif
984263bc 694
a7a03a5f
MD
695 if (pgout_offset != VM_MAX_USER_ADDRESS &&
696 pmap_resident_tlnw_count(vm_map_pmap(map)) > limit) {
697 goto again;
698 } else if (retries &&
699 pmap_resident_tlnw_count(vm_map_pmap(map)) > limit) {
700 --retries;
701 goto again;
702 }
534ee349 703 map->pgout_offset = pgout_offset;
984263bc
MD
704}
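/*
 * Usage sketch (the caller lives outside this excerpt): the RSS
 * enforcement path, active when vm.pageout_memuse_mode >= 1, passes in a
 * process vm_map and its RLIMIT_RSS-derived page limit. The saved
 * map->pgout_offset lets successive calls resume the pmap scan where the
 * previous one stopped rather than restarting from the bottom of the
 * user address space.
 */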
705#endif
706
707/*
a5fc46c9
MD
708 * Called when the pageout scan wants to free a page. We no longer
709 * try to cycle the vm_object here with a reference & dealloc, which can
710 * cause a non-trivial object collapse in a critical path.
99ad9bc4 711 *
a5fc46c9
MD
712 * It is unclear why we cycled the ref_count in the past, perhaps to try
713 * to optimize shadow chain collapses but I don't quite see why it would
714 * be necessary. An OBJ_DEAD object should terminate any and all vm_pages
715 * synchronously and not have to be kicked-start.
984263bc 716 */
99ad9bc4 717static void
95813af0
MD
718vm_pageout_page_free(vm_page_t m)
719{
984263bc
MD
720 vm_page_protect(m, VM_PROT_NONE);
721 vm_page_free(m);
984263bc
MD
722}
723
724/*
20479584 725 * vm_pageout_scan does the dirty work for the pageout daemon.
984263bc 726 */
8fa76237
MD
727struct vm_pageout_scan_info {
728 struct proc *bigproc;
729 vm_offset_t bigsize;
730};
731
732static int vm_pageout_scan_callback(struct proc *p, void *data);
733
c3feb36a
MD
734/*
735 * Scan inactive queue
736 *
737 * WARNING! Can be called from two pagedaemon threads simultaneously.
738 */
20479584 739static int
b7ea2f3f
MD
740vm_pageout_scan_inactive(int pass, int q, long avail_shortage,
741 long *vnodes_skipped)
984263bc 742{
b12defdc 743 vm_page_t m;
984263bc 744 struct vm_page marker;
5d6a945b 745 struct vnode *vpfailed; /* warning, allowed to be stale */
027193eb 746 int maxscan;
b7ea2f3f
MD
747 long delta = 0;
748 long max_launder;
c3feb36a
MD
749 int isep;
750
751 isep = (curthread == emergpager);
984263bc 752
984263bc
MD
753 /*
754 * Start scanning the inactive queue for pages we can move to the
755 * cache or free. The scan will stop when the target is reached or
756 * we have scanned the entire inactive queue. Note that m->act_count
757 * is not used to form decisions for the inactive queue, only for the
758 * active queue.
759 *
534ee349 760 * max_launder limits the number of dirty pages we flush per scan.
984263bc
MD
761 * For most systems a smaller value (16 or 32) is more robust under
762 * extreme memory and disk pressure because any unnecessary writes
763 * to disk can result in extreme performance degradation. However,
764 * systems with excessive dirty pages (especially when MAP_NOSYNC is
765 * used) will die horribly with limited laundering. If the pageout
766 * daemon cannot clean enough pages in the first pass, we let it go
767 * all out in succeeding passes.
c3feb36a
MD
768 *
769 * NOTE! THE EMERGENCY PAGER (isep) DOES NOT LAUNDER VNODE-BACKED
770 * PAGES.
984263bc 771 */
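/*
 * Concretely, in the code below: on pass 0 max_launder comes from
 * vm.max_launder (clamped to at least 1), while any later pass raises it
 * to 10000 so a struggling scan can flush nearly without limit.
 */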
534ee349
MD
772 if ((max_launder = vm_max_launder) <= 1)
773 max_launder = 1;
984263bc 774 if (pass)
534ee349 775 max_launder = 10000;
984263bc 776
06ecca5a 777 /*
b12defdc
MD
778 * Initialize our marker
779 */
780 bzero(&marker, sizeof(marker));
bc0aa189
MD
781 marker.flags = PG_FICTITIOUS | PG_MARKER;
782 marker.busy_count = PBUSY_LOCKED;
027193eb
MD
783 marker.queue = PQ_INACTIVE + q;
784 marker.pc = q;
b12defdc
MD
785 marker.wire_count = 1;
786
787 /*
788 * Inactive queue scan.
789 *
790 * NOTE: The vm_page must be spinlocked before the queue to avoid
791 * deadlocks, so it is easiest to simply iterate the loop
792 * with the queue unlocked at the top.
06ecca5a 793 */
5d6a945b 794 vpfailed = NULL;
b12defdc 795
027193eb
MD
796 vm_page_queues_spin_lock(PQ_INACTIVE + q);
797 TAILQ_INSERT_HEAD(&vm_page_queues[PQ_INACTIVE + q].pl, &marker, pageq);
51c99c61 798 maxscan = vm_page_queues[PQ_INACTIVE + q].lcnt;
b12defdc 799
bb0d6093
MD
800 /*
801 * Queue locked at top of loop to avoid stack marker issues.
802 */
b12defdc 803 while ((m = TAILQ_NEXT(&marker, pageq)) != NULL &&
51c99c61 804 maxscan-- > 0 && avail_shortage - delta > 0)
b12defdc 805 {
534ee349
MD
806 int count;
807
808 KKASSERT(m->queue == PQ_INACTIVE + q);
027193eb 809 TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE + q].pl,
b12defdc 810 &marker, pageq);
027193eb 811 TAILQ_INSERT_AFTER(&vm_page_queues[PQ_INACTIVE + q].pl, m,
b12defdc 812 &marker, pageq);
12e4aaff 813 mycpu->gd_cnt.v_pdpages++;
984263bc 814
06ecca5a 815 /*
bb0d6093
MD
816 * Skip marker pages (atomic against other markers to avoid
817 * infinite hop-over scans).
06ecca5a 818 */
bb0d6093 819 if (m->flags & PG_MARKER)
b12defdc 820 continue;
984263bc
MD
821
822 /*
b12defdc
MD
823 * Try to busy the page. Don't mess with pages which are
824 * already busy or reorder them in the queue.
984263bc 825 */
bb0d6093 826 if (vm_page_busy_try(m, TRUE))
984263bc 827 continue;
d2d8515b 828
984263bc 829 /*
bb0d6093
MD
830 * Remaining operations run with the page busy and neither
831 * the page or the queue will be spin-locked.
984263bc 832 */
bb0d6093 833 vm_page_queues_spin_unlock(PQ_INACTIVE + q);
534ee349
MD
834 KKASSERT(m->queue == PQ_INACTIVE + q);
835
c3feb36a
MD
836 /*
837 * The emergency pager runs when the primary pager gets
838 * stuck, which typically means the primary pager deadlocked
839 * on a vnode-backed page. Therefore, the emergency pager
32c821cf
MD
840 * must skip any complex objects.
841 *
842 * We disallow VNODEs unless they are VCHR whose device ops
843 * do not flag D_NOEMERGPGR.
c3feb36a 844 */
32c821cf
MD
845 if (isep && m->object) {
846 struct vnode *vp;
847
848 switch(m->object->type) {
849 case OBJT_DEFAULT:
850 case OBJT_SWAP:
851 /*
852 * Allow anonymous memory and assume that
853 * swap devices are not complex, since it's
854 * kind of worthless if we can't swap out dirty
855 * anonymous pages.
856 */
857 break;
858 case OBJT_VNODE:
859 /*
860 * Allow VCHR device if the D_NOEMERGPGR
861 * flag is not set, deny other vnode types
862 * as being too complex.
863 */
864 vp = m->object->handle;
865 if (vp && vp->v_type == VCHR &&
866 vp->v_rdev && vp->v_rdev->si_ops &&
867 (vp->v_rdev->si_ops->head.flags &
868 D_NOEMERGPGR) == 0) {
869 break;
870 }
871 /* Deny - fall through */
872 default:
873 /*
874 * Deny
875 */
c3feb36a
MD
876 vm_page_wakeup(m);
877 vm_page_queues_spin_lock(PQ_INACTIVE + q);
878 lwkt_yield();
879 continue;
880 }
881 }
882
883 /*
884 * Try to pageout the page and perhaps other nearby pages.
885 */
534ee349
MD
886 count = vm_pageout_page(m, &max_launder, vnodes_skipped,
887 &vpfailed, pass, 0);
888 delta += count;
06ecca5a 889
90244566 890 /*
534ee349
MD
891 * Systems with a ton of memory can wind up with huge
892 * deactivation counts. Because the inactive scan is
893 * doing a lot of flushing, the combination can result
894 * in excessive paging even in situations where other
895 * unrelated threads free up sufficient VM.
896 *
897 * To deal with this we abort the nominal active->inactive
898 * scan before we hit the inactive target when free+cache
899 * levels have reached a reasonable target.
900 *
901 * When deciding to stop early we need to add some slop to
902 * the test and we need to return full completion to the caller
903 * to prevent the caller from thinking there is something
904 * wrong and issuing a low-memory+swap warning or pkill.
905 *
906 * A deficit forces paging regardless of the state of the
907 * VM page queues (used for RSS enforcement).
90244566 908 */
534ee349
MD
909 lwkt_yield();
910 vm_page_queues_spin_lock(PQ_INACTIVE + q);
911 if (vm_paging_target() < -vm_max_launder) {
912 /*
913 * Stopping early, return full completion to caller.
914 */
915 if (delta < avail_shortage)
916 delta = avail_shortage;
917 break;
918 }
919 }
920
921 /* page queue still spin-locked */
922 TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE + q].pl, &marker, pageq);
923 vm_page_queues_spin_unlock(PQ_INACTIVE + q);
924
925 return (delta);
926}
927
928/*
929 * Pageout the specified page, return the total number of pages paged out
930 * (this routine may cluster).
931 *
932 * The page must be busied and soft-busied by the caller and will be disposed
933 * of by this function.
934 */
935static int
b7ea2f3f 936vm_pageout_page(vm_page_t m, long *max_launderp, long *vnodes_skippedp,
534ee349
MD
937 struct vnode **vpfailedp, int pass, int vmflush_flags)
938{
939 vm_object_t object;
940 int actcount;
941 int count = 0;
942
943 /*
944 * It is possible for a page to be busied ad-hoc (e.g. the
945 * pmap_collect() code) and wired and race against the
946 * allocation of a new page. vm_page_alloc() may be forced
947 * to deactivate the wired page in which case it winds up
948 * on the inactive queue and must be handled here. We
949 * correct the problem simply by unqueuing the page.
950 */
951 if (m->wire_count) {
952 vm_page_unqueue_nowakeup(m);
953 vm_page_wakeup(m);
954 kprintf("WARNING: pagedaemon: wired page on "
955 "inactive queue %p\n", m);
956 return 0;
957 }
958
959 /*
960 * A held page may be undergoing I/O, so skip it.
961 */
962 if (m->hold_count) {
963 vm_page_and_queue_spin_lock(m);
964 if (m->queue - m->pc == PQ_INACTIVE) {
965 TAILQ_REMOVE(
966 &vm_page_queues[m->queue].pl, m, pageq);
967 TAILQ_INSERT_TAIL(
968 &vm_page_queues[m->queue].pl, m, pageq);
969 ++vm_swapcache_inactive_heuristic;
90244566 970 }
534ee349
MD
971 vm_page_and_queue_spin_unlock(m);
972 vm_page_wakeup(m);
973 return 0;
974 }
90244566 975
534ee349 976 if (m->object == NULL || m->object->ref_count == 0) {
984263bc 977 /*
534ee349
MD
978 * If the object is not being used, we ignore previous
979 * references.
984263bc 980 */
534ee349
MD
981 vm_page_flag_clear(m, PG_REFERENCED);
982 pmap_clear_reference(m);
983 /* fall through to end */
984 } else if (((m->flags & PG_REFERENCED) == 0) &&
985 (actcount = pmap_ts_referenced(m))) {
986 /*
987 * Otherwise, if the page has been referenced while
988 * in the inactive queue, we bump the "activation
989 * count" upwards, making it less likely that the
990 * page will be added back to the inactive queue
991 * prematurely again. Here we check the page tables
992 * (or emulated bits, if any), given the upper level
993 * VM system not knowing anything about existing
994 * references.
995 */
996 vm_page_activate(m);
997 m->act_count += (actcount + ACT_ADVANCE);
998 vm_page_wakeup(m);
999 return 0;
1000 }
984263bc 1001
534ee349
MD
1002 /*
1003 * (m) is still busied.
1004 *
1005 * If the upper level VM system knows about any page
1006 * references, we activate the page. We also set the
1007 * "activation count" higher than normal so that we will less
1008 * likely place pages back onto the inactive queue again.
1009 */
1010 if ((m->flags & PG_REFERENCED) != 0) {
1011 vm_page_flag_clear(m, PG_REFERENCED);
1012 actcount = pmap_ts_referenced(m);
1013 vm_page_activate(m);
1014 m->act_count += (actcount + ACT_ADVANCE + 1);
1015 vm_page_wakeup(m);
1016 return 0;
1017 }
1018
1019 /*
1020 * If the upper level VM system doesn't know anything about
1021 * the page being dirty, we have to check for it again. As
1022 * far as the VM code knows, any partially dirty pages are
1023 * fully dirty.
1024 *
1025 * Pages marked PG_WRITEABLE may be mapped into the user
1026 * address space of a process running on another cpu. A
1027 * user process (without holding the MP lock) running on
1028 * another cpu may be able to touch the page while we are
1029 * trying to remove it. vm_page_cache() will handle this
1030 * case for us.
1031 */
1032 if (m->dirty == 0) {
1033 vm_page_test_dirty(m);
1034 } else {
1035 vm_page_dirty(m);
1036 }
984263bc 1037
534ee349 1038 if (m->valid == 0 && (m->flags & PG_NEED_COMMIT) == 0) {
984263bc 1039 /*
534ee349 1040 * Invalid pages can be easily freed
984263bc 1041 */
534ee349
MD
1042 vm_pageout_page_free(m);
1043 mycpu->gd_cnt.v_dfree++;
1044 ++count;
1045 } else if (m->dirty == 0 && (m->flags & PG_NEED_COMMIT) == 0) {
1046 /*
1047 * Clean pages can be placed onto the cache queue.
1048 * This effectively frees them.
1049 */
1050 vm_page_cache(m);
1051 ++count;
1052 } else if ((m->flags & PG_WINATCFLS) == 0 && pass == 0) {
1053 /*
1054 * Dirty pages need to be paged out, but flushing
1055 * a page is extremely expensive versus freeing
1056 * a clean page. Rather than artificially limiting
1057 * the number of pages we can flush, we instead give
1058 * dirty pages extra priority on the inactive queue
1059 * by forcing them to be cycled through the queue
1060 * twice before being flushed, after which the
1061 * (now clean) page will cycle through once more
1062 * before being freed. This significantly extends
1063 * the thrash point for a heavily loaded machine.
1064 */
1065 vm_page_flag_set(m, PG_WINATCFLS);
1066 vm_page_and_queue_spin_lock(m);
1067 if (m->queue - m->pc == PQ_INACTIVE) {
1068 TAILQ_REMOVE(
1069 &vm_page_queues[m->queue].pl, m, pageq);
1070 TAILQ_INSERT_TAIL(
1071 &vm_page_queues[m->queue].pl, m, pageq);
1072 ++vm_swapcache_inactive_heuristic;
984263bc 1073 }
534ee349
MD
1074 vm_page_and_queue_spin_unlock(m);
1075 vm_page_wakeup(m);
1076 } else if (*max_launderp > 0) {
984263bc 1077 /*
534ee349
MD
1078 * We always want to try to flush some dirty pages if
1079 * we encounter them, to keep the system stable.
1080 * Normally this number is small, but under extreme
1081 * pressure where there are insufficient clean pages
1082 * on the inactive queue, we may have to go all out.
984263bc 1083 */
534ee349
MD
1084 int swap_pageouts_ok;
1085 struct vnode *vp = NULL;
1086
1087 swap_pageouts_ok = 0;
1088 object = m->object;
1089 if (object &&
1090 (object->type != OBJT_SWAP) &&
1091 (object->type != OBJT_DEFAULT)) {
1092 swap_pageouts_ok = 1;
984263bc 1093 } else {
c3feb36a
MD
1094 swap_pageouts_ok = !(defer_swap_pageouts ||
1095 disable_swap_pageouts);
1096 swap_pageouts_ok |= (!disable_swap_pageouts &&
1097 defer_swap_pageouts &&
1098 vm_page_count_min(0));
984263bc
MD
1099 }
1100
534ee349
MD
1101 /*
1102 * We don't bother paging objects that are "dead".
1103 * Those objects are in a "rundown" state.
1104 */
1105 if (!swap_pageouts_ok ||
1106 (object == NULL) ||
1107 (object->flags & OBJ_DEAD)) {
b12defdc 1108 vm_page_and_queue_spin_lock(m);
027193eb
MD
1109 if (m->queue - m->pc == PQ_INACTIVE) {
1110 TAILQ_REMOVE(
534ee349
MD
1111 &vm_page_queues[m->queue].pl,
1112 m, pageq);
027193eb 1113 TAILQ_INSERT_TAIL(
534ee349
MD
1114 &vm_page_queues[m->queue].pl,
1115 m, pageq);
64949baa 1116 ++vm_swapcache_inactive_heuristic;
b12defdc
MD
1117 }
1118 vm_page_and_queue_spin_unlock(m);
b12defdc 1119 vm_page_wakeup(m);
534ee349
MD
1120 return 0;
1121 }
1122
1123 /*
1124 * (m) is still busied.
1125 *
1126 * The object is already known NOT to be dead. It
1127 * is possible for the vget() to block the whole
1128 * pageout daemon, but the new low-memory handling
1129 * code should prevent it.
1130 *
1131 * The previous code skipped locked vnodes and, worse,
1132 * reordered pages in the queue. This results in
1133 * completely non-deterministic operation because,
1134 * quite often, a vm_fault has initiated an I/O and
1135 * is holding a locked vnode at just the point where
1136 * the pageout daemon is woken up.
1137 *
1138 * We can't wait forever for the vnode lock, we might
1139 * deadlock due to a vn_read() getting stuck in
1140 * vm_wait while holding this vnode. We skip the
1141 * vnode if we can't get it in a reasonable amount
1142 * of time.
1143 *
1144 * vpfailed is used to (try to) avoid the case where
1145 * a large number of pages are associated with a
1146 * locked vnode, which could cause the pageout daemon
1147 * to stall for an excessive amount of time.
1148 */
1149 if (object->type == OBJT_VNODE) {
1150 int flags;
1151
1152 vp = object->handle;
1153 flags = LK_EXCLUSIVE;
1154 if (vp == *vpfailedp)
1155 flags |= LK_NOWAIT;
1156 else
1157 flags |= LK_TIMELOCK;
1158 vm_page_hold(m);
1159 vm_page_wakeup(m);
1160
984263bc 1161 /*
534ee349
MD
1162 * We have unbusied (m) temporarily so we can
1163 * acquire the vp lock without deadlocking.
1164 * (m) is held to prevent destruction.
984263bc 1165 */
534ee349
MD
1166 if (vget(vp, flags) != 0) {
1167 *vpfailedp = vp;
1168 ++pageout_lock_miss;
1169 if (object->flags & OBJ_MIGHTBEDIRTY)
1170 ++*vnodes_skippedp;
1171 vm_page_unhold(m);
1172 return 0;
984263bc
MD
1173 }
1174
1175 /*
534ee349
MD
1176 * The page might have been moved to another
1177 * queue during potential blocking in vget()
1178 * above. The page might have been freed and
1179 * reused for another vnode. The object might
1180 * have been reused for another vnode.
984263bc 1181 */
534ee349
MD
1182 if (m->queue - m->pc != PQ_INACTIVE ||
1183 m->object != object ||
1184 object->handle != vp) {
1185 if (object->flags & OBJ_MIGHTBEDIRTY)
1186 ++*vnodes_skippedp;
1187 vput(vp);
1188 vm_page_unhold(m);
1189 return 0;
984263bc
MD
1190 }
1191
1192 /*
534ee349
MD
1193 * The page may have been busied during the
1194 * blocking in vget(). We don't move the
1195 * page back onto the end of the queue so that
1196 * statistics are more correct if we don't.
984263bc 1197 */
534ee349
MD
1198 if (vm_page_busy_try(m, TRUE)) {
1199 vput(vp);
b12defdc 1200 vm_page_unhold(m);
534ee349 1201 return 0;
984263bc 1202 }
534ee349 1203 vm_page_unhold(m);
984263bc
MD
1204
1205 /*
534ee349 1206 * (m) is busied again
b12defdc 1207 *
534ee349
MD
1208 * We own the busy bit and remove our hold
1209 * bit. If the page is still held it
1210 * might be undergoing I/O, so skip it.
79ce07d0 1211 */
534ee349
MD
1212 if (m->hold_count) {
1213 vm_page_and_queue_spin_lock(m);
1214 if (m->queue - m->pc == PQ_INACTIVE) {
1215 TAILQ_REMOVE(&vm_page_queues[m->queue].pl, m, pageq);
1216 TAILQ_INSERT_TAIL(&vm_page_queues[m->queue].pl, m, pageq);
1217 ++vm_swapcache_inactive_heuristic;
1218 }
1219 vm_page_and_queue_spin_unlock(m);
1220 if (object->flags & OBJ_MIGHTBEDIRTY)
1221 ++*vnodes_skippedp;
1222 vm_page_wakeup(m);
984263bc 1223 vput(vp);
534ee349
MD
1224 return 0;
1225 }
1226 /* (m) is left busied as we fall through */
984263bc 1227 }
1e1efe39
MD
1228
1229 /*
534ee349 1230 * page is busy and not held here.
1e1efe39 1231 *
534ee349
MD
1232 * If a page is dirty, then it is either being washed
1233 * (but not yet cleaned) or it is still in the
1234 * laundry. If it is still in the laundry, then we
1235 * start the cleaning operation.
1e1efe39 1236 *
534ee349
MD
1237 * decrement inactive_shortage on success to account
1238 * for the (future) cleaned page. Otherwise we
1239 * could wind up laundering or cleaning too many
1240 * pages.
1241 *
1242 * NOTE: Cleaning the page here does not cause
1243 * force_deficit to be adjusted, because the
1244 * page is not being freed or moved to the
1245 * cache.
1e1efe39 1246 */
534ee349
MD
1247 count = vm_pageout_clean_helper(m, vmflush_flags);
1248 *max_launderp -= count;
79ce07d0 1249
534ee349
MD
1250 /*
1251 * Clean ate busy, page no longer accessible
1252 */
1253 if (vp != NULL)
1254 vput(vp);
1255 } else {
1256 vm_page_wakeup(m);
1257 }
1258 return count;
027193eb
MD
1259}
1260
c3feb36a
MD
1261/*
1262 * Scan active queue
1263 *
1264 * WARNING! Can be called from two pagedaemon threads simultaneously.
1265 */
027193eb
MD
1266static int
1267vm_pageout_scan_active(int pass, int q,
b7ea2f3f
MD
1268 long avail_shortage, long inactive_shortage,
1269 long *recycle_countp)
027193eb
MD
1270{
1271 struct vm_page marker;
1272 vm_page_t m;
1273 int actcount;
b7ea2f3f
MD
1274 long delta = 0;
1275 long maxscan;
c3feb36a
MD
1276 int isep;
1277
1278 isep = (curthread == emergpager);
984263bc
MD
1279
1280 /*
20479584
MD
1281 * We want to move pages from the active queue to the inactive
1282 * queue to get the inactive queue to the inactive target. If
1283 * we still have a page shortage from above we try to directly free
1284 * clean pages instead of moving them.
06ecca5a 1285 *
20479584
MD
1286 * If we do still have a shortage we keep track of the number of
1287 * pages we free or cache (recycle_count) as a measure of thrashing
1288 * between the active and inactive queues.
1289 *
51db7ca2
MD
1290 * If we were able to completely satisfy the free+cache targets
1291 * from the inactive pool we limit the number of pages we move
1292 * from the active pool to the inactive pool to 2x the pages we
e6e9a0c3
MD
1293 * had removed from the inactive pool (with a minimum of 1/5 the
1294 * inactive target). If we were not able to completely satisfy
1295 * the free+cache targets we go for the whole target aggressively.
20479584
MD
1296 *
1297 * NOTE: Both variables can end up negative.
1298 * NOTE: We are still in a critical section.
c3feb36a
MD
1299 *
1300 * NOTE! THE EMERGENCY PAGER (isep) DOES NOT LAUNDER VNODE-BACKED
1301 * PAGES.
984263bc 1302 */
20479584 1303
027193eb 1304 bzero(&marker, sizeof(marker));
bc0aa189
MD
1305 marker.flags = PG_FICTITIOUS | PG_MARKER;
1306 marker.busy_count = PBUSY_LOCKED;
027193eb
MD
1307 marker.queue = PQ_ACTIVE + q;
1308 marker.pc = q;
1309 marker.wire_count = 1;
b12defdc 1310
027193eb
MD
1311 vm_page_queues_spin_lock(PQ_ACTIVE + q);
1312 TAILQ_INSERT_HEAD(&vm_page_queues[PQ_ACTIVE + q].pl, &marker, pageq);
51c99c61 1313 maxscan = vm_page_queues[PQ_ACTIVE + q].lcnt;
b12defdc 1314
bb0d6093
MD
1315 /*
1316 * Queue locked at top of loop to avoid stack marker issues.
1317 */
b12defdc 1318 while ((m = TAILQ_NEXT(&marker, pageq)) != NULL &&
51c99c61
MD
1319 maxscan-- > 0 && (avail_shortage - delta > 0 ||
1320 inactive_shortage > 0))
b12defdc 1321 {
534ee349 1322 KKASSERT(m->queue == PQ_ACTIVE + q);
027193eb 1323 TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE + q].pl,
b12defdc 1324 &marker, pageq);
027193eb 1325 TAILQ_INSERT_AFTER(&vm_page_queues[PQ_ACTIVE + q].pl, m,
b12defdc 1326 &marker, pageq);
984263bc 1327
984263bc 1328 /*
bb0d6093
MD
1329 * Skip marker pages (atomic against other markers to avoid
1330 * infinite hop-over scans).
984263bc 1331 */
bb0d6093 1332 if (m->flags & PG_MARKER)
b12defdc 1333 continue;
06ecca5a 1334
984263bc 1335 /*
b12defdc
MD
1336 * Try to busy the page. Don't mess with pages which are
1337 * already busy or reorder them in the queue.
984263bc 1338 */
bb0d6093 1339 if (vm_page_busy_try(m, TRUE))
984263bc 1340 continue;
bb0d6093
MD
1341
1342 /*
1343 * Remaining operations run with the page busy and neither
1344 * the page or the queue will be spin-locked.
1345 */
1346 vm_page_queues_spin_unlock(PQ_ACTIVE + q);
534ee349 1347 KKASSERT(m->queue == PQ_ACTIVE + q);
984263bc 1348
b12defdc
MD
1349 /*
1350 * Don't deactivate pages that are held, even if we can
1351 * busy them. (XXX why not?)
1352 */
1353 if (m->hold_count != 0) {
bb0d6093
MD
1354 vm_page_and_queue_spin_lock(m);
1355 if (m->queue - m->pc == PQ_ACTIVE) {
1356 TAILQ_REMOVE(
1357 &vm_page_queues[PQ_ACTIVE + q].pl,
1358 m, pageq);
1359 TAILQ_INSERT_TAIL(
1360 &vm_page_queues[PQ_ACTIVE + q].pl,
1361 m, pageq);
1362 }
b12defdc
MD
1363 vm_page_and_queue_spin_unlock(m);
1364 vm_page_wakeup(m);
bb0d6093 1365 goto next;
b12defdc 1366 }
b12defdc 1367
c3feb36a
MD
1368 /*
1369 * The emergency pager ignores vnode-backed pages as these
1370 * are the pages that probably bricked the main pager.
1371 */
1372 if (isep && m->object && m->object->type == OBJT_VNODE) {
1373 vm_page_and_queue_spin_lock(m);
1374 if (m->queue - m->pc == PQ_ACTIVE) {
1375 TAILQ_REMOVE(
1376 &vm_page_queues[PQ_ACTIVE + q].pl,
1377 m, pageq);
1378 TAILQ_INSERT_TAIL(
1379 &vm_page_queues[PQ_ACTIVE + q].pl,
1380 m, pageq);
1381 }
1382 vm_page_and_queue_spin_unlock(m);
1383 vm_page_wakeup(m);
1384 goto next;
1385 }
1386
984263bc
MD
1387 /*
1388 * The count for pagedaemon pages is done after checking the
1389 * page for eligibility...
1390 */
12e4aaff 1391 mycpu->gd_cnt.v_pdpages++;
984263bc
MD
1392
1393 /*
20479584
MD
1394 * Check to see "how much" the page has been used and clear
1395 * the tracking access bits. If the object has no references
1396 * don't bother paying the expense.
984263bc
MD
1397 */
1398 actcount = 0;
19cd98ea 1399 if (m->object && m->object->ref_count != 0) {
20479584
MD
1400 if (m->flags & PG_REFERENCED)
1401 ++actcount;
984263bc
MD
1402 actcount += pmap_ts_referenced(m);
1403 if (actcount) {
1404 m->act_count += ACT_ADVANCE + actcount;
1405 if (m->act_count > ACT_MAX)
1406 m->act_count = ACT_MAX;
1407 }
1408 }
984263bc
MD
1409 vm_page_flag_clear(m, PG_REFERENCED);
1410
1411 /*
20479584 1412 * actcount is only valid if the object ref_count is non-zero.
19cd98ea 1413 * If the page does not have an object, actcount will be zero.
984263bc 1414 */
20479584 1415 if (actcount && m->object->ref_count != 0) {
b12defdc 1416 vm_page_and_queue_spin_lock(m);
027193eb
MD
1417 if (m->queue - m->pc == PQ_ACTIVE) {
1418 TAILQ_REMOVE(
1419 &vm_page_queues[PQ_ACTIVE + q].pl,
1420 m, pageq);
1421 TAILQ_INSERT_TAIL(
1422 &vm_page_queues[PQ_ACTIVE + q].pl,
1423 m, pageq);
b12defdc
MD
1424 }
1425 vm_page_and_queue_spin_unlock(m);
1426 vm_page_wakeup(m);
984263bc 1427 } else {
38587548
MD
1428 switch(m->object->type) {
1429 case OBJT_DEFAULT:
1430 case OBJT_SWAP:
1431 m->act_count -= min(m->act_count,
1432 vm_anonmem_decline);
1433 break;
1434 default:
1435 m->act_count -= min(m->act_count,
1436 vm_filemem_decline);
1437 break;
1438 }
984263bc 1439 if (vm_pageout_algorithm ||
19cd98ea
VS
1440 (m->object == NULL) ||
1441 (m->object && (m->object->ref_count == 0)) ||
20479584
MD
1442 m->act_count < pass + 1
1443 ) {
1444 /*
1445 * Deactivate the page. If we had a
1446 * shortage from our inactive scan try to
1447 * free (cache) the page instead.
e6e9a0c3
MD
1448 *
1449 * Don't just blindly cache the page if
1450 * we do not have a shortage from the
1451 * inactive scan, that could lead to
1452 * gigabytes being moved.
20479584 1453 */
51c99c61
MD
1454 --inactive_shortage;
1455 if (avail_shortage - delta > 0 ||
19cd98ea
VS
1456 (m->object && (m->object->ref_count == 0)))
1457 {
51c99c61 1458 if (avail_shortage - delta > 0)
027193eb 1459 ++*recycle_countp;
984263bc 1460 vm_page_protect(m, VM_PROT_NONE);
e6e9a0c3 1461 if (m->dirty == 0 &&
9bf025db 1462 (m->flags & PG_NEED_COMMIT) == 0 &&
51c99c61 1463 avail_shortage - delta > 0) {
984263bc 1464 vm_page_cache(m);
c84c24da 1465 } else {
984263bc 1466 vm_page_deactivate(m);
a491077e 1467 vm_page_wakeup(m);
c84c24da 1468 }
984263bc
MD
1469 } else {
1470 vm_page_deactivate(m);
b12defdc 1471 vm_page_wakeup(m);
984263bc 1472 }
51c99c61 1473 ++delta;
984263bc 1474 } else {
b12defdc 1475 vm_page_and_queue_spin_lock(m);
027193eb 1476 if (m->queue - m->pc == PQ_ACTIVE) {
b12defdc 1477 TAILQ_REMOVE(
027193eb
MD
1478 &vm_page_queues[PQ_ACTIVE + q].pl,
1479 m, pageq);
b12defdc 1480 TAILQ_INSERT_TAIL(
027193eb
MD
1481 &vm_page_queues[PQ_ACTIVE + q].pl,
1482 m, pageq);
b12defdc
MD
1483 }
1484 vm_page_and_queue_spin_unlock(m);
1485 vm_page_wakeup(m);
984263bc
MD
1486 }
1487 }
bb0d6093 1488next:
534ee349 1489 lwkt_yield();
bb0d6093 1490 vm_page_queues_spin_lock(PQ_ACTIVE + q);
984263bc
MD
1491 }
1492
b12defdc
MD
1493 /*
1494 * Clean out our local marker.
bb0d6093
MD
1495 *
1496 * Page queue still spin-locked.
b12defdc 1497 */
027193eb
MD
1498 TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE + q].pl, &marker, pageq);
1499 vm_page_queues_spin_unlock(PQ_ACTIVE + q);
1500
1501 return (delta);
1502}
1503
1504/*
1505 * The number of actually free pages can drop down to v_free_reserved,
1506 * so we try to build the free count back above v_free_min. Note that
1507 * vm_paging_needed() also returns TRUE if v_free_count is not at
1508 * least v_free_min so that is the minimum we must build the free
1509 * count to.
1510 *
1511 * We use a slightly higher target to improve hysteresis,
1512 * ((v_free_target + v_free_min) / 2). Since v_free_target
1513 * is usually the same as v_cache_min this maintains about
1514 * half the pages in the free queue as are in the cache queue,
1515 * providing pretty good pipelining for pageout operation.
1516 *
1517 * The system operator can manipulate vm.v_cache_min and
1518 * vm.v_free_target to tune the pageout daemon. Be sure
1519 * to keep vm.v_free_min < vm.v_free_target.
1520 *
1521 * Note that the original paging target is to get at least
1522 * (free_min + cache_min) into (free + cache). The slightly
1523 * higher target will shift additional pages from cache to free
1524 * without affecting the original paging target in order to
1525 * maintain better hysteresis and not have the free count always
1526 * be dead-on v_free_min.
1527 *
1528 * NOTE: we are still in a critical section.
1529 *
1530 * Pages moved from PQ_CACHE to totally free are not counted in the
1531 * pages_freed counter.
c3feb36a
MD
1532 *
1533 * WARNING! Can be called from two pagedaemon threads simultaneously.
027193eb
MD
1534 */
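/*
 * Numeric illustration (page counts made up for the example): with
 * v_free_min = 1000 and v_free_target = 3000, the loop below keeps
 * pulling pages off PQ_CACHE and freeing them until v_free_count reaches
 * (1000 + 3000) / 2 = 2000, i.e. halfway between the minimum and the
 * target, which provides the hysteresis described above.
 */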
1535static void
b7ea2f3f
MD
1536vm_pageout_scan_cache(long avail_shortage, int pass,
1537 long vnodes_skipped, long recycle_count)
027193eb 1538{
9cd626ca 1539 static int lastkillticks;
027193eb
MD
1540 struct vm_pageout_scan_info info;
1541 vm_page_t m;
c3feb36a
MD
1542 int isep;
1543
1544 isep = (curthread == emergpager);
b12defdc 1545
cd3c66bd
MD
1546 while (vmstats.v_free_count <
1547 (vmstats.v_free_min + vmstats.v_free_target) / 2) {
1548 /*
b12defdc 1549 * This steals some code from vm/vm_page.c
cf2880a2
MD
1550 *
1551 * Create two rovers and adjust the code to reduce
1552 * chances of them winding up at the same index (which
1553 * can cause a lot of contention).
cd3c66bd 1554 */
cf2880a2
MD
1555 static int cache_rover[2] = { 0, PQ_L2_MASK / 2 };
1556
1557 if (((cache_rover[0] ^ cache_rover[1]) & PQ_L2_MASK) == 0)
1558 goto next_rover;
b12defdc 1559
cf2880a2 1560 m = vm_page_list_find(PQ_CACHE, cache_rover[isep] & PQ_L2_MASK);
20479584 1561 if (m == NULL)
984263bc 1562 break;
b12defdc
MD
1563 /* page is returned removed from its queue and spinlocked */
1564 if (vm_page_busy_try(m, TRUE)) {
1565 vm_page_deactivate_locked(m);
1566 vm_page_spin_unlock(m);
b12defdc
MD
1567 continue;
1568 }
1569 vm_page_spin_unlock(m);
1570 pagedaemon_wakeup();
d2d8515b 1571 lwkt_yield();
b12defdc
MD
1572
1573 /*
bb0d6093
MD
1574 * Remaining operations run with the page busy and neither
1575 * the page or the queue will be spin-locked.
b12defdc 1576 */
9bf025db 1577 if ((m->flags & (PG_UNMANAGED | PG_NEED_COMMIT)) ||
b12defdc
MD
1578 m->hold_count ||
1579 m->wire_count) {
984263bc 1580 vm_page_deactivate(m);
b12defdc 1581 vm_page_wakeup(m);
984263bc
MD
1582 continue;
1583 }
17cde63e
MD
1584 KKASSERT((m->flags & PG_MAPPED) == 0);
1585 KKASSERT(m->dirty == 0);
984263bc 1586 vm_pageout_page_free(m);
12e4aaff 1587 mycpu->gd_cnt.v_dfree++;
cf2880a2
MD
1588next_rover:
1589 if (isep)
1590 cache_rover[1] -= PQ_PRIME2;
1591 else
1592 cache_rover[0] += PQ_PRIME2;
984263bc 1593 }
06ecca5a 1594
984263bc
MD
1595#if !defined(NO_SWAPPING)
1596 /*
1597 * Idle process swapout -- run once per second.
1598 */
1599 if (vm_swap_idle_enabled) {
cec73927
MD
1600 static time_t lsec;
1601 if (time_uptime != lsec) {
534ee349 1602 atomic_set_int(&vm_pageout_req_swapout, VM_SWAP_IDLE);
984263bc 1603 vm_req_vmdaemon();
cec73927 1604 lsec = time_uptime;
984263bc
MD
1605 }
1606 }
1607#endif
1608
1609 /*
1610 * If we didn't get enough free pages, and we have skipped a vnode
1611 * in a writeable object, wake up the sync daemon. And kick swapout
1612 * if we did not get enough free pages.
1613 */
1614 if (vm_paging_target() > 0) {
20479584 1615 if (vnodes_skipped && vm_page_count_min(0))
cf6a53ca 1616 speedup_syncer(NULL);
984263bc
MD
1617#if !defined(NO_SWAPPING)
1618 if (vm_swap_enabled && vm_page_count_target()) {
534ee349 1619 atomic_set_int(&vm_pageout_req_swapout, VM_SWAP_NORMAL);
984263bc 1620 vm_req_vmdaemon();
984263bc
MD
1621 }
1622#endif
1623 }
1624
1625 /*
20479584
MD
1626 * Handle catastrophic conditions. Under good conditions we should
1627 * be at the target, well beyond our minimum. If we could not even
09eff544
MD
1628 * reach our minimum the system is under heavy stress. But just being
1629 * under heavy stress does not trigger process killing.
20479584 1630 *
09eff544
MD
1631 * We consider ourselves to have run out of memory if the swap pager
1632 * is full and avail_shortage is still positive. The secondary check
1633 * ensures that we do not kill processes if the instantaneous
1634 * availability is good, even if the pageout daemon pass says it
1635 * couldn't get to the target.
c3feb36a
MD
1636 *
1637 * NOTE! THE EMERGENCY PAGER (isep) DOES NOT HANDLE SWAP FULL
1638 * SITUATIONS.
984263bc 1639 */
09eff544 1640 if (swap_pager_almost_full &&
9cd626ca 1641 pass > 0 &&
c3feb36a 1642 isep == 0 &&
09eff544
MD
1643 (vm_page_count_min(recycle_count) || avail_shortage > 0)) {
1644 kprintf("Warning: system low on memory+swap "
b7ea2f3f 1645 "shortage %ld for %d ticks!\n",
09eff544 1646 avail_shortage, ticks - swap_fail_ticks);
d081ceac 1647 if (bootverbose)
b7ea2f3f
MD
1648 kprintf("Metrics: spaf=%d spf=%d pass=%d "
1649 "avail=%ld target=%ld last=%u\n",
d081ceac
MD
1650 swap_pager_almost_full,
1651 swap_pager_full,
1652 pass,
1653 avail_shortage,
1654 vm_paging_target(),
1655 (unsigned int)(ticks - lastkillticks));
09eff544
MD
1656 }
1657 if (swap_pager_full &&
9cd626ca 1658 pass > 1 &&
c3feb36a 1659 isep == 0 &&
09eff544 1660 avail_shortage > 0 &&
9cd626ca
MD
1661 vm_paging_target() > 0 &&
1662 (unsigned int)(ticks - lastkillticks) >= hz) {
20479584 1663 /*
9cd626ca
MD
1664 * Kill something, maximum rate once per second to give
1665 * the process time to free up sufficient memory.
20479584 1666 */
9cd626ca 1667 lastkillticks = ticks;
8fa76237
MD
1668 info.bigproc = NULL;
1669 info.bigsize = 0;
586c4308 1670 allproc_scan(vm_pageout_scan_callback, &info, 0);
8fa76237 1671 if (info.bigproc != NULL) {
d081ceac
MD
1672 kprintf("Try to kill process %d %s\n",
1673 info.bigproc->p_pid, info.bigproc->p_comm);
8fa76237 1674 info.bigproc->p_nice = PRIO_MIN;
08f2f1bb
SS
1675 info.bigproc->p_usched->resetpriority(
1676 FIRST_LWP_IN_PROC(info.bigproc));
2c9e2984 1677 atomic_set_int(&info.bigproc->p_flags, P_LOWMEMKILL);
9cd626ca 1678 killproc(info.bigproc, "out of swap space");
12e4aaff 1679 wakeup(&vmstats.v_free_count);
8fa76237 1680 PRELE(info.bigproc);
984263bc
MD
1681 }
1682 }
1683}
1684
8fa76237
MD
1685static int
1686vm_pageout_scan_callback(struct proc *p, void *data)
1687{
1688 struct vm_pageout_scan_info *info = data;
1689 vm_offset_t size;
1690
1691 /*
20479584
MD
1692 * Never kill system processes or init. If we have configured swap
1693 * then try to avoid killing low-numbered pids.
8fa76237 1694 */
4643740a 1695 if ((p->p_flags & P_SYSTEM) || (p->p_pid == 1) ||
8fa76237
MD
1696 ((p->p_pid < 48) && (vm_swap_size != 0))) {
1697 return (0);
1698 }
1699
a8d3ab53
MD
1700 lwkt_gettoken(&p->p_token);
1701
8fa76237
MD
1702 /*
1703 * if the process is in a non-running type state,
1704 * don't touch it.
1705 */
f5b92db7 1706 if (p->p_stat != SACTIVE && p->p_stat != SSTOP && p->p_stat != SCORE) {
a8d3ab53 1707 lwkt_reltoken(&p->p_token);
8fa76237 1708 return (0);
a8d3ab53 1709 }
8fa76237
MD
1710
1711 /*
20479584
MD
1712 * Get the approximate process size. Note that anonymous pages
1713 * with backing swap will be counted twice, but there should not
1714 * be too many such pages due to the stress the VM system is
1715 * under at this point.
8fa76237 1716 */
20479584 1717 size = vmspace_anonymous_count(p->p_vmspace) +
8fa76237
MD
1718 vmspace_swap_count(p->p_vmspace);
1719
1720 /*
1721 * If this process is bigger than the biggest one
1722 * remember it.
1723 */
20479584 1724 if (info->bigsize < size) {
8fa76237
MD
1725 if (info->bigproc)
1726 PRELE(info->bigproc);
1727 PHOLD(p);
1728 info->bigproc = p;
1729 info->bigsize = size;
1730 }
a8d3ab53 1731 lwkt_reltoken(&p->p_token);
d2d8515b 1732 lwkt_yield();
a8d3ab53 1733
8fa76237
MD
1734 return(0);
1735}
1736
984263bc
MD
1737/*
1738 * This routine tries to maintain the pseudo LRU active queue,
1738 * so that during long periods of time when there is no paging,
1739 * some statistic accumulation still occurs.  This code
1741 * helps the situation where paging just starts to occur.
1742 */
1743static void
027193eb 1744vm_pageout_page_stats(int q)
984263bc 1745{
984263bc 1746 static int fullintervalcount = 0;
b12defdc
MD
1747 struct vm_page marker;
1748 vm_page_t m;
b7ea2f3f
MD
1749 long pcount, tpcount; /* Number of pages to check */
1750 long page_shortage;
984263bc 1751
b12defdc
MD
1752 page_shortage = (vmstats.v_inactive_target + vmstats.v_cache_max +
1753 vmstats.v_free_min) -
1754 (vmstats.v_free_count + vmstats.v_inactive_count +
1755 vmstats.v_cache_count);
984263bc
MD
1756
1757 if (page_shortage <= 0)
1758 return;
1759
51c99c61 1760 pcount = vm_page_queues[PQ_ACTIVE + q].lcnt;
984263bc
MD
1761 fullintervalcount += vm_pageout_stats_interval;
1762 if (fullintervalcount < vm_pageout_full_stats_interval) {
51c99c61
MD
1763 tpcount = (vm_pageout_stats_max * pcount) /
1764 vmstats.v_page_count + 1;
984263bc
MD
1765 if (pcount > tpcount)
1766 pcount = tpcount;
1767 } else {
1768 fullintervalcount = 0;
1769 }
1770
b12defdc 1771 bzero(&marker, sizeof(marker));
bc0aa189
MD
1772 marker.flags = PG_FICTITIOUS | PG_MARKER;
1773 marker.busy_count = PBUSY_LOCKED;
027193eb
MD
1774 marker.queue = PQ_ACTIVE + q;
1775 marker.pc = q;
b12defdc
MD
1776 marker.wire_count = 1;
1777
027193eb
MD
1778 vm_page_queues_spin_lock(PQ_ACTIVE + q);
1779 TAILQ_INSERT_HEAD(&vm_page_queues[PQ_ACTIVE + q].pl, &marker, pageq);
b12defdc 1780
bb0d6093
MD
1781 /*
1782 * Queue locked at top of loop to avoid stack marker issues.
1783 */
b12defdc
MD
1784 while ((m = TAILQ_NEXT(&marker, pageq)) != NULL &&
1785 pcount-- > 0)
1786 {
984263bc
MD
1787 int actcount;
1788
534ee349 1789 KKASSERT(m->queue == PQ_ACTIVE + q);
027193eb
MD
1790 TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE + q].pl, &marker, pageq);
1791 TAILQ_INSERT_AFTER(&vm_page_queues[PQ_ACTIVE + q].pl, m,
b12defdc 1792 &marker, pageq);
984263bc 1793
984263bc 1794 /*
bb0d6093
MD
1795 * Skip marker pages (atomic against other markers to avoid
1796 * infinite hop-over scans).
984263bc 1797 */
bb0d6093 1798 if (m->flags & PG_MARKER)
984263bc 1799 continue;
984263bc 1800
b12defdc
MD
1801 /*
1802 * Ignore pages we can't busy
1803 */
bb0d6093 1804 if (vm_page_busy_try(m, TRUE))
b12defdc 1805 continue;
bb0d6093
MD
1806
1807 /*
1808 * Remaining operations run with the page busy and neither
1809 * the page nor the queue will be spin-locked.
1810 */
1811 vm_page_queues_spin_unlock(PQ_ACTIVE + q);
534ee349 1812 KKASSERT(m->queue == PQ_ACTIVE + q);
b12defdc
MD
1813
1814 /*
1815 * We now have a safely busied page, the page and queue
1816 * spinlocks have been released.
1817 *
1818 * Ignore held pages
1819 */
1820 if (m->hold_count) {
1821 vm_page_wakeup(m);
bb0d6093 1822 goto next;
b12defdc
MD
1823 }
1824
1825 /*
1826 * Calculate activity
1827 */
984263bc
MD
1828 actcount = 0;
1829 if (m->flags & PG_REFERENCED) {
1830 vm_page_flag_clear(m, PG_REFERENCED);
1831 actcount += 1;
1832 }
984263bc 1833 actcount += pmap_ts_referenced(m);
b12defdc
MD
1834
1835 /*
1836 * Update act_count and move page to end of queue.
1837 */
984263bc
MD
1838 if (actcount) {
1839 m->act_count += ACT_ADVANCE + actcount;
1840 if (m->act_count > ACT_MAX)
1841 m->act_count = ACT_MAX;
b12defdc 1842 vm_page_and_queue_spin_lock(m);
027193eb
MD
1843 if (m->queue - m->pc == PQ_ACTIVE) {
1844 TAILQ_REMOVE(
1845 &vm_page_queues[PQ_ACTIVE + q].pl,
1846 m, pageq);
1847 TAILQ_INSERT_TAIL(
1848 &vm_page_queues[PQ_ACTIVE + q].pl,
1849 m, pageq);
984263bc 1850 }
b12defdc
MD
1851 vm_page_and_queue_spin_unlock(m);
1852 vm_page_wakeup(m);
bb0d6093 1853 goto next;
984263bc
MD
1854 }
1855
b12defdc
MD
1856 if (m->act_count == 0) {
1857 /*
1858 * We turn off page access, so that we have
1859 * more accurate RSS stats. We don't do this
1860 * in the normal page deactivation when the
1861 * system is loaded VM-wise, because the
1862 * cost of the large number of page protect
1863 * operations would be higher than the value
1864 * of doing the operation.
1865 *
1866 * We use the marker to save our place so
1867 * we can release the spin lock.  Both (m)
1868 * and (next) will be invalid.
1869 */
1870 vm_page_protect(m, VM_PROT_NONE);
1871 vm_page_deactivate(m);
1872 } else {
1873 m->act_count -= min(m->act_count, ACT_DECLINE);
1874 vm_page_and_queue_spin_lock(m);
027193eb
MD
1875 if (m->queue - m->pc == PQ_ACTIVE) {
1876 TAILQ_REMOVE(
1877 &vm_page_queues[PQ_ACTIVE + q].pl,
1878 m, pageq);
1879 TAILQ_INSERT_TAIL(
1880 &vm_page_queues[PQ_ACTIVE + q].pl,
1881 m, pageq);
b12defdc
MD
1882 }
1883 vm_page_and_queue_spin_unlock(m);
1884 }
1885 vm_page_wakeup(m);
bb0d6093
MD
1886next:
1887 vm_page_queues_spin_lock(PQ_ACTIVE + q);
984263bc 1888 }
b12defdc
MD
1889
1890 /*
1891 * Remove our local marker
bb0d6093
MD
1892 *
1893 * Page queue still spin-locked.
b12defdc 1894 */
027193eb
MD
1895 TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE + q].pl, &marker, pageq);
1896 vm_page_queues_spin_unlock(PQ_ACTIVE + q);
984263bc
MD
1897}
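/*
 * The marker-based traversal used above, in outline (a sketch of the
 * pattern only, not additional code executed by the kernel):
 *
 *	spin-lock the queue and insert the marker at its head;
 *	while ((m = TAILQ_NEXT(&marker, pageq)) != NULL) {
 *		move the marker to just after (m);
 *		if (m) is another marker or cannot be busied, continue;
 *		unlock the queue and operate on the busied page;
 *		re-lock the queue before the next iteration;
 *	}
 *	remove the marker and unlock the queue;
 *
 * Because the marker itself sits on the queue, the scan position is
 * preserved across dropping the queue spinlock for each page.
 */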
1898
1899static int
57e43348 1900vm_pageout_free_page_calc(vm_size_t count)
984263bc 1901{
12e4aaff 1902 if (count < vmstats.v_page_count)
984263bc
MD
1903 return 0;
1904 /*
1905 * free_reserved needs to include enough for the largest swap pager
1906 * structures plus enough for any pv_entry structs when paging.
0a4d4828
MD
1907 *
1908 * v_free_min normal allocations
1909 * v_free_reserved system allocations
1910 * v_pageout_free_min allocations by pageout daemon
1911 * v_interrupt_free_min low level allocations (e.g swap structures)
984263bc 1912 */
12e4aaff 1913 if (vmstats.v_page_count > 1024)
0a4d4828 1914 vmstats.v_free_min = 64 + (vmstats.v_page_count - 1024) / 200;
984263bc 1915 else
0a4d4828 1916 vmstats.v_free_min = 64;
55a7854b
MD
1917
1918 /*
1919 * Make sure the vmmeter slop can't blow out our global minimums.
8178c126
MD
1920 *
1921 * However, to accommodate weird configurations (vkernels with many
1922 * cpus and little memory, or artificially reduced hw.physmem), do
1923 * not allow v_free_min to exceed 1/20 of ram or the pageout daemon
1924 * will go out of control.
55a7854b
MD
1925 */
1926 if (vmstats.v_free_min < VMMETER_SLOP_COUNT * ncpus * 10)
1927 vmstats.v_free_min = VMMETER_SLOP_COUNT * ncpus * 10;
8178c126
MD
1928 if (vmstats.v_free_min > vmstats.v_page_count / 20)
1929 vmstats.v_free_min = vmstats.v_page_count / 20;
55a7854b 1930
0a4d4828
MD
1931 vmstats.v_free_reserved = vmstats.v_free_min * 4 / 8 + 7;
1932 vmstats.v_free_severe = vmstats.v_free_min * 4 / 8 + 0;
1933 vmstats.v_pageout_free_min = vmstats.v_free_min * 2 / 8 + 7;
1934 vmstats.v_interrupt_free_min = vmstats.v_free_min * 1 / 8 + 7;
1935
984263bc
MD
1936 return 1;
1937}
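/*
 * Worked example of the thresholds computed above (illustrative only;
 * assumes roughly 1GB of RAM with 4K pages, i.e. v_page_count of about
 * 262144, and ignores the VMMETER_SLOP_COUNT floor, which depends on
 * the number of cpus):
 *
 *	v_free_min           = 64 + (262144 - 1024) / 200  ~= 1369
 *	v_free_reserved      = 1369 * 4 / 8 + 7              =  691
 *	v_free_severe        = 1369 * 4 / 8                  =  684
 *	v_pageout_free_min   = 1369 * 2 / 8 + 7              =  349
 *	v_interrupt_free_min = 1369 * 1 / 8 + 7              =  178
 *
 * The 1/20-of-ram clamp does not kick in here (262144 / 20 = 13107).
 */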
1938
1939
1940/*
c3feb36a
MD
1941 * vm_pageout is the high level pageout daemon. TWO kernel threads run
1942 * this daemon, the primary pageout daemon and the emergency pageout daemon.
99ad9bc4 1943 *
c3feb36a
MD
1944 * The emergency pageout daemon takes over when the primary pageout daemon
1945 * deadlocks. The emergency pageout daemon ONLY pages out to swap, thus
1946 * avoiding the many low-memory deadlocks which can occur when paging out
1947 * to VFS's.
984263bc
MD
1948 */
1949static void
cd8ab232 1950vm_pageout_thread(void)
984263bc
MD
1951{
1952 int pass;
027193eb 1953 int q;
79ce07d0
MD
1954 int q1iterator = 0;
1955 int q2iterator = 0;
c3feb36a 1956 int isep;
c3feb36a
MD
1957
1958 curthread->td_flags |= TDF_SYSTHREAD;
984263bc
MD
1959
1960 /*
c3feb36a 1961 * We only need to setup once.
984263bc 1962 */
c3feb36a 1963 isep = 0;
c3feb36a
MD
1964 if (curthread == emergpager) {
1965 isep = 1;
1966 goto skip_setup;
1967 }
984263bc 1968
c3feb36a
MD
1969 /*
1970 * Initialize some paging parameters.
1971 */
12e4aaff 1972 vm_pageout_free_page_calc(vmstats.v_page_count);
20479584 1973
984263bc
MD
1974 /*
1975 * v_free_target and v_cache_min control pageout hysteresis. Note
1976 * that these are more a measure of the VM cache queue hysteresis
1977 * than the VM free queue. Specifically, v_free_target is the
1978 * high water mark (free+cache pages).
1979 *
1980 * v_free_reserved + v_cache_min (mostly means v_cache_min) is the
1981 * low water mark, while v_free_min is the stop. v_cache_min must
1982 * be big enough to handle memory needs while the pageout daemon
1983 * is signalled and run to free more pages.
1984 */
12e4aaff 1985 if (vmstats.v_free_count > 6144)
8178c126
MD
1986 vmstats.v_free_target = 4 * vmstats.v_free_min +
1987 vmstats.v_free_reserved;
984263bc 1988 else
8178c126
MD
1989 vmstats.v_free_target = 2 * vmstats.v_free_min +
1990 vmstats.v_free_reserved;
984263bc 1991
0e8bd897
MD
1992 /*
1993 * NOTE: With the new buffer cache b_act_count we want the default
1994 * inactive target to be a percentage of available memory.
1995 *
1996 * The inactive target essentially determines the minimum
1997 * number of 'temporary' pages capable of caching one-time-use
1998 * files when the VM system is otherwise full of pages
1999 * belonging to multi-time-use files or active program data.
51db7ca2
MD
2000 *
2001 * NOTE: The inactive target is aggressively pursued only if the
2002 * inactive queue becomes too small. If the inactive queue
2003 * is large enough to satisfy page movement to free+cache
2004 * then it is repopulated more slowly from the active queue.
e15708fc 2005 * This allows a general inactive_target default to be set.
51db7ca2
MD
2006 *
2007 * There is an issue here for processes which sit mostly idle
2008 * 'overnight', such as sshd, tcsh, and X. Any movement from
2009 * the active queue will eventually cause such pages to
2010 * recycle, eventually causing a lot of paging in the morning.
2011 * To reduce the incidence of this, pages cycled out of the
2012 * buffer cache are moved directly to the inactive queue if
e15708fc
MD
2013 * they were only used once or twice.
2014 *
2015 * The vfs.vm_cycle_point sysctl can be used to adjust this.
2016 * Increasing the value (up to 64) increases the number of
2017 * buffer recyclements which go directly to the inactive queue.
0e8bd897 2018 */
12e4aaff
MD
2019 if (vmstats.v_free_count > 2048) {
2020 vmstats.v_cache_min = vmstats.v_free_target;
2021 vmstats.v_cache_max = 2 * vmstats.v_cache_min;
984263bc 2022 } else {
12e4aaff
MD
2023 vmstats.v_cache_min = 0;
2024 vmstats.v_cache_max = 0;
984263bc 2025 }
e15708fc 2026 vmstats.v_inactive_target = vmstats.v_free_count / 4;
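/*
 * Continuing the illustrative numbers from vm_pageout_free_page_calc()
 * (v_free_min ~= 1369, v_free_reserved ~= 691, most pages free at this
 * point):
 *
 *	v_free_target     = 4 * 1369 + 691   ~=  6167
 *	v_cache_min       = v_free_target    ~=  6167
 *	v_cache_max       = 2 * v_cache_min  ~= 12334
 *	v_inactive_target = v_free_count / 4
 *
 * i.e. paging starts well before free memory is actually exhausted and
 * stops again once free+cache recovers to the target.
 */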
984263bc
MD
2027
2028 /* XXX does not really belong here */
2029 if (vm_page_max_wired == 0)
12e4aaff 2030 vm_page_max_wired = vmstats.v_free_count / 3;
984263bc
MD
2031
2032 if (vm_pageout_stats_max == 0)
12e4aaff 2033 vm_pageout_stats_max = vmstats.v_free_target;
984263bc
MD
2034
2035 /*
2036 * Set interval in seconds for stats scan.
2037 */
2038 if (vm_pageout_stats_interval == 0)
2039 vm_pageout_stats_interval = 5;
2040 if (vm_pageout_full_stats_interval == 0)
2041 vm_pageout_full_stats_interval = vm_pageout_stats_interval * 4;
2042
2043
2044 /*
2045 * Set maximum free per pass
2046 */
2047 if (vm_pageout_stats_free_max == 0)
2048 vm_pageout_stats_free_max = 5;
2049
2050 swap_pager_swap_init();
2051 pass = 0;
20479584 2052
c3feb36a
MD
2053 atomic_swap_int(&sequence_emerg_pager, 1);
2054 wakeup(&sequence_emerg_pager);
2055
2056skip_setup:
2057 /*
2058 * Sequence emergency pager startup
2059 */
2060 if (isep) {
2061 while (sequence_emerg_pager == 0)
2062 tsleep(&sequence_emerg_pager, 0, "pstartup", hz);
2063 }
2064
984263bc
MD
2065 /*
2066 * The pageout daemon is never done, so loop forever.
c3feb36a
MD
2067 *
2068 * WARNING! This code is being executed by two kernel threads
2069 * potentially simultaneously.
984263bc
MD
2070 */
2071 while (TRUE) {
2072 int error;
b7ea2f3f
MD
2073 long avail_shortage;
2074 long inactive_shortage;
2075 long vnodes_skipped = 0;
2076 long recycle_count = 0;
2077 long tmp;
984263bc 2078
12d8aca7 2079 /*
1bfac262
MD
2080 * Wait for an action request. If we timeout check to
2081 * see if paging is needed (in case the normal wakeup
2082 * code raced us).
12d8aca7 2083 */
c3feb36a
MD
2084 if (isep) {
2085 /*
2086 * Emergency pagedaemon monitors the primary
2087 * pagedaemon while vm_pages_needed != 0.
2088 *
2089 * The emergency pagedaemon only runs if VM paging
2090 * is needed and the primary pagedaemon has not
2091 * updated vm_pagedaemon_time for more than 2 seconds.
2092 */
2093 if (vm_pages_needed)
2094 tsleep(&vm_pagedaemon_time, 0, "psleep", hz);
2095 else
2096 tsleep(&vm_pagedaemon_time, 0, "psleep", hz*10);
2097 if (vm_pages_needed == 0) {
2098 pass = 0;
2099 continue;
2100 }
2101 if ((int)(ticks - vm_pagedaemon_time) < hz * 2) {
c3feb36a 2102 pass = 0;
984263bc
MD
2103 continue;
2104 }
c3feb36a
MD
2105 } else {
2106 /*
2107 * Primary pagedaemon
2108 */
2109 if (vm_pages_needed == 0) {
2110 error = tsleep(&vm_pages_needed,
2111 0, "psleep",
2112 vm_pageout_stats_interval * hz);
2113 if (error &&
2114 vm_paging_needed() == 0 &&
2115 vm_pages_needed == 0) {
2116 for (q = 0; q < PQ_L2_SIZE; ++q)
2117 vm_pageout_page_stats(q);
2118 continue;
2119 }
2120 vm_pagedaemon_time = ticks;
2121 vm_pages_needed = 1;
2122
2123 /*
2124 * Wake the emergency pagedaemon up so it
2125 * can monitor us. It will automatically
2126 * go back into a long sleep when
2127 * vm_pages_needed returns to 0.
2128 */
2129 wakeup(&vm_pagedaemon_time);
2130 }
984263bc
MD
2131 }
2132
20479584 2133 mycpu->gd_cnt.v_pdwakeups++;
20479584 2134
027193eb 2135 /*
534ee349
MD
2136 * Scan for INACTIVE->CLEAN/PAGEOUT
2137 *
2138 * This routine tries to avoid thrashing the system with
2139 * unnecessary activity.
027193eb
MD
2140 *
2141 * Calculate our target for the number of free+cache pages we
2142 * want to get to. This is higher than the number that causes
2143 * allocations to stall (severe) in order to provide hysteresis,
2144 * and if we don't make it all the way but get to the minimum
47891af8
MD
2145 * we're happy. Goose it a bit if there are multiple requests
2146 * for memory.
2147 *
2148 * Don't reduce avail_shortage inside the loop or the
2149 * PQAVERAGE() calculation will break.
534ee349
MD
2150 *
2151 * NOTE! deficit is differentiated from avail_shortage as
2152 * REQUIRING at least (deficit) pages to be cleaned,
2153 * even if the page queues are in good shape. This
2154 * is used primarily for handling per-process
2155 * RLIMIT_RSS and may also see small values when
2156 * processes block due to low memory.
027193eb 2157 */
5ba14d44 2158 vmstats_rollup();
c3feb36a
MD
2159 if (isep == 0)
2160 vm_pagedaemon_time = ticks;
51c99c61 2161 avail_shortage = vm_paging_target() + vm_pageout_deficit;
027193eb 2162 vm_pageout_deficit = 0;
79ce07d0 2163
51c99c61 2164 if (avail_shortage > 0) {
b7ea2f3f 2165 long delta = 0;
39cfbf32 2166 int qq;
47891af8 2167
39cfbf32 2168 qq = q1iterator;
51c99c61 2169 for (q = 0; q < PQ_L2_SIZE; ++q) {
47891af8 2170 delta += vm_pageout_scan_inactive(
79ce07d0 2171 pass,
39cfbf32 2172 qq & PQ_L2_MASK,
51c99c61
MD
2173 PQAVERAGE(avail_shortage),
2174 &vnodes_skipped);
39cfbf32
MD
2175 if (isep)
2176 --qq;
2177 else
2178 ++qq;
47891af8 2179 if (avail_shortage - delta <= 0)
79ce07d0 2180 break;
51c99c61 2181 }
47891af8 2182 avail_shortage -= delta;
39cfbf32 2183 q1iterator = qq;
027193eb
MD
2184 }
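/*
 * For illustration (hypothetical numbers): with an avail_shortage of
 * 1000 pages and 256 PQ_L2 queues, PQAVERAGE() asks each per-queue
 * scan for roughly 1000 / 256 ~= 4 pages, and the loop stops early
 * once the accumulated delta covers the whole shortage.  This is why
 * avail_shortage is only reduced after the loop - reducing it inside
 * the loop would shrink the per-queue average as the scan progressed.
 */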
2185
2186 /*
2187 * Figure out how many active pages we must deactivate. If
2188 * we were able to reach our target with just the inactive
2189 * scan above we limit the number of active pages we
2190 * deactivate to reduce unnecessary work.
2191 */
5ba14d44 2192 vmstats_rollup();
c3feb36a
MD
2193 if (isep == 0)
2194 vm_pagedaemon_time = ticks;
51c99c61
MD
2195 inactive_shortage = vmstats.v_inactive_target -
2196 vmstats.v_inactive_count;
027193eb 2197
3038a8ca
MD
2198 /*
2199 * If we were unable to free sufficient inactive pages to
2200 * satisfy the free/cache queue requirements then simply
2201 * reaching the inactive target may not be good enough.
2202 * Try to deactivate pages in excess of the target based
2203 * on the shortfall.
2204 *
2205 * However, to prevent thrashing the VM system, do not
2206 * deactivate more than an additional 1/10 of the inactive
2207 * target's worth of active pages.
2208 */
51c99c61
MD
2209 if (avail_shortage > 0) {
2210 tmp = avail_shortage * 2;
3038a8ca
MD
2211 if (tmp > vmstats.v_inactive_target / 10)
2212 tmp = vmstats.v_inactive_target / 10;
51c99c61 2213 inactive_shortage += tmp;
3038a8ca 2214 }
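/*
 * Example (hypothetical numbers): with an avail_shortage of 5000 pages
 * and a v_inactive_target of 20000, tmp = 5000 * 2 = 10000 is clamped
 * to 20000 / 10 = 2000, so at most 2000 extra pages beyond the normal
 * inactive target will be deactivated this pass.
 */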
027193eb 2215
a5c1251f 2216 /*
534ee349
MD
2217 * Only trigger a pmap cleanup on inactive shortage.
2218 */
c3feb36a 2219 if (isep == 0 && inactive_shortage > 0) {
534ee349
MD
2220 pmap_collect();
2221 }
2222
2223 /*
2224 * Scan for ACTIVE->INACTIVE
2225 *
a5c1251f
MD
2226 * Only trigger on inactive shortage. Triggering on
2227 * avail_shortage can starve the active queue with
2228 * unnecessary active->inactive transitions and destroy
2229 * performance.
ead23175
MD
2230 *
2231 * If this is the emergency pager, always try to move
2232 * a few pages from active to inactive because the inactive
2233 * queue might have enough pages, but not enough anonymous
2234 * pages.
a5c1251f 2235 */
ead23175
MD
2236 if (isep && inactive_shortage < vm_emerg_launder)
2237 inactive_shortage = vm_emerg_launder;
2238
a5c1251f 2239 if (/*avail_shortage > 0 ||*/ inactive_shortage > 0) {
b7ea2f3f 2240 long delta = 0;
39cfbf32 2241 int qq;
79ce07d0 2242
39cfbf32 2243 qq = q2iterator;
51c99c61 2244 for (q = 0; q < PQ_L2_SIZE; ++q) {
47891af8 2245 delta += vm_pageout_scan_active(
79ce07d0 2246 pass,
39cfbf32 2247 qq & PQ_L2_MASK,
51c99c61
MD
2248 PQAVERAGE(avail_shortage),
2249 PQAVERAGE(inactive_shortage),
2250 &recycle_count);
39cfbf32
MD
2251 if (isep)
2252 --qq;
2253 else
2254 ++qq;
47891af8
MD
2255 if (inactive_shortage - delta <= 0 &&
2256 avail_shortage - delta <= 0) {
79ce07d0
MD
2257 break;
2258 }
51c99c61 2259 }
47891af8
MD
2260 inactive_shortage -= delta;
2261 avail_shortage -= delta;
39cfbf32 2262 q2iterator = qq;
027193eb
MD
2263 }
2264
2265 /*
534ee349
MD
2266 * Scan for CACHE->FREE
2267 *
027193eb
MD
2268 * Finally free enough cache pages to meet our free page
2269 * requirement and take more drastic measures if we are
2270 * still in trouble.
2271 */
5ba14d44 2272 vmstats_rollup();
c3feb36a
MD
2273 if (isep == 0)
2274 vm_pagedaemon_time = ticks;
09eff544
MD
2275 vm_pageout_scan_cache(avail_shortage, pass,
2276 vnodes_skipped, recycle_count);
027193eb
MD
2277
2278 /*
2279 * Wait for more work.
20479584 2280 */
51c99c61 2281 if (avail_shortage > 0) {
20479584 2282 ++pass;
9cd626ca 2283 if (pass < 10 && vm_pages_needed > 1) {
20479584
MD
2284 /*
2285 * Normal operation, additional processes
9cd626ca
MD
2286 * have already kicked us. Retry immediately
2287 * unless swap space is completely full in
2288 * which case delay a bit.
20479584 2289 */
9cd626ca
MD
2290 if (swap_pager_full) {
2291 tsleep(&vm_pages_needed, 0, "pdelay",
2292 hz / 5);
2293 } /* else immediate retry */
20479584
MD
2294 } else if (pass < 10) {
2295 /*
2296 * Normal operation, fewer processes. Delay
ead23175
MD
2297 * a bit but allow wakeups. vm_pages_needed
2298 * is only adjusted against the primary
2299 * pagedaemon here.
20479584 2300 */
ead23175
MD
2301 if (isep == 0)
2302 vm_pages_needed = 0;
20479584 2303 tsleep(&vm_pages_needed, 0, "pdelay", hz / 10);
ead23175
MD
2304 if (isep == 0)
2305 vm_pages_needed = 1;
9cd626ca 2306 } else if (swap_pager_full == 0) {
20479584
MD
2307 /*
2308 * We've taken too many passes, forced delay.
2309 */
2310 tsleep(&vm_pages_needed, 0, "pdelay", hz / 10);
9cd626ca
MD
2311 } else {
2312 /*
2313 * Running out of memory, catastrophic
2314 * back-off to one-second intervals.
2315 */
2316 tsleep(&vm_pages_needed, 0, "pdelay", hz);
20479584 2317 }
77d1fb91 2318 } else if (vm_pages_needed) {
12d8aca7 2319 /*
77d1fb91
MD
2320 * Interlocked wakeup of waiters (non-optional).
2321 *
2322 * Similar to vm_page_free_wakeup() in vm_page.c, wake up any
2323 * threads sleeping on v_free_count once the page shortage eases.
12d8aca7 2324 */
20479584 2325 pass = 0;
77d1fb91
MD
2326 if (!vm_page_count_min(vm_page_free_hysteresis) ||
2327 !vm_page_count_target()) {
12d8aca7 2328 vm_pages_needed = 0;
77d1fb91 2329 wakeup(&vmstats.v_free_count);
12d8aca7 2330 }
77d1fb91
MD
2331 } else {
2332 pass = 0;
20479584 2333 }
984263bc
MD
2334 }
2335}
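/*
 * One pass of the main loop above, in outline:
 *
 *	1. INACTIVE->CLEAN/PAGEOUT - vm_pageout_scan_inactive() per queue,
 *	   driven by avail_shortage (free+cache target plus any deficit).
 *	2. ACTIVE->INACTIVE        - vm_pageout_scan_active() per queue,
 *	   driven by inactive_shortage, with a bounded boost when the
 *	   inactive scan fell short.
 *	3. CACHE->FREE             - vm_pageout_scan_cache(), which also
 *	   issues the low-memory+swap warnings and the out-of-swap kill.
 *	4. Sleep or back off according to how many passes have failed to
 *	   cover the shortage.
 *
 * The emergency pager runs the same loop but only takes action when
 * the primary pager appears stuck (vm_pagedaemon_time not updated for
 * two seconds while vm_pages_needed is set).
 */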
2336
c3feb36a 2337static struct kproc_desc pg1_kp = {
cd8ab232
MD
2338 "pagedaemon",
2339 vm_pageout_thread,
2340 &pagethread
2341};
c3feb36a
MD
2342SYSINIT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, kproc_start, &pg1_kp);
2343
2344static struct kproc_desc pg2_kp = {
2345 "emergpager",
2346 vm_pageout_thread,
2347 &emergpager
2348};
2349SYSINIT(emergpager, SI_SUB_KTHREAD_PAGE, SI_ORDER_ANY, kproc_start, &pg2_kp);
cd8ab232
MD
2350
2351
20479584
MD
2352/*
2353 * Called after allocating a page out of the cache or free queue
2354 * to possibly wake the pagedaemon up to replenish our supply.
2355 *
2356 * We try to generate some hysteresis by waking the pagedaemon up
1bfac262
MD
2357 * when our free+cache pages go below the free_min+cache_min level.
2358 * The pagedaemon tries to get the count back up to at least the
2359 * minimum, and through to the target level if possible.
20479584
MD
2360 *
2361 * If the pagedaemon is already active, bump vm_pages_needed as a hint
2362 * that there are even more requests pending.
99ad9bc4
MD
2363 *
2364 * SMP races ok?
2365 * No requirements.
20479584 2366 */
984263bc 2367void
57e43348 2368pagedaemon_wakeup(void)
984263bc 2369{
1bfac262 2370 if (vm_paging_needed() && curthread != pagethread) {
20479584 2371 if (vm_pages_needed == 0) {
1bfac262 2372 vm_pages_needed = 1; /* SMP race ok */
20479584
MD
2373 wakeup(&vm_pages_needed);
2374 } else if (vm_page_count_min(0)) {
1bfac262 2375 ++vm_pages_needed; /* SMP race ok */
20479584 2376 }
984263bc
MD
2377 }
2378}
2379
2380#if !defined(NO_SWAPPING)
99ad9bc4
MD
2381
2382/*
2383 * SMP races ok?
2384 * No requirements.
2385 */
984263bc 2386static void
57e43348 2387vm_req_vmdaemon(void)
984263bc
MD
2388{
2389 static int lastrun = 0;
2390
2391 if ((ticks > (lastrun + hz)) || (ticks < lastrun)) {
2392 wakeup(&vm_daemon_needed);
2393 lastrun = ticks;
2394 }
2395}
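/*
 * The (ticks < lastrun) term in the test above re-arms the throttle if
 * the tick counter ever wraps or moves backwards; otherwise wakeups of
 * the vm daemon are simply limited to one per second (hz ticks).
 */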
2396
8fa76237
MD
2397static int vm_daemon_callback(struct proc *p, void *data __unused);
2398
99ad9bc4
MD
2399/*
2400 * No requirements.
2401 */
984263bc 2402static void
57e43348 2403vm_daemon(void)
984263bc 2404{
534ee349
MD
2405 int req_swapout;
2406
984263bc 2407 while (TRUE) {
377d4740 2408 tsleep(&vm_daemon_needed, 0, "psleep", 0);
534ee349
MD
2409 req_swapout = atomic_swap_int(&vm_pageout_req_swapout, 0);
2410
2411 /*
2412 * forced swapouts
2413 */
2414 if (req_swapout)
984263bc 2415 swapout_procs(vm_pageout_req_swapout);
534ee349 2416
984263bc
MD
2417 /*
2418 * scan the processes for exceeding their rlimits or if
2419 * process is swapped out -- deactivate pages
2420 */
586c4308 2421 allproc_scan(vm_daemon_callback, NULL, 0);
8fa76237
MD
2422 }
2423}
984263bc 2424
8fa76237
MD
2425static int
2426vm_daemon_callback(struct proc *p, void *data __unused)
2427{
38189316 2428 struct vmspace *vm;
8fa76237 2429 vm_pindex_t limit, size;
984263bc 2430
8fa76237
MD
2431 /*
2432 * if this is a system process or if we have already
2433 * looked at this process, skip it.
2434 */
a8d3ab53
MD
2435 lwkt_gettoken(&p->p_token);
2436
2437 if (p->p_flags & (P_SYSTEM | P_WEXIT)) {
2438 lwkt_reltoken(&p->p_token);
8fa76237 2439 return (0);
a8d3ab53 2440 }
984263bc 2441
8fa76237
MD
2442 /*
2443 * if the process is in a non-running type state,
2444 * don't touch it.
2445 */
f5b92db7 2446 if (p->p_stat != SACTIVE && p->p_stat != SSTOP && p->p_stat != SCORE) {
a8d3ab53 2447 lwkt_reltoken(&p->p_token);
8fa76237 2448 return (0);
a8d3ab53 2449 }
984263bc 2450
8fa76237
MD
2451 /*
2452 * get a limit
2453 */
2454 limit = OFF_TO_IDX(qmin(p->p_rlimit[RLIMIT_RSS].rlim_cur,
2455 p->p_rlimit[RLIMIT_RSS].rlim_max));
2456
2457 /*
2458 * let processes that are swapped out really be
2459 * swapped out. Set the limit to nothing to get as
2460 * many pages out to swap as possible.
2461 */
4643740a 2462 if (p->p_flags & P_SWAPPEDOUT)
8fa76237
MD
2463 limit = 0;
2464
38189316
MD
2465 vm = p->p_vmspace;
2466 vmspace_hold(vm);
534ee349 2467 size = pmap_resident_tlnw_count(&vm->vm_pmap);
a7a03a5f
MD
2468 if (limit >= 0 && size > 4096 &&
2469 size - 4096 >= limit && vm_pageout_memuse_mode >= 1) {
38189316 2470 vm_pageout_map_deactivate_pages(&vm->vm_map, limit);
984263bc 2471 }
38189316 2472 vmspace_drop(vm);
a8d3ab53
MD
2473
2474 lwkt_reltoken(&p->p_token);
2475
8fa76237 2476 return (0);
984263bc 2477}
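/*
 * Worked example (hypothetical limits): with RLIMIT_RSS set to 512MB
 * and 4K pages, limit is 131072 pages.  Deactivation only starts once
 * the resident count used here (pmap_resident_tlnw_count()) exceeds
 * the limit by at least the 4096-page (16MB) slop, and only when
 * vm_pageout_memuse_mode is at least 1.  A swapped-out process
 * (P_SWAPPEDOUT) gets a limit of 0 so that as many of its pages as
 * possible are pushed out.
 */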
8fa76237 2478
984263bc 2479#endif