kernel -- vm_pageout: Handle pages w/ NULL vm_objects on the act/in pageqs.
sys/vm/vm_pageout.c
984263bc
MD
1/*
2 * Copyright (c) 1991 Regents of the University of California.
3 * All rights reserved.
4 * Copyright (c) 1994 John S. Dyson
5 * All rights reserved.
6 * Copyright (c) 1994 David Greenman
7 * All rights reserved.
8 *
9 * This code is derived from software contributed to Berkeley by
10 * The Mach Operating System project at Carnegie-Mellon University.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
984263bc
MD
20 * 4. Neither the name of the University nor the names of its contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 * from: @(#)vm_pageout.c 7.4 (Berkeley) 5/7/91
37 *
38 *
39 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
40 * All rights reserved.
41 *
42 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
43 *
44 * Permission to use, copy, modify and distribute this software and
45 * its documentation is hereby granted, provided that both the copyright
46 * notice and this permission notice appear in all copies of the
47 * software, derivative works or modified versions, and any portions
48 * thereof, and that both notices appear in supporting documentation.
49 *
50 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
51 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
52 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
53 *
54 * Carnegie Mellon requests users of this software to return to
55 *
56 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
57 * School of Computer Science
58 * Carnegie Mellon University
59 * Pittsburgh PA 15213-3890
60 *
61 * any improvements or extensions that they make and grant Carnegie the
62 * rights to redistribute these changes.
63 *
64 * $FreeBSD: src/sys/vm/vm_pageout.c,v 1.151.2.15 2002/12/29 18:21:04 dillon Exp $
65 */
66
67/*
68 * The proverbial page-out daemon.
69 */
70
71#include "opt_vm.h"
72#include <sys/param.h>
73#include <sys/systm.h>
74#include <sys/kernel.h>
75#include <sys/proc.h>
76#include <sys/kthread.h>
77#include <sys/resourcevar.h>
78#include <sys/signalvar.h>
79#include <sys/vnode.h>
80#include <sys/vmmeter.h>
81#include <sys/sysctl.h>
82
83#include <vm/vm.h>
84#include <vm/vm_param.h>
85#include <sys/lock.h>
86#include <vm/vm_object.h>
87#include <vm/vm_page.h>
88#include <vm/vm_map.h>
89#include <vm/vm_pageout.h>
90#include <vm/vm_pager.h>
91#include <vm/swap_pager.h>
92#include <vm/vm_extern.h>
5fd012e0
MD
93
94#include <sys/thread2.h>
b12defdc 95#include <sys/spinlock2.h>
12e4aaff 96#include <vm/vm_page2.h>
984263bc
MD
97
98/*
99 * System initialization
100 */
101
102/* the kernel process "vm_pageout"*/
1388df65 103static int vm_pageout_clean (vm_page_t);
1388df65 104static int vm_pageout_free_page_calc (vm_size_t count);
bc6dffab 105struct thread *pagethread;
984263bc 106
984263bc
MD
107#if !defined(NO_SWAPPING)
108/* the kernel process "vm_daemon"*/
1388df65 109static void vm_daemon (void);
bc6dffab 110static struct thread *vmthread;
984263bc
MD
111
112static struct kproc_desc vm_kp = {
113 "vmdaemon",
114 vm_daemon,
bc6dffab 115 &vmthread
984263bc
MD
116};
117SYSINIT(vmdaemon, SI_SUB_KTHREAD_VM, SI_ORDER_FIRST, kproc_start, &vm_kp)
118#endif
119
984263bc
MD
120int vm_pages_needed=0; /* Event on which pageout daemon sleeps */
121int vm_pageout_deficit=0; /* Estimated number of pages deficit */
122int vm_pageout_pages_needed=0; /* flag saying that the pageout daemon needs pages */
123
124#if !defined(NO_SWAPPING)
125static int vm_pageout_req_swapout; /* XXX */
126static int vm_daemon_needed;
127#endif
984263bc
MD
128static int vm_max_launder = 32;
129static int vm_pageout_stats_max=0, vm_pageout_stats_interval = 0;
130static int vm_pageout_full_stats_interval = 0;
131static int vm_pageout_stats_free_max=0, vm_pageout_algorithm=0;
132static int defer_swap_pageouts=0;
133static int disable_swap_pageouts=0;
134
135#if defined(NO_SWAPPING)
136static int vm_swap_enabled=0;
137static int vm_swap_idle_enabled=0;
138#else
139static int vm_swap_enabled=1;
140static int vm_swap_idle_enabled=0;
141#endif
142
143SYSCTL_INT(_vm, VM_PAGEOUT_ALGORITHM, pageout_algorithm,
144 CTLFLAG_RW, &vm_pageout_algorithm, 0, "LRU page mgmt");
145
146SYSCTL_INT(_vm, OID_AUTO, max_launder,
147 CTLFLAG_RW, &vm_max_launder, 0, "Limit dirty flushes in pageout");
148
149SYSCTL_INT(_vm, OID_AUTO, pageout_stats_max,
150 CTLFLAG_RW, &vm_pageout_stats_max, 0, "Max pageout stats scan length");
151
152SYSCTL_INT(_vm, OID_AUTO, pageout_full_stats_interval,
153 CTLFLAG_RW, &vm_pageout_full_stats_interval, 0, "Interval for full stats scan");
154
155SYSCTL_INT(_vm, OID_AUTO, pageout_stats_interval,
156 CTLFLAG_RW, &vm_pageout_stats_interval, 0, "Interval for partial stats scan");
157
158SYSCTL_INT(_vm, OID_AUTO, pageout_stats_free_max,
159 CTLFLAG_RW, &vm_pageout_stats_free_max, 0, "Not implemented");
160
161#if defined(NO_SWAPPING)
162SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
163 CTLFLAG_RD, &vm_swap_enabled, 0, "");
164SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
165 CTLFLAG_RD, &vm_swap_idle_enabled, 0, "");
166#else
167SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
168 CTLFLAG_RW, &vm_swap_enabled, 0, "Enable entire process swapout");
169SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
170 CTLFLAG_RW, &vm_swap_idle_enabled, 0, "Allow swapout on idle criteria");
171#endif
172
173SYSCTL_INT(_vm, OID_AUTO, defer_swapspace_pageouts,
174 CTLFLAG_RW, &defer_swap_pageouts, 0, "Give preference to dirty pages in mem");
175
176SYSCTL_INT(_vm, OID_AUTO, disable_swapspace_pageouts,
177 CTLFLAG_RW, &disable_swap_pageouts, 0, "Disallow swapout of dirty pages");
178
179static int pageout_lock_miss;
180SYSCTL_INT(_vm, OID_AUTO, pageout_lock_miss,
181 CTLFLAG_RD, &pageout_lock_miss, 0, "vget() lock misses during pageout");
182
183#define VM_PAGEOUT_PAGE_COUNT 16
184int vm_pageout_page_count = VM_PAGEOUT_PAGE_COUNT;
185
186int vm_page_max_wired; /* XXX max # of wired pages system-wide */
187
188#if !defined(NO_SWAPPING)
1388df65
RG
189typedef void freeer_fcn_t (vm_map_t, vm_object_t, vm_pindex_t, int);
190static void vm_pageout_map_deactivate_pages (vm_map_t, vm_pindex_t);
984263bc 191static freeer_fcn_t vm_pageout_object_deactivate_pages;
1388df65 192static void vm_req_vmdaemon (void);
984263bc 193#endif
027193eb 194static void vm_pageout_page_stats(int q);
984263bc 195
51c99c61
MD
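/*
 * PQAVERAGE() converts a global page count into a per-queue count for
 * the PQ_L2_SIZE hashed page queues, rounding away from zero so every
 * queue is asked to do at least a little work.  As an illustration
 * only: if PQ_L2_SIZE were 256, PQAVERAGE(1000) is ceil(1000/256) + 1
 * = 5 and PQAVERAGE(-1000) is -5.  The actual divisor depends on the
 * configured PQ_L2_SIZE.
 */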
196static __inline int
197PQAVERAGE(int n)
198{
199 if (n >= 0)
200 return((n + (PQ_L2_SIZE - 1)) / PQ_L2_SIZE + 1);
201 else
202 return((n - (PQ_L2_SIZE - 1)) / PQ_L2_SIZE - 1);
203}
204
984263bc
MD
205/*
206 * vm_pageout_clean:
207 *
06ecca5a
MD
208 * Clean the page and remove it from the laundry. The page must not be
209 * busy on-call.
984263bc
MD
210 *
211 * We set the busy bit to cause potential page faults on this page to
212 * block. Note the careful timing, however: the busy bit isn't set till
213 * late and we cannot do anything that will mess with the page.
214 */
984263bc 215static int
57e43348 216vm_pageout_clean(vm_page_t m)
984263bc 217{
5f910b2f 218 vm_object_t object;
984263bc
MD
219 vm_page_t mc[2*vm_pageout_page_count];
220 int pageout_count;
b12defdc 221 int error;
984263bc
MD
222 int ib, is, page_base;
223 vm_pindex_t pindex = m->pindex;
224
225 object = m->object;
226
227 /*
228 * It doesn't cost us anything to pageout OBJT_DEFAULT or OBJT_SWAP
229 * with the new swapper, but we could have serious problems paging
230 * out other object types if there is insufficient memory.
231 *
232 * Unfortunately, checking free memory here is far too late, so the
233 * check has been moved up a procedural level.
234 */
235
236 /*
237 * Don't mess with the page if it's busy, held, or special
b12defdc
MD
238 *
239 * XXX do we really need to check hold_count here? hold_count
240 * isn't supposed to mess with vm_page ops except to prevent the
241 * page from being reused.
984263bc 242 */
b12defdc
MD
243 if (m->hold_count != 0 || (m->flags & PG_UNMANAGED)) {
244 vm_page_wakeup(m);
984263bc
MD
245 return 0;
246 }
247
248 mc[vm_pageout_page_count] = m;
249 pageout_count = 1;
250 page_base = vm_pageout_page_count;
251 ib = 1;
252 is = 1;
253
254 /*
255 * Scan object for clusterable pages.
256 *
257 * We can cluster ONLY if: ->> the page is NOT
258 * clean, wired, busy, held, or mapped into a
259 * buffer, and one of the following:
260 * 1) The page is inactive, or a seldom used
261 * active page.
262 * -or-
263 * 2) we force the issue.
264 *
265 * During heavy mmap/modification loads the pageout
266 * daemon can really fragment the underlying file
267 * due to flushing pages out of order and not trying to
268 * align the clusters (which leaves sporadic out-of-order
269 * holes). To solve this problem we do the reverse scan
270 * first and attempt to align our cluster, then do a
271 * forward scan if room remains.
272 */
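	/*
	 * Illustrative walk-through (assuming the default
	 * vm_pageout_page_count of 16): for a dirty page at pindex 37,
	 * the reverse scan gathers 36, 35, 34, 33 and 32, stopping at
	 * the 16-page alignment boundary at 32, and the forward scan
	 * then tries 38 and up, so a fully populated cluster covers
	 * the aligned range 32-47.
	 */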
273
398c240d 274 vm_object_hold(object);
984263bc
MD
275more:
276 while (ib && pageout_count < vm_pageout_page_count) {
277 vm_page_t p;
278
279 if (ib > pindex) {
280 ib = 0;
281 break;
282 }
283
b12defdc
MD
284 p = vm_page_lookup_busy_try(object, pindex - ib, TRUE, &error);
285 if (error || p == NULL) {
984263bc
MD
286 ib = 0;
287 break;
288 }
b12defdc
MD
289 if ((p->queue - p->pc) == PQ_CACHE ||
290 (p->flags & PG_UNMANAGED)) {
291 vm_page_wakeup(p);
984263bc
MD
292 ib = 0;
293 break;
294 }
295 vm_page_test_dirty(p);
9bf025db
MD
296 if (((p->dirty & p->valid) == 0 &&
297 (p->flags & PG_NEED_COMMIT) == 0) ||
027193eb 298 p->queue - p->pc != PQ_INACTIVE ||
984263bc
MD
299 p->wire_count != 0 || /* may be held by buf cache */
300 p->hold_count != 0) { /* may be undergoing I/O */
b12defdc 301 vm_page_wakeup(p);
984263bc
MD
302 ib = 0;
303 break;
304 }
305 mc[--page_base] = p;
306 ++pageout_count;
307 ++ib;
308 /*
309 * alignment boundary, stop here and switch directions. Do
310 * not clear ib.
311 */
312 if ((pindex - (ib - 1)) % vm_pageout_page_count == 0)
313 break;
314 }
315
316 while (pageout_count < vm_pageout_page_count &&
317 pindex + is < object->size) {
318 vm_page_t p;
319
b12defdc
MD
320 p = vm_page_lookup_busy_try(object, pindex + is, TRUE, &error);
321 if (error || p == NULL)
984263bc
MD
322 break;
323 if (((p->queue - p->pc) == PQ_CACHE) ||
324 (p->flags & (PG_BUSY|PG_UNMANAGED)) || p->busy) {
b12defdc 325 vm_page_wakeup(p);
984263bc
MD
326 break;
327 }
328 vm_page_test_dirty(p);
9bf025db
MD
329 if (((p->dirty & p->valid) == 0 &&
330 (p->flags & PG_NEED_COMMIT) == 0) ||
027193eb 331 p->queue - p->pc != PQ_INACTIVE ||
984263bc
MD
332 p->wire_count != 0 || /* may be held by buf cache */
333 p->hold_count != 0) { /* may be undergoing I/O */
b12defdc 334 vm_page_wakeup(p);
984263bc
MD
335 break;
336 }
337 mc[page_base + pageout_count] = p;
338 ++pageout_count;
339 ++is;
340 }
341
342 /*
343 * If we exhausted our forward scan, continue with the reverse scan
344 * when possible, even past a page boundary. This catches boundary
345 * conditions.
346 */
347 if (ib && pageout_count < vm_pageout_page_count)
348 goto more;
349
398c240d
VS
350 vm_object_drop(object);
351
984263bc
MD
352 /*
353 * we allow reads during pageouts...
354 */
355 return vm_pageout_flush(&mc[page_base], pageout_count, 0);
356}
357
358/*
359 * vm_pageout_flush() - launder the given pages
360 *
361 * The given pages are laundered. Note that we set up for the start of
362 * I/O (i.e. busy the page), mark it read-only, and bump the object
363 * reference count all in here rather than in the parent. If we want
364 * the parent to do more sophisticated things we may have to change
365 * the ordering.
99ad9bc4 366 *
b12defdc
MD
367 * The pages in the array must be busied by the caller and will be
368 * unbusied by this function.
984263bc 369 */
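/*
 * The return value counts the pages whose pageout was successfully
 * initiated (VM_PAGER_OK or VM_PAGER_PEND); pages that could not be
 * paged out, e.g. because swap is exhausted, are simply left on their
 * queue to be retried later.
 */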
984263bc 370int
57e43348 371vm_pageout_flush(vm_page_t *mc, int count, int flags)
984263bc 372{
5f910b2f 373 vm_object_t object;
984263bc
MD
374 int pageout_status[count];
375 int numpagedout = 0;
376 int i;
377
378 /*
17cde63e
MD
379 * Initiate I/O. Bump the vm_page_t->busy counter.
380 */
381 for (i = 0; i < count; i++) {
b12defdc
MD
382 KASSERT(mc[i]->valid == VM_PAGE_BITS_ALL,
383 ("vm_pageout_flush page %p index %d/%d: partially "
384 "invalid page", mc[i], i, count));
17cde63e
MD
385 vm_page_io_start(mc[i]);
386 }
387
388 /*
4530a3aa
MD
389 * We must make the pages read-only. This will also force the
390 * modified bit in the related pmaps to be cleared. The pager
391 * cannot clear the bit for us since the I/O completion code
392 * typically runs from an interrupt. The act of making the page
393 * read-only handles the case for us.
b12defdc
MD
394 *
395 * Then we can unbusy the pages, we still hold a reference by virtue
396 * of our soft-busy.
984263bc 397 */
984263bc 398 for (i = 0; i < count; i++) {
984263bc 399 vm_page_protect(mc[i], VM_PROT_READ);
b12defdc 400 vm_page_wakeup(mc[i]);
984263bc
MD
401 }
402
403 object = mc[0]->object;
404 vm_object_pip_add(object, count);
405
406 vm_pager_put_pages(object, mc, count,
c439ad8f 407 (flags | ((object == &kernel_object) ? VM_PAGER_PUT_SYNC : 0)),
984263bc
MD
408 pageout_status);
409
410 for (i = 0; i < count; i++) {
411 vm_page_t mt = mc[i];
412
413 switch (pageout_status[i]) {
414 case VM_PAGER_OK:
415 numpagedout++;
416 break;
417 case VM_PAGER_PEND:
418 numpagedout++;
419 break;
420 case VM_PAGER_BAD:
421 /*
422 * Page outside of range of object. Right now we
423 * essentially lose the changes by pretending it
424 * worked.
425 */
b12defdc 426 vm_page_busy_wait(mt, FALSE, "pgbad");
984263bc
MD
427 pmap_clear_modify(mt);
428 vm_page_undirty(mt);
b12defdc 429 vm_page_wakeup(mt);
984263bc
MD
430 break;
431 case VM_PAGER_ERROR:
432 case VM_PAGER_FAIL:
433 /*
c84c24da
MD
434 * A page typically cannot be paged out when we
435 * have run out of swap. We leave the page
436 * marked inactive and will try to page it out
437 * again later.
438 *
439 * Starvation of the active page list is used to
440 * determine when the system is massively memory
441 * starved.
984263bc 442 */
984263bc
MD
443 break;
444 case VM_PAGER_AGAIN:
445 break;
446 }
447
448 /*
449 * If the operation is still going, leave the page busy to
450 * block all other accesses. Also, leave the paging in
451 * progress indicator set so that we don't attempt an object
452 * collapse.
93afe6be
MD
453 *
454 * For any pages which have completed synchronously,
455 * deactivate the page if we are under a severe deficit.
456 * Do not try to enter them into the cache, though, they
457 * might still be read-heavy.
984263bc
MD
458 */
459 if (pageout_status[i] != VM_PAGER_PEND) {
b12defdc 460 vm_page_busy_wait(mt, FALSE, "pgouw");
93afe6be
MD
461 if (vm_page_count_severe())
462 vm_page_deactivate(mt);
463#if 0
984263bc
MD
464 if (!vm_page_count_severe() || !vm_page_try_to_cache(mt))
465 vm_page_protect(mt, VM_PROT_READ);
93afe6be 466#endif
a491077e 467 vm_page_io_finish(mt);
b12defdc 468 vm_page_wakeup(mt);
a491077e 469 vm_object_pip_wakeup(object);
984263bc
MD
470 }
471 }
472 return numpagedout;
473}
474
475#if !defined(NO_SWAPPING)
476/*
b12defdc
MD
477 * deactivate enough pages to satisfy the inactive target
478 * requirements, or, if vm_page_proc_limit is set,
479 * deactivate all of the pages in the object and its
480 * backing_objects.
984263bc 481 *
99ad9bc4 482 * The map must be locked.
398c240d 483 * The caller must hold the vm_object.
984263bc 484 */
1f804340
MD
485static int vm_pageout_object_deactivate_pages_callback(vm_page_t, void *);
486
984263bc 487static void
57e43348 488vm_pageout_object_deactivate_pages(vm_map_t map, vm_object_t object,
99ad9bc4 489 vm_pindex_t desired, int map_remove_only)
984263bc 490{
1f804340 491 struct rb_vm_page_scan_info info;
b12defdc
MD
492 vm_object_t lobject;
493 vm_object_t tobject;
984263bc 494 int remove_mode;
984263bc 495
05b9db80 496 ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
b12defdc 497 lobject = object;
398c240d 498
b12defdc
MD
499 while (lobject) {
500 if (pmap_resident_count(vm_map_pmap(map)) <= desired)
501 break;
502 if (lobject->type == OBJT_DEVICE || lobject->type == OBJT_PHYS)
503 break;
504 if (lobject->paging_in_progress)
505 break;
984263bc
MD
506
507 remove_mode = map_remove_only;
b12defdc 508 if (lobject->shadow_count > 1)
984263bc 509 remove_mode = 1;
06ecca5a
MD
510
511 /*
a5fc46c9
MD
512 * scan the object's entire memory queue. We hold the
513 * object's token so the scan should not race anything.
06ecca5a 514 */
1f804340
MD
515 info.limit = remove_mode;
516 info.map = map;
517 info.desired = desired;
b12defdc 518 vm_page_rb_tree_RB_SCAN(&lobject->rb_memq, NULL,
1f804340
MD
519 vm_pageout_object_deactivate_pages_callback,
520 &info
521 );
b12defdc
MD
522 while ((tobject = lobject->backing_object) != NULL) {
523 KKASSERT(tobject != object);
524 vm_object_hold(tobject);
525 if (tobject == lobject->backing_object)
526 break;
527 vm_object_drop(tobject);
528 }
05b9db80
MD
529 if (lobject != object) {
530 vm_object_lock_swap();
b12defdc 531 vm_object_drop(lobject);
05b9db80 532 }
b12defdc 533 lobject = tobject;
1f804340 534 }
b12defdc
MD
535 if (lobject != object)
536 vm_object_drop(lobject);
1f804340 537}
99ad9bc4
MD
538
539/*
398c240d 540 * The caller must hold the vm_object.
99ad9bc4 541 */
1f804340
MD
542static int
543vm_pageout_object_deactivate_pages_callback(vm_page_t p, void *data)
544{
545 struct rb_vm_page_scan_info *info = data;
546 int actcount;
984263bc 547
1f804340
MD
548 if (pmap_resident_count(vm_map_pmap(info->map)) <= info->desired) {
549 return(-1);
550 }
551 mycpu->gd_cnt.v_pdpages++;
b12defdc
MD
552
553 if (vm_page_busy_try(p, TRUE))
554 return(0);
555 if (p->wire_count || p->hold_count || (p->flags & PG_UNMANAGED)) {
556 vm_page_wakeup(p);
557 return(0);
558 }
559 if (!pmap_page_exists_quick(vm_map_pmap(info->map), p)) {
560 vm_page_wakeup(p);
1f804340
MD
561 return(0);
562 }
984263bc 563
1f804340
MD
564 actcount = pmap_ts_referenced(p);
565 if (actcount) {
566 vm_page_flag_set(p, PG_REFERENCED);
567 } else if (p->flags & PG_REFERENCED) {
568 actcount = 1;
569 }
570
b12defdc 571 vm_page_and_queue_spin_lock(p);
027193eb 572 if (p->queue - p->pc != PQ_ACTIVE && (p->flags & PG_REFERENCED)) {
b12defdc 573 vm_page_and_queue_spin_unlock(p);
1f804340
MD
574 vm_page_activate(p);
575 p->act_count += actcount;
576 vm_page_flag_clear(p, PG_REFERENCED);
027193eb 577 } else if (p->queue - p->pc == PQ_ACTIVE) {
1f804340
MD
578 if ((p->flags & PG_REFERENCED) == 0) {
579 p->act_count -= min(p->act_count, ACT_DECLINE);
b12defdc
MD
580 if (!info->limit &&
581 (vm_pageout_algorithm || (p->act_count == 0))) {
582 vm_page_and_queue_spin_unlock(p);
984263bc 583 vm_page_protect(p, VM_PROT_NONE);
1f804340
MD
584 vm_page_deactivate(p);
585 } else {
027193eb
MD
586 TAILQ_REMOVE(&vm_page_queues[p->queue].pl,
587 p, pageq);
588 TAILQ_INSERT_TAIL(&vm_page_queues[p->queue].pl,
589 p, pageq);
b12defdc 590 vm_page_and_queue_spin_unlock(p);
984263bc 591 }
1f804340 592 } else {
b12defdc 593 vm_page_and_queue_spin_unlock(p);
1f804340
MD
594 vm_page_activate(p);
595 vm_page_flag_clear(p, PG_REFERENCED);
b12defdc
MD
596
597 vm_page_and_queue_spin_lock(p);
027193eb 598 if (p->queue - p->pc == PQ_ACTIVE) {
b12defdc
MD
599 if (p->act_count < (ACT_MAX - ACT_ADVANCE))
600 p->act_count += ACT_ADVANCE;
027193eb
MD
601 TAILQ_REMOVE(&vm_page_queues[p->queue].pl,
602 p, pageq);
603 TAILQ_INSERT_TAIL(&vm_page_queues[p->queue].pl,
604 p, pageq);
b12defdc
MD
605 }
606 vm_page_and_queue_spin_unlock(p);
984263bc 607 }
027193eb 608 } else if (p->queue - p->pc == PQ_INACTIVE) {
b12defdc 609 vm_page_and_queue_spin_unlock(p);
1f804340 610 vm_page_protect(p, VM_PROT_NONE);
b12defdc
MD
611 } else {
612 vm_page_and_queue_spin_unlock(p);
984263bc 613 }
b12defdc 614 vm_page_wakeup(p);
1f804340 615 return(0);
984263bc
MD
616}
617
618/*
99ad9bc4 619 * Deactivate some number of pages in a map, try to do it fairly, but
984263bc
MD
620 * that is really hard to do.
621 */
622static void
57e43348 623vm_pageout_map_deactivate_pages(vm_map_t map, vm_pindex_t desired)
984263bc
MD
624{
625 vm_map_entry_t tmpe;
626 vm_object_t obj, bigobj;
627 int nothingwired;
628
df4f70a6 629 if (lockmgr(&map->lock, LK_EXCLUSIVE | LK_NOWAIT)) {
984263bc
MD
630 return;
631 }
632
633 bigobj = NULL;
634 nothingwired = TRUE;
635
636 /*
637 * first, search out the biggest object, and try to free pages from
638 * that.
639 */
640 tmpe = map->header.next;
641 while (tmpe != &map->header) {
1b874851
MD
642 switch(tmpe->maptype) {
643 case VM_MAPTYPE_NORMAL:
644 case VM_MAPTYPE_VPAGETABLE:
984263bc
MD
645 obj = tmpe->object.vm_object;
646 if ((obj != NULL) && (obj->shadow_count <= 1) &&
647 ((bigobj == NULL) ||
648 (bigobj->resident_page_count < obj->resident_page_count))) {
649 bigobj = obj;
650 }
1b874851
MD
651 break;
652 default:
653 break;
984263bc
MD
654 }
655 if (tmpe->wired_count > 0)
656 nothingwired = FALSE;
657 tmpe = tmpe->next;
658 }
659
05b9db80
MD
660 if (bigobj) {
661 vm_object_hold(bigobj);
984263bc 662 vm_pageout_object_deactivate_pages(map, bigobj, desired, 0);
05b9db80
MD
663 vm_object_drop(bigobj);
664 }
984263bc
MD
665
666 /*
667 * Next, hunt around for other pages to deactivate. We actually
668 * do this search sort of wrong -- .text first is not the best idea.
669 */
670 tmpe = map->header.next;
671 while (tmpe != &map->header) {
672 if (pmap_resident_count(vm_map_pmap(map)) <= desired)
673 break;
1b874851
MD
674 switch(tmpe->maptype) {
675 case VM_MAPTYPE_NORMAL:
676 case VM_MAPTYPE_VPAGETABLE:
984263bc 677 obj = tmpe->object.vm_object;
05b9db80
MD
678 if (obj) {
679 vm_object_hold(obj);
984263bc 680 vm_pageout_object_deactivate_pages(map, obj, desired, 0);
05b9db80
MD
681 vm_object_drop(obj);
682 }
1b874851
MD
683 break;
684 default:
685 break;
984263bc
MD
686 }
687 tmpe = tmpe->next;
688 };
689
690 /*
691 * Remove all mappings if a process is swapped out; this will free page
692 * table pages.
693 */
694 if (desired == 0 && nothingwired)
695 pmap_remove(vm_map_pmap(map),
88181b08 696 VM_MIN_USER_ADDRESS, VM_MAX_USER_ADDRESS);
984263bc 697 vm_map_unlock(map);
984263bc
MD
698}
699#endif
700
701/*
a5fc46c9
MD
702 * Called when the pageout scan wants to free a page. We no longer
703 * try to cycle the vm_object here with a reference & dealloc, which can
704 * cause a non-trivial object collapse in a critical path.
99ad9bc4 705 *
a5fc46c9
MD
706 * It is unclear why we cycled the ref_count in the past, perhaps to try
707 * to optimize shadow chain collapses but I don't quite see why it would
708 * be necessary. An OBJ_DEAD object should terminate any and all vm_pages
709 * synchronously and not have to be kick-started.
984263bc 710 */
99ad9bc4 711static void
95813af0
MD
712vm_pageout_page_free(vm_page_t m)
713{
984263bc
MD
714 vm_page_protect(m, VM_PROT_NONE);
715 vm_page_free(m);
984263bc
MD
716}
717
718/*
20479584 719 * vm_pageout_scan does the dirty work for the pageout daemon.
984263bc 720 */
8fa76237
MD
721struct vm_pageout_scan_info {
722 struct proc *bigproc;
723 vm_offset_t bigsize;
724};
725
726static int vm_pageout_scan_callback(struct proc *p, void *data);
727
20479584 728static int
51c99c61 729vm_pageout_scan_inactive(int pass, int q, int avail_shortage,
027193eb 730 int *vnodes_skippedp)
984263bc 731{
b12defdc 732 vm_page_t m;
984263bc 733 struct vm_page marker;
5d6a945b 734 struct vnode *vpfailed; /* warning, allowed to be stale */
027193eb
MD
735 int maxscan;
736 int delta = 0;
984263bc
MD
737 vm_object_t object;
738 int actcount;
984263bc 739 int maxlaunder;
984263bc
MD
740
741 /*
984263bc
MD
742 * Start scanning the inactive queue for pages we can move to the
743 * cache or free. The scan will stop when the target is reached or
744 * we have scanned the entire inactive queue. Note that m->act_count
745 * is not used to form decisions for the inactive queue, only for the
746 * active queue.
747 *
748 * maxlaunder limits the number of dirty pages we flush per scan.
749 * For most systems a smaller value (16 or 32) is more robust under
750 * extreme memory and disk pressure because any unnecessary writes
751 * to disk can result in extreme performance degradation. However,
752 * systems with excessive dirty pages (especially when MAP_NOSYNC is
753 * used) will die horribly with limited laundering. If the pageout
754 * daemon cannot clean enough pages in the first pass, we let it go
755 * all out in succeeding passes.
756 */
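	/*
	 * With the default vm.max_launder of 32, at most 32 dirty pages
	 * are flushed during a first (pass 0) scan; on later passes the
	 * limit is effectively removed (10000 below).
	 */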
757 if ((maxlaunder = vm_max_launder) <= 1)
758 maxlaunder = 1;
759 if (pass)
760 maxlaunder = 10000;
761
06ecca5a 762 /*
b12defdc
MD
763 * Initialize our marker
764 */
765 bzero(&marker, sizeof(marker));
766 marker.flags = PG_BUSY | PG_FICTITIOUS | PG_MARKER;
027193eb
MD
767 marker.queue = PQ_INACTIVE + q;
768 marker.pc = q;
b12defdc
MD
769 marker.wire_count = 1;
770
771 /*
772 * Inactive queue scan.
773 *
774 * NOTE: The vm_page must be spinlocked before the queue to avoid
775 * deadlocks, so it is easiest to simply iterate the loop
776 * with the queue unlocked at the top.
06ecca5a 777 */
5d6a945b 778 vpfailed = NULL;
b12defdc 779
027193eb
MD
780 vm_page_queues_spin_lock(PQ_INACTIVE + q);
781 TAILQ_INSERT_HEAD(&vm_page_queues[PQ_INACTIVE + q].pl, &marker, pageq);
51c99c61 782 maxscan = vm_page_queues[PQ_INACTIVE + q].lcnt;
027193eb 783 vm_page_queues_spin_unlock(PQ_INACTIVE + q);
b12defdc
MD
784
785 while ((m = TAILQ_NEXT(&marker, pageq)) != NULL &&
51c99c61 786 maxscan-- > 0 && avail_shortage - delta > 0)
b12defdc
MD
787 {
788 vm_page_and_queue_spin_lock(m);
789 if (m != TAILQ_NEXT(&marker, pageq)) {
790 vm_page_and_queue_spin_unlock(m);
791 ++maxscan;
792 continue;
793 }
027193eb
MD
794 KKASSERT(m->queue - m->pc == PQ_INACTIVE);
795 TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE + q].pl,
b12defdc 796 &marker, pageq);
027193eb 797 TAILQ_INSERT_AFTER(&vm_page_queues[PQ_INACTIVE + q].pl, m,
b12defdc 798 &marker, pageq);
12e4aaff 799 mycpu->gd_cnt.v_pdpages++;
984263bc 800
06ecca5a 801 /*
b12defdc 802 * Skip marker pages
06ecca5a 803 */
b12defdc
MD
804 if (m->flags & PG_MARKER) {
805 vm_page_and_queue_spin_unlock(m);
806 continue;
807 }
984263bc
MD
808
809 /*
b12defdc
MD
810 * Try to busy the page. Don't mess with pages which are
811 * already busy or reorder them in the queue.
984263bc 812 */
b12defdc
MD
813 if (vm_page_busy_try(m, TRUE)) {
814 vm_page_and_queue_spin_unlock(m);
984263bc 815 continue;
b12defdc
MD
816 }
817 vm_page_and_queue_spin_unlock(m);
027193eb 818 KKASSERT(m->queue - m->pc == PQ_INACTIVE);
984263bc 819
d2d8515b
MD
820 lwkt_yield();
821
984263bc 822 /*
b12defdc
MD
823 * The page has been successfully busied and is now no
824 * longer spinlocked. The queue is no longer spinlocked
825 * either.
984263bc 826 */
06ecca5a 827
984263bc 828 /*
90244566
MD
829 * It is possible for a page to be busied ad-hoc (e.g. the
830 * pmap_collect() code) and wired and race against the
831 * allocation of a new page. vm_page_alloc() may be forced
832 * to deactivate the wired page in which case it winds up
833 * on the inactive queue and must be handled here. We
834 * correct the problem simply by unqueuing the page.
835 */
836 if (m->wire_count) {
837 vm_page_unqueue_nowakeup(m);
838 vm_page_wakeup(m);
839 kprintf("WARNING: pagedaemon: wired page on "
840 "inactive queue %p\n", m);
841 continue;
842 }
843
844 /*
b12defdc 845 * A held page may be undergoing I/O, so skip it.
984263bc 846 */
b12defdc
MD
847 if (m->hold_count) {
848 vm_page_and_queue_spin_lock(m);
027193eb
MD
849 if (m->queue - m->pc == PQ_INACTIVE) {
850 TAILQ_REMOVE(
851 &vm_page_queues[PQ_INACTIVE + q].pl,
852 m, pageq);
853 TAILQ_INSERT_TAIL(
854 &vm_page_queues[PQ_INACTIVE + q].pl,
855 m, pageq);
64949baa 856 ++vm_swapcache_inactive_heuristic;
b12defdc
MD
857 }
858 vm_page_and_queue_spin_unlock(m);
b12defdc 859 vm_page_wakeup(m);
984263bc
MD
860 continue;
861 }
862
19cd98ea 863 if (m->object == NULL || m->object->ref_count == 0) {
06ecca5a
MD
864 /*
865 * If the object is not being used, we ignore previous
866 * references.
867 */
984263bc
MD
868 vm_page_flag_clear(m, PG_REFERENCED);
869 pmap_clear_reference(m);
b12defdc 870 /* fall through to end */
984263bc 871 } else if (((m->flags & PG_REFERENCED) == 0) &&
06ecca5a
MD
872 (actcount = pmap_ts_referenced(m))) {
873 /*
874 * Otherwise, if the page has been referenced while
875 * in the inactive queue, we bump the "activation
876 * count" upwards, making it less likely that the
877 * page will be added back to the inactive queue
878 * prematurely again. Here we check the page tables
879 * (or emulated bits, if any), given the upper level
880 * VM system not knowing anything about existing
881 * references.
882 */
984263bc
MD
883 vm_page_activate(m);
884 m->act_count += (actcount + ACT_ADVANCE);
b12defdc 885 vm_page_wakeup(m);
984263bc
MD
886 continue;
887 }
888
889 /*
b12defdc
MD
890 * (m) is still busied.
891 *
984263bc
MD
892 * If the upper level VM system knows about any page
893 * references, we activate the page. We also set the
894 * "activation count" higher than normal so that we will less
895 * likely place pages back onto the inactive queue again.
896 */
897 if ((m->flags & PG_REFERENCED) != 0) {
898 vm_page_flag_clear(m, PG_REFERENCED);
899 actcount = pmap_ts_referenced(m);
900 vm_page_activate(m);
901 m->act_count += (actcount + ACT_ADVANCE + 1);
b12defdc 902 vm_page_wakeup(m);
984263bc
MD
903 continue;
904 }
905
906 /*
907 * If the upper level VM system doesn't know anything about
908 * the page being dirty, we have to check for it again. As
909 * far as the VM code knows, any partially dirty pages are
910 * fully dirty.
41a01a4d
MD
911 *
912 * Pages marked PG_WRITEABLE may be mapped into the user
913 * address space of a process running on another cpu. A
914 * user process (without holding the MP lock) running on
915 * another cpu may be able to touch the page while we are
17cde63e
MD
916 * trying to remove it. vm_page_cache() will handle this
917 * case for us.
984263bc
MD
918 */
919 if (m->dirty == 0) {
920 vm_page_test_dirty(m);
921 } else {
922 vm_page_dirty(m);
923 }
924
9bf025db 925 if (m->valid == 0 && (m->flags & PG_NEED_COMMIT) == 0) {
41a01a4d
MD
926 /*
927 * Invalid pages can be easily freed
928 */
984263bc 929 vm_pageout_page_free(m);
12e4aaff 930 mycpu->gd_cnt.v_dfree++;
027193eb 931 ++delta;
9bf025db 932 } else if (m->dirty == 0 && (m->flags & PG_NEED_COMMIT) == 0) {
984263bc 933 /*
41a01a4d
MD
934 * Clean pages can be placed onto the cache queue.
935 * This effectively frees them.
984263bc
MD
936 */
937 vm_page_cache(m);
027193eb 938 ++delta;
984263bc
MD
939 } else if ((m->flags & PG_WINATCFLS) == 0 && pass == 0) {
940 /*
941 * Dirty pages need to be paged out, but flushing
942 * a page is extremely expensive versus freeing
943 * a clean page. Rather than artificially limiting
944 * the number of pages we can flush, we instead give
945 * dirty pages extra priority on the inactive queue
946 * by forcing them to be cycled through the queue
947 * twice before being flushed, after which the
948 * (now clean) page will cycle through once more
949 * before being freed. This significantly extends
950 * the thrash point for a heavily loaded machine.
951 */
984263bc 952 vm_page_flag_set(m, PG_WINATCFLS);
b12defdc 953 vm_page_and_queue_spin_lock(m);
027193eb
MD
954 if (m->queue - m->pc == PQ_INACTIVE) {
955 TAILQ_REMOVE(
956 &vm_page_queues[PQ_INACTIVE + q].pl,
957 m, pageq);
958 TAILQ_INSERT_TAIL(
959 &vm_page_queues[PQ_INACTIVE + q].pl,
960 m, pageq);
64949baa 961 ++vm_swapcache_inactive_heuristic;
b12defdc
MD
962 }
963 vm_page_and_queue_spin_unlock(m);
b12defdc 964 vm_page_wakeup(m);
984263bc
MD
965 } else if (maxlaunder > 0) {
966 /*
967 * We always want to try to flush some dirty pages if
968 * we encounter them, to keep the system stable.
969 * Normally this number is small, but under extreme
970 * pressure where there are insufficient clean pages
971 * on the inactive queue, we may have to go all out.
972 */
973 int swap_pageouts_ok;
974 struct vnode *vp = NULL;
975
19cd98ea 976 swap_pageouts_ok = 0;
984263bc 977 object = m->object;
19cd98ea
VS
978 if (object &&
979 (object->type != OBJT_SWAP) &&
980 (object->type != OBJT_DEFAULT)) {
984263bc
MD
981 swap_pageouts_ok = 1;
982 } else {
983 swap_pageouts_ok = !(defer_swap_pageouts || disable_swap_pageouts);
984 swap_pageouts_ok |= (!disable_swap_pageouts && defer_swap_pageouts &&
20479584 985 vm_page_count_min(0));
984263bc
MD
986
987 }
988
989 /*
990 * We don't bother paging objects that are "dead".
991 * Those objects are in a "rundown" state.
992 */
19cd98ea
VS
993 if (!swap_pageouts_ok ||
994 (object == NULL) ||
995 (object->flags & OBJ_DEAD)) {
b12defdc 996 vm_page_and_queue_spin_lock(m);
027193eb
MD
997 if (m->queue - m->pc == PQ_INACTIVE) {
998 TAILQ_REMOVE(
999 &vm_page_queues[PQ_INACTIVE + q].pl,
1000 m, pageq);
1001 TAILQ_INSERT_TAIL(
1002 &vm_page_queues[PQ_INACTIVE + q].pl,
1003 m, pageq);
64949baa 1004 ++vm_swapcache_inactive_heuristic;
b12defdc
MD
1005 }
1006 vm_page_and_queue_spin_unlock(m);
b12defdc 1007 vm_page_wakeup(m);
984263bc
MD
1008 continue;
1009 }
1010
1011 /*
b12defdc
MD
1012 * (m) is still busied.
1013 *
984263bc
MD
1014 * The object is already known NOT to be dead. It
1015 * is possible for the vget() to block the whole
1016 * pageout daemon, but the new low-memory handling
1017 * code should prevent it.
1018 *
1019 * The previous code skipped locked vnodes and, worse,
1020 * reordered pages in the queue. This results in
1021 * completely non-deterministic operation because,
1022 * quite often, a vm_fault has initiated an I/O and
1023 * is holding a locked vnode at just the point where
1024 * the pageout daemon is woken up.
1025 *
1026 * We can't wait forever for the vnode lock, we might
1027 * deadlock due to a vn_read() getting stuck in
1028 * vm_wait while holding this vnode. We skip the
1029 * vnode if we can't get it in a reasonable amount
1030 * of time.
5d6a945b
MD
1031 *
1032 * vpfailed is used to (try to) avoid the case where
1033 * a large number of pages are associated with a
1034 * locked vnode, which could cause the pageout daemon
1035 * to stall for an excessive amount of time.
984263bc 1036 */
984263bc 1037 if (object->type == OBJT_VNODE) {
5d6a945b 1038 int flags;
984263bc 1039
5d6a945b
MD
1040 vp = object->handle;
1041 flags = LK_EXCLUSIVE | LK_NOOBJ;
1042 if (vp == vpfailed)
1043 flags |= LK_NOWAIT;
1044 else
1045 flags |= LK_TIMELOCK;
b12defdc
MD
1046 vm_page_hold(m);
1047 vm_page_wakeup(m);
1048
1049 /*
1050 * We have unbusied (m) temporarily so we can
1051 * acquire the vp lock without deadlocking.
1052 * (m) is held to prevent destruction.
1053 */
5d6a945b
MD
1054 if (vget(vp, flags) != 0) {
1055 vpfailed = vp;
984263bc
MD
1056 ++pageout_lock_miss;
1057 if (object->flags & OBJ_MIGHTBEDIRTY)
027193eb 1058 ++*vnodes_skippedp;
b12defdc 1059 vm_page_unhold(m);
984263bc
MD
1060 continue;
1061 }
1062
1063 /*
1064 * The page might have been moved to another
1065 * queue during potential blocking in vget()
1066 * above. The page might have been freed and
1067 * reused for another vnode. The object might
1068 * have been reused for another vnode.
1069 */
027193eb 1070 if (m->queue - m->pc != PQ_INACTIVE ||
984263bc
MD
1071 m->object != object ||
1072 object->handle != vp) {
1073 if (object->flags & OBJ_MIGHTBEDIRTY)
027193eb 1074 ++*vnodes_skippedp;
984263bc 1075 vput(vp);
b12defdc 1076 vm_page_unhold(m);
984263bc
MD
1077 continue;
1078 }
1079
1080 /*
1081 * The page may have been busied during the
1082 * blocking in vput(); We don't move the
1083 * page back onto the end of the queue so that
1084 * statistics are more correct if we don't.
1085 */
b12defdc 1086 if (vm_page_busy_try(m, TRUE)) {
984263bc 1087 vput(vp);
b12defdc 1088 vm_page_unhold(m);
984263bc
MD
1089 continue;
1090 }
b12defdc 1091 vm_page_unhold(m);
984263bc
MD
1092
1093 /*
b12defdc
MD
1094 * (m) is busied again
1095 *
1096 * We own the busy bit and remove our hold
1097 * bit. If the page is still held it
1098 * might be undergoing I/O, so skip it.
984263bc
MD
1099 */
1100 if (m->hold_count) {
b12defdc 1101 vm_page_and_queue_spin_lock(m);
027193eb
MD
1102 if (m->queue - m->pc == PQ_INACTIVE) {
1103 TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE + q].pl, m, pageq);
1104 TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE + q].pl, m, pageq);
64949baa 1105 ++vm_swapcache_inactive_heuristic;
b12defdc
MD
1106 }
1107 vm_page_and_queue_spin_unlock(m);
984263bc 1108 if (object->flags & OBJ_MIGHTBEDIRTY)
027193eb 1109 ++*vnodes_skippedp;
b12defdc 1110 vm_page_wakeup(m);
984263bc
MD
1111 vput(vp);
1112 continue;
1113 }
b12defdc 1114 /* (m) is left busied as we fall through */
984263bc
MD
1115 }
1116
1117 /*
b12defdc
MD
1118 * page is busy and not held here.
1119 *
984263bc
MD
1120 * If a page is dirty, then it is either being washed
1121 * (but not yet cleaned) or it is still in the
1122 * laundry. If it is still in the laundry, then we
1123 * start the cleaning operation.
1124 *
20479584
MD
1125 * Count successful flushes against the shortage (via delta)
1126 * to account for the (future) cleaned page. Otherwise we
1127 * could wind up laundering or cleaning too many
1128 * pages.
984263bc 1129 */
984263bc 1130 if (vm_pageout_clean(m) != 0) {
027193eb 1131 ++delta;
984263bc 1132 --maxlaunder;
c84c24da 1133 }
b12defdc 1134 /* clean ate busy, page no longer accessible */
984263bc
MD
1135 if (vp != NULL)
1136 vput(vp);
b12defdc
MD
1137 } else {
1138 vm_page_wakeup(m);
984263bc
MD
1139 }
1140 }
027193eb
MD
1141 vm_page_queues_spin_lock(PQ_INACTIVE + q);
1142 TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE + q].pl, &marker, pageq);
1143 vm_page_queues_spin_unlock(PQ_INACTIVE + q);
027193eb
MD
1144 return (delta);
1145}
1146
1147static int
1148vm_pageout_scan_active(int pass, int q,
51c99c61 1149 int avail_shortage, int inactive_shortage,
027193eb
MD
1150 int *recycle_countp)
1151{
1152 struct vm_page marker;
1153 vm_page_t m;
1154 int actcount;
1155 int delta = 0;
51c99c61 1156 int maxscan;
984263bc
MD
1157
1158 /*
20479584
MD
1159 * We want to move pages from the active queue to the inactive
1160 * queue to get the inactive queue to the inactive target. If
1161 * we still have a page shortage from above we try to directly free
1162 * clean pages instead of moving them.
06ecca5a 1163 *
20479584
MD
1164 * If we do still have a shortage we keep track of the number of
1165 * pages we free or cache (recycle_count) as a measure of thrashing
1166 * between the active and inactive queues.
1167 *
51db7ca2
MD
1168 * If we were able to completely satisfy the free+cache targets
1169 * from the inactive pool we limit the number of pages we move
1170 * from the active pool to the inactive pool to 2x the pages we
e6e9a0c3
MD
1171 * had removed from the inactive pool (with a minimum of 1/5 the
1172 * inactive target). If we were not able to completely satisfy
1173 * the free+cache targets we go for the whole target aggressively.
20479584
MD
1174 *
1175 * NOTE: Both variables can end up negative.
1176 * NOTE: We are still in a critical section.
984263bc 1177 */
20479584 1178
027193eb
MD
1179 bzero(&marker, sizeof(marker));
1180 marker.flags = PG_BUSY | PG_FICTITIOUS | PG_MARKER;
1181 marker.queue = PQ_ACTIVE + q;
1182 marker.pc = q;
1183 marker.wire_count = 1;
b12defdc 1184
027193eb
MD
1185 vm_page_queues_spin_lock(PQ_ACTIVE + q);
1186 TAILQ_INSERT_HEAD(&vm_page_queues[PQ_ACTIVE + q].pl, &marker, pageq);
51c99c61 1187 maxscan = vm_page_queues[PQ_ACTIVE + q].lcnt;
027193eb 1188 vm_page_queues_spin_unlock(PQ_ACTIVE + q);
b12defdc
MD
1189
1190 while ((m = TAILQ_NEXT(&marker, pageq)) != NULL &&
51c99c61
MD
1191 maxscan-- > 0 && (avail_shortage - delta > 0 ||
1192 inactive_shortage > 0))
b12defdc
MD
1193 {
1194 vm_page_and_queue_spin_lock(m);
1195 if (m != TAILQ_NEXT(&marker, pageq)) {
1196 vm_page_and_queue_spin_unlock(m);
51c99c61 1197 ++maxscan;
b12defdc
MD
1198 continue;
1199 }
027193eb
MD
1200 KKASSERT(m->queue - m->pc == PQ_ACTIVE);
1201 TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE + q].pl,
b12defdc 1202 &marker, pageq);
027193eb 1203 TAILQ_INSERT_AFTER(&vm_page_queues[PQ_ACTIVE + q].pl, m,
b12defdc 1204 &marker, pageq);
984263bc 1205
06ecca5a 1206 /*
b12defdc 1207 * Skip marker pages
984263bc 1208 */
b12defdc
MD
1209 if (m->flags & PG_MARKER) {
1210 vm_page_and_queue_spin_unlock(m);
1211 continue;
1212 }
06ecca5a 1213
984263bc 1214 /*
b12defdc
MD
1215 * Try to busy the page. Don't mess with pages which are
1216 * already busy or reorder them in the queue.
984263bc 1217 */
b12defdc
MD
1218 if (vm_page_busy_try(m, TRUE)) {
1219 vm_page_and_queue_spin_unlock(m);
984263bc
MD
1220 continue;
1221 }
1222
1223 /*
b12defdc
MD
1224 * Don't deactivate pages that are held, even if we can
1225 * busy them. (XXX why not?)
1226 */
1227 if (m->hold_count != 0) {
027193eb 1228 TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE + q].pl,
b12defdc 1229 m, pageq);
027193eb 1230 TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE + q].pl,
b12defdc
MD
1231 m, pageq);
1232 vm_page_and_queue_spin_unlock(m);
1233 vm_page_wakeup(m);
1234 continue;
1235 }
1236 vm_page_and_queue_spin_unlock(m);
d2d8515b 1237 lwkt_yield();
b12defdc
MD
1238
1239 /*
1240 * The page has been successfully busied and the page and
1241 * queue are no longer locked.
1242 */
1243
1244 /*
984263bc
MD
1245 * The count for pagedaemon pages is done after checking the
1246 * page for eligibility...
1247 */
12e4aaff 1248 mycpu->gd_cnt.v_pdpages++;
984263bc
MD
1249
1250 /*
20479584
MD
1251 * Check to see "how much" the page has been used and clear
1252 * the tracking access bits. If the object has no references
1253 * don't bother paying the expense.
984263bc
MD
1254 */
1255 actcount = 0;
19cd98ea 1256 if (m->object && m->object->ref_count != 0) {
20479584
MD
1257 if (m->flags & PG_REFERENCED)
1258 ++actcount;
984263bc
MD
1259 actcount += pmap_ts_referenced(m);
1260 if (actcount) {
1261 m->act_count += ACT_ADVANCE + actcount;
1262 if (m->act_count > ACT_MAX)
1263 m->act_count = ACT_MAX;
1264 }
1265 }
984263bc
MD
1266 vm_page_flag_clear(m, PG_REFERENCED);
1267
1268 /*
20479584 1269 * actcount is only valid if the object ref_count is non-zero.
19cd98ea 1270 * If the page does not have an object, actcount will be zero.
984263bc 1271 */
20479584 1272 if (actcount && m->object->ref_count != 0) {
b12defdc 1273 vm_page_and_queue_spin_lock(m);
027193eb
MD
1274 if (m->queue - m->pc == PQ_ACTIVE) {
1275 TAILQ_REMOVE(
1276 &vm_page_queues[PQ_ACTIVE + q].pl,
1277 m, pageq);
1278 TAILQ_INSERT_TAIL(
1279 &vm_page_queues[PQ_ACTIVE + q].pl,
1280 m, pageq);
b12defdc
MD
1281 }
1282 vm_page_and_queue_spin_unlock(m);
1283 vm_page_wakeup(m);
984263bc
MD
1284 } else {
1285 m->act_count -= min(m->act_count, ACT_DECLINE);
1286 if (vm_pageout_algorithm ||
19cd98ea
VS
1287 (m->object == NULL) ||
1288 (m->object && (m->object->ref_count == 0)) ||
20479584
MD
1289 m->act_count < pass + 1
1290 ) {
1291 /*
1292 * Deactivate the page. If we had a
1293 * shortage from our inactive scan try to
1294 * free (cache) the page instead.
e6e9a0c3
MD
1295 *
1296 * Don't just blindly cache the page if
1297 * we do not have a shortage from the
1298 * inactive scan, that could lead to
1299 * gigabytes being moved.
20479584 1300 */
51c99c61
MD
1301 --inactive_shortage;
1302 if (avail_shortage - delta > 0 ||
19cd98ea
VS
1303 (m->object && (m->object->ref_count == 0)))
1304 {
51c99c61 1305 if (avail_shortage - delta > 0)
027193eb 1306 ++*recycle_countp;
984263bc 1307 vm_page_protect(m, VM_PROT_NONE);
e6e9a0c3 1308 if (m->dirty == 0 &&
9bf025db 1309 (m->flags & PG_NEED_COMMIT) == 0 &&
51c99c61 1310 avail_shortage - delta > 0) {
984263bc 1311 vm_page_cache(m);
c84c24da 1312 } else {
984263bc 1313 vm_page_deactivate(m);
a491077e 1314 vm_page_wakeup(m);
c84c24da 1315 }
984263bc
MD
1316 } else {
1317 vm_page_deactivate(m);
b12defdc 1318 vm_page_wakeup(m);
984263bc 1319 }
51c99c61 1320 ++delta;
984263bc 1321 } else {
b12defdc 1322 vm_page_and_queue_spin_lock(m);
027193eb 1323 if (m->queue - m->pc == PQ_ACTIVE) {
b12defdc 1324 TAILQ_REMOVE(
027193eb
MD
1325 &vm_page_queues[PQ_ACTIVE + q].pl,
1326 m, pageq);
b12defdc 1327 TAILQ_INSERT_TAIL(
027193eb
MD
1328 &vm_page_queues[PQ_ACTIVE + q].pl,
1329 m, pageq);
b12defdc
MD
1330 }
1331 vm_page_and_queue_spin_unlock(m);
1332 vm_page_wakeup(m);
984263bc
MD
1333 }
1334 }
984263bc
MD
1335 }
1336
984263bc 1337 /*
b12defdc
MD
1338 * Clean out our local marker.
1339 */
027193eb
MD
1340 vm_page_queues_spin_lock(PQ_ACTIVE + q);
1341 TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE + q].pl, &marker, pageq);
1342 vm_page_queues_spin_unlock(PQ_ACTIVE + q);
1343
1344 return (delta);
1345}
1346
1347/*
1348 * The number of actually free pages can drop down to v_free_reserved;
1349 * we try to build the free count back above v_free_min. Note that
1350 * vm_paging_needed() also returns TRUE if v_free_count is not at
1351 * least v_free_min so that is the minimum we must build the free
1352 * count to.
1353 *
1354 * We use a slightly higher target to improve hysteresis,
1355 * ((v_free_target + v_free_min) / 2). Since v_free_target
1356 * is usually the same as v_cache_min this maintains about
1357 * half the pages in the free queue as are in the cache queue,
1358 * providing pretty good pipelining for pageout operation.
1359 *
1360 * The system operator can manipulate vm.v_cache_min and
1361 * vm.v_free_target to tune the pageout daemon. Be sure
1362 * to keep vm.v_free_min < vm.v_free_target.
1363 *
1364 * Note that the original paging target is to get at least
1365 * (free_min + cache_min) into (free + cache). The slightly
1366 * higher target will shift additional pages from cache to free
1367 * without affecting the original paging target in order to
1368 * maintain better hysteresis and not have the free count always
1369 * be dead-on v_free_min.
1370 *
1371 * NOTE: we are still in a critical section.
1372 *
1373 * Pages moved from PQ_CACHE to totally free are not counted in the
1374 * pages_freed counter.
1375 */
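/*
 * As a rough, hypothetical example: with v_free_min around 5300 pages
 * and v_free_target around 23900 pages (the 4 * v_free_min +
 * v_free_reserved formula in vm_pageout_thread() on a ~4GB machine),
 * the loop below keeps pulling pages out of PQ_CACHE until the free
 * count reaches (5300 + 23900) / 2, i.e. roughly 14600 pages.
 */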
1376static void
51c99c61 1377vm_pageout_scan_cache(int avail_shortage, int vnodes_skipped, int recycle_count)
027193eb
MD
1378{
1379 struct vm_pageout_scan_info info;
1380 vm_page_t m;
b12defdc 1381
cd3c66bd
MD
1382 while (vmstats.v_free_count <
1383 (vmstats.v_free_min + vmstats.v_free_target) / 2) {
1384 /*
b12defdc 1385 * This steals some code from vm/vm_page.c
cd3c66bd 1386 */
984263bc 1387 static int cache_rover = 0;
b12defdc
MD
1388
1389 m = vm_page_list_find(PQ_CACHE, cache_rover & PQ_L2_MASK, FALSE);
20479584 1390 if (m == NULL)
984263bc 1391 break;
b12defdc
MD
1392 /* page is returned removed from its queue and spinlocked */
1393 if (vm_page_busy_try(m, TRUE)) {
1394 vm_page_deactivate_locked(m);
1395 vm_page_spin_unlock(m);
984263bc 1396#ifdef INVARIANTS
086c1d7e 1397 kprintf("Warning: busy page %p found in cache\n", m);
984263bc 1398#endif
b12defdc
MD
1399 continue;
1400 }
1401 vm_page_spin_unlock(m);
1402 pagedaemon_wakeup();
d2d8515b 1403 lwkt_yield();
b12defdc
MD
1404
1405 /*
1406 * Page has been successfully busied and it and its queue
1407 * is no longer spinlocked.
1408 */
9bf025db 1409 if ((m->flags & (PG_UNMANAGED | PG_NEED_COMMIT)) ||
b12defdc
MD
1410 m->hold_count ||
1411 m->wire_count) {
984263bc 1412 vm_page_deactivate(m);
b12defdc 1413 vm_page_wakeup(m);
984263bc
MD
1414 continue;
1415 }
17cde63e
MD
1416 KKASSERT((m->flags & PG_MAPPED) == 0);
1417 KKASSERT(m->dirty == 0);
b12defdc 1418 cache_rover += PQ_PRIME2;
984263bc 1419 vm_pageout_page_free(m);
12e4aaff 1420 mycpu->gd_cnt.v_dfree++;
984263bc 1421 }
06ecca5a 1422
984263bc
MD
1423#if !defined(NO_SWAPPING)
1424 /*
1425 * Idle process swapout -- run once per second.
1426 */
1427 if (vm_swap_idle_enabled) {
1428 static long lsec;
1429 if (time_second != lsec) {
1430 vm_pageout_req_swapout |= VM_SWAP_IDLE;
1431 vm_req_vmdaemon();
1432 lsec = time_second;
1433 }
1434 }
1435#endif
1436
1437 /*
1438 * If we didn't get enough free pages, and we have skipped a vnode
1439 * in a writeable object, wake up the sync daemon. And kick swapout
1440 * if we did not get enough free pages.
1441 */
1442 if (vm_paging_target() > 0) {
20479584 1443 if (vnodes_skipped && vm_page_count_min(0))
418ff780 1444 speedup_syncer();
984263bc
MD
1445#if !defined(NO_SWAPPING)
1446 if (vm_swap_enabled && vm_page_count_target()) {
1447 vm_req_vmdaemon();
1448 vm_pageout_req_swapout |= VM_SWAP_NORMAL;
1449 }
1450#endif
1451 }
1452
1453 /*
20479584
MD
1454 * Handle catastrophic conditions. Under good conditions we should
1455 * be at the target, well beyond our minimum. If we could not even
1456 * reach our minimum the system is under heavy stress.
1457 *
1458 * Determine whether we have run out of memory. This occurs when
1459 * swap_pager_full is TRUE and the only pages left in the page
1460 * queues are dirty. We will still likely have page shortages.
c84c24da
MD
1461 *
1462 * - swap_pager_full is set if insufficient swap was
1463 * available to satisfy a requested pageout.
1464 *
20479584
MD
1465 * - the inactive queue is bloated (4 x size of active queue),
1466 * meaning it is unable to get rid of dirty pages.
c84c24da 1467 *
20479584
MD
1468 * - vm_page_count_min() without counting pages recycled from the
1469 * active queue (recycle_count) means we could not recover
1470 * enough pages to meet bare minimum needs. This test only
1471 * works if the inactive queue is bloated.
c84c24da 1472 *
51c99c61 1473 * - due to a positive avail_shortage we shifted the remaining
20479584
MD
1474 * dirty pages from the active queue to the inactive queue
1475 * trying to find clean ones to free.
984263bc 1476 */
20479584 1477 if (swap_pager_full && vm_page_count_min(recycle_count))
c84c24da 1478 kprintf("Warning: system low on memory+swap!\n");
20479584
MD
1479 if (swap_pager_full && vm_page_count_min(recycle_count) &&
1480 vmstats.v_inactive_count > vmstats.v_active_count * 4 &&
51c99c61 1481 avail_shortage > 0) {
20479584
MD
1482 /*
1483 * Kill something.
1484 */
8fa76237
MD
1485 info.bigproc = NULL;
1486 info.bigsize = 0;
1487 allproc_scan(vm_pageout_scan_callback, &info);
1488 if (info.bigproc != NULL) {
1489 killproc(info.bigproc, "out of swap space");
1490 info.bigproc->p_nice = PRIO_MIN;
08f2f1bb
SS
1491 info.bigproc->p_usched->resetpriority(
1492 FIRST_LWP_IN_PROC(info.bigproc));
12e4aaff 1493 wakeup(&vmstats.v_free_count);
8fa76237 1494 PRELE(info.bigproc);
984263bc
MD
1495 }
1496 }
1497}
1498
99ad9bc4 1499/*
b12defdc 1500 * The caller must hold proc_token.
99ad9bc4 1501 */
8fa76237
MD
1502static int
1503vm_pageout_scan_callback(struct proc *p, void *data)
1504{
1505 struct vm_pageout_scan_info *info = data;
1506 vm_offset_t size;
1507
1508 /*
20479584
MD
1509 * Never kill system processes or init. If we have configured swap
1510 * then try to avoid killing low-numbered pids.
8fa76237 1511 */
4643740a 1512 if ((p->p_flags & P_SYSTEM) || (p->p_pid == 1) ||
8fa76237
MD
1513 ((p->p_pid < 48) && (vm_swap_size != 0))) {
1514 return (0);
1515 }
1516
1517 /*
1518 * if the process is in a non-running type state,
1519 * don't touch it.
1520 */
20479584 1521 if (p->p_stat != SACTIVE && p->p_stat != SSTOP)
8fa76237 1522 return (0);
8fa76237
MD
1523
1524 /*
20479584
MD
1525 * Get the approximate process size. Note that anonymous pages
1526 * with backing swap will be counted twice, but there should not
1527 * be too many such pages due to the stress the VM system is
1528 * under at this point.
8fa76237 1529 */
20479584 1530 size = vmspace_anonymous_count(p->p_vmspace) +
8fa76237
MD
1531 vmspace_swap_count(p->p_vmspace);
1532
1533 /*
1534 * If this process is bigger than the biggest one
1535 * remember it.
1536 */
20479584 1537 if (info->bigsize < size) {
8fa76237
MD
1538 if (info->bigproc)
1539 PRELE(info->bigproc);
1540 PHOLD(p);
1541 info->bigproc = p;
1542 info->bigsize = size;
1543 }
d2d8515b 1544 lwkt_yield();
8fa76237
MD
1545 return(0);
1546}
1547
984263bc
MD
1548/*
1549 * This routine tries to maintain the pseudo LRU active queue,
1550 * so that some statistic accumulation still occurs during long
1551 * periods of time when there is no paging. This code
1552 * helps the situation where paging just starts to occur.
1553 */
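/*
 * Only a bounded slice of the queue is examined per invocation: until a
 * full-stats interval elapses, pcount is clamped to roughly
 * vm_pageout_stats_max scaled by this queue's share of all pages.  For
 * example, if vm_pageout_stats_max were 512 and this queue held 1/8th
 * of the system's pages, at most about 65 pages would be checked.
 */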
1554static void
027193eb 1555vm_pageout_page_stats(int q)
984263bc 1556{
984263bc 1557 static int fullintervalcount = 0;
b12defdc
MD
1558 struct vm_page marker;
1559 vm_page_t m;
1560 int pcount, tpcount; /* Number of pages to check */
984263bc 1561 int page_shortage;
984263bc 1562
b12defdc
MD
1563 page_shortage = (vmstats.v_inactive_target + vmstats.v_cache_max +
1564 vmstats.v_free_min) -
1565 (vmstats.v_free_count + vmstats.v_inactive_count +
1566 vmstats.v_cache_count);
984263bc
MD
1567
1568 if (page_shortage <= 0)
1569 return;
1570
51c99c61 1571 pcount = vm_page_queues[PQ_ACTIVE + q].lcnt;
984263bc
MD
1572 fullintervalcount += vm_pageout_stats_interval;
1573 if (fullintervalcount < vm_pageout_full_stats_interval) {
51c99c61
MD
1574 tpcount = (vm_pageout_stats_max * pcount) /
1575 vmstats.v_page_count + 1;
984263bc
MD
1576 if (pcount > tpcount)
1577 pcount = tpcount;
1578 } else {
1579 fullintervalcount = 0;
1580 }
1581
b12defdc
MD
1582 bzero(&marker, sizeof(marker));
1583 marker.flags = PG_BUSY | PG_FICTITIOUS | PG_MARKER;
027193eb
MD
1584 marker.queue = PQ_ACTIVE + q;
1585 marker.pc = q;
b12defdc
MD
1586 marker.wire_count = 1;
1587
027193eb
MD
1588 vm_page_queues_spin_lock(PQ_ACTIVE + q);
1589 TAILQ_INSERT_HEAD(&vm_page_queues[PQ_ACTIVE + q].pl, &marker, pageq);
1590 vm_page_queues_spin_unlock(PQ_ACTIVE + q);
b12defdc
MD
1591
1592 while ((m = TAILQ_NEXT(&marker, pageq)) != NULL &&
1593 pcount-- > 0)
1594 {
984263bc
MD
1595 int actcount;
1596
b12defdc
MD
1597 vm_page_and_queue_spin_lock(m);
1598 if (m != TAILQ_NEXT(&marker, pageq)) {
1599 vm_page_and_queue_spin_unlock(m);
1600 ++pcount;
1601 continue;
984263bc 1602 }
027193eb
MD
1603 KKASSERT(m->queue - m->pc == PQ_ACTIVE);
1604 TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE + q].pl, &marker, pageq);
1605 TAILQ_INSERT_AFTER(&vm_page_queues[PQ_ACTIVE + q].pl, m,
b12defdc 1606 &marker, pageq);
984263bc 1607
984263bc 1608 /*
b12defdc 1609 * Ignore markers
984263bc 1610 */
b12defdc
MD
1611 if (m->flags & PG_MARKER) {
1612 vm_page_and_queue_spin_unlock(m);
984263bc
MD
1613 continue;
1614 }
1615
b12defdc
MD
1616 /*
1617 * Ignore pages we can't busy
1618 */
1619 if (vm_page_busy_try(m, TRUE)) {
1620 vm_page_and_queue_spin_unlock(m);
1621 continue;
1622 }
1623 vm_page_and_queue_spin_unlock(m);
027193eb 1624 KKASSERT(m->queue - m->pc == PQ_ACTIVE);
b12defdc
MD
1625
1626 /*
1627 * We now have a safely busied page, the page and queue
1628 * spinlocks have been released.
1629 *
1630 * Ignore held pages
1631 */
1632 if (m->hold_count) {
1633 vm_page_wakeup(m);
1634 continue;
1635 }
1636
1637 /*
1638 * Calculate activity
1639 */
984263bc
MD
1640 actcount = 0;
1641 if (m->flags & PG_REFERENCED) {
1642 vm_page_flag_clear(m, PG_REFERENCED);
1643 actcount += 1;
1644 }
984263bc 1645 actcount += pmap_ts_referenced(m);
b12defdc
MD
1646
1647 /*
1648 * Update act_count and move page to end of queue.
1649 */
984263bc
MD
1650 if (actcount) {
1651 m->act_count += ACT_ADVANCE + actcount;
1652 if (m->act_count > ACT_MAX)
1653 m->act_count = ACT_MAX;
b12defdc 1654 vm_page_and_queue_spin_lock(m);
027193eb
MD
1655 if (m->queue - m->pc == PQ_ACTIVE) {
1656 TAILQ_REMOVE(
1657 &vm_page_queues[PQ_ACTIVE + q].pl,
1658 m, pageq);
1659 TAILQ_INSERT_TAIL(
1660 &vm_page_queues[PQ_ACTIVE + q].pl,
1661 m, pageq);
984263bc 1662 }
b12defdc
MD
1663 vm_page_and_queue_spin_unlock(m);
1664 vm_page_wakeup(m);
1665 continue;
984263bc
MD
1666 }
1667
b12defdc
MD
1668 if (m->act_count == 0) {
1669 /*
1670 * We turn off page access, so that we have
1671 * more accurate RSS stats. We don't do this
1672 * in the normal page deactivation when the
 1673			 * system is loaded VM-wise, because the
1674 * cost of the large number of page protect
1675 * operations would be higher than the value
1676 * of doing the operation.
1677 *
1678 * We use the marker to save our place so
 1679			 * we can release the spin lock. Both (m)
1680 * and (next) will be invalid.
1681 */
1682 vm_page_protect(m, VM_PROT_NONE);
1683 vm_page_deactivate(m);
1684 } else {
1685 m->act_count -= min(m->act_count, ACT_DECLINE);
1686 vm_page_and_queue_spin_lock(m);
027193eb
MD
1687 if (m->queue - m->pc == PQ_ACTIVE) {
1688 TAILQ_REMOVE(
1689 &vm_page_queues[PQ_ACTIVE + q].pl,
1690 m, pageq);
1691 TAILQ_INSERT_TAIL(
1692 &vm_page_queues[PQ_ACTIVE + q].pl,
1693 m, pageq);
b12defdc
MD
1694 }
1695 vm_page_and_queue_spin_unlock(m);
1696 }
1697 vm_page_wakeup(m);
984263bc 1698 }
b12defdc
MD
1699
1700 /*
1701 * Remove our local marker
1702 */
027193eb
MD
1703 vm_page_queues_spin_lock(PQ_ACTIVE + q);
1704 TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE + q].pl, &marker, pageq);
1705 vm_page_queues_spin_unlock(PQ_ACTIVE + q);
984263bc
MD
1706}
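
/*
 * Editor's note: a minimal, self-contained sketch of the marker technique
 * used above, with hypothetical names (struct item, scan_with_marker)
 * rather than the real vm_page/vm_page_queues structures.  A dummy queue
 * element holds our place so the queue lock can be dropped while each
 * real element is processed.
 */
#if 0
#include <sys/queue.h>

struct item {
	TAILQ_ENTRY(item) link;
	int is_marker;
};
TAILQ_HEAD(itemq, item);

static void
scan_with_marker(struct itemq *q)
{
	struct item marker = { .is_marker = 1 };
	struct item *it;

	TAILQ_INSERT_HEAD(q, &marker, link);
	while ((it = TAILQ_NEXT(&marker, link)) != NULL) {
		/* move the marker past the element being processed */
		TAILQ_REMOVE(q, &marker, link);
		TAILQ_INSERT_AFTER(q, it, &marker, link);
		if (it->is_marker)	/* skip other scanners' markers */
			continue;
		/* ... drop the queue lock, work on 'it', re-lock ... */
	}
	TAILQ_REMOVE(q, &marker, link);
}
#endif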
1707
1708static int
57e43348 1709vm_pageout_free_page_calc(vm_size_t count)
984263bc 1710{
12e4aaff 1711 if (count < vmstats.v_page_count)
984263bc
MD
1712 return 0;
1713 /*
1714 * free_reserved needs to include enough for the largest swap pager
1715 * structures plus enough for any pv_entry structs when paging.
0a4d4828
MD
1716 *
1717 * v_free_min normal allocations
1718 * v_free_reserved system allocations
1719 * v_pageout_free_min allocations by pageout daemon
1720 * v_interrupt_free_min low level allocations (e.g swap structures)
984263bc 1721 */
12e4aaff 1722 if (vmstats.v_page_count > 1024)
0a4d4828 1723 vmstats.v_free_min = 64 + (vmstats.v_page_count - 1024) / 200;
984263bc 1724 else
0a4d4828
MD
1725 vmstats.v_free_min = 64;
1726 vmstats.v_free_reserved = vmstats.v_free_min * 4 / 8 + 7;
1727 vmstats.v_free_severe = vmstats.v_free_min * 4 / 8 + 0;
1728 vmstats.v_pageout_free_min = vmstats.v_free_min * 2 / 8 + 7;
1729 vmstats.v_interrupt_free_min = vmstats.v_free_min * 1 / 8 + 7;
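	/*
	 * Editor's note -- a worked example for a hypothetical machine:
	 * with v_page_count = 1048576 (4GB of 4K pages) the code above
	 * yields
	 *	v_free_min           = 64 + (1048576 - 1024) / 200 = 5301
	 *	v_free_reserved      = 5301 * 4 / 8 + 7            = 2657
	 *	v_free_severe        = 5301 * 4 / 8                = 2650
	 *	v_pageout_free_min   = 5301 * 2 / 8 + 7            = 1332
	 *	v_interrupt_free_min = 5301 * 1 / 8 + 7            = 669
	 */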
1730
984263bc
MD
1731 return 1;
1732}
1733
1734
1735/*
20479584 1736 * vm_pageout is the high level pageout daemon.
99ad9bc4
MD
1737 *
1738 * No requirements.
984263bc
MD
1739 */
1740static void
cd8ab232 1741vm_pageout_thread(void)
984263bc
MD
1742{
1743 int pass;
027193eb 1744 int q;
984263bc
MD
1745
1746 /*
1747 * Initialize some paging parameters.
1748 */
4ecf7cc9 1749 curthread->td_flags |= TDF_SYSTHREAD;
984263bc 1750
12e4aaff 1751 if (vmstats.v_page_count < 2000)
984263bc
MD
1752 vm_pageout_page_count = 8;
1753
12e4aaff 1754 vm_pageout_free_page_calc(vmstats.v_page_count);
20479584 1755
984263bc
MD
1756 /*
1757 * v_free_target and v_cache_min control pageout hysteresis. Note
1758 * that these are more a measure of the VM cache queue hysteresis
 1759	 * than the VM free queue.  Specifically, v_free_target is the
1760 * high water mark (free+cache pages).
1761 *
1762 * v_free_reserved + v_cache_min (mostly means v_cache_min) is the
1763 * low water mark, while v_free_min is the stop. v_cache_min must
1764 * be big enough to handle memory needs while the pageout daemon
1765 * is signalled and run to free more pages.
1766 */
12e4aaff
MD
1767 if (vmstats.v_free_count > 6144)
1768 vmstats.v_free_target = 4 * vmstats.v_free_min + vmstats.v_free_reserved;
984263bc 1769 else
12e4aaff 1770 vmstats.v_free_target = 2 * vmstats.v_free_min + vmstats.v_free_reserved;
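	/*
	 * Editor's note -- continuing the hypothetical 4GB example from
	 * vm_pageout_free_page_calc(): v_free_count is well above 6144,
	 * so v_free_target = 4 * 5301 + 2657 = 23861 pages, a bit over
	 * 93MB of free+cache headroom.
	 */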
984263bc 1771
0e8bd897
MD
1772 /*
1773 * NOTE: With the new buffer cache b_act_count we want the default
1774 * inactive target to be a percentage of available memory.
1775 *
1776 * The inactive target essentially determines the minimum
1777 * number of 'temporary' pages capable of caching one-time-use
1778 * files when the VM system is otherwise full of pages
1779 * belonging to multi-time-use files or active program data.
51db7ca2
MD
1780 *
 1781	 * NOTE: The inactive target is aggressively pursued only if the
1782 * inactive queue becomes too small. If the inactive queue
1783 * is large enough to satisfy page movement to free+cache
1784 * then it is repopulated more slowly from the active queue.
e15708fc 1785 * This allows a general inactive_target default to be set.
51db7ca2
MD
1786 *
1787 * There is an issue here for processes which sit mostly idle
1788 * 'overnight', such as sshd, tcsh, and X. Any movement from
1789 * the active queue will eventually cause such pages to
 1790	 *	 recycle, eventually causing a lot of paging in the morning.
 1791	 *	 To reduce the incidence of this, pages cycled out of the
1792 * buffer cache are moved directly to the inactive queue if
e15708fc
MD
1793 * they were only used once or twice.
1794 *
1795 * The vfs.vm_cycle_point sysctl can be used to adjust this.
1796 * Increasing the value (up to 64) increases the number of
1797 * buffer recyclements which go directly to the inactive queue.
0e8bd897 1798 */
12e4aaff
MD
1799 if (vmstats.v_free_count > 2048) {
1800 vmstats.v_cache_min = vmstats.v_free_target;
1801 vmstats.v_cache_max = 2 * vmstats.v_cache_min;
984263bc 1802 } else {
12e4aaff
MD
1803 vmstats.v_cache_min = 0;
1804 vmstats.v_cache_max = 0;
984263bc 1805 }
e15708fc 1806 vmstats.v_inactive_target = vmstats.v_free_count / 4;
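	/*
	 * Editor's note -- hypothetical numbers: with the 23861-page free
	 * target above, v_cache_min = 23861 and v_cache_max = 47722; if
	 * roughly 900000 pages are still free at this point,
	 * v_inactive_target starts out at about 225000 pages.
	 */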
984263bc
MD
1807
1808 /* XXX does not really belong here */
1809 if (vm_page_max_wired == 0)
12e4aaff 1810 vm_page_max_wired = vmstats.v_free_count / 3;
984263bc
MD
1811
1812 if (vm_pageout_stats_max == 0)
12e4aaff 1813 vm_pageout_stats_max = vmstats.v_free_target;
984263bc
MD
1814
1815 /*
1816 * Set interval in seconds for stats scan.
1817 */
1818 if (vm_pageout_stats_interval == 0)
1819 vm_pageout_stats_interval = 5;
1820 if (vm_pageout_full_stats_interval == 0)
1821 vm_pageout_full_stats_interval = vm_pageout_stats_interval * 4;
1822
1823
1824 /*
1825 * Set maximum free per pass
1826 */
1827 if (vm_pageout_stats_free_max == 0)
1828 vm_pageout_stats_free_max = 5;
1829
1830 swap_pager_swap_init();
1831 pass = 0;
20479584 1832
984263bc
MD
1833 /*
1834 * The pageout daemon is never done, so loop forever.
1835 */
1836 while (TRUE) {
1837 int error;
027193eb
MD
1838 int delta1;
1839 int delta2;
51c99c61 1840 int avail_shortage;
027193eb 1841 int inactive_shortage;
027193eb
MD
1842 int vnodes_skipped = 0;
1843 int recycle_count = 0;
1844 int tmp;
984263bc 1845
12d8aca7 1846 /*
1bfac262
MD
1847 * Wait for an action request. If we timeout check to
1848 * see if paging is needed (in case the normal wakeup
1849 * code raced us).
12d8aca7 1850 */
20479584 1851 if (vm_pages_needed == 0) {
984263bc 1852 error = tsleep(&vm_pages_needed,
20479584
MD
1853 0, "psleep",
1854 vm_pageout_stats_interval * hz);
1bfac262
MD
1855 if (error &&
1856 vm_paging_needed() == 0 &&
1857 vm_pages_needed == 0) {
51c99c61 1858 for (q = 0; q < PQ_L2_SIZE; ++q)
027193eb 1859 vm_pageout_page_stats(q);
984263bc
MD
1860 continue;
1861 }
20479584 1862 vm_pages_needed = 1;
984263bc
MD
1863 }
1864
20479584 1865 mycpu->gd_cnt.v_pdwakeups++;
20479584
MD
1866
1867 /*
027193eb
MD
1868 * Do whatever cleanup that the pmap code can.
1869 */
1870 pmap_collect();
1871
1872 /*
12d8aca7
MD
1873 * Scan for pageout. Try to avoid thrashing the system
1874 * with activity.
027193eb
MD
1875 *
1876 * Calculate our target for the number of free+cache pages we
 1877		 * want to get to.  This is higher than the number that causes
1878 * allocations to stall (severe) in order to provide hysteresis,
1879 * and if we don't make it all the way but get to the minimum
51c99c61
MD
 1880		 * we're happy.  Goose it a bit if there are multiple
1881 * requests for memory.
027193eb 1882 */
51c99c61 1883 avail_shortage = vm_paging_target() + vm_pageout_deficit;
027193eb
MD
1884 vm_pageout_deficit = 0;
1885 delta1 = 0;
51c99c61
MD
1886 if (avail_shortage > 0) {
1887 for (q = 0; q < PQ_L2_SIZE; ++q) {
1888 delta1 += vm_pageout_scan_inactive(
1889 pass, q,
1890 PQAVERAGE(avail_shortage),
1891 &vnodes_skipped);
1892 }
1893 avail_shortage -= delta1;
027193eb
MD
1894 }
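		/*
		 * Editor's note: PQAVERAGE() is defined outside the excerpt
		 * shown here; the loop above presumably asks each of the
		 * PQ_L2_SIZE per-queue inactive scans for a roughly equal
		 * share of the shortage (e.g. a hypothetical shortage of
		 * 1024 pages is spread across the queues) rather than
		 * demanding the whole amount from the first queue.
		 */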
1895
1896 /*
1897 * Figure out how many active pages we must deactivate. If
1898 * we were able to reach our target with just the inactive
1899 * scan above we limit the number of active pages we
1900 * deactivate to reduce unnecessary work.
1901 */
51c99c61
MD
1902 inactive_shortage = vmstats.v_inactive_target -
1903 vmstats.v_inactive_count;
027193eb 1904
3038a8ca
MD
1905 /*
1906 * If we were unable to free sufficient inactive pages to
1907 * satisfy the free/cache queue requirements then simply
1908 * reaching the inactive target may not be good enough.
1909 * Try to deactivate pages in excess of the target based
1910 * on the shortfall.
1911 *
1912 * However to prevent thrashing the VM system do not
1913 * deactivate more than an additional 1/10 the inactive
1914 * target's worth of active pages.
1915 */
51c99c61
MD
1916 if (avail_shortage > 0) {
1917 tmp = avail_shortage * 2;
3038a8ca
MD
1918 if (tmp > vmstats.v_inactive_target / 10)
1919 tmp = vmstats.v_inactive_target / 10;
51c99c61 1920 inactive_shortage += tmp;
3038a8ca 1921 }
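		/*
		 * Editor's note -- hypothetical numbers: if the inactive
		 * scan left avail_shortage = 500, tmp = 1000; with
		 * v_inactive_target = 23000 the cap is 2300, so the full
		 * 1000 extra pages are added to inactive_shortage.
		 */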
027193eb 1922
51c99c61
MD
1923 if (avail_shortage > 0 || inactive_shortage > 0) {
1924 delta2 = 0;
1925 for (q = 0; q < PQ_L2_SIZE; ++q) {
1926 delta2 += vm_pageout_scan_active(
1927 pass, q,
1928 PQAVERAGE(avail_shortage),
1929 PQAVERAGE(inactive_shortage),
1930 &recycle_count);
1931 }
1932 inactive_shortage -= delta2;
1933 avail_shortage -= delta2;
027193eb
MD
1934 }
1935
1936 /*
1937 * Finally free enough cache pages to meet our free page
1938 * requirement and take more drastic measures if we are
1939 * still in trouble.
1940 */
51c99c61 1941 vm_pageout_scan_cache(avail_shortage, vnodes_skipped,
027193eb
MD
1942 recycle_count);
1943
1944 /*
1945 * Wait for more work.
20479584 1946 */
51c99c61 1947 if (avail_shortage > 0) {
20479584
MD
1948 ++pass;
1949 if (swap_pager_full) {
1950 /*
1951 * Running out of memory, catastrophic back-off
1952 * to one-second intervals.
1953 */
1954 tsleep(&vm_pages_needed, 0, "pdelay", hz);
1955 } else if (pass < 10 && vm_pages_needed > 1) {
1956 /*
1957 * Normal operation, additional processes
1958 * have already kicked us. Retry immediately.
1959 */
1960 } else if (pass < 10) {
1961 /*
1962 * Normal operation, fewer processes. Delay
1963 * a bit but allow wakeups.
1964 */
1965 vm_pages_needed = 0;
1966 tsleep(&vm_pages_needed, 0, "pdelay", hz / 10);
1967 vm_pages_needed = 1;
1968 } else {
1969 /*
1970 * We've taken too many passes, forced delay.
1971 */
1972 tsleep(&vm_pages_needed, 0, "pdelay", hz / 10);
1973 }
1974 } else {
12d8aca7
MD
1975 /*
1976 * Interlocked wakeup of waiters (non-optional)
1977 */
20479584 1978 pass = 0;
12d8aca7
MD
1979 if (vm_pages_needed && !vm_page_count_min(0)) {
1980 wakeup(&vmstats.v_free_count);
1981 vm_pages_needed = 0;
1982 }
20479584 1983 }
984263bc
MD
1984 }
1985}
1986
cd8ab232
MD
1987static struct kproc_desc page_kp = {
1988 "pagedaemon",
1989 vm_pageout_thread,
1990 &pagethread
1991};
1992SYSINIT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, kproc_start, &page_kp)
1993
1994
20479584
MD
1995/*
1996 * Called after allocating a page out of the cache or free queue
 1997 * to possibly wake the pagedaemon up to replenish our supply.
1998 *
1999 * We try to generate some hysteresis by waking the pagedaemon up
1bfac262
MD
2000 * when our free+cache pages go below the free_min+cache_min level.
2001 * The pagedaemon tries to get the count back up to at least the
2002 * minimum, and through to the target level if possible.
20479584
MD
2003 *
 2004 * If the pagedaemon is already active, bump vm_pages_needed as a hint
2005 * that there are even more requests pending.
99ad9bc4
MD
2006 *
2007 * SMP races ok?
2008 * No requirements.
20479584 2009 */
984263bc 2010void
57e43348 2011pagedaemon_wakeup(void)
984263bc 2012{
1bfac262 2013 if (vm_paging_needed() && curthread != pagethread) {
20479584 2014 if (vm_pages_needed == 0) {
1bfac262 2015 vm_pages_needed = 1; /* SMP race ok */
20479584
MD
2016 wakeup(&vm_pages_needed);
2017 } else if (vm_page_count_min(0)) {
1bfac262 2018 ++vm_pages_needed; /* SMP race ok */
20479584 2019 }
984263bc
MD
2020 }
2021}
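
/*
 * Editor's note: an illustrative, hypothetical call site (not part of
 * this file) matching the comment above -- an allocator path checks the
 * paging watermarks after taking a page off the free/cache queues:
 *
 *	m = ...;		(page taken from the free or cache queue)
 *	pagedaemon_wakeup();	(nudge the daemon if free+cache dropped
 *				 below the free_min+cache_min level)
 */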
2022
2023#if !defined(NO_SWAPPING)
99ad9bc4
MD
2024
2025/*
2026 * SMP races ok?
2027 * No requirements.
2028 */
984263bc 2029static void
57e43348 2030vm_req_vmdaemon(void)
984263bc
MD
2031{
2032 static int lastrun = 0;
2033
2034 if ((ticks > (lastrun + hz)) || (ticks < lastrun)) {
2035 wakeup(&vm_daemon_needed);
2036 lastrun = ticks;
2037 }
2038}
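
/*
 * Editor's note: the test above limits wakeups of vm_daemon to roughly
 * one per second (hz ticks since lastrun); the additional ticks < lastrun
 * check re-arms the timer if the signed ticks counter has wrapped.
 */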
2039
8fa76237
MD
2040static int vm_daemon_callback(struct proc *p, void *data __unused);
2041
99ad9bc4
MD
2042/*
2043 * No requirements.
2044 */
984263bc 2045static void
57e43348 2046vm_daemon(void)
984263bc 2047{
99ad9bc4 2048 /*
b12defdc 2049 * XXX vm_daemon_needed specific token?
99ad9bc4 2050 */
984263bc 2051 while (TRUE) {
377d4740 2052 tsleep(&vm_daemon_needed, 0, "psleep", 0);
984263bc
MD
2053 if (vm_pageout_req_swapout) {
2054 swapout_procs(vm_pageout_req_swapout);
2055 vm_pageout_req_swapout = 0;
2056 }
2057 /*
 2058		 * Scan the processes: deactivate pages belonging to any
 2059		 * process that exceeds its rlimits or is swapped out.
2060 */
8fa76237
MD
2061 allproc_scan(vm_daemon_callback, NULL);
2062 }
2063}
984263bc 2064
99ad9bc4 2065/*
b12defdc 2066 * Caller must hold proc_token.
99ad9bc4 2067 */
8fa76237
MD
2068static int
2069vm_daemon_callback(struct proc *p, void *data __unused)
2070{
2071 vm_pindex_t limit, size;
984263bc 2072
8fa76237
MD
2073 /*
2074 * if this is a system process or if we have already
2075 * looked at this process, skip it.
2076 */
4643740a 2077 if (p->p_flags & (P_SYSTEM | P_WEXIT))
8fa76237 2078 return (0);
984263bc 2079
8fa76237
MD
2080 /*
2081 * if the process is in a non-running type state,
2082 * don't touch it.
2083 */
164b8401 2084 if (p->p_stat != SACTIVE && p->p_stat != SSTOP)
8fa76237 2085 return (0);
984263bc 2086
8fa76237
MD
2087 /*
2088 * get a limit
2089 */
2090 limit = OFF_TO_IDX(qmin(p->p_rlimit[RLIMIT_RSS].rlim_cur,
2091 p->p_rlimit[RLIMIT_RSS].rlim_max));
2092
2093 /*
2094 * let processes that are swapped out really be
2095 * swapped out. Set the limit to nothing to get as
2096 * many pages out to swap as possible.
2097 */
4643740a 2098 if (p->p_flags & P_SWAPPEDOUT)
8fa76237
MD
2099 limit = 0;
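	/*
	 * Editor's note -- hypothetical numbers: with a 4K page size an
	 * RSS rlimit of 64MB becomes limit = OFF_TO_IDX(67108864) = 16384
	 * pages; a P_SWAPPEDOUT process gets limit = 0, so the
	 * deactivation below applies to its entire resident set.
	 */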
2100
b12defdc 2101 lwkt_gettoken(&p->p_vmspace->vm_map.token);
8fa76237
MD
2102 size = vmspace_resident_count(p->p_vmspace);
2103 if (limit >= 0 && size >= limit) {
b12defdc 2104 vm_pageout_map_deactivate_pages(&p->p_vmspace->vm_map, limit);
984263bc 2105 }
b12defdc 2106 lwkt_reltoken(&p->p_vmspace->vm_map.token);
8fa76237 2107 return (0);
984263bc 2108}
8fa76237 2109
984263bc 2110#endif