/*
 * Copyright (c) 1991 Regents of the University of California.
 * Copyright (c) 1994 John S. Dyson
 * Copyright (c) 1994 David Greenman
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)vm_pageout.c	7.4 (Berkeley) 5/7/91
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: src/sys/vm/vm_pageout.c,v 1.151.2.15 2002/12/29 18:21:04 dillon Exp $
 */
/*
 * The proverbial page-out daemon.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>

#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>

#include <sys/thread2.h>
#include <sys/spinlock2.h>
#include <vm/vm_page2.h>
/*
 * System initialization
 */

/* the kernel process "vm_pageout" */
static int vm_pageout_clean (vm_page_t);
static int vm_pageout_free_page_calc (vm_size_t count);
struct thread *pagethread;
#if !defined(NO_SWAPPING)
/* the kernel process "vm_daemon" */
static void vm_daemon (void);
static struct thread *vmthread;

static struct kproc_desc vm_kp = {
	"vmdaemon",
	vm_daemon,
	&vmthread
};
SYSINIT(vmdaemon, SI_SUB_KTHREAD_VM, SI_ORDER_FIRST, kproc_start, &vm_kp)
#endif
int vm_pages_needed = 0;	/* Event on which pageout daemon sleeps */
int vm_pageout_deficit = 0;	/* Estimated number of pages deficit */
int vm_pageout_pages_needed = 0; /* flag saying that the pageout daemon needs pages */
#if !defined(NO_SWAPPING)
static int vm_pageout_req_swapout;	/* XXX */
static int vm_daemon_needed;
#endif
static int vm_max_launder = 32;
static int vm_pageout_stats_max = 0, vm_pageout_stats_interval = 0;
static int vm_pageout_full_stats_interval = 0;
static int vm_pageout_stats_free_max = 0, vm_pageout_algorithm = 0;
static int defer_swap_pageouts = 0;
static int disable_swap_pageouts = 0;
#if defined(NO_SWAPPING)
static int vm_swap_enabled = 0;
static int vm_swap_idle_enabled = 0;
#else
static int vm_swap_enabled = 1;
static int vm_swap_idle_enabled = 0;
#endif
SYSCTL_INT(_vm, VM_PAGEOUT_ALGORITHM, pageout_algorithm,
	CTLFLAG_RW, &vm_pageout_algorithm, 0, "LRU page mgmt");

SYSCTL_INT(_vm, OID_AUTO, max_launder,
	CTLFLAG_RW, &vm_max_launder, 0, "Limit dirty flushes in pageout");

SYSCTL_INT(_vm, OID_AUTO, pageout_stats_max,
	CTLFLAG_RW, &vm_pageout_stats_max, 0, "Max pageout stats scan length");

SYSCTL_INT(_vm, OID_AUTO, pageout_full_stats_interval,
	CTLFLAG_RW, &vm_pageout_full_stats_interval, 0, "Interval for full stats scan");

SYSCTL_INT(_vm, OID_AUTO, pageout_stats_interval,
	CTLFLAG_RW, &vm_pageout_stats_interval, 0, "Interval for partial stats scan");

SYSCTL_INT(_vm, OID_AUTO, pageout_stats_free_max,
	CTLFLAG_RW, &vm_pageout_stats_free_max, 0, "Not implemented");
#if defined(NO_SWAPPING)
SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
	CTLFLAG_RD, &vm_swap_enabled, 0, "");
SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
	CTLFLAG_RD, &vm_swap_idle_enabled, 0, "");
#else
SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
	CTLFLAG_RW, &vm_swap_enabled, 0, "Enable entire process swapout");
SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
	CTLFLAG_RW, &vm_swap_idle_enabled, 0, "Allow swapout on idle criteria");
#endif
SYSCTL_INT(_vm, OID_AUTO, defer_swapspace_pageouts,
	CTLFLAG_RW, &defer_swap_pageouts, 0, "Give preference to dirty pages in mem");

SYSCTL_INT(_vm, OID_AUTO, disable_swapspace_pageouts,
	CTLFLAG_RW, &disable_swap_pageouts, 0, "Disallow swapout of dirty pages");

static int pageout_lock_miss;
SYSCTL_INT(_vm, OID_AUTO, pageout_lock_miss,
	CTLFLAG_RD, &pageout_lock_miss, 0, "vget() lock misses during pageout");
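/*
 * Example (added, assuming standard sysctl(8) behavior): the knobs
 * above surface under the "vm" sysctl tree and can be inspected or
 * tuned at runtime, e.g.:
 *
 *	sysctl vm.max_launder=64
 *	sysctl vm.defer_swapspace_pageouts=1
 */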
#define VM_PAGEOUT_PAGE_COUNT 16
int vm_pageout_page_count = VM_PAGEOUT_PAGE_COUNT;

int vm_page_max_wired;		/* XXX max # of wired pages system-wide */
#if !defined(NO_SWAPPING)
typedef void freeer_fcn_t (vm_map_t, vm_object_t, vm_pindex_t, int);
static void vm_pageout_map_deactivate_pages (vm_map_t, vm_pindex_t);
static freeer_fcn_t vm_pageout_object_deactivate_pages;
static void vm_req_vmdaemon (void);
#endif
static void vm_pageout_page_stats(int q);
static __inline int
PQAVERAGE(int n)
{
	if (n >= 0)
		return((n + (PQ_L2_SIZE - 1)) / PQ_L2_SIZE + 1);
	else
		return((n - (PQ_L2_SIZE - 1)) / PQ_L2_SIZE - 1);
}
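/*
 * Illustrative example (added): assuming PQ_L2_SIZE is 256, a global
 * shortage of 1000 pages gives PQAVERAGE(1000) = (1000 + 255) / 256 + 1
 * = 5, so each of the 256 page-color queues is asked to recover about
 * 5 pages.  The rounding deliberately over-shoots the total so the
 * per-queue scans always make forward progress.
 */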
/*
 * Clean the page and remove it from the laundry.  The page must not be
 * busy on entry.
 *
 * We set the busy bit to cause potential page faults on this page to
 * block.  Note the careful timing, however: the busy bit isn't set until
 * late and we cannot do anything that will mess with the page.
 */
static int
vm_pageout_clean(vm_page_t m)
{
	vm_object_t object;
	vm_page_t mc[2*vm_pageout_page_count];
	int pageout_count;
	int error;
	int ib, is, page_base;
	vm_pindex_t pindex = m->pindex;
	/*
	 * It doesn't cost us anything to pageout OBJT_DEFAULT or OBJT_SWAP
	 * with the new swapper, but we could have serious problems paging
	 * out other object types if there is insufficient memory.
	 *
	 * Unfortunately, checking free memory here is far too late, so the
	 * check has been moved up a procedural level.
	 */
	/*
	 * Don't mess with the page if it's busy, held, or special.
	 *
	 * XXX do we really need to check hold_count here?  hold_count
	 * isn't supposed to mess with vm_page ops except prevent the
	 * page from being reused.
	 */
	if (m->hold_count != 0 || (m->flags & PG_UNMANAGED)) {

	mc[vm_pageout_page_count] = m;
	pageout_count = 1;
	page_base = vm_pageout_page_count;
	/*
	 * Scan object for clusterable pages.
	 *
	 * We can cluster ONLY if: ->> the page is NOT
	 * clean, wired, busy, held, or mapped into a
	 * buffer, and one of the following:
	 * 1) The page is inactive, or a seldom used
	 *    active page.
	 * -or-
	 * 2) we force the issue.
	 *
	 * During heavy mmap/modification loads the pageout
	 * daemon can really fragment the underlying file
	 * due to flushing pages out of order and not trying
	 * to align the clusters (which leaves sporadic
	 * out-of-order holes).  To solve this problem we do
	 * the reverse scan first and attempt to align our
	 * cluster, then do a forward scan if room remains.
	 */
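	/*
	 * Worked example (added, assuming the default vm_pageout_page_count
	 * of 16): for a dirty page at pindex 37 the reverse scan below pulls
	 * in 36..32 and stops at the 16-page alignment boundary, after which
	 * the forward scan fills 38..47, so a fully-dirty region is flushed
	 * as a single aligned 32..47 cluster.
	 */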
	vm_object_hold(object);

	while (ib && pageout_count < vm_pageout_page_count) {
		vm_page_t p;

		p = vm_page_lookup_busy_try(object, pindex - ib, TRUE, &error);
		if (error || p == NULL) {

		if ((p->queue - p->pc) == PQ_CACHE ||
		    (p->flags & PG_UNMANAGED)) {

		vm_page_test_dirty(p);
		if (((p->dirty & p->valid) == 0 &&
		     (p->flags & PG_NEED_COMMIT) == 0) ||
		    p->queue - p->pc != PQ_INACTIVE ||
		    p->wire_count != 0 ||	/* may be held by buf cache */
		    p->hold_count != 0) {	/* may be undergoing I/O */

		mc[--page_base] = p;
		++pageout_count;
		++ib;

		/*
		 * alignment boundary, stop here and switch directions.  Do
		 * not clear ib.
		 */
		if ((pindex - (ib - 1)) % vm_pageout_page_count == 0)
			break;
	}

	while (pageout_count < vm_pageout_page_count &&
	       pindex + is < object->size) {
		vm_page_t p;

		p = vm_page_lookup_busy_try(object, pindex + is, TRUE, &error);
		if (error || p == NULL)
			break;
		if (((p->queue - p->pc) == PQ_CACHE) ||
		    (p->flags & (PG_BUSY|PG_UNMANAGED)) || p->busy) {

		vm_page_test_dirty(p);
		if (((p->dirty & p->valid) == 0 &&
		     (p->flags & PG_NEED_COMMIT) == 0) ||
		    p->queue - p->pc != PQ_INACTIVE ||
		    p->wire_count != 0 ||	/* may be held by buf cache */
		    p->hold_count != 0) {	/* may be undergoing I/O */

		mc[page_base + pageout_count] = p;
		++pageout_count;
		++is;
	}

	/*
	 * If we exhausted our forward scan, continue with the reverse scan
	 * when possible, even past a page boundary.  This catches boundary
	 * conditions.
	 */
	if (ib && pageout_count < vm_pageout_page_count)

	vm_object_drop(object);

	/*
	 * we allow reads during pageouts...
	 */
	return vm_pageout_flush(&mc[page_base], pageout_count, 0);
/*
 * vm_pageout_flush() - launder the given pages
 *
 * The given pages are laundered.  Note that we set up for the start of
 * I/O (i.e. busy the page), mark it read-only, and bump the object
 * reference count all in here rather than in the parent.  If we want
 * the parent to do more sophisticated things we may have to change
 * the ordering.
 *
 * The pages in the array must be busied by the caller and will be
 * unbusied by this function.
 */
int
vm_pageout_flush(vm_page_t *mc, int count, int flags)
{
	vm_object_t object;
	int pageout_status[count];
	int i;

	/*
	 * Initiate I/O.  Bump the vm_page_t->busy counter.
	 */
	for (i = 0; i < count; i++) {
		KASSERT(mc[i]->valid == VM_PAGE_BITS_ALL,
			("vm_pageout_flush page %p index %d/%d: partially "
			 "invalid page", mc[i], i, count));
		vm_page_io_start(mc[i]);
	}
	/*
	 * We must make the pages read-only.  This will also force the
	 * modified bit in the related pmaps to be cleared.  The pager
	 * cannot clear the bit for us since the I/O completion code
	 * typically runs from an interrupt.  The act of making the page
	 * read-only handles the case for us.
	 *
	 * Then we can unbusy the pages; we still hold a reference by virtue
	 * of our soft-busy.
	 */
	for (i = 0; i < count; i++) {
		vm_page_protect(mc[i], VM_PROT_READ);
		vm_page_wakeup(mc[i]);
	}
	object = mc[0]->object;
	vm_object_pip_add(object, count);

	vm_pager_put_pages(object, mc, count,
			   (flags | ((object == &kernel_object) ?
				     VM_PAGER_PUT_SYNC : 0)),
			   pageout_status);

	for (i = 0; i < count; i++) {
		vm_page_t mt = mc[i];

		switch (pageout_status[i]) {
			/*
			 * Page outside of range of object.  Right now we
			 * essentially lose the changes by pretending it
			 * worked.
			 */
			vm_page_busy_wait(mt, FALSE, "pgbad");
			pmap_clear_modify(mt);

			/*
			 * A page typically cannot be paged out when we
			 * have run out of swap.  We leave the page
			 * marked inactive and will try to page it out
			 * again later.
			 *
			 * Starvation of the active page list is used to
			 * determine when the system is massively memory
			 * starved.
			 */
		/*
		 * If the operation is still going, leave the page busy to
		 * block all other accesses.  Also, leave the paging in
		 * progress indicator set so that we don't attempt an object
		 * collapse.
		 *
		 * For any pages which have completed synchronously,
		 * deactivate the page if we are under a severe deficit.
		 * Do not try to enter them into the cache, though; they
		 * might still be read-heavy.
		 */
		if (pageout_status[i] != VM_PAGER_PEND) {
			vm_page_busy_wait(mt, FALSE, "pgouw");
			if (vm_page_count_severe())
				vm_page_deactivate(mt);
			if (!vm_page_count_severe() || !vm_page_try_to_cache(mt))
				vm_page_protect(mt, VM_PROT_READ);
			vm_page_io_finish(mt);
		}
	}
	vm_object_pip_wakeup(object);
#if !defined(NO_SWAPPING)
/*
 * deactivate enough pages to satisfy the inactive target
 * requirements or if vm_page_proc_limit is set, then
 * deactivate all of the pages in the object and its
 * backing_objects.
 *
 * The map must be locked.
 * The caller must hold the vm_object.
 */
static int vm_pageout_object_deactivate_pages_callback(vm_page_t, void *);

static void
vm_pageout_object_deactivate_pages(vm_map_t map, vm_object_t object,
				   vm_pindex_t desired, int map_remove_only)
{
	struct rb_vm_page_scan_info info;
	vm_object_t lobject;
	vm_object_t tobject;
	int remove_mode;

	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
	lobject = object;
	while (lobject != NULL) {
		if (pmap_resident_count(vm_map_pmap(map)) <= desired)
			break;
		if (lobject->type == OBJT_DEVICE || lobject->type == OBJT_PHYS)
			break;
		if (lobject->paging_in_progress)
			break;

		remove_mode = map_remove_only;
		if (lobject->shadow_count > 1)
			remove_mode = 1;

		/*
		 * scan the object's entire memory queue.  We hold the
		 * object's token so the scan should not race anything.
		 */
		info.limit = remove_mode;
		info.map = map;
		info.desired = desired;
		vm_page_rb_tree_RB_SCAN(&lobject->rb_memq, NULL,
				vm_pageout_object_deactivate_pages_callback,
				&info);

		while ((tobject = lobject->backing_object) != NULL) {
			KKASSERT(tobject != object);
			vm_object_hold(tobject);
			if (tobject == lobject->backing_object)
				break;
			vm_object_drop(tobject);
		}
		if (lobject != object) {
			vm_object_lock_swap();
			vm_object_drop(lobject);
		}
		lobject = tobject;
	}
	if (lobject != object)
		vm_object_drop(lobject);
}
/*
 * The caller must hold the vm_object.
 */
static int
vm_pageout_object_deactivate_pages_callback(vm_page_t p, void *data)
{
	struct rb_vm_page_scan_info *info = data;
	int actcount;

	if (pmap_resident_count(vm_map_pmap(info->map)) <= info->desired) {
		return(-1);
	}
	mycpu->gd_cnt.v_pdpages++;

	if (vm_page_busy_try(p, TRUE))
		return(0);
	if (p->wire_count || p->hold_count || (p->flags & PG_UNMANAGED)) {
		vm_page_wakeup(p);
		return(0);
	}
	if (!pmap_page_exists_quick(vm_map_pmap(info->map), p)) {
		vm_page_wakeup(p);
		return(0);
	}

	actcount = pmap_ts_referenced(p);
	if (actcount) {
		vm_page_flag_set(p, PG_REFERENCED);
	} else if (p->flags & PG_REFERENCED) {
		actcount = 1;
	}
	vm_page_and_queue_spin_lock(p);
	if (p->queue - p->pc != PQ_ACTIVE && (p->flags & PG_REFERENCED)) {
		vm_page_and_queue_spin_unlock(p);

		p->act_count += actcount;
		vm_page_flag_clear(p, PG_REFERENCED);
	} else if (p->queue - p->pc == PQ_ACTIVE) {
		if ((p->flags & PG_REFERENCED) == 0) {
			p->act_count -= min(p->act_count, ACT_DECLINE);
			if (!info->limit &&
			    (vm_pageout_algorithm || (p->act_count == 0))) {
				vm_page_and_queue_spin_unlock(p);
				vm_page_protect(p, VM_PROT_NONE);
				vm_page_deactivate(p);
			} else {
				TAILQ_REMOVE(&vm_page_queues[p->queue].pl,
					     p, pageq);
				TAILQ_INSERT_TAIL(&vm_page_queues[p->queue].pl,
						  p, pageq);
				vm_page_and_queue_spin_unlock(p);
			}
		} else {
			vm_page_and_queue_spin_unlock(p);
			vm_page_flag_clear(p, PG_REFERENCED);

			vm_page_and_queue_spin_lock(p);
			if (p->queue - p->pc == PQ_ACTIVE) {
				if (p->act_count < (ACT_MAX - ACT_ADVANCE))
					p->act_count += ACT_ADVANCE;
				TAILQ_REMOVE(&vm_page_queues[p->queue].pl,
					     p, pageq);
				TAILQ_INSERT_TAIL(&vm_page_queues[p->queue].pl,
						  p, pageq);
			}
			vm_page_and_queue_spin_unlock(p);
		}
	} else if (p->queue - p->pc == PQ_INACTIVE) {
		vm_page_and_queue_spin_unlock(p);
		vm_page_protect(p, VM_PROT_NONE);
	} else {
		vm_page_and_queue_spin_unlock(p);
	}
	vm_page_wakeup(p);
	return(0);
}
/*
 * Deactivate some number of pages in a map; try to do it fairly, but
 * that is really hard to do.
 */
static void
vm_pageout_map_deactivate_pages(vm_map_t map, vm_pindex_t desired)
{
	vm_map_entry_t tmpe;
	vm_object_t obj, bigobj;
	int nothingwired;

	if (lockmgr(&map->lock, LK_EXCLUSIVE | LK_NOWAIT)) {
		return;
	}

	bigobj = NULL;
	nothingwired = TRUE;

	/*
	 * first, search out the biggest object, and try to free pages from
	 * that.
	 */
	tmpe = map->header.next;
	while (tmpe != &map->header) {
		switch(tmpe->maptype) {
		case VM_MAPTYPE_NORMAL:
		case VM_MAPTYPE_VPAGETABLE:
			obj = tmpe->object.vm_object;
			if ((obj != NULL) && (obj->shadow_count <= 1) &&
			    ((bigobj == NULL) ||
			     (bigobj->resident_page_count <
			      obj->resident_page_count))) {
				bigobj = obj;
			}
			break;
		}
		if (tmpe->wired_count > 0)
			nothingwired = FALSE;
		tmpe = tmpe->next;
	}

	if (bigobj) {
		vm_object_hold(bigobj);
		vm_pageout_object_deactivate_pages(map, bigobj, desired, 0);
		vm_object_drop(bigobj);
	}
	/*
	 * Next, hunt around for other pages to deactivate.  We actually
	 * do this search sort of wrong -- .text first is not the best idea.
	 */
	tmpe = map->header.next;
	while (tmpe != &map->header) {
		if (pmap_resident_count(vm_map_pmap(map)) <= desired)
			break;
		switch(tmpe->maptype) {
		case VM_MAPTYPE_NORMAL:
		case VM_MAPTYPE_VPAGETABLE:
			obj = tmpe->object.vm_object;
			if (obj) {
				vm_object_hold(obj);
				vm_pageout_object_deactivate_pages(map, obj, desired, 0);
				vm_object_drop(obj);
			}
			break;
		}
		tmpe = tmpe->next;
	}

	/*
	 * Remove all mappings if a process is swapped out; this will free page
	 * table pages.
	 */
	if (desired == 0 && nothingwired) {
		pmap_remove(vm_map_pmap(map),
			    VM_MIN_USER_ADDRESS, VM_MAX_USER_ADDRESS);
	}
	vm_map_unlock(map);
}
#endif
/*
 * Called when the pageout scan wants to free a page.  We no longer
 * try to cycle the vm_object here with a reference & dealloc, which can
 * cause a non-trivial object collapse in a critical path.
 *
 * It is unclear why we cycled the ref_count in the past, perhaps to try
 * to optimize shadow chain collapses but I don't quite see why it would
 * be necessary.  An OBJ_DEAD object should terminate any and all vm_pages
 * synchronously and not have to be kick-started.
 */
static void
vm_pageout_page_free(vm_page_t m)
{
	vm_page_protect(m, VM_PROT_NONE);
	vm_page_free(m);
}
/*
 * vm_pageout_scan does the dirty work for the pageout daemon.
 */
struct vm_pageout_scan_info {
	struct proc *bigproc;
	vm_offset_t bigsize;
};

static int vm_pageout_scan_callback(struct proc *p, void *data);
static int
vm_pageout_scan_inactive(int pass, int q, int avail_shortage,
			 int *vnodes_skippedp)
{
	vm_page_t m;
	struct vm_page marker;
	struct vnode *vpfailed;		/* warning, allowed to be stale */
	int maxscan;
	int delta = 0;
	int maxlaunder;
	int actcount;
	/*
	 * Start scanning the inactive queue for pages we can move to the
	 * cache or free.  The scan will stop when the target is reached or
	 * we have scanned the entire inactive queue.  Note that m->act_count
	 * is not used to form decisions for the inactive queue, only for the
	 * active queue.
	 *
	 * maxlaunder limits the number of dirty pages we flush per scan.
	 * For most systems a smaller value (16 or 32) is more robust under
	 * extreme memory and disk pressure because any unnecessary writes
	 * to disk can result in extreme performance degradation.  However,
	 * systems with excessive dirty pages (especially when MAP_NOSYNC is
	 * used) will die horribly with limited laundering.  If the pageout
	 * daemon cannot clean enough pages in the first pass, we let it go
	 * all out in succeeding passes.
	 */
	if ((maxlaunder = vm_max_launder) <= 1)
		maxlaunder = 1;
	/*
	 * Initialize our marker
	 */
	bzero(&marker, sizeof(marker));
	marker.flags = PG_BUSY | PG_FICTITIOUS | PG_MARKER;
	marker.queue = PQ_INACTIVE + q;
	marker.pc = q;
	marker.wire_count = 1;

	/*
	 * Inactive queue scan.
	 *
	 * NOTE: The vm_page must be spinlocked before the queue to avoid
	 *	 deadlocks, so it is easiest to simply iterate the loop
	 *	 with the queue unlocked at the top.
	 */
	vpfailed = NULL;

	vm_page_queues_spin_lock(PQ_INACTIVE + q);
	TAILQ_INSERT_HEAD(&vm_page_queues[PQ_INACTIVE + q].pl, &marker, pageq);
	maxscan = vm_page_queues[PQ_INACTIVE + q].lcnt;
	vm_page_queues_spin_unlock(PQ_INACTIVE + q);
	while ((m = TAILQ_NEXT(&marker, pageq)) != NULL &&
	       maxscan-- > 0 && avail_shortage - delta > 0)
	{
		vm_page_and_queue_spin_lock(m);
		if (m != TAILQ_NEXT(&marker, pageq)) {
			vm_page_and_queue_spin_unlock(m);
			++maxscan;
			continue;
		}
		KKASSERT(m->queue - m->pc == PQ_INACTIVE);
		TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE + q].pl,
			     &marker, pageq);
		TAILQ_INSERT_AFTER(&vm_page_queues[PQ_INACTIVE + q].pl, m,
				   &marker, pageq);
		mycpu->gd_cnt.v_pdpages++;

		/*
		 * Skip marker pages
		 */
		if (m->flags & PG_MARKER) {
			vm_page_and_queue_spin_unlock(m);
			continue;
		}

		/*
		 * Try to busy the page.  Don't mess with pages which are
		 * already busy or reorder them in the queue.
		 */
		if (vm_page_busy_try(m, TRUE)) {
			vm_page_and_queue_spin_unlock(m);
			continue;
		}
		vm_page_and_queue_spin_unlock(m);
		KKASSERT(m->queue - m->pc == PQ_INACTIVE);
		/*
		 * The page has been successfully busied and is now no
		 * longer spinlocked.  The queue is no longer spinlocked
		 * either.
		 *
		 * It is possible for a page to be busied ad-hoc (e.g. the
		 * pmap_collect() code) and wired and race against the
		 * allocation of a new page.  vm_page_alloc() may be forced
		 * to deactivate the wired page in which case it winds up
		 * on the inactive queue and must be handled here.  We
		 * correct the problem simply by unqueuing the page.
		 */
		if (m->wire_count) {
			vm_page_unqueue_nowakeup(m);
			vm_page_wakeup(m);
			kprintf("WARNING: pagedaemon: wired page on "
				"inactive queue %p\n", m);
			continue;
		}
		/*
		 * A held page may be undergoing I/O, so skip it.
		 */
		if (m->hold_count) {
			vm_page_and_queue_spin_lock(m);
			if (m->queue - m->pc == PQ_INACTIVE) {
				TAILQ_REMOVE(
					&vm_page_queues[PQ_INACTIVE + q].pl,
					m, pageq);
				TAILQ_INSERT_TAIL(
					&vm_page_queues[PQ_INACTIVE + q].pl,
					m, pageq);
				++vm_swapcache_inactive_heuristic;
			}
			vm_page_and_queue_spin_unlock(m);
			vm_page_wakeup(m);
			continue;
		}
		if (m->object->ref_count == 0) {
			/*
			 * If the object is not being used, we ignore previous
			 * references.
			 */
			vm_page_flag_clear(m, PG_REFERENCED);
			pmap_clear_reference(m);
			/* fall through to end */
		} else if (((m->flags & PG_REFERENCED) == 0) &&
			   (actcount = pmap_ts_referenced(m))) {
			/*
			 * Otherwise, if the page has been referenced while
			 * in the inactive queue, we bump the "activation
			 * count" upwards, making it less likely that the
			 * page will be added back to the inactive queue
			 * prematurely again.  Here we check the page tables
			 * (or emulated bits, if any), given the upper level
			 * VM system not knowing anything about existing
			 * references.
			 */
			vm_page_activate(m);
			m->act_count += (actcount + ACT_ADVANCE);
			vm_page_wakeup(m);
			continue;
		}
		/*
		 * (m) is still busied.
		 *
		 * If the upper level VM system knows about any page
		 * references, we activate the page.  We also set the
		 * "activation count" higher than normal so that we are less
		 * likely to place pages back onto the inactive queue again.
		 */
		if ((m->flags & PG_REFERENCED) != 0) {
			vm_page_flag_clear(m, PG_REFERENCED);
			actcount = pmap_ts_referenced(m);
			vm_page_activate(m);
			m->act_count += (actcount + ACT_ADVANCE + 1);
			vm_page_wakeup(m);
			continue;
		}
		/*
		 * If the upper level VM system doesn't know anything about
		 * the page being dirty, we have to check for it again.  As
		 * far as the VM code knows, any partially dirty pages are
		 * fully dirty.
		 *
		 * Pages marked PG_WRITEABLE may be mapped into the user
		 * address space of a process running on another cpu.  A
		 * user process (without holding the MP lock) running on
		 * another cpu may be able to touch the page while we are
		 * trying to remove it.  vm_page_cache() will handle this
		 * case for us.
		 */
		vm_page_test_dirty(m);
		if (m->valid == 0 && (m->flags & PG_NEED_COMMIT) == 0) {
			/*
			 * Invalid pages can be easily freed
			 */
			vm_pageout_page_free(m);
			mycpu->gd_cnt.v_dfree++;
			++delta;
		} else if (m->dirty == 0 && (m->flags & PG_NEED_COMMIT) == 0) {
			/*
			 * Clean pages can be placed onto the cache queue.
			 * This effectively frees them.
			 */
			vm_page_cache(m);
			++delta;
		} else if ((m->flags & PG_WINATCFLS) == 0 && pass == 0) {
			/*
			 * Dirty pages need to be paged out, but flushing
			 * a page is extremely expensive versus freeing
			 * a clean page.  Rather than artificially limiting
			 * the number of pages we can flush, we instead give
			 * dirty pages extra priority on the inactive queue
			 * by forcing them to be cycled through the queue
			 * twice before being flushed, after which the
			 * (now clean) page will cycle through once more
			 * before being freed.  This significantly extends
			 * the thrash point for a heavily loaded machine.
			 */
			vm_page_flag_set(m, PG_WINATCFLS);
			vm_page_and_queue_spin_lock(m);
			if (m->queue - m->pc == PQ_INACTIVE) {
				TAILQ_REMOVE(
					&vm_page_queues[PQ_INACTIVE + q].pl,
					m, pageq);
				TAILQ_INSERT_TAIL(
					&vm_page_queues[PQ_INACTIVE + q].pl,
					m, pageq);
				++vm_swapcache_inactive_heuristic;
			}
			vm_page_and_queue_spin_unlock(m);
			vm_page_wakeup(m);
		} else if (maxlaunder > 0) {
			/*
			 * We always want to try to flush some dirty pages if
			 * we encounter them, to keep the system stable.
			 * Normally this number is small, but under extreme
			 * pressure where there are insufficient clean pages
			 * on the inactive queue, we may have to go all out.
			 */
			int swap_pageouts_ok;
			struct vnode *vp = NULL;
			vm_object_t object = m->object;

			if ((object->type != OBJT_SWAP) &&
			    (object->type != OBJT_DEFAULT)) {
				swap_pageouts_ok = 1;
			} else {
				swap_pageouts_ok =
					!(defer_swap_pageouts ||
					  disable_swap_pageouts);
				swap_pageouts_ok |=
					(!disable_swap_pageouts &&
					 defer_swap_pageouts &&
					 vm_page_count_min(0));
			}
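			/*
			 * Added note: the two statements above reduce to the
			 * following for swap-backed objects: pageouts proceed
			 * when neither defer nor disable is set; with only
			 * "defer" set they are allowed again once the system
			 * hits its minimum free threshold
			 * (vm_page_count_min(0)); with "disable" set they
			 * never proceed.
			 */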
			/*
			 * We don't bother paging objects that are "dead".
			 * Those objects are in a "rundown" state.
			 */
			if (!swap_pageouts_ok || (object->flags & OBJ_DEAD)) {
				vm_page_and_queue_spin_lock(m);
				if (m->queue - m->pc == PQ_INACTIVE) {
					TAILQ_REMOVE(
					    &vm_page_queues[PQ_INACTIVE + q].pl,
					    m, pageq);
					TAILQ_INSERT_TAIL(
					    &vm_page_queues[PQ_INACTIVE + q].pl,
					    m, pageq);
					++vm_swapcache_inactive_heuristic;
				}
				vm_page_and_queue_spin_unlock(m);
				vm_page_wakeup(m);
				continue;
			}
			/*
			 * (m) is still busied.
			 *
			 * The object is already known NOT to be dead.  It
			 * is possible for the vget() to block the whole
			 * pageout daemon, but the new low-memory handling
			 * code should prevent it.
			 *
			 * The previous code skipped locked vnodes and, worse,
			 * reordered pages in the queue.  This results in
			 * completely non-deterministic operation because,
			 * quite often, a vm_fault has initiated an I/O and
			 * is holding a locked vnode at just the point where
			 * the pageout daemon is woken up.
			 *
			 * We can't wait forever for the vnode lock, we might
			 * deadlock due to a vn_read() getting stuck in
			 * vm_wait while holding this vnode.  We skip the
			 * vnode if we can't get it in a reasonable amount
			 * of time.
			 *
			 * vpfailed is used to (try to) avoid the case where
			 * a large number of pages are associated with a
			 * locked vnode, which could cause the pageout daemon
			 * to stall for an excessive amount of time.
			 */
			if (object->type == OBJT_VNODE) {
				int flags;

				vp = object->handle;
				flags = LK_EXCLUSIVE | LK_NOOBJ;
				if (vp == vpfailed)
					flags |= LK_NOWAIT;
				else
					flags |= LK_TIMELOCK;
				vm_page_hold(m);
				vm_page_wakeup(m);

				/*
				 * We have unbusied (m) temporarily so we can
				 * acquire the vp lock without deadlocking.
				 * (m) is held to prevent destruction.
				 */
				if (vget(vp, flags) != 0) {
					vpfailed = vp;
					++pageout_lock_miss;
					if (object->flags & OBJ_MIGHTBEDIRTY)
						++*vnodes_skippedp;
					vm_page_unhold(m);
					continue;
				}
				/*
				 * The page might have been moved to another
				 * queue during potential blocking in vget()
				 * above.  The page might have been freed and
				 * reused for another vnode.  The object might
				 * have been reused for another vnode.
				 */
				if (m->queue - m->pc != PQ_INACTIVE ||
				    m->object != object ||
				    object->handle != vp) {
					if (object->flags & OBJ_MIGHTBEDIRTY)
						++*vnodes_skippedp;
					vput(vp);
					vm_page_unhold(m);
					continue;
				}

				/*
				 * The page may have been busied during the
				 * blocking in vput(); we don't move the
				 * page back onto the end of the queue so that
				 * statistics are more correct if we don't.
				 */
				if (vm_page_busy_try(m, TRUE)) {
					vput(vp);
					vm_page_unhold(m);
					continue;
				}
				vm_page_unhold(m);

				/*
				 * (m) is busied again
				 *
				 * We own the busy bit and remove our hold
				 * bit.  If the page is still held it
				 * might be undergoing I/O, so skip it.
				 */
				if (m->hold_count) {
					vm_page_and_queue_spin_lock(m);
					if (m->queue - m->pc == PQ_INACTIVE) {
						TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE + q].pl, m, pageq);
						TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE + q].pl, m, pageq);
						++vm_swapcache_inactive_heuristic;
					}
					vm_page_and_queue_spin_unlock(m);
					if (object->flags & OBJ_MIGHTBEDIRTY)
						++*vnodes_skippedp;
					vm_page_wakeup(m);
					vput(vp);
					continue;
				}
				/* (m) is left busied as we fall through */
			}
			/*
			 * The page is busy and not held here.
			 *
			 * If a page is dirty, then it is either being washed
			 * (but not yet cleaned) or it is still in the
			 * laundry.  If it is still in the laundry, then we
			 * start the cleaning operation.
			 *
			 * decrement inactive_shortage on success to account
			 * for the (future) cleaned page.  Otherwise we
			 * could wind up laundering or cleaning too many
			 * pages.
			 */
			if (vm_pageout_clean(m) != 0) {
				++delta;
				--maxlaunder;
			}
			/* clean ate busy, page no longer accessible */
			if (vp != NULL)
				vput(vp);
		}
	}

	/*
	 * Clean out our local marker.
	 */
	vm_page_queues_spin_lock(PQ_INACTIVE + q);
	TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE + q].pl, &marker, pageq);
	vm_page_queues_spin_unlock(PQ_INACTIVE + q);

	return (delta);
}
static int
vm_pageout_scan_active(int pass, int q,
		       int avail_shortage, int inactive_shortage,
		       int *recycle_countp)
{
	struct vm_page marker;
	vm_page_t m;
	int actcount;
	int delta = 0;
	int maxscan;

	/*
	 * We want to move pages from the active queue to the inactive
	 * queue to get the inactive queue to the inactive target.  If
	 * we still have a page shortage from above we try to directly free
	 * clean pages instead of moving them.
	 *
	 * If we do still have a shortage we keep track of the number of
	 * pages we free or cache (recycle_count) as a measure of thrashing
	 * between the active and inactive queues.
	 *
	 * If we were able to completely satisfy the free+cache targets
	 * from the inactive pool we limit the number of pages we move
	 * from the active pool to the inactive pool to 2x the pages we
	 * had removed from the inactive pool (with a minimum of 1/5 the
	 * inactive target).  If we were not able to completely satisfy
	 * the free+cache targets we go for the whole target aggressively.
	 *
	 * NOTE: Both variables can end up negative.
	 * NOTE: We are still in a critical section.
	 */

	bzero(&marker, sizeof(marker));
	marker.flags = PG_BUSY | PG_FICTITIOUS | PG_MARKER;
	marker.queue = PQ_ACTIVE + q;
	marker.pc = q;
	marker.wire_count = 1;
	vm_page_queues_spin_lock(PQ_ACTIVE + q);
	TAILQ_INSERT_HEAD(&vm_page_queues[PQ_ACTIVE + q].pl, &marker, pageq);
	maxscan = vm_page_queues[PQ_ACTIVE + q].lcnt;
	vm_page_queues_spin_unlock(PQ_ACTIVE + q);

	while ((m = TAILQ_NEXT(&marker, pageq)) != NULL &&
	       maxscan-- > 0 && (avail_shortage - delta > 0 ||
				 inactive_shortage > 0))
	{
		vm_page_and_queue_spin_lock(m);
		if (m != TAILQ_NEXT(&marker, pageq)) {
			vm_page_and_queue_spin_unlock(m);
			++maxscan;
			continue;
		}
		KKASSERT(m->queue - m->pc == PQ_ACTIVE);
		TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE + q].pl,
			     &marker, pageq);
		TAILQ_INSERT_AFTER(&vm_page_queues[PQ_ACTIVE + q].pl, m,
				   &marker, pageq);

		/*
		 * Skip marker pages
		 */
		if (m->flags & PG_MARKER) {
			vm_page_and_queue_spin_unlock(m);
			continue;
		}

		/*
		 * Try to busy the page.  Don't mess with pages which are
		 * already busy or reorder them in the queue.
		 */
		if (vm_page_busy_try(m, TRUE)) {
			vm_page_and_queue_spin_unlock(m);
			continue;
		}

		/*
		 * Don't deactivate pages that are held, even if we can
		 * busy them.  (XXX why not?)
		 */
		if (m->hold_count != 0) {
			TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE + q].pl,
				     m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE + q].pl,
					  m, pageq);
			vm_page_and_queue_spin_unlock(m);
			vm_page_wakeup(m);
			continue;
		}
		vm_page_and_queue_spin_unlock(m);
		/*
		 * The page has been successfully busied and the page and
		 * queue are no longer locked.
		 */

		/*
		 * The count for pagedaemon pages is done after checking the
		 * page for eligibility...
		 */
		mycpu->gd_cnt.v_pdpages++;

		/*
		 * Check to see "how much" the page has been used and clear
		 * the tracking access bits.  If the object has no references
		 * don't bother paying the expense.
		 */
		actcount = 0;
		if (m->object->ref_count != 0) {
			if (m->flags & PG_REFERENCED)
				++actcount;
			actcount += pmap_ts_referenced(m);
			if (actcount) {
				m->act_count += ACT_ADVANCE + actcount;
				if (m->act_count > ACT_MAX)
					m->act_count = ACT_MAX;
			}
		}
		vm_page_flag_clear(m, PG_REFERENCED);
		/*
		 * actcount is only valid if the object ref_count is non-zero.
		 */
		if (actcount && m->object->ref_count != 0) {
			vm_page_and_queue_spin_lock(m);
			if (m->queue - m->pc == PQ_ACTIVE) {
				TAILQ_REMOVE(
					&vm_page_queues[PQ_ACTIVE + q].pl,
					m, pageq);
				TAILQ_INSERT_TAIL(
					&vm_page_queues[PQ_ACTIVE + q].pl,
					m, pageq);
			}
			vm_page_and_queue_spin_unlock(m);
			vm_page_wakeup(m);
			continue;
		}

		m->act_count -= min(m->act_count, ACT_DECLINE);
		if (vm_pageout_algorithm ||
		    m->object->ref_count == 0 ||
		    m->act_count < pass + 1
		) {
			/*
			 * Deactivate the page.  If we had a
			 * shortage from our inactive scan try to
			 * free (cache) the page instead.
			 *
			 * Don't just blindly cache the page if
			 * we do not have a shortage from the
			 * inactive scan, that could lead to
			 * gigabytes being moved.
			 */
			--inactive_shortage;
			if (avail_shortage - delta > 0 ||
			    m->object->ref_count == 0) {
				if (avail_shortage - delta > 0)
					++*recycle_countp;
				vm_page_protect(m, VM_PROT_NONE);
				if (m->dirty == 0 &&
				    (m->flags & PG_NEED_COMMIT) == 0 &&
				    avail_shortage - delta > 0) {
					vm_page_cache(m);
				} else {
					vm_page_deactivate(m);
					vm_page_wakeup(m);
				}
				++delta;
			} else {
				vm_page_deactivate(m);
				vm_page_wakeup(m);
			}
		} else {
			vm_page_and_queue_spin_lock(m);
			if (m->queue - m->pc == PQ_ACTIVE) {
				TAILQ_REMOVE(
					&vm_page_queues[PQ_ACTIVE + q].pl,
					m, pageq);
				TAILQ_INSERT_TAIL(
					&vm_page_queues[PQ_ACTIVE + q].pl,
					m, pageq);
			}
			vm_page_and_queue_spin_unlock(m);
			vm_page_wakeup(m);
		}
	}

	/*
	 * Clean out our local marker.
	 */
	vm_page_queues_spin_lock(PQ_ACTIVE + q);
	TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE + q].pl, &marker, pageq);
	vm_page_queues_spin_unlock(PQ_ACTIVE + q);

	return (delta);
}
/*
 * The number of actually free pages can drop down to v_free_reserved;
 * we try to build the free count back above v_free_min.  Note that
 * vm_paging_needed() also returns TRUE if v_free_count is not at
 * least v_free_min so that is the minimum we must build the free
 * count to.
 *
 * We use a slightly higher target to improve hysteresis,
 * ((v_free_target + v_free_min) / 2).  Since v_free_target
 * is usually the same as v_cache_min this maintains about
 * half the pages in the free queue as are in the cache queue,
 * providing pretty good pipelining for pageout operation.
 *
 * The system operator can manipulate vm.v_cache_min and
 * vm.v_free_target to tune the pageout daemon.  Be sure
 * to keep vm.v_free_min < vm.v_free_target.
 *
 * Note that the original paging target is to get at least
 * (free_min + cache_min) into (free + cache).  The slightly
 * higher target will shift additional pages from cache to free
 * without affecting the original paging target in order to
 * maintain better hysteresis and not have the free count always
 * be dead-on v_free_min.
 *
 * NOTE: we are still in a critical section.
 *
 * Pages moved from PQ_CACHE to totally free are not counted in the
 * pages_freed counter.
 */
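/*
 * Worked example (added, using the hypothetical 1GB/4K-page machine from
 * the example after vm_pageout_free_page_calc() below): with v_free_min =
 * 1369 and v_free_target = 6167, the loop below rebuilds the free count
 * up to (6167 + 1369) / 2 = 3768 pages before backing off.
 */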
static void
vm_pageout_scan_cache(int avail_shortage, int vnodes_skipped, int recycle_count)
{
	struct vm_pageout_scan_info info;
	vm_page_t m;

	while (vmstats.v_free_count <
	       (vmstats.v_free_min + vmstats.v_free_target) / 2) {
		/*
		 * This steals some code from vm/vm_page.c
		 */
		static int cache_rover = 0;

		m = vm_page_list_find(PQ_CACHE, cache_rover & PQ_L2_MASK, FALSE);
		if (m == NULL)
			break;
		/* page is returned removed from its queue and spinlocked */
		if (vm_page_busy_try(m, TRUE)) {
			vm_page_deactivate_locked(m);
			vm_page_spin_unlock(m);
			kprintf("Warning: busy page %p found in cache\n", m);
			continue;
		}
		vm_page_spin_unlock(m);
		pagedaemon_wakeup();

		/*
		 * Page has been successfully busied and it and its queue
		 * are no longer spinlocked.
		 */
		if ((m->flags & (PG_UNMANAGED | PG_NEED_COMMIT)) ||
		    m->hold_count ||
		    m->wire_count) {
			vm_page_deactivate(m);
			vm_page_wakeup(m);
			continue;
		}
		KKASSERT((m->flags & PG_MAPPED) == 0);
		KKASSERT(m->dirty == 0);
		cache_rover += PQ_PRIME2;
		vm_pageout_page_free(m);
		mycpu->gd_cnt.v_dfree++;
	}
#if !defined(NO_SWAPPING)
	/*
	 * Idle process swapout -- run once per second.
	 */
	if (vm_swap_idle_enabled) {
		static long lsec;

		if (time_second != lsec) {
			vm_pageout_req_swapout |= VM_SWAP_IDLE;
			vm_req_vmdaemon();
			lsec = time_second;
		}
	}
#endif

	/*
	 * If we didn't get enough free pages, and we have skipped a vnode
	 * in a writeable object, wakeup the sync daemon.  And kick swapout
	 * if we did not get enough free pages.
	 */
	if (vm_paging_target() > 0) {
		if (vnodes_skipped && vm_page_count_min(0))
			speedup_syncer();
#if !defined(NO_SWAPPING)
		if (vm_swap_enabled && vm_page_count_target()) {
			vm_req_vmdaemon();
			vm_pageout_req_swapout |= VM_SWAP_NORMAL;
		}
#endif
	}
	/*
	 * Handle catastrophic conditions.  Under good conditions we should
	 * be at the target, well beyond our minimum.  If we could not even
	 * reach our minimum the system is under heavy stress.
	 *
	 * Determine whether we have run out of memory.  This occurs when
	 * swap_pager_full is TRUE and the only pages left in the page
	 * queues are dirty.  We will still likely have page shortages.
	 *
	 * - swap_pager_full is set if insufficient swap was
	 *   available to satisfy a requested pageout.
	 *
	 * - the inactive queue is bloated (4 x size of active queue),
	 *   meaning it is unable to get rid of dirty pages.
	 *
	 * - vm_page_count_min() without counting pages recycled from the
	 *   active queue (recycle_count) means we could not recover
	 *   enough pages to meet bare minimum needs.  This test only
	 *   works if the inactive queue is bloated.
	 *
	 * - due to a positive avail_shortage we shifted the remaining
	 *   dirty pages from the active queue to the inactive queue
	 *   trying to find clean ones to free.
	 */
	if (swap_pager_full && vm_page_count_min(recycle_count))
		kprintf("Warning: system low on memory+swap!\n");
	if (swap_pager_full && vm_page_count_min(recycle_count) &&
	    vmstats.v_inactive_count > vmstats.v_active_count * 4 &&
	    avail_shortage > 0) {
		info.bigproc = NULL;
		info.bigsize = 0;
		allproc_scan(vm_pageout_scan_callback, &info);
		if (info.bigproc != NULL) {
			killproc(info.bigproc, "out of swap space");
			info.bigproc->p_nice = PRIO_MIN;
			info.bigproc->p_usched->resetpriority(
				FIRST_LWP_IN_PROC(info.bigproc));
			wakeup(&vmstats.v_free_count);
			PRELE(info.bigproc);
		}
	}
}
/*
 * The caller must hold proc_token.
 */
static int
vm_pageout_scan_callback(struct proc *p, void *data)
{
	struct vm_pageout_scan_info *info = data;
	vm_offset_t size;

	/*
	 * Never kill system processes or init.  If we have configured swap
	 * then try to avoid killing low-numbered pids.
	 */
	if ((p->p_flags & P_SYSTEM) || (p->p_pid == 1) ||
	    ((p->p_pid < 48) && (vm_swap_size != 0))) {
		return (0);
	}

	/*
	 * if the process is in a non-running type state,
	 * don't touch it.
	 */
	if (p->p_stat != SACTIVE && p->p_stat != SSTOP)
		return (0);

	/*
	 * Get the approximate process size.  Note that anonymous pages
	 * with backing swap will be counted twice, but there should not
	 * be too many such pages due to the stress the VM system is
	 * under at this point.
	 */
	size = vmspace_anonymous_count(p->p_vmspace) +
	       vmspace_swap_count(p->p_vmspace);

	/*
	 * If this process is bigger than the biggest one,
	 * remember it.
	 */
	if (info->bigsize < size) {
		if (info->bigproc)
			PRELE(info->bigproc);
		PHOLD(p);
		info->bigproc = p;
		info->bigsize = size;
	}
	return(0);
}
/*
 * This routine tries to maintain the pseudo LRU active queue,
 * so that during long periods of time where there is no paging,
 * some statistic accumulation still occurs.  This code
 * helps the situation where paging just starts to occur.
 */
static void
vm_pageout_page_stats(int q)
{
	static int fullintervalcount = 0;
	struct vm_page marker;
	vm_page_t m;
	int pcount, tpcount;		/* Number of pages to check */
	int page_shortage;

	page_shortage = (vmstats.v_inactive_target + vmstats.v_cache_max +
			 vmstats.v_free_min) -
			(vmstats.v_free_count + vmstats.v_inactive_count +
			 vmstats.v_cache_count);

	if (page_shortage <= 0)
		return;

	pcount = vm_page_queues[PQ_ACTIVE + q].lcnt;
	fullintervalcount += vm_pageout_stats_interval;
	if (fullintervalcount < vm_pageout_full_stats_interval) {
		tpcount = (vm_pageout_stats_max * pcount) /
			  vmstats.v_page_count + 1;
		if (pcount > tpcount)
			pcount = tpcount;
	} else {
		fullintervalcount = 0;
	}

	bzero(&marker, sizeof(marker));
	marker.flags = PG_BUSY | PG_FICTITIOUS | PG_MARKER;
	marker.queue = PQ_ACTIVE + q;
	marker.pc = q;
	marker.wire_count = 1;

	vm_page_queues_spin_lock(PQ_ACTIVE + q);
	TAILQ_INSERT_HEAD(&vm_page_queues[PQ_ACTIVE + q].pl, &marker, pageq);
	vm_page_queues_spin_unlock(PQ_ACTIVE + q);
	while ((m = TAILQ_NEXT(&marker, pageq)) != NULL &&
	       pcount-- > 0)
	{
		int actcount;

		vm_page_and_queue_spin_lock(m);
		if (m != TAILQ_NEXT(&marker, pageq)) {
			vm_page_and_queue_spin_unlock(m);
			++pcount;
			continue;
		}
		KKASSERT(m->queue - m->pc == PQ_ACTIVE);
		TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE + q].pl, &marker, pageq);
		TAILQ_INSERT_AFTER(&vm_page_queues[PQ_ACTIVE + q].pl, m,
				   &marker, pageq);

		/*
		 * Skip marker pages
		 */
		if (m->flags & PG_MARKER) {
			vm_page_and_queue_spin_unlock(m);
			continue;
		}

		/*
		 * Ignore pages we can't busy
		 */
		if (vm_page_busy_try(m, TRUE)) {
			vm_page_and_queue_spin_unlock(m);
			continue;
		}
		vm_page_and_queue_spin_unlock(m);
		KKASSERT(m->queue - m->pc == PQ_ACTIVE);
		/*
		 * We now have a safely busied page, the page and queue
		 * spinlocks have been released.
		 *
		 * Ignore held pages.
		 */
		if (m->hold_count) {
			vm_page_wakeup(m);
			continue;
		}

		/*
		 * Calculate activity
		 */
		actcount = 0;
		if (m->flags & PG_REFERENCED) {
			vm_page_flag_clear(m, PG_REFERENCED);
			++actcount;
		}
		actcount += pmap_ts_referenced(m);

		/*
		 * Update act_count and move page to end of queue.
		 */
		if (actcount) {
			m->act_count += ACT_ADVANCE + actcount;
			if (m->act_count > ACT_MAX)
				m->act_count = ACT_MAX;
			vm_page_and_queue_spin_lock(m);
			if (m->queue - m->pc == PQ_ACTIVE) {
				TAILQ_REMOVE(
					&vm_page_queues[PQ_ACTIVE + q].pl,
					m, pageq);
				TAILQ_INSERT_TAIL(
					&vm_page_queues[PQ_ACTIVE + q].pl,
					m, pageq);
			}
			vm_page_and_queue_spin_unlock(m);
			vm_page_wakeup(m);
			continue;
		}
		if (m->act_count == 0) {
			/*
			 * We turn off page access, so that we have
			 * more accurate RSS stats.  We don't do this
			 * in the normal page deactivation when the
			 * system is loaded VM wise, because the
			 * cost of the large number of page protect
			 * operations would be higher than the value
			 * of doing the operation.
			 *
			 * We use the marker to save our place so
			 * we can release the spin lock.  Both (m)
			 * and (next) will be invalid.
			 */
			vm_page_protect(m, VM_PROT_NONE);
			vm_page_deactivate(m);
		} else {
			m->act_count -= min(m->act_count, ACT_DECLINE);
			vm_page_and_queue_spin_lock(m);
			if (m->queue - m->pc == PQ_ACTIVE) {
				TAILQ_REMOVE(
					&vm_page_queues[PQ_ACTIVE + q].pl,
					m, pageq);
				TAILQ_INSERT_TAIL(
					&vm_page_queues[PQ_ACTIVE + q].pl,
					m, pageq);
			}
			vm_page_and_queue_spin_unlock(m);
		}
		vm_page_wakeup(m);
	}
	/*
	 * Remove our local marker
	 */
	vm_page_queues_spin_lock(PQ_ACTIVE + q);
	TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE + q].pl, &marker, pageq);
	vm_page_queues_spin_unlock(PQ_ACTIVE + q);
}
static int
vm_pageout_free_page_calc(vm_size_t count)
{
	if (count < vmstats.v_page_count)
		return 0;

	/*
	 * free_reserved needs to include enough for the largest swap pager
	 * structures plus enough for any pv_entry structs when paging.
	 *
	 * v_free_min		normal allocations
	 * v_free_reserved	system allocations
	 * v_pageout_free_min	allocations by pageout daemon
	 * v_interrupt_free_min	low level allocations (e.g. swap structures)
	 */
	if (vmstats.v_page_count > 1024)
		vmstats.v_free_min = 64 + (vmstats.v_page_count - 1024) / 200;
	else
		vmstats.v_free_min = 64;
	vmstats.v_free_reserved = vmstats.v_free_min * 4 / 8 + 7;
	vmstats.v_free_severe = vmstats.v_free_min * 4 / 8 + 0;
	vmstats.v_pageout_free_min = vmstats.v_free_min * 2 / 8 + 7;
	vmstats.v_interrupt_free_min = vmstats.v_free_min * 1 / 8 + 7;

	return 1;
}
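/*
 * Worked example (added, assuming 4K pages): on a hypothetical 1GB
 * machine with roughly 262144 pages, v_free_min = 64 + (262144 - 1024)
 * / 200 = 1369 pages (~5.3MB), v_free_reserved = 691, v_free_severe =
 * 684, v_pageout_free_min = 349 and v_interrupt_free_min = 178.
 */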
/*
 * vm_pageout is the high level pageout daemon.
 */
static void
vm_pageout_thread(void)
{
	int pass;
	int q;

	/*
	 * Initialize some paging parameters.
	 */
	curthread->td_flags |= TDF_SYSTHREAD;

	if (vmstats.v_page_count < 2000)
		vm_pageout_page_count = 8;

	vm_pageout_free_page_calc(vmstats.v_page_count);
	/*
	 * v_free_target and v_cache_min control pageout hysteresis.  Note
	 * that these are more a measure of the VM cache queue hysteresis
	 * than the VM free queue.  Specifically, v_free_target is the
	 * high water mark (free+cache pages).
	 *
	 * v_free_reserved + v_cache_min (mostly means v_cache_min) is the
	 * low water mark, while v_free_min is the stop.  v_cache_min must
	 * be big enough to handle memory needs while the pageout daemon
	 * is signalled and run to free more pages.
	 */
	if (vmstats.v_free_count > 6144)
		vmstats.v_free_target = 4 * vmstats.v_free_min +
					vmstats.v_free_reserved;
	else
		vmstats.v_free_target = 2 * vmstats.v_free_min +
					vmstats.v_free_reserved;
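	/*
	 * Continuing the worked example (added): with v_free_min = 1369 and
	 * v_free_reserved = 691 from vm_pageout_free_page_calc(), a machine
	 * with more than 6144 free pages gets v_free_target = 4 * 1369 + 691
	 * = 6167 pages, roughly 24MB of free+cache headroom.
	 */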
	/*
	 * NOTE: With the new buffer cache b_act_count we want the default
	 *	 inactive target to be a percentage of available memory.
	 *
	 *	 The inactive target essentially determines the minimum
	 *	 number of 'temporary' pages capable of caching one-time-use
	 *	 files when the VM system is otherwise full of pages
	 *	 belonging to multi-time-use files or active program data.
	 *
	 * NOTE: The inactive target is aggressively pursued only if the
	 *	 inactive queue becomes too small.  If the inactive queue
	 *	 is large enough to satisfy page movement to free+cache
	 *	 then it is repopulated more slowly from the active queue.
	 *	 This allows a general inactive_target default to be set.
	 *
	 *	 There is an issue here for processes which sit mostly idle
	 *	 'overnight', such as sshd, tcsh, and X.  Any movement from
	 *	 the active queue will eventually cause such pages to
	 *	 recycle, eventually causing a lot of paging in the morning.
	 *	 To reduce the incidence of this, pages cycled out of the
	 *	 buffer cache are moved directly to the inactive queue if
	 *	 they were only used once or twice.
	 *
	 *	 The vfs.vm_cycle_point sysctl can be used to adjust this.
	 *	 Increasing the value (up to 64) increases the number of
	 *	 buffer recyclements which go directly to the inactive queue.
	 */
	if (vmstats.v_free_count > 2048) {
		vmstats.v_cache_min = vmstats.v_free_target;
		vmstats.v_cache_max = 2 * vmstats.v_cache_min;
	} else {
		vmstats.v_cache_min = 0;
		vmstats.v_cache_max = 0;
	}
	vmstats.v_inactive_target = vmstats.v_free_count / 4;
	/* XXX does not really belong here */
	if (vm_page_max_wired == 0)
		vm_page_max_wired = vmstats.v_free_count / 3;

	if (vm_pageout_stats_max == 0)
		vm_pageout_stats_max = vmstats.v_free_target;

	/*
	 * Set interval in seconds for stats scan.
	 */
	if (vm_pageout_stats_interval == 0)
		vm_pageout_stats_interval = 5;
	if (vm_pageout_full_stats_interval == 0)
		vm_pageout_full_stats_interval = vm_pageout_stats_interval * 4;

	/*
	 * Set maximum free per pass
	 */
	if (vm_pageout_stats_free_max == 0)
		vm_pageout_stats_free_max = 5;
	swap_pager_swap_init();

	/*
	 * The pageout daemon is never done, so loop forever.
	 */
	while (TRUE) {
		int error;
		int avail_shortage;
		int inactive_shortage;
		int vnodes_skipped = 0;
		int recycle_count = 0;
		int tmp;

		/*
		 * Wait for an action request.  If we timeout check to
		 * see if paging is needed (in case the normal wakeup
		 * was missed).
		 */
		if (vm_pages_needed == 0) {
			error = tsleep(&vm_pages_needed,
				       0, "psleep",
				       vm_pageout_stats_interval * hz);
			if (error &&
			    vm_paging_needed() == 0 &&
			    vm_pages_needed == 0) {
				for (q = 0; q < PQ_L2_SIZE; ++q)
					vm_pageout_page_stats(q);
				continue;
			}
			vm_pages_needed = 1;
		}
		mycpu->gd_cnt.v_pdwakeups++;

		/*
		 * Do whatever cleanup the pmap code can.
		 */
		pmap_collect();

		/*
		 * Scan for pageout.  Try to avoid thrashing the system
		 * with activity.
		 *
		 * Calculate our target for the number of free+cache pages we
		 * want to get to.  This is higher than the number that causes
		 * allocations to stall (severe) in order to provide hysteresis,
		 * and if we don't make it all the way but get to the minimum
		 * we're happy.  Goose it a bit if there are multiple
		 * requests for memory.
		 */
		avail_shortage = vm_paging_target() + vm_pageout_deficit;
		vm_pageout_deficit = 0;
		if (avail_shortage > 0) {
			int delta1 = 0;

			for (q = 0; q < PQ_L2_SIZE; ++q) {
				delta1 += vm_pageout_scan_inactive(
						pass, q,
						PQAVERAGE(avail_shortage),
						&vnodes_skipped);
			}
			avail_shortage -= delta1;
		}

		/*
		 * Figure out how many active pages we must deactivate.  If
		 * we were able to reach our target with just the inactive
		 * scan above we limit the number of active pages we
		 * deactivate to reduce unnecessary work.
		 */
		inactive_shortage = vmstats.v_inactive_target -
				    vmstats.v_inactive_count;
		/*
		 * If we were unable to free sufficient inactive pages to
		 * satisfy the free/cache queue requirements then simply
		 * reaching the inactive target may not be good enough.
		 * Try to deactivate pages in excess of the target based
		 * on the shortfall.
		 *
		 * However, to prevent thrashing the VM system, do not
		 * deactivate more than an additional 1/10 the inactive
		 * target's worth of active pages.
		 */
		if (avail_shortage > 0) {
			tmp = avail_shortage * 2;
			if (tmp > vmstats.v_inactive_target / 10)
				tmp = vmstats.v_inactive_target / 10;
			inactive_shortage += tmp;
		}
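		/*
		 * Worked example (added): if the inactive scan left
		 * avail_shortage at 500 pages and v_inactive_target is
		 * 65536, tmp = 1000, which is below the 6553-page cap, so
		 * the active scan is asked to deactivate an extra 1000
		 * pages.
		 */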
		if (avail_shortage > 0 || inactive_shortage > 0) {
			int delta2 = 0;

			for (q = 0; q < PQ_L2_SIZE; ++q) {
				delta2 += vm_pageout_scan_active(
						pass, q,
						PQAVERAGE(avail_shortage),
						PQAVERAGE(inactive_shortage),
						&recycle_count);
			}
			inactive_shortage -= delta2;
			avail_shortage -= delta2;
		}

		/*
		 * Finally free enough cache pages to meet our free page
		 * requirement and take more drastic measures if we are
		 * still in trouble.
		 */
		vm_pageout_scan_cache(avail_shortage, vnodes_skipped,
				      recycle_count);
		/*
		 * Wait for more work.
		 */
		if (avail_shortage > 0) {
			++pass;
			if (swap_pager_full) {
				/*
				 * Running out of memory, catastrophic back-off
				 * to one-second intervals.
				 */
				tsleep(&vm_pages_needed, 0, "pdelay", hz);
			} else if (pass < 10 && vm_pages_needed > 1) {
				/*
				 * Normal operation, additional processes
				 * have already kicked us.  Retry immediately.
				 */
			} else if (pass < 10) {
				/*
				 * Normal operation, fewer processes.  Delay
				 * a bit but allow wakeups.
				 */
				vm_pages_needed = 0;
				tsleep(&vm_pages_needed, 0, "pdelay", hz / 10);
				vm_pages_needed = 1;
			} else {
				/*
				 * We've taken too many passes, forced delay.
				 */
				tsleep(&vm_pages_needed, 0, "pdelay", hz / 10);
			}
		} else {
			pass = 0;
		}

		/*
		 * Interlocked wakeup of waiters (non-optional)
		 */
		if (vm_pages_needed && !vm_page_count_min(0)) {
			wakeup(&vmstats.v_free_count);
			vm_pages_needed = 0;
		}
	}
}
static struct kproc_desc page_kp = {
	"pagedaemon",
	vm_pageout_thread,
	&pagethread
};
SYSINIT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, kproc_start, &page_kp)
/*
 * Called after allocating a page out of the cache or free queue
 * to possibly wake the pagedaemon up to replenish our supply.
 *
 * We try to generate some hysteresis by waking the pagedaemon up
 * when our free+cache pages go below the free_min+cache_min level.
 * The pagedaemon tries to get the count back up to at least the
 * minimum, and through to the target level if possible.
 *
 * If the pagedaemon is already active bump vm_pages_needed as a hint
 * that there are even more requests pending.
 */
void
pagedaemon_wakeup(void)
{
	if (vm_paging_needed() && curthread != pagethread) {
		if (vm_pages_needed == 0) {
			vm_pages_needed = 1;	/* SMP race ok */
			wakeup(&vm_pages_needed);
		} else if (vm_page_count_min(0)) {
			++vm_pages_needed;	/* SMP race ok */
		}
	}
}
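/*
 * Illustrative call pattern (added, non-authoritative): allocators such
 * as vm_page_alloc() are expected to invoke pagedaemon_wakeup() after
 * taking a page from the free or cache queues, so the daemon is poked
 * as soon as the free+cache pool dips below the wakeup threshold.
 */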
#if !defined(NO_SWAPPING)

static void
vm_req_vmdaemon(void)
{
	static int lastrun = 0;

	if ((ticks > (lastrun + hz)) || (ticks < lastrun)) {
		wakeup(&vm_daemon_needed);
		lastrun = ticks;
	}
}
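/*
 * Added note: the test in vm_req_vmdaemon() limits wakeups of vm_daemon
 * to roughly one per second (hz ticks) and re-arms itself if the ticks
 * counter wraps (ticks < lastrun).
 */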
static int vm_daemon_callback(struct proc *p, void *data __unused);

static void
vm_daemon(void)
{
	while (TRUE) {
		/*
		 * XXX vm_daemon_needed specific token?
		 */
		tsleep(&vm_daemon_needed, 0, "psleep", 0);
		if (vm_pageout_req_swapout) {
			swapout_procs(vm_pageout_req_swapout);
			vm_pageout_req_swapout = 0;
		}

		/*
		 * scan the processes for exceeding their rlimits or if
		 * the process is swapped out -- deactivate pages
		 */
		allproc_scan(vm_daemon_callback, NULL);
	}
}
/*
 * Caller must hold proc_token.
 */
static int
vm_daemon_callback(struct proc *p, void *data __unused)
{
	vm_pindex_t limit, size;

	/*
	 * if this is a system process or if we have already
	 * looked at this process, skip it.
	 */
	if (p->p_flags & (P_SYSTEM | P_WEXIT))
		return (0);

	/*
	 * if the process is in a non-running type state,
	 * don't touch it.
	 */
	if (p->p_stat != SACTIVE && p->p_stat != SSTOP)
		return (0);

	limit = OFF_TO_IDX(qmin(p->p_rlimit[RLIMIT_RSS].rlim_cur,
				p->p_rlimit[RLIMIT_RSS].rlim_max));

	/*
	 * let processes that are swapped out really be
	 * swapped out.  Set the limit to nothing to get as
	 * many pages out to swap as possible.
	 */
	if (p->p_flags & P_SWAPPEDOUT)
		limit = 0;

	lwkt_gettoken(&p->p_vmspace->vm_map.token);
	size = vmspace_resident_count(p->p_vmspace);
	if (limit >= 0 && size >= limit) {
		vm_pageout_map_deactivate_pages(&p->p_vmspace->vm_map, limit);
	}
	lwkt_reltoken(&p->p_vmspace->vm_map.token);
	return (0);
}