2 * Copyright (c) 1991 Regents of the University of California.
4 * Copyright (c) 1994 John S. Dyson
6 * Copyright (c) 1994 David Greenman
9 * This code is derived from software contributed to Berkeley by
10 * The Mach Operating System project at Carnegie-Mellon University.
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * 3. Neither the name of the University nor the names of its contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * from: @(#)vm_pageout.c 7.4 (Berkeley) 5/7/91
39 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
40 * All rights reserved.
42 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
44 * Permission to use, copy, modify and distribute this software and
45 * its documentation is hereby granted, provided that both the copyright
46 * notice and this permission notice appear in all copies of the
47 * software, derivative works or modified versions, and any portions
48 * thereof, and that both notices appear in supporting documentation.
50 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
51 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
52 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
54 * Carnegie Mellon requests users of this software to return to
56 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
57 * School of Computer Science
58 * Carnegie Mellon University
59 * Pittsburgh PA 15213-3890
61 * any improvements or extensions that they make and grant Carnegie the
62 * rights to redistribute these changes.
64 * $FreeBSD: src/sys/vm/vm_pageout.c,v 1.151.2.15 2002/12/29 18:21:04 dillon Exp $
68 * The proverbial page-out daemon.
72 #include <sys/param.h>
73 #include <sys/systm.h>
74 #include <sys/kernel.h>
76 #include <sys/kthread.h>
77 #include <sys/resourcevar.h>
78 #include <sys/signalvar.h>
79 #include <sys/vnode.h>
80 #include <sys/vmmeter.h>
81 #include <sys/sysctl.h>
84 #include <vm/vm_param.h>
86 #include <vm/vm_object.h>
87 #include <vm/vm_page.h>
88 #include <vm/vm_map.h>
89 #include <vm/vm_pageout.h>
90 #include <vm/vm_pager.h>
91 #include <vm/swap_pager.h>
92 #include <vm/vm_extern.h>
94 #include <sys/thread2.h>
95 #include <sys/spinlock2.h>
96 #include <vm/vm_page2.h>
99 * System initialization
102 /* the kernel process "vm_pageout"*/
103 static int vm_pageout_page(vm_page_t m, int *max_launderp,
104 int *vnodes_skippedp, struct vnode **vpfailedp,
105 int pass, int vmflush_flags);
106 static int vm_pageout_clean_helper (vm_page_t, int);
107 static int vm_pageout_free_page_calc (vm_size_t count);
108 static void vm_pageout_page_free(vm_page_t m) ;
109 struct thread *pagethread;
111 #if !defined(NO_SWAPPING)
112 /* the kernel process "vm_daemon"*/
113 static void vm_daemon (void);
114 static struct thread *vmthread;
116 static struct kproc_desc vm_kp = {
121 SYSINIT(vmdaemon, SI_SUB_KTHREAD_VM, SI_ORDER_FIRST, kproc_start, &vm_kp);
124 int vm_pages_needed = 0; /* Event on which pageout daemon sleeps */
125 int vm_pageout_deficit = 0; /* Estimated number of pages deficit */
126 int vm_pageout_pages_needed = 0;/* pageout daemon needs pages */
127 int vm_page_free_hysteresis = 16;
129 #if !defined(NO_SWAPPING)
130 static int vm_pageout_req_swapout;
131 static int vm_daemon_needed;
133 static int vm_max_launder = 4096;
134 static int vm_pageout_stats_max=0, vm_pageout_stats_interval = 0;
135 static int vm_pageout_full_stats_interval = 0;
136 static int vm_pageout_stats_free_max=0, vm_pageout_algorithm=0;
137 static int defer_swap_pageouts=0;
138 static int disable_swap_pageouts=0;
139 static u_int vm_anonmem_decline = ACT_DECLINE;
140 static u_int vm_filemem_decline = ACT_DECLINE * 2;
142 #if defined(NO_SWAPPING)
143 static int vm_swap_enabled=0;
144 static int vm_swap_idle_enabled=0;
146 static int vm_swap_enabled=1;
147 static int vm_swap_idle_enabled=0;
149 int vm_pageout_memuse_mode=1; /* 0-disable, 1-passive, 2-active swp*/
151 SYSCTL_UINT(_vm, VM_PAGEOUT_ALGORITHM, anonmem_decline,
152 CTLFLAG_RW, &vm_anonmem_decline, 0, "active->inactive anon memory");
154 SYSCTL_INT(_vm, VM_PAGEOUT_ALGORITHM, filemem_decline,
155 CTLFLAG_RW, &vm_filemem_decline, 0, "active->inactive file cache");
157 SYSCTL_INT(_vm, OID_AUTO, page_free_hysteresis,
158 CTLFLAG_RW, &vm_page_free_hysteresis, 0,
159 "Free more pages than the minimum required");
161 SYSCTL_INT(_vm, OID_AUTO, max_launder,
162 CTLFLAG_RW, &vm_max_launder, 0, "Limit dirty flushes in pageout");
164 SYSCTL_INT(_vm, OID_AUTO, pageout_stats_max,
165 CTLFLAG_RW, &vm_pageout_stats_max, 0, "Max pageout stats scan length");
167 SYSCTL_INT(_vm, OID_AUTO, pageout_full_stats_interval,
168 CTLFLAG_RW, &vm_pageout_full_stats_interval, 0, "Interval for full stats scan");
170 SYSCTL_INT(_vm, OID_AUTO, pageout_stats_interval,
171 CTLFLAG_RW, &vm_pageout_stats_interval, 0, "Interval for partial stats scan");
173 SYSCTL_INT(_vm, OID_AUTO, pageout_stats_free_max,
174 CTLFLAG_RW, &vm_pageout_stats_free_max, 0, "Not implemented");
175 SYSCTL_INT(_vm, OID_AUTO, pageout_memuse_mode,
176 CTLFLAG_RW, &vm_pageout_memuse_mode, 0, "memoryuse resource mode");
178 #if defined(NO_SWAPPING)
179 SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
180 CTLFLAG_RD, &vm_swap_enabled, 0, "");
181 SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
182 CTLFLAG_RD, &vm_swap_idle_enabled, 0, "");
184 SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
185 CTLFLAG_RW, &vm_swap_enabled, 0, "Enable entire process swapout");
186 SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
187 CTLFLAG_RW, &vm_swap_idle_enabled, 0, "Allow swapout on idle criteria");
190 SYSCTL_INT(_vm, OID_AUTO, defer_swapspace_pageouts,
191 CTLFLAG_RW, &defer_swap_pageouts, 0, "Give preference to dirty pages in mem");
193 SYSCTL_INT(_vm, OID_AUTO, disable_swapspace_pageouts,
194 CTLFLAG_RW, &disable_swap_pageouts, 0, "Disallow swapout of dirty pages");
196 static int pageout_lock_miss;
197 SYSCTL_INT(_vm, OID_AUTO, pageout_lock_miss,
198 CTLFLAG_RD, &pageout_lock_miss, 0, "vget() lock misses during pageout");
200 int vm_page_max_wired; /* XXX max # of wired pages system-wide */
202 #if !defined(NO_SWAPPING)
203 static vm_pindex_t vm_pageout_object_deactivate_pages(vm_map_t map,
204 vm_object_t object, vm_pindex_t limit,
205 vm_pindex_t obj_beg, vm_pindex_t obj_end);
206 static void vm_req_vmdaemon (void);
208 static void vm_pageout_page_stats(int q);
211 * Calculate approximately how many pages on each queue to try to
212 * clean. An exact calculation creates an edge condition when the
213 * queues are unbalanced so add significant slop. The queue scans
214 * will stop early when targets are reached and will start where they
215 * left off on the next pass.
217 * We need to be generous here because there are all sorts of loading
218 * conditions that can cause edge cases if we try to average over all queues.
219 * In particular, storage subsystems have become so fast that paging
220 * activity can become quite frantic. Eventually we will probably need
221 * two paging threads, one for dirty pages and one for clean, to deal
222 * with the bandwidth requirements.
224 * So what we do is calculate a value that can be satisfied nominally by
225 * only having to scan half the queues.
233 avg = ((n + (PQ_L2_SIZE - 1)) / (PQ_L2_SIZE / 2) + 1);
235 avg = ((n - (PQ_L2_SIZE - 1)) / (PQ_L2_SIZE / 2) - 1);
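/*
 * Illustrative example (editor's sketch, not part of the original source):
 * assuming PQ_L2_SIZE is 256 and the caller passes a shortage of n = 1000
 * pages, the positive branch above computes
 *
 *	avg = ((1000 + 255) / (256 / 2)) + 1 = 10
 *
 * so each of the 256 queues is asked for ~10 pages, roughly 2.5x the exact
 * per-queue share (1000 / 256 ~= 4).  Scanning about 100 of the 256 queues
 * would then satisfy the target, which is the "half the queues" slop
 * described above.
 */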
241 * vm_pageout_clean_helper:
243 * Clean the page and remove it from the laundry. The page must not be
246 * We set the busy bit to cause potential page faults on this page to
247 * block. Note the careful timing, however: the busy bit isn't set until
248 * late and we cannot do anything that will mess with the page.
251 vm_pageout_clean_helper(vm_page_t m, int vmflush_flags)
254 vm_page_t mc[BLIST_MAX_ALLOC];
256 int ib, is, page_base;
257 vm_pindex_t pindex = m->pindex;
262 * It doesn't cost us anything to pageout OBJT_DEFAULT or OBJT_SWAP
263 * with the new swapper, but we could have serious problems paging
264 * out other object types if there is insufficient memory.
266 * Unfortunately, checking free memory here is far too late, so the
267 * check has been moved up a procedural level.
271 * Don't mess with the page if it's busy, held, or special
273 * XXX do we really need to check hold_count here? hold_count
274 * isn't supposed to mess with vm_page ops except prevent the
275 * page from being reused.
277 if (m->hold_count != 0 || (m->flags & PG_UNMANAGED)) {
283 * Place page in cluster. Align cluster for optimal swap space
284 * allocation (whether it is swap or not). This is typically ~16-32
285 * pages, which also tends to align the cluster to multiples of the
286 * filesystem block size if backed by a filesystem.
288 page_base = pindex % BLIST_MAX_ALLOC;
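/*
 * Illustrative example (editor's sketch, not in the original source):
 * assuming BLIST_MAX_ALLOC is 16, a page at pindex 37 yields
 * page_base = 37 % 16 = 5, so the candidate cluster is anchored at
 * pindex 32 and may cover pindex 32..47 of the same object, keeping
 * the flush aligned to 16-page boundaries for the swap allocator.
 */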
294 * Scan object for clusterable pages.
296 * We can cluster ONLY if: ->> the page is NOT
297 * clean, wired, busy, held, or mapped into a
298 * buffer, and one of the following:
299 * 1) The page is inactive, or a seldom used
302 * 2) we force the issue.
304 * During heavy mmap/modification loads the pageout
305 * daemon can really fragment the underlying file
306 * due to flushing pages out of order and not trying to
307 * align the clusters (which leaves sporadic out-of-order
308 * holes). To solve this problem we do the reverse scan
309 * first and attempt to align our cluster, then do a
310 * forward scan if room remains.
312 vm_object_hold(object);
317 p = vm_page_lookup_busy_try(object, pindex - page_base + ib,
319 if (error || p == NULL)
321 if ((p->queue - p->pc) == PQ_CACHE ||
322 (p->flags & PG_UNMANAGED)) {
326 vm_page_test_dirty(p);
327 if (((p->dirty & p->valid) == 0 &&
328 (p->flags & PG_NEED_COMMIT) == 0) ||
329 p->wire_count != 0 || /* may be held by buf cache */
330 p->hold_count != 0) { /* may be undergoing I/O */
334 if (p->queue - p->pc != PQ_INACTIVE) {
335 if (p->queue - p->pc != PQ_ACTIVE ||
336 (vmflush_flags & VM_PAGER_ALLOW_ACTIVE) == 0) {
343 * Try to maintain page groupings in the cluster.
345 if (m->flags & PG_WINATCFLS)
346 vm_page_flag_set(p, PG_WINATCFLS);
348 vm_page_flag_clear(p, PG_WINATCFLS);
349 p->act_count = m->act_count;
356 while (is < BLIST_MAX_ALLOC &&
357 pindex - page_base + is < object->size) {
360 p = vm_page_lookup_busy_try(object, pindex - page_base + is,
362 if (error || p == NULL)
364 if (((p->queue - p->pc) == PQ_CACHE) ||
365 (p->flags & PG_UNMANAGED)) {
369 vm_page_test_dirty(p);
370 if (((p->dirty & p->valid) == 0 &&
371 (p->flags & PG_NEED_COMMIT) == 0) ||
372 p->wire_count != 0 || /* may be held by buf cache */
373 p->hold_count != 0) { /* may be undergoing I/O */
377 if (p->queue - p->pc != PQ_INACTIVE) {
378 if (p->queue - p->pc != PQ_ACTIVE ||
379 (vmflush_flags & VM_PAGER_ALLOW_ACTIVE) == 0) {
386 * Try to maintain page groupings in the cluster.
388 if (m->flags & PG_WINATCFLS)
389 vm_page_flag_set(p, PG_WINATCFLS);
391 vm_page_flag_clear(p, PG_WINATCFLS);
392 p->act_count = m->act_count;
398 vm_object_drop(object);
401 * we allow reads during pageouts...
403 return vm_pageout_flush(&mc[ib], is - ib, vmflush_flags);
407 * vm_pageout_flush() - launder the given pages
409 * The given pages are laundered. Note that we setup for the start of
410 * I/O ( i.e. busy the page ), mark it read-only, and bump the object
411 * reference count all in here rather than in the parent. If we want
412 * the parent to do more sophisticated things we may have to change
415 * The pages in the array must be busied by the caller and will be
416 * unbusied by this function.
419 vm_pageout_flush(vm_page_t *mc, int count, int vmflush_flags)
422 int pageout_status[count];
427 * Initiate I/O. Bump the vm_page_t->busy counter.
429 for (i = 0; i < count; i++) {
430 KASSERT(mc[i]->valid == VM_PAGE_BITS_ALL,
431 ("vm_pageout_flush page %p index %d/%d: partially "
432 "invalid page", mc[i], i, count));
433 vm_page_io_start(mc[i]);
437 * We must make the pages read-only. This will also force the
438 * modified bit in the related pmaps to be cleared. The pager
439 * cannot clear the bit for us since the I/O completion code
440 * typically runs from an interrupt. The act of making the page
441 * read-only handles the case for us.
443 * Then we can unbusy the pages, we still hold a reference by virtue
446 for (i = 0; i < count; i++) {
447 if (vmflush_flags & VM_PAGER_TRY_TO_CACHE)
448 vm_page_protect(mc[i], VM_PROT_NONE);
450 vm_page_protect(mc[i], VM_PROT_READ);
451 vm_page_wakeup(mc[i]);
454 object = mc[0]->object;
455 vm_object_pip_add(object, count);
457 vm_pager_put_pages(object, mc, count,
459 ((object == &kernel_object) ? VM_PAGER_PUT_SYNC : 0)),
462 for (i = 0; i < count; i++) {
463 vm_page_t mt = mc[i];
465 switch (pageout_status[i]) {
474 * Page outside of range of object. Right now we
475 * essentially lose the changes by pretending it
478 vm_page_busy_wait(mt, FALSE, "pgbad");
479 pmap_clear_modify(mt);
486 * A page typically cannot be paged out when we
487 * have run out of swap. We leave the page
488 * marked inactive and will try to page it out
491 * Starvation of the active page list is used to
492 * determine when the system is massively memory
501 * If not PENDing this was a synchronous operation and we
502 * clean up after the I/O. If it is PENDing the mess is
503 * cleaned up asynchronously.
505 * Also nominally act on the caller's wishes if the caller
506 * wants to try to really clean (cache or free) the page.
508 * Also nominally deactivate the page if the system is
511 if (pageout_status[i] != VM_PAGER_PEND) {
512 vm_page_busy_wait(mt, FALSE, "pgouw");
513 vm_page_io_finish(mt);
514 if (vmflush_flags & VM_PAGER_TRY_TO_CACHE) {
515 vm_page_try_to_cache(mt);
516 } else if (vm_page_count_severe()) {
517 vm_page_deactivate(mt);
522 vm_object_pip_wakeup(object);
528 #if !defined(NO_SWAPPING)
531 * Deactivate pages until the map RSS falls below the specified limit.
533 * This code is part of the process rlimit and vm_daemon handler and not
534 * part of the normal demand-paging code. We only check the top-level
537 * The map must be locked.
538 * The caller must hold the vm_object.
540 static int vm_pageout_object_deactivate_pages_callback(vm_page_t, void *);
541 static int vm_pageout_object_deactivate_pages_cmp(vm_page_t, void *);
544 vm_pageout_object_deactivate_pages(vm_map_t map, vm_object_t object,
549 struct rb_vm_page_scan_info info;
552 ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
555 info.backing_offset_index = obj_beg;
556 info.backing_object = object;
561 if (pmap_resident_tlnw_count(vm_map_pmap(map)) <= limit)
563 if (object->type == OBJT_DEVICE ||
564 object->type == OBJT_MGTDEVICE ||
565 object->type == OBJT_PHYS) {
569 if (object->paging_in_progress)
574 if (object->shadow_count > 1)
578 * scan the object's entire memory queue. We hold the
579 * object's token so the scan should not race anything.
581 * The callback will adjust backing_offset_index past the
582 * last index scanned. This value only matters if we
585 info.limit = remove_mode;
587 info.desired = limit;
588 info.start_pindex = obj_beg;
589 info.end_pindex = obj_end;
590 info.object = object;
592 vm_page_rb_tree_RB_SCAN(&object->rb_memq,
593 vm_pageout_object_deactivate_pages_cmp,
594 vm_pageout_object_deactivate_pages_callback,
598 * Backing object recursion (we will loop up).
600 while ((object = info.object->backing_object) != NULL) {
601 vm_object_hold(object);
602 if (object != info.object->backing_object) {
603 vm_object_drop(object);
608 if (object == NULL) {
609 if (info.object != info.backing_object)
610 vm_object_drop(info.object);
613 advance = OFF_TO_IDX(info.object->backing_object_offset);
614 info.start_pindex += advance;
615 info.end_pindex += advance;
616 info.backing_offset_index += advance;
617 if (info.object != info.backing_object) {
618 vm_object_lock_swap();
619 vm_object_drop(info.object);
621 info.object = object;
625 * Return how far we want the caller to advance. The caller will
626 * ignore this value and use obj_end if the RSS limit is still not
629 return (info.backing_offset_index - info.start_pindex);
633 * Only page indices above start_pindex
637 vm_pageout_object_deactivate_pages_cmp(vm_page_t p, void *data)
639 struct rb_vm_page_scan_info *info = data;
641 if (p->pindex < info->start_pindex)
643 if (p->pindex >= info->end_pindex)
649 * The caller must hold the vm_object.
651 * info->count is bumped for every page removed from the process pmap.
653 * info->backing_offset_index is updated past the last scanned page.
654 * This value will be ignored and the scan forced to the mapent boundary
655 * by the caller if the resident count remains too high.
658 vm_pageout_object_deactivate_pages_callback(vm_page_t p, void *data)
660 struct rb_vm_page_scan_info *info = data;
665 * Basic tests - There should never be a marker, and we can stop
666 * once the RSS is below the required level.
668 KKASSERT((p->flags & PG_MARKER) == 0);
669 if (pmap_resident_tlnw_count(vm_map_pmap(info->map)) <=
674 mycpu->gd_cnt.v_pdpages++;
675 info->backing_offset_index = p->pindex + 1;
677 if (vm_page_busy_try(p, TRUE))
680 if (p->object != info->object) {
684 if (p->wire_count || p->hold_count || (p->flags & PG_UNMANAGED)) {
688 if (!pmap_page_exists_quick(vm_map_pmap(info->map), p)) {
693 actcount = pmap_ts_referenced(p);
695 vm_page_flag_set(p, PG_REFERENCED);
696 } else if (p->flags & PG_REFERENCED) {
700 vm_page_and_queue_spin_lock(p);
701 if (p->queue - p->pc != PQ_ACTIVE && (p->flags & PG_REFERENCED)) {
702 vm_page_and_queue_spin_unlock(p);
704 p->act_count += actcount;
705 vm_page_flag_clear(p, PG_REFERENCED);
706 } else if (p->queue - p->pc == PQ_ACTIVE) {
707 if ((p->flags & PG_REFERENCED) == 0) {
708 /* use ACT_ADVANCE for a faster decline */
709 p->act_count -= min(p->act_count, ACT_ADVANCE);
711 (vm_pageout_algorithm || (p->act_count == 0))) {
712 vm_page_and_queue_spin_unlock(p);
713 vm_page_deactivate(p);
716 TAILQ_REMOVE(&vm_page_queues[p->queue].pl,
718 TAILQ_INSERT_TAIL(&vm_page_queues[p->queue].pl,
720 vm_page_and_queue_spin_unlock(p);
723 vm_page_and_queue_spin_unlock(p);
725 vm_page_flag_clear(p, PG_REFERENCED);
727 vm_page_and_queue_spin_lock(p);
728 if (p->queue - p->pc == PQ_ACTIVE) {
729 if (p->act_count < (ACT_MAX - ACT_ADVANCE))
730 p->act_count += ACT_ADVANCE;
731 TAILQ_REMOVE(&vm_page_queues[p->queue].pl,
733 TAILQ_INSERT_TAIL(&vm_page_queues[p->queue].pl,
736 vm_page_and_queue_spin_unlock(p);
738 } else if (p->queue - p->pc == PQ_INACTIVE) {
740 TAILQ_REMOVE(&vm_page_queues[p->queue].pl,
742 TAILQ_INSERT_HEAD(&vm_page_queues[p->queue].pl,
745 /* use ACT_ADVANCE for a faster decline */
746 p->act_count -= min(p->act_count, ACT_ADVANCE);
747 vm_page_and_queue_spin_unlock(p);
748 if (p->act_count == 0) {
752 vm_page_and_queue_spin_unlock(p);
756 * Ok, try to fully clean the page and any nearby pages such that at
757 * least the requested page is freed or moved to the cache queue.
759 * We usually do this synchronously to allow us to get the page into
760 * the CACHE queue quickly, which will prevent memory exhaustion if
761 * a process with a memoryuse limit is running away. However, the
762 * sysadmin may desire to set vm.swap_user_async which relaxes this
763 * and improves write performance.
766 int max_launder = 0x7FFF;
767 int vnodes_skipped = 0;
769 struct vnode *vpfailed = NULL;
771 vmflush_flags = VM_PAGER_TRY_TO_CACHE | VM_PAGER_ALLOW_ACTIVE;
772 if (swap_user_async == 0)
773 vmflush_flags |= VM_PAGER_PUT_SYNC;
775 if (vm_pageout_memuse_mode >= 1)
776 vm_page_protect(p, VM_PROT_NONE);
777 if (vm_pageout_memuse_mode >= 2) {
778 vm_page_flag_set(p, PG_WINATCFLS);
779 info->count += vm_pageout_page(p, &max_launder,
781 &vpfailed, 1, vmflush_flags);
796 * Deactivate some number of pages in a map due to set RLIMIT_RSS limits,
797 * which is relatively difficult to do.
799 * Called when vm_pageout_memuse_mode is >= 1.
802 vm_pageout_map_deactivate_pages(vm_map_t map, vm_pindex_t limit)
806 vm_ooffset_t pgout_offset;
807 vm_ooffset_t tmpe_end;
813 lockmgr(&map->lock, LK_EXCLUSIVE);
816 * Scan the map incrementally.
818 pgout_offset = map->pgout_offset;
820 tmpe = map->header.next;
826 while (tmpe != &map->header) {
827 if (tmpe->end <= pgout_offset) {
831 if (tmpe->maptype == VM_MAPTYPE_NORMAL ||
832 tmpe->maptype == VM_MAPTYPE_VPAGETABLE) {
833 obj = tmpe->object.vm_object;
834 if (obj && obj->shadow_count <= 1) {
835 if (pgout_offset < tmpe->start) {
836 obj_beg = tmpe->offset >> PAGE_SHIFT;
837 obj_end = ((tmpe->end - tmpe->start) +
838 tmpe->offset) >> PAGE_SHIFT;
840 obj_beg = (pgout_offset - tmpe->start +
841 tmpe->offset) >> PAGE_SHIFT;
842 obj_end = (tmpe->end - tmpe->start +
843 tmpe->offset) >> PAGE_SHIFT;
845 tmpe_end = tmpe->end;
854 * Attempt to continue where we left off until the RLIMIT is
855 * satisfied or we run out of retries. Note that the map remains
856 * locked, so the program is not going to be taking any faults
857 * while we are doing this.
859 * Only circle around in this particular function when the
860 * memuse_mode is >= 2.
864 count = vm_pageout_object_deactivate_pages(map, obj, limit,
867 if (pmap_resident_tlnw_count(vm_map_pmap(map)) > limit) {
868 pgout_offset = tmpe_end;
871 pgout_offset += count << PAGE_SHIFT;
874 if (pmap_resident_tlnw_count(vm_map_pmap(map)) > limit) {
875 if (retries && vm_pageout_memuse_mode >= 2) {
882 map->pgout_offset = pgout_offset;
889 * Called when the pageout scan wants to free a page. We no longer
890 * try to cycle the vm_object here with a reference & dealloc, which can
891 * cause a non-trivial object collapse in a critical path.
893 * It is unclear why we cycled the ref_count in the past, perhaps to try
894 * to optimize shadow chain collapses but I don't quite see why it would
895 * be necessary. An OBJ_DEAD object should terminate any and all vm_pages
896 * synchronously and not have to be kick-started.
899 vm_pageout_page_free(vm_page_t m)
901 vm_page_protect(m, VM_PROT_NONE);
906 * vm_pageout_scan does the dirty work for the pageout daemon.
908 struct vm_pageout_scan_info {
909 struct proc *bigproc;
913 static int vm_pageout_scan_callback(struct proc *p, void *data);
916 vm_pageout_scan_inactive(int pass, int q, int avail_shortage,
920 struct vm_page marker;
921 struct vnode *vpfailed; /* warning, allowed to be stale */
927 * Start scanning the inactive queue for pages we can move to the
928 * cache or free. The scan will stop when the target is reached or
929 * we have scanned the entire inactive queue. Note that m->act_count
930 * is not used to form decisions for the inactive queue, only for the
933 * max_launder limits the number of dirty pages we flush per scan.
934 * For most systems a smaller value (16 or 32) is more robust under
935 * extreme memory and disk pressure because any unnecessary writes
936 * to disk can result in extreme performance degradation. However,
937 * systems with excessive dirty pages (especially when MAP_NOSYNC is
938 * used) will die horribly with limited laundering. If the pageout
939 * daemon cannot clean enough pages in the first pass, we let it go
940 * all out in succeeding passes.
942 if ((max_launder = vm_max_launder) <= 1)
948 * Initialize our marker
950 bzero(&marker, sizeof(marker));
951 marker.flags = PG_BUSY | PG_FICTITIOUS | PG_MARKER;
952 marker.queue = PQ_INACTIVE + q;
954 marker.wire_count = 1;
957 * Inactive queue scan.
959 * NOTE: The vm_page must be spinlocked before the queue to avoid
960 * deadlocks, so it is easiest to simply iterate the loop
961 * with the queue unlocked at the top.
965 vm_page_queues_spin_lock(PQ_INACTIVE + q);
966 TAILQ_INSERT_HEAD(&vm_page_queues[PQ_INACTIVE + q].pl, &marker, pageq);
967 maxscan = vm_page_queues[PQ_INACTIVE + q].lcnt;
970 * Queue locked at top of loop to avoid stack marker issues.
972 while ((m = TAILQ_NEXT(&marker, pageq)) != NULL &&
973 maxscan-- > 0 && avail_shortage - delta > 0)
977 KKASSERT(m->queue == PQ_INACTIVE + q);
978 TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE + q].pl,
980 TAILQ_INSERT_AFTER(&vm_page_queues[PQ_INACTIVE + q].pl, m,
982 mycpu->gd_cnt.v_pdpages++;
985 * Skip marker pages (atomic against other markers to avoid
986 * infinite hop-over scans).
988 if (m->flags & PG_MARKER)
992 * Try to busy the page. Don't mess with pages which are
993 * already busy or reorder them in the queue.
995 if (vm_page_busy_try(m, TRUE))
999 * Remaining operations run with the page busy and neither
1000 * the page or the queue will be spin-locked.
1002 vm_page_queues_spin_unlock(PQ_INACTIVE + q);
1003 KKASSERT(m->queue == PQ_INACTIVE + q);
1005 count = vm_pageout_page(m, &max_launder, vnodes_skipped,
1006 &vpfailed, pass, 0);
1010 * Systems with a ton of memory can wind up with huge
1011 * deactivation counts. Because the inactive scan is
1012 * doing a lot of flushing, the combination can result
1013 * in excessive paging even in situations where other
1014 * unrelated threads free up sufficient VM.
1016 * To deal with this we abort the nominal active->inactive
1017 * scan before we hit the inactive target when free+cache
1018 * levels have reached a reasonable target.
1020 * When deciding to stop early we need to add some slop to
1021 * the test and we need to return full completion to the caller
1022 * to prevent the caller from thinking there is something
1023 * wrong and issuing a low-memory+swap warning or pkill.
1025 * A deficit forces paging regardless of the state of the
1026 * VM page queues (used for RSS enforcement).
1029 vm_page_queues_spin_lock(PQ_INACTIVE + q);
1030 if (vm_paging_target() < -vm_max_launder) {
1032 * Stopping early, return full completion to caller.
1034 if (delta < avail_shortage)
1035 delta = avail_shortage;
1040 /* page queue still spin-locked */
1041 TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE + q].pl, &marker, pageq);
1042 vm_page_queues_spin_unlock(PQ_INACTIVE + q);
1048 * Pageout the specified page, return the total number of pages paged out
1049 * (this routine may cluster).
1051 * The page must be busied and soft-busied by the caller and will be disposed
1052 * of by this function.
1055 vm_pageout_page(vm_page_t m, int *max_launderp, int *vnodes_skippedp,
1056 struct vnode **vpfailedp, int pass, int vmflush_flags)
1063 * It is possible for a page to be busied ad-hoc (e.g. the
1064 * pmap_collect() code) and wired and race against the
1065 * allocation of a new page. vm_page_alloc() may be forced
1066 * to deactivate the wired page in which case it winds up
1067 * on the inactive queue and must be handled here. We
1068 * correct the problem simply by unqueuing the page.
1070 if (m->wire_count) {
1071 vm_page_unqueue_nowakeup(m);
1073 kprintf("WARNING: pagedaemon: wired page on "
1074 "inactive queue %p\n", m);
1079 * A held page may be undergoing I/O, so skip it.
1081 if (m->hold_count) {
1082 vm_page_and_queue_spin_lock(m);
1083 if (m->queue - m->pc == PQ_INACTIVE) {
1085 &vm_page_queues[m->queue].pl, m, pageq);
1087 &vm_page_queues[m->queue].pl, m, pageq);
1088 ++vm_swapcache_inactive_heuristic;
1090 vm_page_and_queue_spin_unlock(m);
1095 if (m->object == NULL || m->object->ref_count == 0) {
1097 * If the object is not being used, we ignore previous
1100 vm_page_flag_clear(m, PG_REFERENCED);
1101 pmap_clear_reference(m);
1102 /* fall through to end */
1103 } else if (((m->flags & PG_REFERENCED) == 0) &&
1104 (actcount = pmap_ts_referenced(m))) {
1106 * Otherwise, if the page has been referenced while
1107 * in the inactive queue, we bump the "activation
1108 * count" upwards, making it less likely that the
1109 * page will be added back to the inactive queue
1110 * prematurely again. Here we check the page tables
1111 * (or emulated bits, if any), given the upper level
1112 * VM system not knowing anything about existing
1115 vm_page_activate(m);
1116 m->act_count += (actcount + ACT_ADVANCE);
1122 * (m) is still busied.
1124 * If the upper level VM system knows about any page
1125 * references, we activate the page. We also set the
1126 * "activation count" higher than normal so that we will less
1127 * likely place pages back onto the inactive queue again.
1129 if ((m->flags & PG_REFERENCED) != 0) {
1130 vm_page_flag_clear(m, PG_REFERENCED);
1131 actcount = pmap_ts_referenced(m);
1132 vm_page_activate(m);
1133 m->act_count += (actcount + ACT_ADVANCE + 1);
1139 * If the upper level VM system doesn't know anything about
1140 * the page being dirty, we have to check for it again. As
1141 * far as the VM code knows, any partially dirty pages are
1144 * Pages marked PG_WRITEABLE may be mapped into the user
1145 * address space of a process running on another cpu. A
1146 * user process (without holding the MP lock) running on
1147 * another cpu may be able to touch the page while we are
1148 * trying to remove it. vm_page_cache() will handle this
1151 if (m->dirty == 0) {
1152 vm_page_test_dirty(m);
1157 if (m->valid == 0 && (m->flags & PG_NEED_COMMIT) == 0) {
1159 * Invalid pages can be easily freed
1161 vm_pageout_page_free(m);
1162 mycpu->gd_cnt.v_dfree++;
1164 } else if (m->dirty == 0 && (m->flags & PG_NEED_COMMIT) == 0) {
1166 * Clean pages can be placed onto the cache queue.
1167 * This effectively frees them.
1171 } else if ((m->flags & PG_WINATCFLS) == 0 && pass == 0) {
1173 * Dirty pages need to be paged out, but flushing
1174 * a page is extremely expensive versus freeing
1175 * a clean page. Rather than artificially limiting
1176 * the number of pages we can flush, we instead give
1177 * dirty pages extra priority on the inactive queue
1178 * by forcing them to be cycled through the queue
1179 * twice before being flushed, after which the
1180 * (now clean) page will cycle through once more
1181 * before being freed. This significantly extends
1182 * the thrash point for a heavily loaded machine.
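 *
 * Editor's illustrative note (not from the original source): under this
 * scheme a dirty page encountered on pass 0 is only requeued with
 * PG_WINATCFLS set; if it is still dirty when the scan reaches it again
 * (or on a later pass) the test above fails and the page falls through
 * to the laundering path below.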
1184 vm_page_flag_set(m, PG_WINATCFLS);
1185 vm_page_and_queue_spin_lock(m);
1186 if (m->queue - m->pc == PQ_INACTIVE) {
1188 &vm_page_queues[m->queue].pl, m, pageq);
1190 &vm_page_queues[m->queue].pl, m, pageq);
1191 ++vm_swapcache_inactive_heuristic;
1193 vm_page_and_queue_spin_unlock(m);
1195 } else if (*max_launderp > 0) {
1197 * We always want to try to flush some dirty pages if
1198 * we encounter them, to keep the system stable.
1199 * Normally this number is small, but under extreme
1200 * pressure where there are insufficient clean pages
1201 * on the inactive queue, we may have to go all out.
1203 int swap_pageouts_ok;
1204 struct vnode *vp = NULL;
1206 swap_pageouts_ok = 0;
1209 (object->type != OBJT_SWAP) &&
1210 (object->type != OBJT_DEFAULT)) {
1211 swap_pageouts_ok = 1;
1213 swap_pageouts_ok = !(defer_swap_pageouts || disable_swap_pageouts);
1214 swap_pageouts_ok |= (!disable_swap_pageouts && defer_swap_pageouts &&
1215 vm_page_count_min(0));
1219 * We don't bother paging objects that are "dead".
1220 * Those objects are in a "rundown" state.
1222 if (!swap_pageouts_ok ||
1224 (object->flags & OBJ_DEAD)) {
1225 vm_page_and_queue_spin_lock(m);
1226 if (m->queue - m->pc == PQ_INACTIVE) {
1228 &vm_page_queues[m->queue].pl,
1231 &vm_page_queues[m->queue].pl,
1233 ++vm_swapcache_inactive_heuristic;
1235 vm_page_and_queue_spin_unlock(m);
1241 * (m) is still busied.
1243 * The object is already known NOT to be dead. It
1244 * is possible for the vget() to block the whole
1245 * pageout daemon, but the new low-memory handling
1246 * code should prevent it.
1248 * The previous code skipped locked vnodes and, worse,
1249 * reordered pages in the queue. This results in
1250 * completely non-deterministic operation because,
1251 * quite often, a vm_fault has initiated an I/O and
1252 * is holding a locked vnode at just the point where
1253 * the pageout daemon is woken up.
1255 * We can't wait forever for the vnode lock, we might
1256 * deadlock due to a vn_read() getting stuck in
1257 * vm_wait while holding this vnode. We skip the
1258 * vnode if we can't get it in a reasonable amount
1261 * vpfailed is used to (try to) avoid the case where
1262 * a large number of pages are associated with a
1263 * locked vnode, which could cause the pageout daemon
1264 * to stall for an excessive amount of time.
1266 if (object->type == OBJT_VNODE) {
1269 vp = object->handle;
1270 flags = LK_EXCLUSIVE;
1271 if (vp == *vpfailedp)
1274 flags |= LK_TIMELOCK;
1279 * We have unbusied (m) temporarily so we can
1280 * acquire the vp lock without deadlocking.
1281 * (m) is held to prevent destruction.
1283 if (vget(vp, flags) != 0) {
1285 ++pageout_lock_miss;
1286 if (object->flags & OBJ_MIGHTBEDIRTY)
1293 * The page might have been moved to another
1294 * queue during potential blocking in vget()
1295 * above. The page might have been freed and
1296 * reused for another vnode. The object might
1297 * have been reused for another vnode.
1299 if (m->queue - m->pc != PQ_INACTIVE ||
1300 m->object != object ||
1301 object->handle != vp) {
1302 if (object->flags & OBJ_MIGHTBEDIRTY)
1310 * The page may have been busied during the
1311 * blocking in vget(). We don't move the
1312 * page back onto the end of the queue so that
1313 * statistics are more correct if we don't.
1315 if (vm_page_busy_try(m, TRUE)) {
1323 * (m) is busied again
1325 * We own the busy bit and remove our hold
1326 * bit. If the page is still held it
1327 * might be undergoing I/O, so skip it.
1329 if (m->hold_count) {
1330 vm_page_and_queue_spin_lock(m);
1331 if (m->queue - m->pc == PQ_INACTIVE) {
1332 TAILQ_REMOVE(&vm_page_queues[m->queue].pl, m, pageq);
1333 TAILQ_INSERT_TAIL(&vm_page_queues[m->queue].pl, m, pageq);
1334 ++vm_swapcache_inactive_heuristic;
1336 vm_page_and_queue_spin_unlock(m);
1337 if (object->flags & OBJ_MIGHTBEDIRTY)
1343 /* (m) is left busied as we fall through */
1347 * page is busy and not held here.
1349 * If a page is dirty, then it is either being washed
1350 * (but not yet cleaned) or it is still in the
1351 * laundry. If it is still in the laundry, then we
1352 * start the cleaning operation.
1354 * decrement inactive_shortage on success to account
1355 * for the (future) cleaned page. Otherwise we
1356 * could wind up laundering or cleaning too many
1359 * NOTE: Cleaning the page here does not cause
1360 * force_deficit to be adjusted, because the
1361 * page is not being freed or moved to the
1364 count = vm_pageout_clean_helper(m, vmflush_flags);
1365 *max_launderp -= count;
1368 * Clean ate busy, page no longer accessible
1379 vm_pageout_scan_active(int pass, int q,
1380 int avail_shortage, int inactive_shortage,
1381 int *recycle_countp)
1383 struct vm_page marker;
1390 * We want to move pages from the active queue to the inactive
1391 * queue to get the inactive queue to the inactive target. If
1392 * we still have a page shortage from above we try to directly free
1393 * clean pages instead of moving them.
1395 * If we do still have a shortage we keep track of the number of
1396 * pages we free or cache (recycle_count) as a measure of thrashing
1397 * between the active and inactive queues.
1399 * If we were able to completely satisfy the free+cache targets
1400 * from the inactive pool we limit the number of pages we move
1401 * from the active pool to the inactive pool to 2x the pages we
1402 * had removed from the inactive pool (with a minimum of 1/5 the
1403 * inactive target). If we were not able to completely satisfy
1404 * the free+cache targets we go for the whole target aggressively.
1406 * NOTE: Both variables can end up negative.
1407 * NOTE: We are still in a critical section.
1410 bzero(&marker, sizeof(marker));
1411 marker.flags = PG_BUSY | PG_FICTITIOUS | PG_MARKER;
1412 marker.queue = PQ_ACTIVE + q;
1414 marker.wire_count = 1;
1416 vm_page_queues_spin_lock(PQ_ACTIVE + q);
1417 TAILQ_INSERT_HEAD(&vm_page_queues[PQ_ACTIVE + q].pl, &marker, pageq);
1418 maxscan = vm_page_queues[PQ_ACTIVE + q].lcnt;
1421 * Queue locked at top of loop to avoid stack marker issues.
1423 while ((m = TAILQ_NEXT(&marker, pageq)) != NULL &&
1424 maxscan-- > 0 && (avail_shortage - delta > 0 ||
1425 inactive_shortage > 0))
1427 KKASSERT(m->queue == PQ_ACTIVE + q);
1428 TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE + q].pl,
1430 TAILQ_INSERT_AFTER(&vm_page_queues[PQ_ACTIVE + q].pl, m,
1434 * Skip marker pages (atomic against other markers to avoid
1435 * infinite hop-over scans).
1437 if (m->flags & PG_MARKER)
1441 * Try to busy the page. Don't mess with pages which are
1442 * already busy or reorder them in the queue.
1444 if (vm_page_busy_try(m, TRUE))
1448 * Remaining operations run with the page busy and neither
1449 * the page or the queue will be spin-locked.
1451 vm_page_queues_spin_unlock(PQ_ACTIVE + q);
1452 KKASSERT(m->queue == PQ_ACTIVE + q);
1455 * Don't deactivate pages that are held, even if we can
1456 * busy them. (XXX why not?)
1458 if (m->hold_count != 0) {
1459 vm_page_and_queue_spin_lock(m);
1460 if (m->queue - m->pc == PQ_ACTIVE) {
1462 &vm_page_queues[PQ_ACTIVE + q].pl,
1465 &vm_page_queues[PQ_ACTIVE + q].pl,
1468 vm_page_and_queue_spin_unlock(m);
1474 * The count for pagedaemon pages is done after checking the
1475 * page for eligibility...
1477 mycpu->gd_cnt.v_pdpages++;
1480 * Check to see "how much" the page has been used and clear
1481 * the tracking access bits. If the object has no references
1482 * don't bother paying the expense.
1485 if (m->object && m->object->ref_count != 0) {
1486 if (m->flags & PG_REFERENCED)
1488 actcount += pmap_ts_referenced(m);
1490 m->act_count += ACT_ADVANCE + actcount;
1491 if (m->act_count > ACT_MAX)
1492 m->act_count = ACT_MAX;
1495 vm_page_flag_clear(m, PG_REFERENCED);
1498 * actcount is only valid if the object ref_count is non-zero.
1499 * If the page does not have an object, actcount will be zero.
1501 if (actcount && m->object->ref_count != 0) {
1502 vm_page_and_queue_spin_lock(m);
1503 if (m->queue - m->pc == PQ_ACTIVE) {
1505 &vm_page_queues[PQ_ACTIVE + q].pl,
1508 &vm_page_queues[PQ_ACTIVE + q].pl,
1511 vm_page_and_queue_spin_unlock(m);
1514 switch(m->object->type) {
1517 m->act_count -= min(m->act_count,
1518 vm_anonmem_decline);
1521 m->act_count -= min(m->act_count,
1522 vm_filemem_decline);
1525 if (vm_pageout_algorithm ||
1526 (m->object == NULL) ||
1527 (m->object && (m->object->ref_count == 0)) ||
1528 m->act_count < pass + 1
1531 * Deactivate the page. If we had a
1532 * shortage from our inactive scan try to
1533 * free (cache) the page instead.
1535 * Don't just blindly cache the page if
1536 * we do not have a shortage from the
1537 * inactive scan, that could lead to
1538 * gigabytes being moved.
1540 --inactive_shortage;
1541 if (avail_shortage - delta > 0 ||
1542 (m->object && (m->object->ref_count == 0)))
1544 if (avail_shortage - delta > 0)
1546 vm_page_protect(m, VM_PROT_NONE);
1547 if (m->dirty == 0 &&
1548 (m->flags & PG_NEED_COMMIT) == 0 &&
1549 avail_shortage - delta > 0) {
1552 vm_page_deactivate(m);
1556 vm_page_deactivate(m);
1561 vm_page_and_queue_spin_lock(m);
1562 if (m->queue - m->pc == PQ_ACTIVE) {
1564 &vm_page_queues[PQ_ACTIVE + q].pl,
1567 &vm_page_queues[PQ_ACTIVE + q].pl,
1570 vm_page_and_queue_spin_unlock(m);
1576 vm_page_queues_spin_lock(PQ_ACTIVE + q);
1580 * Clean out our local marker.
1582 * Page queue still spin-locked.
1584 TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE + q].pl, &marker, pageq);
1585 vm_page_queues_spin_unlock(PQ_ACTIVE + q);
1591 * The number of actually free pages can drop down to v_free_reserved,
1592 * we try to build the free count back above v_free_min. Note that
1593 * vm_paging_needed() also returns TRUE if v_free_count is not at
1594 * least v_free_min so that is the minimum we must build the free
1597 * We use a slightly higher target to improve hysteresis,
1598 * ((v_free_target + v_free_min) / 2). Since v_free_target
1599 * is usually the same as v_cache_min this maintains about
1600 * half the pages in the free queue as are in the cache queue,
1601 * providing pretty good pipelining for pageout operation.
1603 * The system operator can manipulate vm.v_cache_min and
1604 * vm.v_free_target to tune the pageout daemon. Be sure
1605 * to keep vm.v_free_min < vm.v_free_target.
1607 * Note that the original paging target is to get at least
1608 * (free_min + cache_min) into (free + cache). The slightly
1609 * higher target will shift additional pages from cache to free
1610 * without affecting the original paging target in order to
1611 * maintain better hysteresis and not have the free count always
1612 * be dead-on v_free_min.
1614 * NOTE: we are still in a critical section.
1616 * Pages moved from PQ_CACHE to totally free are not counted in the
1617 * pages_freed counter.
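 *
 * Editor's illustrative example (hypothetical numbers, not from the
 * original source): with v_free_min = 1024 and v_free_target = 4096,
 * the CACHE->FREE loop below keeps moving cache pages to the free list
 * until v_free_count reaches (1024 + 4096) / 2 = 2560 pages.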
1620 vm_pageout_scan_cache(int avail_shortage, int pass,
1621 int vnodes_skipped, int recycle_count)
1623 static int lastkillticks;
1624 struct vm_pageout_scan_info info;
1627 while (vmstats.v_free_count <
1628 (vmstats.v_free_min + vmstats.v_free_target) / 2) {
1630 * This steals some code from vm/vm_page.c
1632 static int cache_rover = 0;
1634 m = vm_page_list_find(PQ_CACHE,
1635 cache_rover & PQ_L2_MASK, FALSE);
1638 /* page is returned removed from its queue and spinlocked */
1639 if (vm_page_busy_try(m, TRUE)) {
1640 vm_page_deactivate_locked(m);
1641 vm_page_spin_unlock(m);
1644 vm_page_spin_unlock(m);
1645 pagedaemon_wakeup();
1649 * Remaining operations run with the page busy and neither
1650 * the page or the queue will be spin-locked.
1652 if ((m->flags & (PG_UNMANAGED | PG_NEED_COMMIT)) ||
1655 vm_page_deactivate(m);
1659 KKASSERT((m->flags & PG_MAPPED) == 0);
1660 KKASSERT(m->dirty == 0);
1661 cache_rover += PQ_PRIME2;
1662 vm_pageout_page_free(m);
1663 mycpu->gd_cnt.v_dfree++;
1666 #if !defined(NO_SWAPPING)
1668 * Idle process swapout -- run once per second.
1670 if (vm_swap_idle_enabled) {
1672 if (time_uptime != lsec) {
1673 atomic_set_int(&vm_pageout_req_swapout, VM_SWAP_IDLE);
1681 * If we didn't get enough free pages, and we have skipped a vnode
1682 * in a writeable object, wakeup the sync daemon. And kick swapout
1683 * if we did not get enough free pages.
1685 if (vm_paging_target() > 0) {
1686 if (vnodes_skipped && vm_page_count_min(0))
1687 speedup_syncer(NULL);
1688 #if !defined(NO_SWAPPING)
1689 if (vm_swap_enabled && vm_page_count_target()) {
1690 atomic_set_int(&vm_pageout_req_swapout, VM_SWAP_NORMAL);
1697 * Handle catastrophic conditions. Under good conditions we should
1698 * be at the target, well beyond our minimum. If we could not even
1699 * reach our minimum the system is under heavy stress. But just being
1700 * under heavy stress does not trigger process killing.
1702 * We consider ourselves to have run out of memory if the swap pager
1703 * is full and avail_shortage is still positive. The secondary check
1704 * ensures that we do not kill processes if the instantaneous
1705 * availability is good, even if the pageout daemon pass says it
1706 * couldn't get to the target.
1708 if (swap_pager_almost_full &&
1710 (vm_page_count_min(recycle_count) || avail_shortage > 0)) {
1711 kprintf("Warning: system low on memory+swap "
1712 "shortage %d for %d ticks!\n",
1713 avail_shortage, ticks - swap_fail_ticks);
1715 if (swap_pager_full &&
1717 avail_shortage > 0 &&
1718 vm_paging_target() > 0 &&
1719 (unsigned int)(ticks - lastkillticks) >= hz) {
1721 * Kill something, maximum rate once per second to give
1722 * the process time to free up sufficient memory.
1724 lastkillticks = ticks;
1725 info.bigproc = NULL;
1727 allproc_scan(vm_pageout_scan_callback, &info);
1728 if (info.bigproc != NULL) {
1729 info.bigproc->p_nice = PRIO_MIN;
1730 info.bigproc->p_usched->resetpriority(
1731 FIRST_LWP_IN_PROC(info.bigproc));
1732 atomic_set_int(&info.bigproc->p_flags, P_LOWMEMKILL);
1733 killproc(info.bigproc, "out of swap space");
1734 wakeup(&vmstats.v_free_count);
1735 PRELE(info.bigproc);
1741 vm_pageout_scan_callback(struct proc *p, void *data)
1743 struct vm_pageout_scan_info *info = data;
1747 * Never kill system processes or init. If we have configured swap
1748 * then try to avoid killing low-numbered pids.
1750 if ((p->p_flags & P_SYSTEM) || (p->p_pid == 1) ||
1751 ((p->p_pid < 48) && (vm_swap_size != 0))) {
1755 lwkt_gettoken(&p->p_token);
1758 * if the process is in a non-running type state,
1761 if (p->p_stat != SACTIVE && p->p_stat != SSTOP && p->p_stat != SCORE) {
1762 lwkt_reltoken(&p->p_token);
1767 * Get the approximate process size. Note that anonymous pages
1768 * with backing swap will be counted twice, but there should not
1769 * be too many such pages due to the stress the VM system is
1770 * under at this point.
1772 size = vmspace_anonymous_count(p->p_vmspace) +
1773 vmspace_swap_count(p->p_vmspace);
1776 * If this process is bigger than the biggest one
1779 if (info->bigsize < size) {
1781 PRELE(info->bigproc);
1784 info->bigsize = size;
1786 lwkt_reltoken(&p->p_token);
1793 * This routine tries to maintain the pseudo LRU active queue,
1794 * so that during long periods of time where there is no paging,
1795 * that some statistic accumulation still occurs. This code
1796 * helps the situation where paging just starts to occur.
1799 vm_pageout_page_stats(int q)
1801 static int fullintervalcount = 0;
1802 struct vm_page marker;
1804 int pcount, tpcount; /* Number of pages to check */
1807 page_shortage = (vmstats.v_inactive_target + vmstats.v_cache_max +
1808 vmstats.v_free_min) -
1809 (vmstats.v_free_count + vmstats.v_inactive_count +
1810 vmstats.v_cache_count);
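/*
 * Editor's illustrative example (hypothetical numbers, not from the
 * original source): with v_inactive_target = 20000, v_cache_max = 8192,
 * v_free_min = 1024 and a current free+inactive+cache total of 40000,
 * page_shortage = (20000 + 8192 + 1024) - 40000 = -10784 <= 0, so the
 * stats scan is skipped entirely.
 */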
1812 if (page_shortage <= 0)
1815 pcount = vm_page_queues[PQ_ACTIVE + q].lcnt;
1816 fullintervalcount += vm_pageout_stats_interval;
1817 if (fullintervalcount < vm_pageout_full_stats_interval) {
1818 tpcount = (vm_pageout_stats_max * pcount) /
1819 vmstats.v_page_count + 1;
1820 if (pcount > tpcount)
1823 fullintervalcount = 0;
1826 bzero(&marker, sizeof(marker));
1827 marker.flags = PG_BUSY | PG_FICTITIOUS | PG_MARKER;
1828 marker.queue = PQ_ACTIVE + q;
1830 marker.wire_count = 1;
1832 vm_page_queues_spin_lock(PQ_ACTIVE + q);
1833 TAILQ_INSERT_HEAD(&vm_page_queues[PQ_ACTIVE + q].pl, &marker, pageq);
1836 * Queue locked at top of loop to avoid stack marker issues.
1838 while ((m = TAILQ_NEXT(&marker, pageq)) != NULL &&
1843 KKASSERT(m->queue == PQ_ACTIVE + q);
1844 TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE + q].pl, &marker, pageq);
1845 TAILQ_INSERT_AFTER(&vm_page_queues[PQ_ACTIVE + q].pl, m,
1849 * Skip marker pages (atomic against other markers to avoid
1850 * infinite hop-over scans).
1852 if (m->flags & PG_MARKER)
1856 * Ignore pages we can't busy
1858 if (vm_page_busy_try(m, TRUE))
1862 * Remaining operations run with the page busy and neither
1863 * the page or the queue will be spin-locked.
1865 vm_page_queues_spin_unlock(PQ_ACTIVE + q);
1866 KKASSERT(m->queue == PQ_ACTIVE + q);
1869 * We now have a safely busied page, the page and queue
1870 * spinlocks have been released.
1874 if (m->hold_count) {
1880 * Calculate activity
1883 if (m->flags & PG_REFERENCED) {
1884 vm_page_flag_clear(m, PG_REFERENCED);
1887 actcount += pmap_ts_referenced(m);
1890 * Update act_count and move page to end of queue.
1893 m->act_count += ACT_ADVANCE + actcount;
1894 if (m->act_count > ACT_MAX)
1895 m->act_count = ACT_MAX;
1896 vm_page_and_queue_spin_lock(m);
1897 if (m->queue - m->pc == PQ_ACTIVE) {
1899 &vm_page_queues[PQ_ACTIVE + q].pl,
1902 &vm_page_queues[PQ_ACTIVE + q].pl,
1905 vm_page_and_queue_spin_unlock(m);
1910 if (m->act_count == 0) {
1912 * We turn off page access, so that we have
1913 * more accurate RSS stats. We don't do this
1914 * in the normal page deactivation when the
1915 * system is loaded VM wise, because the
1916 * cost of the large number of page protect
1917 * operations would be higher than the value
1918 * of doing the operation.
1920 * We use the marker to save our place so
1921 * we can release the spin lock. both (m)
1922 * and (next) will be invalid.
1924 vm_page_protect(m, VM_PROT_NONE);
1925 vm_page_deactivate(m);
1927 m->act_count -= min(m->act_count, ACT_DECLINE);
1928 vm_page_and_queue_spin_lock(m);
1929 if (m->queue - m->pc == PQ_ACTIVE) {
1931 &vm_page_queues[PQ_ACTIVE + q].pl,
1934 &vm_page_queues[PQ_ACTIVE + q].pl,
1937 vm_page_and_queue_spin_unlock(m);
1941 vm_page_queues_spin_lock(PQ_ACTIVE + q);
1945 * Remove our local marker
1947 * Page queue still spin-locked.
1949 TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE + q].pl, &marker, pageq);
1950 vm_page_queues_spin_unlock(PQ_ACTIVE + q);
1954 vm_pageout_free_page_calc(vm_size_t count)
1956 if (count < vmstats.v_page_count)
1959 * free_reserved needs to include enough for the largest swap pager
1960 * structures plus enough for any pv_entry structs when paging.
1962 * v_free_min normal allocations
1963 * v_free_reserved system allocations
1964 * v_pageout_free_min allocations by pageout daemon
1965 * v_interrupt_free_min low level allocations (e.g swap structures)
1967 if (vmstats.v_page_count > 1024)
1968 vmstats.v_free_min = 64 + (vmstats.v_page_count - 1024) / 200;
1970 vmstats.v_free_min = 64;
1971 vmstats.v_free_reserved = vmstats.v_free_min * 4 / 8 + 7;
1972 vmstats.v_free_severe = vmstats.v_free_min * 4 / 8 + 0;
1973 vmstats.v_pageout_free_min = vmstats.v_free_min * 2 / 8 + 7;
1974 vmstats.v_interrupt_free_min = vmstats.v_free_min * 1 / 8 + 7;
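/*
 * Editor's illustrative example (hypothetical numbers, not from the
 * original source): with v_page_count = 1000000 (~4GB of 4K pages),
 * the formulas above yield
 *
 *	v_free_min           = 64 + (1000000 - 1024) / 200 = 5058
 *	v_free_reserved      = 5058 * 4 / 8 + 7            = 2536
 *	v_free_severe        = 5058 * 4 / 8                = 2529
 *	v_pageout_free_min   = 5058 * 2 / 8 + 7            = 1271
 *	v_interrupt_free_min = 5058 * 1 / 8 + 7            = 639
 */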
1981 * vm_pageout is the high level pageout daemon.
1986 vm_pageout_thread(void)
1994 * Initialize some paging parameters.
1996 curthread->td_flags |= TDF_SYSTHREAD;
1998 vm_pageout_free_page_calc(vmstats.v_page_count);
2001 * v_free_target and v_cache_min control pageout hysteresis. Note
2002 * that these are more a measure of the VM cache queue hysteresis
2003 * than the VM free queue. Specifically, v_free_target is the
2004 * high water mark (free+cache pages).
2006 * v_free_reserved + v_cache_min (mostly means v_cache_min) is the
2007 * low water mark, while v_free_min is the stop. v_cache_min must
2008 * be big enough to handle memory needs while the pageout daemon
2009 * is signalled and run to free more pages.
2011 if (vmstats.v_free_count > 6144)
2012 vmstats.v_free_target = 4 * vmstats.v_free_min + vmstats.v_free_reserved;
2014 vmstats.v_free_target = 2 * vmstats.v_free_min + vmstats.v_free_reserved;
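/*
 * Editor's illustrative example (continuing the hypothetical numbers
 * above, not from the original source): with v_free_min = 5058,
 * v_free_reserved = 2536 and v_free_count > 6144, the high water mark
 * becomes v_free_target = 4 * 5058 + 2536 = 22768 free+cache pages.
 */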
2017 * NOTE: With the new buffer cache b_act_count we want the default
2018 * inactive target to be a percentage of available memory.
2020 * The inactive target essentially determines the minimum
2021 * number of 'temporary' pages capable of caching one-time-use
2022 * files when the VM system is otherwise full of pages
2023 * belonging to multi-time-use files or active program data.
2025 * NOTE: The inactive target is aggressively pursued only if the
2026 * inactive queue becomes too small. If the inactive queue
2027 * is large enough to satisfy page movement to free+cache
2028 * then it is repopulated more slowly from the active queue.
2029 * This allows a general inactive_target default to be set.
2031 * There is an issue here for processes which sit mostly idle
2032 * 'overnight', such as sshd, tcsh, and X. Any movement from
2033 * the active queue will eventually cause such pages to be
2034 * recycled, causing a lot of paging in the morning.
2035 * To reduce the incidence of this pages cycled out of the
2036 * buffer cache are moved directly to the inactive queue if
2037 * they were only used once or twice.
2039 * The vfs.vm_cycle_point sysctl can be used to adjust this.
2040 * Increasing the value (up to 64) increases the number of
2041 * buffer recyclements which go directly to the inactive queue.
2043 if (vmstats.v_free_count > 2048) {
2044 vmstats.v_cache_min = vmstats.v_free_target;
2045 vmstats.v_cache_max = 2 * vmstats.v_cache_min;
2047 vmstats.v_cache_min = 0;
2048 vmstats.v_cache_max = 0;
2050 vmstats.v_inactive_target = vmstats.v_free_count / 4;
2052 /* XXX does not really belong here */
2053 if (vm_page_max_wired == 0)
2054 vm_page_max_wired = vmstats.v_free_count / 3;
2056 if (vm_pageout_stats_max == 0)
2057 vm_pageout_stats_max = vmstats.v_free_target;
2060 * Set interval in seconds for stats scan.
2062 if (vm_pageout_stats_interval == 0)
2063 vm_pageout_stats_interval = 5;
2064 if (vm_pageout_full_stats_interval == 0)
2065 vm_pageout_full_stats_interval = vm_pageout_stats_interval * 4;
2069 * Set maximum free per pass
2071 if (vm_pageout_stats_free_max == 0)
2072 vm_pageout_stats_free_max = 5;
2074 swap_pager_swap_init();
2078 * The pageout daemon is never done, so loop forever.
2083 int inactive_shortage;
2084 int vnodes_skipped = 0;
2085 int recycle_count = 0;
2089 * Wait for an action request. If we timeout check to
2090 * see if paging is needed (in case the normal wakeup
2093 if (vm_pages_needed == 0) {
2094 error = tsleep(&vm_pages_needed,
2096 vm_pageout_stats_interval * hz);
2098 vm_paging_needed() == 0 &&
2099 vm_pages_needed == 0) {
2100 for (q = 0; q < PQ_L2_SIZE; ++q)
2101 vm_pageout_page_stats(q);
2104 vm_pages_needed = 1;
2107 mycpu->gd_cnt.v_pdwakeups++;
2110 * Scan for INACTIVE->CLEAN/PAGEOUT
2112 * This routine tries to avoid thrashing the system with
2113 * unnecessary activity.
2115 * Calculate our target for the number of free+cache pages we
2116 * want to get to. This is higher than the number that causes
2117 * allocations to stall (severe) in order to provide hysteresis,
2118 * and if we don't make it all the way but get to the minimum
2119 * we're happy. Goose it a bit if there are multiple requests
2122 * Don't reduce avail_shortage inside the loop or the
2123 * PQAVERAGE() calculation will break.
2125 * NOTE! deficit is differentiated from avail_shortage as
2126 * REQUIRING at least (deficit) pages to be cleaned,
2127 * even if the page queues are in good shape. This
2128 * is used primarily for handling per-process
2129 * RLIMIT_RSS and may also see small values when
2130 * processes block due to low memory.
2132 avail_shortage = vm_paging_target() + vm_pageout_deficit;
2133 vm_pageout_deficit = 0;
2135 if (avail_shortage > 0) {
2138 for (q = 0; q < PQ_L2_SIZE; ++q) {
2139 delta += vm_pageout_scan_inactive(
2141 (q + q1iterator) & PQ_L2_MASK,
2142 PQAVERAGE(avail_shortage),
2144 if (avail_shortage - delta <= 0)
2147 avail_shortage -= delta;
2152 * Figure out how many active pages we must deactivate. If
2153 * we were able to reach our target with just the inactive
2154 * scan above we limit the number of active pages we
2155 * deactivate to reduce unnecessary work.
2157 inactive_shortage = vmstats.v_inactive_target -
2158 vmstats.v_inactive_count;
2161 * If we were unable to free sufficient inactive pages to
2162 * satisfy the free/cache queue requirements then simply
2163 * reaching the inactive target may not be good enough.
2164 * Try to deactivate pages in excess of the target based
2167 * However to prevent thrashing the VM system do not
2168 * deactivate more than an additional 1/10 the inactive
2169 * target's worth of active pages.
2171 if (avail_shortage > 0) {
2172 tmp = avail_shortage * 2;
2173 if (tmp > vmstats.v_inactive_target / 10)
2174 tmp = vmstats.v_inactive_target / 10;
2175 inactive_shortage += tmp;
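/*
 * Editor's illustrative example (hypothetical numbers, not from the
 * original source): if the inactive scan left avail_shortage = 500 and
 * v_inactive_target = 20000, then tmp = 500 * 2 = 1000, which is below
 * the 20000 / 10 = 2000 cap, so inactive_shortage grows by 1000.  A
 * larger leftover shortage of 5000 would be clamped to 2000.
 */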
2179 * Only trigger a pmap cleanup on inactive shortage.
2181 if (inactive_shortage > 0) {
2186 * Scan for ACTIVE->INACTIVE
2188 * Only trigger on inactive shortage. Triggering on
2189 * avail_shortage can starve the active queue with
2190 * unnecessary active->inactive transitions and destroy
2193 if (/*avail_shortage > 0 ||*/ inactive_shortage > 0) {
2196 for (q = 0; q < PQ_L2_SIZE; ++q) {
2197 delta += vm_pageout_scan_active(
2199 (q + q2iterator) & PQ_L2_MASK,
2200 PQAVERAGE(avail_shortage),
2201 PQAVERAGE(inactive_shortage),
2203 if (inactive_shortage - delta <= 0 &&
2204 avail_shortage - delta <= 0) {
2208 inactive_shortage -= delta;
2209 avail_shortage -= delta;
2214 * Scan for CACHE->FREE
2216 * Finally free enough cache pages to meet our free page
2217 * requirement and take more drastic measures if we are
2220 vm_pageout_scan_cache(avail_shortage, pass,
2221 vnodes_skipped, recycle_count);
2224 * Wait for more work.
2226 if (avail_shortage > 0) {
2228 if (pass < 10 && vm_pages_needed > 1) {
2230 * Normal operation, additional processes
2231 * have already kicked us. Retry immediately
2232 * unless swap space is completely full in
2233 * which case delay a bit.
2235 if (swap_pager_full) {
2236 tsleep(&vm_pages_needed, 0, "pdelay",
2238 } /* else immediate retry */
2239 } else if (pass < 10) {
2241 * Normal operation, fewer processes. Delay
2242 * a bit but allow wakeups.
2244 vm_pages_needed = 0;
2245 tsleep(&vm_pages_needed, 0, "pdelay", hz / 10);
2246 vm_pages_needed = 1;
2247 } else if (swap_pager_full == 0) {
2249 * We've taken too many passes, forced delay.
2251 tsleep(&vm_pages_needed, 0, "pdelay", hz / 10);
2254 * Running out of memory, catastrophic
2255 * back-off to one-second intervals.
2257 tsleep(&vm_pages_needed, 0, "pdelay", hz);
2259 } else if (vm_pages_needed) {
2261 * Interlocked wakeup of waiters (non-optional).
2263 * Similar to vm_page_free_wakeup() in vm_page.c,
2267 if (!vm_page_count_min(vm_page_free_hysteresis) ||
2268 !vm_page_count_target()) {
2269 vm_pages_needed = 0;
2270 wakeup(&vmstats.v_free_count);
2278 static struct kproc_desc page_kp = {
2283 SYSINIT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, kproc_start, &page_kp);
2287 * Called after allocating a page out of the cache or free queue
2288 * to possibly wake the pagedaemon up to replenish our supply.
2290 * We try to generate some hysteresis by waking the pagedaemon up
2291 * when our free+cache pages go below the free_min+cache_min level.
2292 * The pagedaemon tries to get the count back up to at least the
2293 * minimum, and through to the target level if possible.
2295 * If the pagedaemon is already active bump vm_pages_needed as a hint
2296 * that there are even more requests pending.
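 *
 * Editor's illustrative note (hypothetical numbers, not from the
 * original source): with v_free_min = 5058 and v_cache_min = 22768, an
 * allocation that drops free+cache below ~27826 pages triggers the
 * first wakeup here; the daemon then works back up toward the higher
 * v_free_target/v_cache_max levels before going idle again.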
2302 pagedaemon_wakeup(void)
2304 if (vm_paging_needed() && curthread != pagethread) {
2305 if (vm_pages_needed == 0) {
2306 vm_pages_needed = 1; /* SMP race ok */
2307 wakeup(&vm_pages_needed);
2308 } else if (vm_page_count_min(0)) {
2309 ++vm_pages_needed; /* SMP race ok */
2314 #if !defined(NO_SWAPPING)
2321 vm_req_vmdaemon(void)
2323 static int lastrun = 0;
2325 if ((ticks > (lastrun + hz)) || (ticks < lastrun)) {
2326 wakeup(&vm_daemon_needed);
2331 static int vm_daemon_callback(struct proc *p, void *data __unused);
2342 tsleep(&vm_daemon_needed, 0, "psleep", 0);
2343 req_swapout = atomic_swap_int(&vm_pageout_req_swapout, 0);
2349 swapout_procs(vm_pageout_req_swapout);
2352 * scan the processes for exceeding their rlimits or if
2353 * process is swapped out -- deactivate pages
2355 allproc_scan(vm_daemon_callback, NULL);
2360 vm_daemon_callback(struct proc *p, void *data __unused)
2363 vm_pindex_t limit, size;
2366 * if this is a system process or if we have already
2367 * looked at this process, skip it.
2369 lwkt_gettoken(&p->p_token);
2371 if (p->p_flags & (P_SYSTEM | P_WEXIT)) {
2372 lwkt_reltoken(&p->p_token);
2377 * if the process is in a non-running type state,
2380 if (p->p_stat != SACTIVE && p->p_stat != SSTOP && p->p_stat != SCORE) {
2381 lwkt_reltoken(&p->p_token);
2388 limit = OFF_TO_IDX(qmin(p->p_rlimit[RLIMIT_RSS].rlim_cur,
2389 p->p_rlimit[RLIMIT_RSS].rlim_max));
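/*
 * Editor's illustrative example (hypothetical numbers, not from the
 * original source): with RLIMIT_RSS rlim_cur = 64MB and 4K pages,
 * limit = OFF_TO_IDX(67108864) = 16384 pages; a process whose resident
 * count exceeds that is trimmed by vm_pageout_map_deactivate_pages()
 * below.
 */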
2392 * let processes that are swapped out really be
2393 * swapped out. Set the limit to nothing to get as
2394 * many pages out to swap as possible.
2396 if (p->p_flags & P_SWAPPEDOUT)
2401 size = pmap_resident_tlnw_count(&vm->vm_pmap);
2402 if (limit >= 0 && size >= limit && vm_pageout_memuse_mode >= 1) {
2403 vm_pageout_map_deactivate_pages(&vm->vm_map, limit);
2407 lwkt_reltoken(&p->p_token);