/*
 * Copyright (c) 2010 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Implement the swapcache daemon.  When enabled swap is assumed to be
 * configured on a fast storage device such as an SSD.  Swap is assigned
 * to clean vnode-backed pages in the inactive queue, clustered by object
 * if possible, and written out.  The swap assignment sticks around even
 * after the underlying pages have been recycled.
 *
 * The daemon manages write bandwidth based on sysctl settings to control
 * wear on the SSD.
 *
 * The vnode strategy code will check for the swap assignments and divert
 * reads to the swap device when the data is present in the swapcache.
 *
 * This operates on both regular files and the block device vnodes used by
 * filesystems to manage meta-data.
 */
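
/*
 * Example (hypothetical path and values): with the default use_chflags
 * policy only vnodes the administrator has marked with the "cache" file
 * flag are eligible for data caching, e.g. from userland:
 *
 *	chflags cache /usr/obj
 *	sysctl vm.swapcache.data_enable=1
 *	sysctl vm.swapcache.read_enable=1
 */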

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>

#include <sys/thread2.h>
#include <vm/vm_page2.h>

#define INACTIVE_LIST	(&vm_page_queues[PQ_INACTIVE].pl)

/* the kernel process "swapcached" */
static void vm_swapcached (void);
static int vm_swapcached_flush (vm_page_t m, int isblkdev);
static int vm_swapcache_test(vm_page_t m);
static void vm_swapcache_writing(vm_page_t marker);
static void vm_swapcache_cleaning(vm_object_t marker);
struct thread *swapcached_thread;

static struct kproc_desc swpc_kp = {
	"swapcached",
	vm_swapcached,
	&swapcached_thread
};
SYSINIT(swapcached, SI_SUB_KTHREAD_PAGE, SI_ORDER_SECOND, kproc_start, &swpc_kp)

SYSCTL_NODE(_vm, OID_AUTO, swapcache, CTLFLAG_RW, NULL, NULL);

int vm_swapcache_read_enable;
int vm_swapcache_inactive_heuristic;
static int vm_swapcache_sleep;
static int vm_swapcache_maxlaunder = 256;
static int vm_swapcache_data_enable = 0;
static int vm_swapcache_meta_enable = 0;
static int vm_swapcache_maxswappct = 75;
static int vm_swapcache_hysteresis;
static int vm_swapcache_use_chflags = 1;	/* require chflags cache */
static int64_t vm_swapcache_minburst = 10000000LL;	/* 10MB */
static int64_t vm_swapcache_curburst = 4000000000LL;	/* 4G after boot */
static int64_t vm_swapcache_maxburst = 2000000000LL;	/* 2G nominal max */
static int64_t vm_swapcache_accrate = 100000LL;		/* 100K/s */
static int64_t vm_swapcache_write_count;
static int64_t vm_swapcache_maxfilesize;

SYSCTL_INT(_vm_swapcache, OID_AUTO, maxlaunder,
	CTLFLAG_RW, &vm_swapcache_maxlaunder, 0, "");

SYSCTL_INT(_vm_swapcache, OID_AUTO, data_enable,
	CTLFLAG_RW, &vm_swapcache_data_enable, 0, "");
SYSCTL_INT(_vm_swapcache, OID_AUTO, meta_enable,
	CTLFLAG_RW, &vm_swapcache_meta_enable, 0, "");
SYSCTL_INT(_vm_swapcache, OID_AUTO, read_enable,
	CTLFLAG_RW, &vm_swapcache_read_enable, 0, "");
SYSCTL_INT(_vm_swapcache, OID_AUTO, maxswappct,
	CTLFLAG_RW, &vm_swapcache_maxswappct, 0, "");
SYSCTL_INT(_vm_swapcache, OID_AUTO, hysteresis,
	CTLFLAG_RW, &vm_swapcache_hysteresis, 0, "");
SYSCTL_INT(_vm_swapcache, OID_AUTO, use_chflags,
	CTLFLAG_RW, &vm_swapcache_use_chflags, 0, "");

SYSCTL_QUAD(_vm_swapcache, OID_AUTO, minburst,
	CTLFLAG_RW, &vm_swapcache_minburst, 0, "");
SYSCTL_QUAD(_vm_swapcache, OID_AUTO, curburst,
	CTLFLAG_RW, &vm_swapcache_curburst, 0, "");
SYSCTL_QUAD(_vm_swapcache, OID_AUTO, maxburst,
	CTLFLAG_RW, &vm_swapcache_maxburst, 0, "");
SYSCTL_QUAD(_vm_swapcache, OID_AUTO, maxfilesize,
	CTLFLAG_RW, &vm_swapcache_maxfilesize, 0, "");
SYSCTL_QUAD(_vm_swapcache, OID_AUTO, accrate,
	CTLFLAG_RW, &vm_swapcache_accrate, 0, "");
SYSCTL_QUAD(_vm_swapcache, OID_AUTO, write_count,
	CTLFLAG_RW, &vm_swapcache_write_count, 0, "");
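
/*
 * Example tuning (hypothetical values): the burst sysctls bound SSD
 * write bandwidth.  A conservative setup might sustain ~100KB/sec with
 * 2GB bursts:
 *
 *	sysctl vm.swapcache.accrate=100000
 *	sysctl vm.swapcache.maxburst=2000000000
 *
 * curburst is drained by writes and recovers toward maxburst at
 * approximately accrate bytes/sec.
 */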

#define SWAPMAX(adj)	\
	((int64_t)vm_swap_max * (vm_swapcache_maxswappct + (adj)) / 100)
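
/*
 * Worked example: with the default vm_swapcache_maxswappct of 75,
 * SWAPMAX(0) evaluates to 75% of vm_swap_max and SWAPMAX(-5) to 70%,
 * the two trip points of the write/clean state machine below.
 */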

/*
 * vm_swapcached is the high level swapcache daemon loop.
 */
static void
vm_swapcached(void)
{
	enum { SWAPC_WRITING, SWAPC_CLEANING } state = SWAPC_WRITING;
	enum { SWAPB_BURSTING, SWAPB_RECOVERING } burst = SWAPB_BURSTING;
	struct vm_page page_marker;
	struct vm_object object_marker;

	/*
	 * Thread setup
	 */
	curthread->td_flags |= TDF_SYSTHREAD;
	crit_enter();
	lwkt_gettoken(&vm_token);

	/*
	 * Initialize our marker for the inactive scan (SWAPC_WRITING)
	 */
	bzero(&page_marker, sizeof(page_marker));
	page_marker.flags = PG_BUSY | PG_FICTITIOUS | PG_MARKER;
	page_marker.queue = PQ_INACTIVE;
	page_marker.wire_count = 1;
	TAILQ_INSERT_HEAD(INACTIVE_LIST, &page_marker, pageq);
	vm_swapcache_hysteresis = vmstats.v_inactive_target / 2;
	vm_swapcache_inactive_heuristic = -vm_swapcache_hysteresis;

	/*
	 * Initialize our marker for the vm_object scan (SWAPC_CLEANING)
	 */
	bzero(&object_marker, sizeof(object_marker));
	object_marker.type = OBJT_MARKER;
	TAILQ_INSERT_HEAD(&vm_object_list, &object_marker, object_list);

	for (;;) {
		/*
		 * Check every 5 seconds when not enabled or if no swap
		 * is present.
		 */
		if ((vm_swapcache_data_enable == 0 &&
		     vm_swapcache_meta_enable == 0) ||
		    vm_swap_max == 0) {
			tsleep(&vm_swapcache_sleep, 0, "csleep", hz * 5);
			continue;
		}

		/*
		 * Polling rate when enabled is approximately 10 hz.
		 */
		tsleep(&vm_swapcache_sleep, 0, "csleep", hz / 10);

		/*
		 * State hysteresis.  Generate write activity up to 75% of
		 * swap, then clean out swap assignments down to 70%, then
		 * repeat.
		 */
		if (state == SWAPC_WRITING) {
			if (vm_swap_cache_use > SWAPMAX(0))
				state = SWAPC_CLEANING;
		} else {
			if (vm_swap_cache_use < SWAPMAX(-5))
				state = SWAPC_WRITING;
		}

		/*
		 * We are allowed to continue accumulating burst value
		 * in either state.  Allow the user to set curburst > maxburst
		 * for the initial load-in.
		 */
		if (vm_swapcache_curburst < vm_swapcache_maxburst) {
			vm_swapcache_curburst += vm_swapcache_accrate / 10;
			if (vm_swapcache_curburst > vm_swapcache_maxburst)
				vm_swapcache_curburst = vm_swapcache_maxburst;
		}
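
		/*
		 * Worked example: at the default accrate of 100K/sec and
		 * the ~10hz poll rate the pass above adds accrate / 10 =
		 * 10000 bytes of write budget, so refilling the 10MB
		 * minburst from zero takes roughly 100 seconds.
		 */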

		/*
		 * We don't want to nickel-and-dime the scan as that will
		 * create unnecessary fragmentation.  The minimum burst
		 * is one second's worth of accumulation.
		 */
		if (state == SWAPC_WRITING) {
			if (vm_swapcache_curburst >= vm_swapcache_accrate) {
				if (burst == SWAPB_BURSTING) {
					vm_swapcache_writing(&page_marker);
					if (vm_swapcache_curburst <= 0)
						burst = SWAPB_RECOVERING;
				} else if (vm_swapcache_curburst >
					   vm_swapcache_minburst) {
					vm_swapcache_writing(&page_marker);
					burst = SWAPB_BURSTING;
				}
			}
		} else {
			vm_swapcache_cleaning(&object_marker);
		}
	}
	TAILQ_REMOVE(INACTIVE_LIST, &page_marker, pageq);
	TAILQ_REMOVE(&vm_object_list, &object_marker, object_list);
	lwkt_reltoken(&vm_token);
	crit_exit();
}

/*
 * The caller must hold vm_token.
 */
static void
vm_swapcache_writing(vm_page_t marker)
{
	vm_object_t object;
	struct vnode *vp;
	vm_page_t m;
	int count;
	int isblkdev;

	/*
	 * Deal with an overflow of the heuristic counter or if the user
	 * manually changes the hysteresis.
	 *
	 * Try to avoid small incremental pageouts by waiting for enough
	 * pages to build up in the inactive queue to hopefully get a good
	 * burst in.  This heuristic is bumped by the VM system and reset
	 * when our scan hits the end of the queue.
	 */
	if (vm_swapcache_inactive_heuristic < -vm_swapcache_hysteresis)
		vm_swapcache_inactive_heuristic = -vm_swapcache_hysteresis;
	if (vm_swapcache_inactive_heuristic < 0)
		return;

	/*
	 * Scan the inactive queue from our marker to locate
	 * suitable pages to push to the swap cache.
	 *
	 * We are looking for clean vnode-backed pages.
	 *
	 * NOTE: PG_SWAPPED pages in particular are not part of
	 *	 our count because once the cache stabilizes we
	 *	 can end up with a very high data rate of VM pages
	 *	 cycling through.
	 */
	count = vm_swapcache_maxlaunder;
	m = marker;
	while ((m = TAILQ_NEXT(m, pageq)) != NULL && count--) {
		if (m->flags & (PG_MARKER | PG_SWAPPED)) {
			++count;
			continue;
		}
		if (vm_swapcache_curburst < 0)
			break;
		if (vm_swapcache_test(m))
			continue;
		object = m->object;
		vp = object->handle;
		if (vp == NULL)
			continue;

		switch(vp->v_type) {
		case VREG:
			/*
			 * If data_enable is 0 do not try to swapcache data.
			 * If use_chflags is set then only swapcache data for
			 * VSWAPCACHE marked vnodes, otherwise any vnode.
			 */
			if (vm_swapcache_data_enable == 0 ||
			    ((vp->v_flag & VSWAPCACHE) == 0 &&
			     vm_swapcache_use_chflags)) {
				continue;
			}
			if (vm_swapcache_maxfilesize &&
			    object->size >
			    (vm_swapcache_maxfilesize >> PAGE_SHIFT)) {
				continue;
			}
			isblkdev = 0;
			break;
		case VCHR:
			/*
			 * The PG_NOTMETA flag only applies to pages
			 * associated with block devices.
			 */
			if (m->flags & PG_NOTMETA)
				continue;
			if (vm_swapcache_meta_enable == 0)
				continue;
			isblkdev = 1;
			break;
		default:
			continue;
		}

		/*
		 * Ok, move the marker and soft-busy the page.
		 */
		TAILQ_REMOVE(INACTIVE_LIST, marker, pageq);
		TAILQ_INSERT_AFTER(INACTIVE_LIST, m, marker, pageq);

		/*
		 * Assign swap and initiate I/O.
		 *
		 * (adjust for the --count which also occurs in the loop)
		 */
		count -= vm_swapcached_flush(m, isblkdev) - 1;

		/*
		 * Setup for next loop using marker.
		 */
		m = marker;
	}

	/*
	 * Cleanup marker position.  If we hit the end of the
	 * list the marker is placed at the tail.  Newly deactivated
	 * pages will be placed after it.
	 *
	 * Earlier inactive pages that were dirty and become clean
	 * are typically moved to the end of PQ_INACTIVE by virtue
	 * of vfs_vmio_release() when they become unwired from the
	 * buffer cache.
	 */
	TAILQ_REMOVE(INACTIVE_LIST, marker, pageq);
	if (m) {
		TAILQ_INSERT_BEFORE(m, marker, pageq);
	} else {
		TAILQ_INSERT_TAIL(INACTIVE_LIST, marker, pageq);
		vm_swapcache_inactive_heuristic = -vm_swapcache_hysteresis;
	}
}

/*
 * Flush the specified page using the swap_pager.
 *
 * Try to collect surrounding pages, including pages which may
 * have already been assigned swap.  Try to cluster within a
 * contiguous aligned SWAP_META_PAGES (typ 16 x PAGE_SIZE) block
 * to match what swap_pager_putpages() can do.
 *
 * We also want to try to match against the buffer cache blocksize
 * but we don't really know what it is here.  Since the buffer cache
 * wires and unwires pages in groups the fact that we skip wired pages
 * should be sufficient.
 *
 * Returns a count of pages we might have flushed (minimum 1).
 *
 * The caller must hold vm_token.
 */
static
int
vm_swapcached_flush(vm_page_t m, int isblkdev)
{
	vm_object_t object;
	vm_page_t marray[SWAP_META_PAGES];
	vm_pindex_t basei;
	int rtvals[SWAP_META_PAGES];
	int x;
	int i;
	int j;
	int count;

	vm_page_io_start(m);
	vm_page_protect(m, VM_PROT_READ);
	object = m->object;

	/*
	 * Try to cluster around (m), keeping in mind that the swap pager
	 * can only do SWAP_META_PAGES worth of contiguous write.
	 */
	x = (int)m->pindex & SWAP_META_MASK;
	marray[x] = m;
	basei = m->pindex;
	rtvals[x] = VM_PAGER_PEND;
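
	/*
	 * Worked example: with SWAP_META_PAGES == 16 a page at pindex 37
	 * gives x = 37 & 15 = 5 and basei - x = 32, so the scans below can
	 * cluster pages 32..47 into marray[0..15] around slot 5.
	 */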

	for (i = x - 1; i >= 0; --i) {
		m = vm_page_lookup(object, basei - x + i);
		if (m == NULL)
			break;
		if (vm_swapcache_test(m))
			break;
		if (isblkdev && (m->flags & PG_NOTMETA))
			break;
		vm_page_io_start(m);
		vm_page_protect(m, VM_PROT_READ);
		if (m->queue - m->pc == PQ_CACHE) {
			vm_page_unqueue_nowakeup(m);
			vm_page_deactivate(m);
		}
		marray[i] = m;
		rtvals[i] = VM_PAGER_PEND;
	}
	++i;

	for (j = x + 1; j < SWAP_META_PAGES; ++j) {
		m = vm_page_lookup(object, basei - x + j);
		if (m == NULL)
			break;
		if (vm_swapcache_test(m))
			break;
		if (isblkdev && (m->flags & PG_NOTMETA))
			break;
		vm_page_io_start(m);
		vm_page_protect(m, VM_PROT_READ);
		if (m->queue - m->pc == PQ_CACHE) {
			vm_page_unqueue_nowakeup(m);
			vm_page_deactivate(m);
		}
		marray[j] = m;
		rtvals[j] = VM_PAGER_PEND;
	}

	count = j - i;
	vm_object_pip_add(object, count);
	swap_pager_putpages(object, marray + i, count, FALSE, rtvals + i);
	vm_swapcache_write_count += count * PAGE_SIZE;
	vm_swapcache_curburst -= count * PAGE_SIZE;

	/*
	 * Pages which the pager completed synchronously are finished
	 * here; pages still pending are finished by the I/O completion.
	 */
	while (i < j) {
		if (rtvals[i] != VM_PAGER_PEND) {
			vm_page_io_finish(marray[i]);
			vm_object_pip_wakeup(object);
		}
		++i;
	}
	return(count);
}

/*
 * Test whether a VM page is suitable for writing to the swapcache.
 * Does not test m->queue, PG_MARKER, or PG_SWAPPED.
 *
 * Returns 0 on success, 1 on failure.
 *
 * The caller must hold vm_token.
 */
static int
vm_swapcache_test(vm_page_t m)
{
	vm_object_t object;

	if (m->flags & (PG_BUSY | PG_UNMANAGED))
		return(1);
	if (m->busy || m->hold_count || m->wire_count)
		return(1);
	if (m->valid != VM_PAGE_BITS_ALL)
		return(1);
	if (m->dirty & m->valid)
		return(1);
	if ((object = m->object) == NULL)
		return(1);
	if (object->type != OBJT_VNODE ||
	    (object->flags & OBJ_DEAD)) {
		return(1);
	}
	vm_page_test_dirty(m);
	if (m->dirty & m->valid)
		return(1);
	return(0);
}
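
/*
 * Note: the dirty test runs twice because vm_page_test_dirty() can
 * promote pmap-level modified bits into m->dirty, so a page that
 * looked clean on entry may be discovered dirty and rejected here.
 */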

/*
 * The caller must hold vm_token.
 */
static
void
vm_swapcache_cleaning(vm_object_t marker)
{
	vm_object_t object;
	struct vnode *vp;
	int count;
	int n;

	object = marker;
	count = vm_swapcache_maxlaunder;

	/*
	 * Look for vnode objects
	 */
	lwkt_gettoken(&vm_token);
	while ((object = TAILQ_NEXT(object, object_list)) != NULL && count--) {
		if (object->type != OBJT_VNODE)
			continue;
		if ((object->flags & OBJ_DEAD) || object->swblock_count == 0)
			continue;
		if ((vp = object->handle) == NULL)
			continue;
		if (vp->v_type != VREG && vp->v_type != VCHR)
			continue;

		/*
		 * Reset the per-object swblock iterator if we have moved
		 * to a new object.
		 */
		if (marker->backing_object != object)
			marker->size = 0;

		/*
		 * Move the marker so we can work on the VM object
		 */
		TAILQ_REMOVE(&vm_object_list, marker, object_list);
		TAILQ_INSERT_AFTER(&vm_object_list, object,
				   marker, object_list);

		/*
		 * Look for swblocks starting at our iterator.
		 *
		 * The swap_pager_condfree() function attempts to free
		 * swap space starting at the specified index.  The index
		 * will be updated on return.  The function will return
		 * a scan factor (NOT the number of blocks freed).
		 *
		 * If it must cut its scan of the object short due to an
		 * excessive number of swblocks, or is able to free the
		 * requested number of blocks, it will return n >= count
		 * and we break and pick it back up on a future attempt.
		 */
		n = swap_pager_condfree(object, &marker->size, count);
		if (n >= count)
			break;
		count -= n;
	}

	/*
	 * Adjust marker so we continue the scan from where we left off.
	 * When we reach the end we start back at the beginning.
	 */
	TAILQ_REMOVE(&vm_object_list, marker, object_list);
	if (object)
		TAILQ_INSERT_BEFORE(object, marker, object_list);
	else
		TAILQ_INSERT_HEAD(&vm_object_list, marker, object_list);
	marker->backing_object = object;
	lwkt_reltoken(&vm_token);
}