/*
 * Copyright (c) 2010 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Implement the swapcache daemon.  When enabled swap is assumed to be
 * configured on a fast storage device such as a SSD.  Swap is assigned
 * to clean vnode-backed pages in the inactive queue, clustered by object
 * if possible, and written out.  The swap assignment sticks around even
 * after the underlying pages have been recycled.
 *
 * The daemon manages write bandwidth based on sysctl settings to control
 * wear on the SSD.
 *
 * The vnode strategy code will check for the swap assignments and divert
 * reads to the swap device when the data is present in the swapcache.
 *
 * This operates on both regular files and the block device vnodes used by
 * filesystems to manage meta-data.
 */
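
/*
 * Example: the daemon is driven entirely by the vm.swapcache.* sysctls
 * defined later in this file.  A typical SSD-swap setup might enable it
 * with something like (values illustrative only; see swapcache(8) for
 * real tuning guidance):
 *
 *	sysctl vm.swapcache.data_enable=1	(cache regular file data)
 *	sysctl vm.swapcache.meta_enable=1	(cache filesystem meta-data)
 *	sysctl vm.swapcache.read_enable=1	(divert reads to the cache)
 */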

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>
#include <sys/eventhandler.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>

#include <sys/thread2.h>
#include <sys/spinlock2.h>
#include <vm/vm_page2.h>

/* the kernel process "swapcached" */
static int vm_swapcached_flush(vm_page_t m, int isblkdev);
static int vm_swapcache_test(vm_page_t m);
static void vm_swapcache_writing(vm_page_t marker);
static void vm_swapcache_cleaning(vm_object_t marker);
static void vm_swapcache_movemarker(vm_object_t marker, vm_object_t object);
struct thread *swapcached_thread;

SYSCTL_NODE(_vm, OID_AUTO, swapcache, CTLFLAG_RW, NULL, NULL);

int vm_swapcache_read_enable;
int vm_swapcache_inactive_heuristic;
static int vm_swapcache_sleep;
static int vm_swapcache_maxlaunder = 256;
static int vm_swapcache_data_enable = 0;
static int vm_swapcache_meta_enable = 0;
static int vm_swapcache_maxswappct = 75;
static int vm_swapcache_hysteresis;
int vm_swapcache_use_chflags = 1;			/* require chflags cache */
static int64_t vm_swapcache_minburst = 10000000LL;	/* 10MB */
static int64_t vm_swapcache_curburst = 4000000000LL;	/* 4G after boot */
static int64_t vm_swapcache_maxburst = 2000000000LL;	/* 2G nominal max */
static int64_t vm_swapcache_accrate = 100000LL;		/* 100K/s */
static int64_t vm_swapcache_write_count;
static int64_t vm_swapcache_maxfilesize;
static int64_t vm_swapcache_cleanperobj = 16*1024*1024;

SYSCTL_INT(_vm_swapcache, OID_AUTO, maxlaunder,
	CTLFLAG_RW, &vm_swapcache_maxlaunder, 0, "");

SYSCTL_INT(_vm_swapcache, OID_AUTO, data_enable,
	CTLFLAG_RW, &vm_swapcache_data_enable, 0, "");
SYSCTL_INT(_vm_swapcache, OID_AUTO, meta_enable,
	CTLFLAG_RW, &vm_swapcache_meta_enable, 0, "");
SYSCTL_INT(_vm_swapcache, OID_AUTO, read_enable,
	CTLFLAG_RW, &vm_swapcache_read_enable, 0, "");
SYSCTL_INT(_vm_swapcache, OID_AUTO, maxswappct,
	CTLFLAG_RW, &vm_swapcache_maxswappct, 0, "");
SYSCTL_INT(_vm_swapcache, OID_AUTO, hysteresis,
	CTLFLAG_RW, &vm_swapcache_hysteresis, 0, "");
SYSCTL_INT(_vm_swapcache, OID_AUTO, use_chflags,
	CTLFLAG_RW, &vm_swapcache_use_chflags, 0, "");

SYSCTL_QUAD(_vm_swapcache, OID_AUTO, minburst,
	CTLFLAG_RW, &vm_swapcache_minburst, 0, "");
SYSCTL_QUAD(_vm_swapcache, OID_AUTO, curburst,
	CTLFLAG_RW, &vm_swapcache_curburst, 0, "");
SYSCTL_QUAD(_vm_swapcache, OID_AUTO, maxburst,
	CTLFLAG_RW, &vm_swapcache_maxburst, 0, "");
SYSCTL_QUAD(_vm_swapcache, OID_AUTO, maxfilesize,
	CTLFLAG_RW, &vm_swapcache_maxfilesize, 0, "");
SYSCTL_QUAD(_vm_swapcache, OID_AUTO, accrate,
	CTLFLAG_RW, &vm_swapcache_accrate, 0, "");
SYSCTL_QUAD(_vm_swapcache, OID_AUTO, write_count,
	CTLFLAG_RW, &vm_swapcache_write_count, 0, "");
SYSCTL_QUAD(_vm_swapcache, OID_AUTO, cleanperobj,
	CTLFLAG_RW, &vm_swapcache_cleanperobj, 0, "");

#define SWAPMAX(adj)	\
	((int64_t)vm_swap_max * (vm_swapcache_maxswappct + (adj)) / 100)
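
/*
 * Worked example: assuming vm_swap_max represents 100GB of configured
 * swap and the default vm_swapcache_maxswappct of 75, SWAPMAX(0) allows
 * the swapcache to use ~75GB and SWAPMAX(-10) ~65GB.  The main loop
 * below flips between writing and cleaning at those two thresholds,
 * giving a 10% hysteresis band.
 */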

/*
 * When shutting down the machine we want to stop swapcache operation
 * immediately so swap is not accessed after devices have been shuttered.
 */
static void
shutdown_swapcache(void *arg __unused)
{
	vm_swapcache_read_enable = 0;
	vm_swapcache_data_enable = 0;
	vm_swapcache_meta_enable = 0;
	wakeup(&vm_swapcache_sleep); /* shortcut 5-second wait */
}

/*
 * vm_swapcached is the high level pageout daemon.
 */
static void
vm_swapcached_thread(void)
{
	enum { SWAPC_WRITING, SWAPC_CLEANING } state = SWAPC_WRITING;
	enum { SWAPB_BURSTING, SWAPB_RECOVERING } burst = SWAPB_BURSTING;
	static struct vm_page page_marker[PQ_L2_SIZE];
	static struct vm_object object_marker;
	int q;

	/*
	 * Thread setup
	 */
	curthread->td_flags |= TDF_SYSTHREAD;
	EVENTHANDLER_REGISTER(shutdown_pre_sync, shutdown_kproc,
			      swapcached_thread, SHUTDOWN_PRI_FIRST);
	EVENTHANDLER_REGISTER(shutdown_pre_sync, shutdown_swapcache,
			      NULL, SHUTDOWN_PRI_SECOND);

	/*
	 * Initialize our marker for the inactive scan (SWAPC_WRITING)
	 */
	bzero(&page_marker, sizeof(page_marker));
	for (q = 0; q < PQ_L2_SIZE; ++q) {
		page_marker[q].flags = PG_BUSY | PG_FICTITIOUS | PG_MARKER;
		page_marker[q].queue = PQ_INACTIVE + q;
		page_marker[q].pc = q;
		page_marker[q].wire_count = 1;
		vm_page_queues_spin_lock(PQ_INACTIVE + q);
		TAILQ_INSERT_HEAD(
			&vm_page_queues[PQ_INACTIVE + q].pl,
			&page_marker[q], pageq);
		vm_page_queues_spin_unlock(PQ_INACTIVE + q);
	}

	vm_swapcache_hysteresis = vmstats.v_inactive_target / 2;
	vm_swapcache_inactive_heuristic = -vm_swapcache_hysteresis;

	/*
	 * Initialize our marker for the vm_object scan (SWAPC_CLEANING)
	 */
	bzero(&object_marker, sizeof(object_marker));
	object_marker.type = OBJT_MARKER;
	lwkt_gettoken(&vmobj_token);
	TAILQ_INSERT_HEAD(&vm_object_list, &object_marker, object_list);
	lwkt_reltoken(&vmobj_token);

	for (;;) {
		/*
		 * Handle shutdown
		 */
		kproc_suspend_loop();

		/*
		 * Check every 5 seconds when not enabled or if no swap
		 * is present.
		 */
		if ((vm_swapcache_data_enable == 0 &&
		     vm_swapcache_meta_enable == 0) ||
		    vm_swap_max == 0) {
			tsleep(&vm_swapcache_sleep, 0, "csleep", hz * 5);
			continue;
		}

		/*
		 * Polling rate when enabled is approximately 10 hz.
		 */
		tsleep(&vm_swapcache_sleep, 0, "csleep", hz / 10);

		/*
		 * State hysteresis.  Generate write activity up to 75% of
		 * swap, then clean out swap assignments down to 65%, then
		 * repeat.
		 */
		if (state == SWAPC_WRITING) {
			if (vm_swap_cache_use > SWAPMAX(0))
				state = SWAPC_CLEANING;
		} else {
			if (vm_swap_cache_use < SWAPMAX(-10))
				state = SWAPC_WRITING;
		}

		/*
		 * We are allowed to continue accumulating burst value
		 * in either state.  Allow the user to set curburst > maxburst
		 * for the initial load-in.
		 */
		if (vm_swapcache_curburst < vm_swapcache_maxburst) {
			vm_swapcache_curburst += vm_swapcache_accrate / 10;
			if (vm_swapcache_curburst > vm_swapcache_maxburst)
				vm_swapcache_curburst = vm_swapcache_maxburst;
		}

		/*
		 * We don't want to nickel-and-dime the scan as that will
		 * create unnecessary fragmentation.  The minimum burst
		 * is one second's worth of accumulation.
		 */
		if (state == SWAPC_WRITING) {
			if (vm_swapcache_curburst >= vm_swapcache_accrate) {
				if (burst == SWAPB_BURSTING) {
					for (q = 0; q < PQ_L2_SIZE; ++q) {
						vm_swapcache_writing(
							&page_marker[q]);
					}
					if (vm_swapcache_curburst <= 0)
						burst = SWAPB_RECOVERING;
				} else if (vm_swapcache_curburst >
					   vm_swapcache_minburst) {
					for (q = 0; q < PQ_L2_SIZE; ++q) {
						vm_swapcache_writing(
							&page_marker[q]);
					}
					burst = SWAPB_BURSTING;
				}
			}
		} else {
			vm_swapcache_cleaning(&object_marker);
		}
	}

	/*
	 * Cleanup (NOT REACHED)
	 */
	for (q = 0; q < PQ_L2_SIZE; ++q) {
		vm_page_queues_spin_lock(PQ_INACTIVE + q);
		TAILQ_REMOVE(
			&vm_page_queues[PQ_INACTIVE + q].pl,
			&page_marker[q], pageq);
		vm_page_queues_spin_unlock(PQ_INACTIVE + q);
	}

	lwkt_gettoken(&vmobj_token);
	TAILQ_REMOVE(&vm_object_list, &object_marker, object_list);
	lwkt_reltoken(&vmobj_token);
}

static struct kproc_desc swpc_kp = {
	"swapcached",
	vm_swapcached_thread,
	&swapcached_thread
};
SYSINIT(swapcached, SI_SUB_KTHREAD_PAGE, SI_ORDER_SECOND, kproc_start, &swpc_kp);
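
/*
 * Burst accounting sketch, assuming the defaults above: the main loop
 * polls at ~10hz and credits vm_swapcache_accrate / 10 bytes per poll,
 * i.e. ~100KB/sec of sustained write bandwidth.  The writing pass below
 * only runs while at least one second's worth of credit (accrate) is
 * banked, and each flushed page debits PAGE_SIZE from curburst, so an
 * idle system can accumulate up to vm_swapcache_maxburst (~2GB) of
 * credit for a later burst of write-outs.
 */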

static void
vm_swapcache_writing(vm_page_t marker)
{
	vm_object_t object;
	struct vnode *vp;
	vm_page_t m;
	int count;
	int isblkdev;

	/*
	 * Deal with an overflow of the heuristic counter or if the user
	 * manually changes the hysteresis.
	 *
	 * Try to avoid small incremental pageouts by waiting for enough
	 * pages to build up in the inactive queue to hopefully get a good
	 * burst in.  This heuristic is bumped by the VM system and reset
	 * when our scan hits the end of the queue.
	 */
	if (vm_swapcache_inactive_heuristic < -vm_swapcache_hysteresis)
		vm_swapcache_inactive_heuristic = -vm_swapcache_hysteresis;
	if (vm_swapcache_inactive_heuristic < 0)
		return;

	/*
	 * Scan the inactive queue from our marker to locate
	 * suitable pages to push to the swap cache.
	 *
	 * We are looking for clean vnode-backed pages.
	 *
	 * NOTE: PG_SWAPPED pages in particular are not part of
	 *	 our count because once the cache stabilizes we
	 *	 can end up with a very high datarate of VM pages
	 *	 cycling from it.
	 */
	count = vm_swapcache_maxlaunder;

	vm_page_queues_spin_lock(marker->queue);
	while ((m = TAILQ_NEXT(marker, pageq)) != NULL && count-- > 0) {
		KKASSERT(m->queue == marker->queue);

		if (vm_swapcache_curburst < 0)
			break;
		TAILQ_REMOVE(
			&vm_page_queues[marker->queue].pl, marker, pageq);
		TAILQ_INSERT_AFTER(
			&vm_page_queues[marker->queue].pl, m, marker, pageq);

		/*
		 * Ignore markers and ignore pages that already have a swap
		 * assignment.
		 */
		if (m->flags & (PG_MARKER | PG_SWAPPED)) {
			++count;
			continue;
		}
		if (vm_page_busy_try(m, TRUE))
			continue;
		vm_page_queues_spin_unlock(marker->queue);

		if ((object = m->object) == NULL) {
			vm_page_wakeup(m);
			vm_page_queues_spin_lock(marker->queue);
			continue;
		}
		vm_object_hold(object);
		if (m->object != object) {
			vm_object_drop(object);
			vm_page_wakeup(m);
			vm_page_queues_spin_lock(marker->queue);
			continue;
		}
		if (vm_swapcache_test(m)) {
			vm_object_drop(object);
			vm_page_wakeup(m);
			vm_page_queues_spin_lock(marker->queue);
			continue;
		}

		vp = object->handle;
		if (vp == NULL) {
			vm_object_drop(object);
			vm_page_wakeup(m);
			vm_page_queues_spin_lock(marker->queue);
			continue;
		}

		switch(vp->v_type) {
		case VREG:
			/*
			 * PG_NOTMETA generically means 'don't swapcache this',
			 * and HAMMER will set this for regular data buffers
			 * (and leave it unset for meta-data buffers) as
			 * appropriate when double buffering is enabled.
			 */
			if (m->flags & PG_NOTMETA) {
				vm_object_drop(object);
				vm_page_wakeup(m);
				vm_page_queues_spin_lock(marker->queue);
				continue;
			}

			/*
			 * If data_enable is 0 do not try to swapcache data.
			 * If use_chflags is set then only swapcache data for
			 * VSWAPCACHE marked vnodes, otherwise any vnode.
			 */
			if (vm_swapcache_data_enable == 0 ||
			    ((vp->v_flag & VSWAPCACHE) == 0 &&
			     vm_swapcache_use_chflags)) {
				vm_object_drop(object);
				vm_page_wakeup(m);
				vm_page_queues_spin_lock(marker->queue);
				continue;
			}
			if (vm_swapcache_maxfilesize &&
			    object->size >
			    (vm_swapcache_maxfilesize >> PAGE_SHIFT)) {
				vm_object_drop(object);
				vm_page_wakeup(m);
				vm_page_queues_spin_lock(marker->queue);
				continue;
			}
			isblkdev = 0;
			break;
		case VCHR:
			/*
			 * PG_NOTMETA generically means 'don't swapcache this',
			 * and HAMMER will set this for regular data buffers
			 * (and leave it unset for meta-data buffers) as
			 * appropriate when double buffering is enabled.
			 */
			if (m->flags & PG_NOTMETA) {
				vm_object_drop(object);
				vm_page_wakeup(m);
				vm_page_queues_spin_lock(marker->queue);
				continue;
			}
			if (vm_swapcache_meta_enable == 0) {
				vm_object_drop(object);
				vm_page_wakeup(m);
				vm_page_queues_spin_lock(marker->queue);
				continue;
			}
			isblkdev = 1;
			break;
		default:
			vm_object_drop(object);
			vm_page_wakeup(m);
			vm_page_queues_spin_lock(marker->queue);
			continue;
		}

		/*
		 * Assign swap and initiate I/O.
		 *
		 * (adjust for the --count which also occurs in the loop)
		 */
		count -= vm_swapcached_flush(m, isblkdev) - 1;

		/*
		 * Setup for next loop using marker.
		 */
		vm_object_drop(object);
		vm_page_queues_spin_lock(marker->queue);
	}

	/*
	 * The marker could wind up at the end, which is ok.  If we hit the
	 * end of the list adjust the heuristic.
	 *
	 * Earlier inactive pages that were dirty and become clean
	 * are typically moved to the end of PQ_INACTIVE by virtue
	 * of vfs_vmio_release() when they become unwired from the
	 * buffer cache.
	 */
	if (m == NULL)
		vm_swapcache_inactive_heuristic = -vm_swapcache_hysteresis;
	vm_page_queues_spin_unlock(marker->queue);
}
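
/*
 * Heuristic sketch for the dormancy check above: vm_swapcache_hysteresis
 * defaults to half the inactive target, and the heuristic counter starts
 * at -hysteresis.  The VM system bumps the counter as pages are fed to
 * the inactive queue, so the writing pass stays dormant until roughly
 * that many pages have built up, then scans until it reaches the end of
 * the queue and resets, avoiding small incremental pageouts.
 */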

/*
 * Flush the specified page using the swap_pager.  The page
 * must be busied by the caller and its disposition will become
 * the responsibility of this function.
 *
 * Try to collect surrounding pages, including pages which may
 * have already been assigned swap.  Try to cluster within a
 * contiguous aligned SWAP_META_PAGES (typ 16 x PAGE_SIZE) block
 * to match what swap_pager_putpages() can do.
 *
 * We also want to try to match against the buffer cache blocksize
 * but we don't really know what it is here.  Since the buffer cache
 * wires and unwires pages in groups the fact that we skip wired pages
 * should be sufficient.
 *
 * Returns a count of pages we might have flushed (minimum 1)
 */
static int
vm_swapcached_flush(vm_page_t m, int isblkdev)
{
	vm_object_t object;
	vm_page_t marray[SWAP_META_PAGES];
	vm_pindex_t basei;
	int rtvals[SWAP_META_PAGES];
	int x;
	int i;
	int j;
	int count;
	int error;

	vm_page_io_start(m);
	vm_page_protect(m, VM_PROT_READ);
	object = m->object;
	vm_object_hold(object);

	/*
	 * Try to cluster around (m), keeping in mind that the swap pager
	 * can only do SWAP_META_PAGES worth of contiguous write.
	 */
	x = (int)m->pindex & SWAP_META_MASK;
	marray[x] = m;
	basei = m->pindex;
	vm_page_wakeup(m);

	for (i = x - 1; i >= 0; --i) {
		m = vm_page_lookup_busy_try(object, basei - x + i,
					    TRUE, &error);
		if (error || m == NULL)
			break;
		if (vm_swapcache_test(m)) {
			vm_page_wakeup(m);
			break;
		}
		if (isblkdev && (m->flags & PG_NOTMETA)) {
			vm_page_wakeup(m);
			break;
		}
		vm_page_io_start(m);
		vm_page_protect(m, VM_PROT_READ);
		if (m->queue - m->pc == PQ_CACHE) {
			vm_page_unqueue_nowakeup(m);
			vm_page_deactivate(m);
		}
		marray[i] = m;
		vm_page_wakeup(m);
	}
	++i;	/* adjust i to the lowest index actually collected */

	for (j = x + 1; j < SWAP_META_PAGES; ++j) {
		m = vm_page_lookup_busy_try(object, basei - x + j,
					    TRUE, &error);
		if (error || m == NULL)
			break;
		if (vm_swapcache_test(m)) {
			vm_page_wakeup(m);
			break;
		}
		if (isblkdev && (m->flags & PG_NOTMETA)) {
			vm_page_wakeup(m);
			break;
		}
		vm_page_io_start(m);
		vm_page_protect(m, VM_PROT_READ);
		if (m->queue - m->pc == PQ_CACHE) {
			vm_page_unqueue_nowakeup(m);
			vm_page_deactivate(m);
		}
		marray[j] = m;
		vm_page_wakeup(m);
	}

	count = j - i;	/* number of contiguous pages collected */
	vm_object_pip_add(object, count);
	swap_pager_putpages(object, marray + i, count, FALSE, rtvals + i);
	vm_swapcache_write_count += count * PAGE_SIZE;
	vm_swapcache_curburst -= count * PAGE_SIZE;

	while (i < j) {
		if (rtvals[i] != VM_PAGER_PEND) {
			vm_page_busy_wait(marray[i], FALSE, "swppgfd");
			vm_page_io_finish(marray[i]);
			vm_page_wakeup(marray[i]);
			vm_object_pip_wakeup(object);
		}
		++i;
	}
	vm_object_drop(object);
	return(count);
}
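
/*
 * Clustering example: with SWAP_META_PAGES of 16, a page at pindex 37
 * has x = 37 & SWAP_META_MASK = 5, so the loops above probe pindex
 * 36..32 backwards and 38..47 forwards, flushing at most one aligned
 * 16-page (64KB with 4K pages) block in a single putpages call.
 */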

/*
 * Test whether a VM page is suitable for writing to the swapcache.
 * Does not test m->queue, PG_MARKER, or PG_SWAPPED.
 *
 * Returns 0 on success, 1 on failure
 */
static int
vm_swapcache_test(vm_page_t m)
{
	vm_object_t object;

	if (m->flags & PG_UNMANAGED)
		return(1);
	if (m->hold_count || m->wire_count)
		return(1);
	if (m->valid != VM_PAGE_BITS_ALL)
		return(1);
	if (m->dirty & m->valid)
		return(1);
	if ((object = m->object) == NULL)
		return(1);
	if (object->type != OBJT_VNODE ||
	    (object->flags & OBJ_DEAD)) {
		return(1);
	}
	vm_page_test_dirty(m);
	if (m->dirty & m->valid)
		return(1);
	return(0);
}

/*
 * We clean whole objects up to 16MB
 */
static void
vm_swapcache_cleaning(vm_object_t marker)
{
	vm_object_t object;
	struct vnode *vp;
	int count;
	int n;

	count = vm_swapcache_maxlaunder;

	/*
	 * Look for vnode objects
	 */
	lwkt_gettoken(&vmobj_token);

	while ((object = TAILQ_NEXT(marker, object_list)) != NULL) {
		/*
		 * We have to skip markers.  We cannot hold/drop marker
		 * objects!
		 */
		if (object->type == OBJT_MARKER) {
			vm_swapcache_movemarker(marker, object);
			continue;
		}

		/*
		 * Safety, or in case there are millions of VM objects
		 * without swapcache backing.
		 */
		if (--count <= 0)
			break;

		/*
		 * We must hold the object before potentially yielding.
		 */
		vm_object_hold(object);
		lwkt_yield();

		/*
		 * Only operate on live VNODE objects that are either
		 * VREG or VCHR (VCHR for meta-data).
		 */
		if ((object->type != OBJT_VNODE) ||
		    ((object->flags & OBJ_DEAD) ||
		     object->swblock_count == 0) ||
		    ((vp = object->handle) == NULL) ||
		    (vp->v_type != VREG && vp->v_type != VCHR)) {
			vm_object_drop(object);
			/* object may be invalid now */
			vm_swapcache_movemarker(marker, object);
			continue;
		}

		/*
		 * Reset the object pindex stored in the marker if the
		 * working object has changed.
		 */
		if (marker->backing_object != object) {
			marker->size = 0;
			marker->backing_object_offset = 0;
			marker->backing_object = object;
		}

		/*
		 * Look for swblocks starting at our iterator.
		 *
		 * The swap_pager_condfree() function attempts to free
		 * swap space starting at the specified index.  The index
		 * will be updated on return.  The function will return
		 * a scan factor (NOT the number of blocks freed).
		 *
		 * If it must cut its scan of the object short due to an
		 * excessive number of swblocks, or is able to free the
		 * requested number of blocks, it will return n >= count
		 * and we break and pick it back up on a future attempt.
		 *
		 * Scan the object linearly and try to batch large sets of
		 * blocks that are likely to clean out entire swap radix
		 * tree leafs.
		 */
		lwkt_reltoken(&vmobj_token);

		n = swap_pager_condfree(object, &marker->size,
				(count + SWAP_META_MASK) & ~SWAP_META_MASK);

		vm_object_drop(object);		/* object may be invalid now */
		lwkt_gettoken(&vmobj_token);

		/*
		 * If we have exhausted the object or hit our per-pass
		 * page limit then move us to the next object.  Note that
		 * the current object may no longer be on the vm_object_list.
		 */
		if (n <= 0 ||
		    marker->backing_object_offset > vm_swapcache_cleanperobj) {
			vm_swapcache_movemarker(marker, object);
		}

		/*
		 * If we have exhausted our max-launder stop for now.
		 */
		count -= n;
		marker->backing_object_offset += n * PAGE_SIZE;
		if (count < 0)
			break;
	}

	/*
	 * If we wound up at the end of the list this will move the
	 * marker back to the beginning.
	 */
	if (object == NULL)
		vm_swapcache_movemarker(marker, NULL);

	lwkt_reltoken(&vmobj_token);
}

/*
 * Move the marker past the current object.  Object can be stale, but we
 * still need it to determine if the marker has to be moved.  If the object
 * is still the 'current object' (object after the marker), we hop-scotch
 * the marker past it.
 */
static void
vm_swapcache_movemarker(vm_object_t marker, vm_object_t object)
{
	if (TAILQ_NEXT(marker, object_list) == object) {
		TAILQ_REMOVE(&vm_object_list, marker, object_list);
		if (object) {
			TAILQ_INSERT_AFTER(&vm_object_list, object,
					   marker, object_list);
		} else {
			TAILQ_INSERT_HEAD(&vm_object_list,
					  marker, object_list);
		}
	}
}