kernel - swapcache - Fix snocache and cache flags propagation, fix PG_NOTMETA
[dragonfly.git] / sys / vm / vm_swapcache.c
/*
 * Copyright (c) 2010 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Implement the swapcache daemon.  When enabled swap is assumed to be
 * configured on a fast storage device such as an SSD.  Swap is assigned
 * to clean vnode-backed pages in the inactive queue, clustered by object
 * if possible, and written out.  The swap assignment sticks around even
 * after the underlying pages have been recycled.
 *
 * The daemon manages write bandwidth based on sysctl settings to control
 * wear on the SSD.
 *
 * The vnode strategy code will check for the swap assignments and divert
 * reads to the swap device when the data is present in the swapcache.
 *
 * This operates on both regular files and the block device vnodes used by
 * filesystems to manage meta-data.
 */
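
/*
 * Usage sketch: the SYSCTL_NODE and SYSCTL_* entries below expose these
 * knobs as vm.swapcache.*, so enabling the cache for both file data and
 * filesystem meta-data would look something like:
 *
 *	sysctl vm.swapcache.data_enable=1
 *	sysctl vm.swapcache.meta_enable=1
 *
 * vm.swapcache.read_enable similarly gates the read-side diversion
 * described above.
 */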

#include "opt_vm.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>

#include <sys/thread2.h>
#include <vm/vm_page2.h>

#define INACTIVE_LIST	(&vm_page_queues[PQ_INACTIVE].pl)

/* the kernel thread "swapcached" */
static void vm_swapcached (void);
static int vm_swapcached_flush (vm_page_t m, int isblkdev);
static int vm_swapcache_test(vm_page_t m);
static void vm_swapcache_writing(vm_page_t marker);
static void vm_swapcache_cleaning(vm_object_t marker);
struct thread *swapcached_thread;

static struct kproc_desc swpc_kp = {
	"swapcached",
	vm_swapcached,
	&swapcached_thread
};
SYSINIT(swapcached, SI_SUB_KTHREAD_PAGE, SI_ORDER_SECOND, kproc_start, &swpc_kp)

SYSCTL_NODE(_vm, OID_AUTO, swapcache, CTLFLAG_RW, NULL, NULL);

int vm_swapcache_read_enable;
int vm_swapcache_inactive_heuristic;
static int vm_swapcache_sleep;
static int vm_swapcache_maxlaunder = 256;
static int vm_swapcache_data_enable = 0;
static int vm_swapcache_meta_enable = 0;
static int vm_swapcache_maxswappct = 75;
static int vm_swapcache_hysteresis;
static int vm_swapcache_use_chflags = 1;	/* require chflags cache */
static int64_t vm_swapcache_minburst = 10000000LL;	/* 10MB */
static int64_t vm_swapcache_curburst = 4000000000LL;	/* 4G after boot */
static int64_t vm_swapcache_maxburst = 2000000000LL;	/* 2G nominal max */
static int64_t vm_swapcache_accrate = 100000LL;		/* 100K/s */
static int64_t vm_swapcache_write_count;
static int64_t vm_swapcache_maxfilesize;

SYSCTL_INT(_vm_swapcache, OID_AUTO, maxlaunder,
	CTLFLAG_RW, &vm_swapcache_maxlaunder, 0, "");

SYSCTL_INT(_vm_swapcache, OID_AUTO, data_enable,
	CTLFLAG_RW, &vm_swapcache_data_enable, 0, "");
SYSCTL_INT(_vm_swapcache, OID_AUTO, meta_enable,
	CTLFLAG_RW, &vm_swapcache_meta_enable, 0, "");
SYSCTL_INT(_vm_swapcache, OID_AUTO, read_enable,
	CTLFLAG_RW, &vm_swapcache_read_enable, 0, "");
SYSCTL_INT(_vm_swapcache, OID_AUTO, maxswappct,
	CTLFLAG_RW, &vm_swapcache_maxswappct, 0, "");
SYSCTL_INT(_vm_swapcache, OID_AUTO, hysteresis,
	CTLFLAG_RW, &vm_swapcache_hysteresis, 0, "");
SYSCTL_INT(_vm_swapcache, OID_AUTO, use_chflags,
	CTLFLAG_RW, &vm_swapcache_use_chflags, 0, "");

SYSCTL_QUAD(_vm_swapcache, OID_AUTO, minburst,
	CTLFLAG_RW, &vm_swapcache_minburst, 0, "");
SYSCTL_QUAD(_vm_swapcache, OID_AUTO, curburst,
	CTLFLAG_RW, &vm_swapcache_curburst, 0, "");
SYSCTL_QUAD(_vm_swapcache, OID_AUTO, maxburst,
	CTLFLAG_RW, &vm_swapcache_maxburst, 0, "");
SYSCTL_QUAD(_vm_swapcache, OID_AUTO, maxfilesize,
	CTLFLAG_RW, &vm_swapcache_maxfilesize, 0, "");
SYSCTL_QUAD(_vm_swapcache, OID_AUTO, accrate,
	CTLFLAG_RW, &vm_swapcache_accrate, 0, "");
SYSCTL_QUAD(_vm_swapcache, OID_AUTO, write_count,
	CTLFLAG_RW, &vm_swapcache_write_count, 0, "");

#define SWAPMAX(adj)	\
	((int64_t)vm_swap_max * (vm_swapcache_maxswappct + (adj)) / 100)

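/*
 * Example: with the default vm_swapcache_maxswappct of 75, SWAPMAX(0)
 * evaluates to 75% of vm_swap_max and SWAPMAX(-5) to 70%; these are the
 * two thresholds the main loop uses for its write/clean hysteresis.
 */
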
/*
 * vm_swapcached is the high level pageout daemon.
 */
static void
vm_swapcached(void)
{
	enum { SWAPC_WRITING, SWAPC_CLEANING } state = SWAPC_WRITING;
	enum { SWAPB_BURSTING, SWAPB_RECOVERING } burst = SWAPB_BURSTING;
	struct vm_page page_marker;
	struct vm_object object_marker;

	/*
	 * Thread setup
	 */
	curthread->td_flags |= TDF_SYSTHREAD;
	crit_enter();

	/*
	 * Initialize our marker for the inactive scan (SWAPC_WRITING)
	 */
	bzero(&page_marker, sizeof(page_marker));
	page_marker.flags = PG_BUSY | PG_FICTITIOUS | PG_MARKER;
	page_marker.queue = PQ_INACTIVE;
	page_marker.wire_count = 1;
	TAILQ_INSERT_HEAD(INACTIVE_LIST, &page_marker, pageq);
	vm_swapcache_hysteresis = vmstats.v_inactive_target / 2;
	vm_swapcache_inactive_heuristic = -vm_swapcache_hysteresis;

	/*
	 * Initialize our marker for the vm_object scan (SWAPC_CLEANING)
	 */
	bzero(&object_marker, sizeof(object_marker));
	object_marker.type = OBJT_MARKER;
	TAILQ_INSERT_HEAD(&vm_object_list, &object_marker, object_list);
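
	/*
	 * (Both markers let a scan resume where it left off: PG_MARKER
	 * pages and OBJT_MARKER objects are skipped by other scans of the
	 * inactive queue and the object list, so the iterators can remain
	 * parked in the lists between polls.)
	 */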

	for (;;) {
		/*
		 * Check every 5 seconds when not enabled or if no swap
		 * is present.
		 */
		if ((vm_swapcache_data_enable == 0 &&
		     vm_swapcache_meta_enable == 0) ||
		    vm_swap_max == 0) {
			tsleep(&vm_swapcache_sleep, 0, "csleep", hz * 5);
			continue;
		}

		/*
		 * Polling rate when enabled is approximately 10 hz.
		 */
		tsleep(&vm_swapcache_sleep, 0, "csleep", hz / 10);

		/*
		 * State hysteresis.  Generate write activity up to 75% of
		 * swap, then clean out swap assignments down to 70%, then
		 * repeat.
		 */
		if (state == SWAPC_WRITING) {
			if (vm_swap_cache_use > SWAPMAX(0))
				state = SWAPC_CLEANING;
		} else {
			if (vm_swap_cache_use < SWAPMAX(-5))
				state = SWAPC_WRITING;
		}

		/*
		 * We are allowed to continue accumulating burst value
		 * in either state.  Allow the user to set curburst > maxburst
		 * for the initial load-in.
		 */
		if (vm_swapcache_curburst < vm_swapcache_maxburst) {
			vm_swapcache_curburst += vm_swapcache_accrate / 10;
			if (vm_swapcache_curburst > vm_swapcache_maxburst)
				vm_swapcache_curburst = vm_swapcache_maxburst;
		}
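
		/*
		 * (Since the poll above runs at ~10hz, accrate / 10 is the
		 * per-poll byte budget: the default accrate of 100000 adds
		 * ~10000 bytes per tick, i.e. ~100KB/sec of write budget.)
		 */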

		/*
		 * We don't want to nickel-and-dime the scan as that will
		 * create unnecessary fragmentation.  The minimum burst
		 * is one second's worth of accumulation.
		 */
		if (state == SWAPC_WRITING) {
			if (vm_swapcache_curburst >= vm_swapcache_accrate) {
				if (burst == SWAPB_BURSTING) {
					vm_swapcache_writing(&page_marker);
					if (vm_swapcache_curburst <= 0)
						burst = SWAPB_RECOVERING;
				} else if (vm_swapcache_curburst >
					   vm_swapcache_minburst) {
					vm_swapcache_writing(&page_marker);
					burst = SWAPB_BURSTING;
				}
			}
		} else {
			vm_swapcache_cleaning(&object_marker);
		}
	}
	TAILQ_REMOVE(INACTIVE_LIST, &page_marker, pageq);
	TAILQ_REMOVE(&vm_object_list, &object_marker, object_list);
	crit_exit();
}

static void
vm_swapcache_writing(vm_page_t marker)
{
	vm_object_t object;
	struct vnode *vp;
	vm_page_t m;
	int count;
	int isblkdev;

	/*
	 * Deal with an overflow of the heuristic counter or if the user
	 * manually changes the hysteresis.
	 *
	 * Try to avoid small incremental pageouts by waiting for enough
	 * pages to build up in the inactive queue to hopefully get a good
	 * burst in.  This heuristic is bumped by the VM system and reset
	 * when our scan hits the end of the queue.
	 */
	if (vm_swapcache_inactive_heuristic < -vm_swapcache_hysteresis)
		vm_swapcache_inactive_heuristic = -vm_swapcache_hysteresis;
	if (vm_swapcache_inactive_heuristic < 0)
		return;

	/*
	 * Scan the inactive queue from our marker to locate
	 * suitable pages to push to the swap cache.
	 *
	 * We are looking for clean vnode-backed pages.
	 *
	 * NOTE: PG_SWAPPED pages in particular are not part of
	 *	 our count because once the cache stabilizes we
	 *	 can end up with a very high data rate of VM pages
	 *	 cycling from it.
	 */
	m = marker;
	count = vm_swapcache_maxlaunder;

	while ((m = TAILQ_NEXT(m, pageq)) != NULL && count--) {
		if (m->flags & (PG_MARKER | PG_SWAPPED)) {
			++count;
			continue;
		}
		if (vm_swapcache_curburst < 0)
			break;
		if (vm_swapcache_test(m))
			continue;
		object = m->object;
		vp = object->handle;
		if (vp == NULL)
			continue;

		switch(vp->v_type) {
		case VREG:
			/*
			 * If data_enable is 0 do not try to swapcache data.
			 * If use_chflags is set then only swapcache data for
			 * VSWAPCACHE marked vnodes, otherwise any vnode.
			 */
			if (vm_swapcache_data_enable == 0 ||
			    ((vp->v_flag & VSWAPCACHE) == 0 &&
			     vm_swapcache_use_chflags)) {
				continue;
			}
			if (vm_swapcache_maxfilesize &&
			    object->size >
			    (vm_swapcache_maxfilesize >> PAGE_SHIFT)) {
				continue;
			}
			isblkdev = 0;
			break;
		case VCHR:
			/*
			 * The PG_NOTMETA flag only applies to pages
			 * associated with block devices.
			 */
			if (m->flags & PG_NOTMETA)
				continue;
			if (vm_swapcache_meta_enable == 0)
				continue;
			isblkdev = 1;
			break;
		default:
			continue;
		}

		/*
		 * Ok, move the marker and soft-busy the page.
		 */
		TAILQ_REMOVE(INACTIVE_LIST, marker, pageq);
		TAILQ_INSERT_AFTER(INACTIVE_LIST, m, marker, pageq);

		/*
		 * Assign swap and initiate I/O.
		 *
		 * (adjust for the --count which also occurs in the loop)
		 */
		count -= vm_swapcached_flush(m, isblkdev) - 1;

		/*
		 * Setup for next loop using marker.
		 */
		m = marker;
	}

	/*
	 * Cleanup marker position.  If we hit the end of the
	 * list the marker is placed at the tail.  Newly deactivated
	 * pages will be placed after it.
	 *
	 * Earlier inactive pages that were dirty and become clean
	 * are typically moved to the end of PQ_INACTIVE by virtue
	 * of vfs_vmio_release() when they become unwired from the
	 * buffer cache.
	 */
	TAILQ_REMOVE(INACTIVE_LIST, marker, pageq);
	if (m) {
		TAILQ_INSERT_BEFORE(m, marker, pageq);
	} else {
		TAILQ_INSERT_TAIL(INACTIVE_LIST, marker, pageq);
		vm_swapcache_inactive_heuristic = -vm_swapcache_hysteresis;
	}
}

/*
 * Flush the specified page using the swap_pager.
 *
 * Try to collect surrounding pages, including pages which may
 * have already been assigned swap.  Try to cluster within a
 * contiguous aligned SWAP_META_PAGES (typ 16 x PAGE_SIZE) block
 * to match what swap_pager_putpages() can do.
 *
 * We also want to try to match against the buffer cache blocksize
 * but we don't really know what it is here.  Since the buffer cache
 * wires and unwires pages in groups the fact that we skip wired pages
 * should be sufficient.
 *
 * Returns a count of pages we might have flushed (minimum 1).
 */
static
int
vm_swapcached_flush(vm_page_t m, int isblkdev)
{
	vm_object_t object;
	vm_page_t marray[SWAP_META_PAGES];
	vm_pindex_t basei;
	int rtvals[SWAP_META_PAGES];
	int x;
	int i;
	int j;
	int count;

	vm_page_io_start(m);
	vm_page_protect(m, VM_PROT_READ);
	object = m->object;

	/*
	 * Try to cluster around (m), keeping in mind that the swap pager
	 * can only do SWAP_META_PAGES worth of contiguous write.
	 */
	x = (int)m->pindex & SWAP_META_MASK;
	marray[x] = m;
	basei = m->pindex;
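
	/*
	 * (Index math example: assuming SWAP_META_PAGES == 16, the typical
	 * value noted above, a page at pindex 37 gives x == 5 and the
	 * cluster spans the aligned block of pindices 32..47, reached as
	 * basei - x + i for i in 0..15.)
	 */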

	for (i = x - 1; i >= 0; --i) {
		m = vm_page_lookup(object, basei - x + i);
		if (m == NULL)
			break;
		if (vm_swapcache_test(m))
			break;
		if (isblkdev && (m->flags & PG_NOTMETA))
			break;
		vm_page_io_start(m);
		vm_page_protect(m, VM_PROT_READ);
		if (m->queue - m->pc == PQ_CACHE) {
			vm_page_unqueue_nowakeup(m);
			vm_page_deactivate(m);
		}
		marray[i] = m;
	}
	++i;

	for (j = x + 1; j < SWAP_META_PAGES; ++j) {
		m = vm_page_lookup(object, basei - x + j);
		if (m == NULL)
			break;
		if (vm_swapcache_test(m))
			break;
		if (isblkdev && (m->flags & PG_NOTMETA))
			break;
		vm_page_io_start(m);
		vm_page_protect(m, VM_PROT_READ);
		if (m->queue - m->pc == PQ_CACHE) {
			vm_page_unqueue_nowakeup(m);
			vm_page_deactivate(m);
		}
		marray[j] = m;
	}

	count = j - i;
	vm_object_pip_add(object, count);
	swap_pager_putpages(object, marray + i, count, FALSE, rtvals + i);
	vm_swapcache_write_count += count * PAGE_SIZE;
	vm_swapcache_curburst -= count * PAGE_SIZE;

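	/*
	 * (Pages whose rtvals come back VM_PAGER_PEND are still under
	 * asynchronous I/O; the swap pager's completion path is expected
	 * to finish the soft-busy and pip accounting for those, so only
	 * the remaining pages are cleaned up here.)
	 */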
	while (i < j) {
		if (rtvals[i] != VM_PAGER_PEND) {
			vm_page_io_finish(marray[i]);
			vm_object_pip_wakeup(object);
		}
		++i;
	}
	return(count);
}
00a3fdca 460
3ffc7051
MD
461/*
462 * Test whether a VM page is suitable for writing to the swapcache.
463 * Does not test m->queue, PG_MARKER, or PG_SWAPPED.
464 *
465 * Returns 0 on success, 1 on failure
466 */
467static int
468vm_swapcache_test(vm_page_t m)
469{
470 vm_object_t object;
471
aabd5ce8 472 if (m->flags & (PG_BUSY | PG_UNMANAGED))
3ffc7051
MD
473 return(1);
474 if (m->busy || m->hold_count || m->wire_count)
475 return(1);
476 if (m->valid != VM_PAGE_BITS_ALL)
477 return(1);
478 if (m->dirty & m->valid)
479 return(1);
480 if ((object = m->object) == NULL)
481 return(1);
482 if (object->type != OBJT_VNODE ||
483 (object->flags & OBJ_DEAD)) {
484 return(1);
485 }
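
	/*
	 * (vm_page_test_dirty() folds the pmap modified bit into m->dirty,
	 * so the re-test below also catches pages dirtied through user
	 * mappings that the plain m->dirty check above missed.)
	 */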
	vm_page_test_dirty(m);
	if (m->dirty & m->valid)
		return(1);
	return(0);
}

/*
 * Cleaning pass
 */
static
void
vm_swapcache_cleaning(vm_object_t marker)
{
	vm_object_t object;
	struct vnode *vp;
	int count;
	int n;

	object = marker;
	count = vm_swapcache_maxlaunder;

	/*
	 * Look for vnode objects
	 */
	while ((object = TAILQ_NEXT(object, object_list)) != NULL && count--) {
		if (object->type != OBJT_VNODE)
			continue;
		if ((object->flags & OBJ_DEAD) || object->swblock_count == 0)
			continue;
		if ((vp = object->handle) == NULL)
			continue;
		if (vp->v_type != VREG && vp->v_type != VCHR)
			continue;

		/*
		 * Adjust the iterator.  marker->size holds the swblock
		 * resume index; restart it from 0 if we are picking up
		 * on a different object than last pass.
		 */
		if (marker->backing_object != object)
			marker->size = 0;

		/*
		 * Move the marker so we can work on the VM object
		 */
		TAILQ_REMOVE(&vm_object_list, marker, object_list);
		TAILQ_INSERT_AFTER(&vm_object_list, object,
				   marker, object_list);

		/*
		 * Look for swblocks starting at our iterator.
		 *
		 * The swap_pager_condfree() function attempts to free
		 * swap space starting at the specified index.  The index
		 * will be updated on return.  The function will return
		 * a scan factor (NOT the number of blocks freed).
		 *
		 * If it must cut its scan of the object short due to an
		 * excessive number of swblocks, or is able to free the
		 * requested number of blocks, it will return n >= count
		 * and we break and pick it back up on a future attempt.
		 */
		n = swap_pager_condfree(object, &marker->size, count);
		count -= n;
		if (count < 0)
			break;

		/*
		 * Setup for next loop.
		 */
		marker->size = 0;
		object = marker;
	}

	/*
	 * Adjust marker so we continue the scan from where we left off.
	 * When we reach the end we start back at the beginning.
	 */
	TAILQ_REMOVE(&vm_object_list, marker, object_list);
	if (object)
		TAILQ_INSERT_BEFORE(object, marker, object_list);
	else
		TAILQ_INSERT_HEAD(&vm_object_list, marker, object_list);
	marker->backing_object = object;
}