kernel - SWAP CACHE part 12/many - Add swapcache cleanup state
sys/vm/vm_swapcache.c (dragonfly.git)
/*
 * Copyright (c) 2010 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Implement the swapcache daemon.  When enabled, swap is assumed to be
 * configured on a fast storage device such as an SSD.  Swap is assigned
 * to clean vnode-backed pages in the inactive queue, clustered by object
 * if possible, and written out.  The swap assignment sticks around even
 * after the underlying pages have been recycled.
 *
 * The daemon manages write bandwidth based on sysctl settings to control
 * wear on the SSD.
 *
 * The vnode strategy code will check for the swap assignments and divert
 * reads to the swap device.
 *
 * This operates on both regular files and the block device vnodes used by
 * filesystems to manage meta-data.
 */

#include "opt_vm.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>

#include <sys/thread2.h>
#include <vm/vm_page2.h>

#define INACTIVE_LIST   (&vm_page_queues[PQ_INACTIVE].pl)

/* the kernel process "swapcached" */
static void vm_swapcached (void);
static void vm_swapcached_flush (vm_page_t m);
static void vm_swapcache_writing(vm_page_t marker);
static void vm_swapcache_cleaning(vm_object_t marker);
struct thread *swapcached_thread;

static struct kproc_desc swpc_kp = {
        "swapcached",
        vm_swapcached,
        &swapcached_thread
};
SYSINIT(swapcached, SI_SUB_KTHREAD_PAGE, SI_ORDER_SECOND, kproc_start, &swpc_kp)

SYSCTL_NODE(_vm, OID_AUTO, swapcache, CTLFLAG_RW, NULL, NULL);

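/*
 * Tunables.  maxlaunder caps how many inactive pages are examined per
 * pass.  curburst, maxburst and accrate (all in bytes) implement a
 * simple token-bucket write bandwidth limiter: accrate bytes per second
 * accumulate into curburst up to maxburst, and each page flushed to the
 * swapcache subtracts PAGE_SIZE (see vm_swapcached_flush()).
 */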
int vm_swapcache_read_enable;
static int vm_swapcache_sleep;
static int vm_swapcache_maxlaunder = 256;
static int vm_swapcache_data_enable = 0;
static int vm_swapcache_meta_enable = 0;
static int64_t vm_swapcache_curburst = 1000000000LL;
static int64_t vm_swapcache_maxburst = 1000000000LL;
static int64_t vm_swapcache_accrate = 1000000LL;
static int64_t vm_swapcache_write_count;

SYSCTL_INT(_vm_swapcache, OID_AUTO, maxlaunder,
        CTLFLAG_RW, &vm_swapcache_maxlaunder, 0, "");

SYSCTL_INT(_vm_swapcache, OID_AUTO, data_enable,
        CTLFLAG_RW, &vm_swapcache_data_enable, 0, "");
SYSCTL_INT(_vm_swapcache, OID_AUTO, meta_enable,
        CTLFLAG_RW, &vm_swapcache_meta_enable, 0, "");
SYSCTL_INT(_vm_swapcache, OID_AUTO, read_enable,
        CTLFLAG_RW, &vm_swapcache_read_enable, 0, "");

SYSCTL_QUAD(_vm_swapcache, OID_AUTO, curburst,
        CTLFLAG_RW, &vm_swapcache_curburst, 0, "");
SYSCTL_QUAD(_vm_swapcache, OID_AUTO, maxburst,
        CTLFLAG_RW, &vm_swapcache_maxburst, 0, "");
SYSCTL_QUAD(_vm_swapcache, OID_AUTO, accrate,
        CTLFLAG_RW, &vm_swapcache_accrate, 0, "");
SYSCTL_QUAD(_vm_swapcache, OID_AUTO, write_count,
        CTLFLAG_RW, &vm_swapcache_write_count, 0, "");

/*
 * vm_swapcached is the high level swapcache daemon.
 */
static void
vm_swapcached(void)
{
        enum { SWAPC_WRITING, SWAPC_CLEANING } state = SWAPC_WRITING;
        struct vm_page page_marker;
        struct vm_object object_marker;

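        /*
         * page_marker and object_marker are dummy entries threaded onto
         * the inactive page queue and the vm_object list so that each
         * scan can record its position and resume from it on the next
         * pass.
         */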
        /*
         * Thread setup
         */
        curthread->td_flags |= TDF_SYSTHREAD;
        crit_enter();

        /*
         * Initialize our marker for the inactive scan (SWAPC_WRITING)
         */
        bzero(&page_marker, sizeof(page_marker));
        page_marker.flags = PG_BUSY | PG_FICTITIOUS | PG_MARKER;
        page_marker.queue = PQ_INACTIVE;
        page_marker.wire_count = 1;
        TAILQ_INSERT_HEAD(INACTIVE_LIST, &page_marker, pageq);

        /*
         * Initialize our marker for the vm_object scan (SWAPC_CLEANING)
         */
        bzero(&object_marker, sizeof(object_marker));
        object_marker.type = OBJT_MARKER;
        TAILQ_INSERT_HEAD(&vm_object_list, &object_marker, object_list);

        for (;;) {
                /*
                 * While the swapcache is disabled just wake up every
                 * few seconds to check whether it has been enabled.
                 */
                if (vm_swapcache_data_enable == 0 &&
                    vm_swapcache_meta_enable == 0) {
                        tsleep(&vm_swapcache_sleep, 0, "csleep", hz * 5);
                        continue;
                }

                /*
                 * Polling rate when enabled is 10 hz.
                 */
                tsleep(&vm_swapcache_sleep, 0, "csleep", hz / 10);

                /*
                 * State hysteresis.  Generate write activity up to 75% of
                 * swap, then clean out swap assignments down to 70%, then
                 * repeat.
                 */
                if (state == SWAPC_WRITING) {
                        if (vm_swap_cache_use > (int64_t)vm_swap_max * 75 / 100)
                                state = SWAPC_CLEANING;
                } else {
                        if (vm_swap_cache_use < (int64_t)vm_swap_max * 70 / 100)
                                state = SWAPC_WRITING;
                }

                /*
                 * We are allowed to continue accumulating burst value
                 * in either state.
                 */
                vm_swapcache_curburst += vm_swapcache_accrate / 10;
                if (vm_swapcache_curburst > vm_swapcache_maxburst)
                        vm_swapcache_curburst = vm_swapcache_maxburst;

                /*
                 * We don't want to nickel-and-dime the scan as that will
                 * create unnecessary fragmentation.  The minimum burst
                 * is one second's worth of accumulation.
                 */
                if (state == SWAPC_WRITING) {
                        if (vm_swapcache_curburst >= vm_swapcache_accrate)
                                vm_swapcache_writing(&page_marker);
                } else {
                        vm_swapcache_cleaning(&object_marker);
                }
        }
        TAILQ_REMOVE(INACTIVE_LIST, &page_marker, pageq);
        TAILQ_REMOVE(&vm_object_list, &object_marker, object_list);
        crit_exit();
}

static void
vm_swapcache_writing(vm_page_t marker)
{
        vm_object_t object;
        struct vnode *vp;
        vm_page_t m;
        int count;

        /*
         * Scan the inactive queue from our marker to locate
         * suitable pages to push to the swap cache.
         *
         * We are looking for clean vnode-backed pages.
         *
         * NOTE: PG_SWAPPED pages in particular are not part of
         *       our count because once the cache stabilizes we
         *       can end up with a very high data rate of VM pages
         *       cycling from it.
         */
        m = marker;
        count = vm_swapcache_maxlaunder;

        while ((m = TAILQ_NEXT(m, pageq)) != NULL && count--) {
                if (m->flags & (PG_MARKER | PG_SWAPPED)) {
                        ++count;
                        continue;
                }
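                /*
                 * Stop writing once the burst budget has been exhausted.
                 */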
                if (vm_swapcache_curburst < 0)
                        break;
                if (m->flags & (PG_BUSY | PG_UNMANAGED))
                        continue;
                if (m->busy || m->hold_count || m->wire_count)
                        continue;
                if (m->valid != VM_PAGE_BITS_ALL)
                        continue;
                if (m->dirty & m->valid)
                        continue;
                if ((object = m->object) == NULL)
                        continue;
                if (object->type != OBJT_VNODE ||
                    (object->flags & OBJ_DEAD)) {
                        continue;
                }
                vm_page_test_dirty(m);
                if (m->dirty & m->valid)
                        continue;
                vp = object->handle;
                if (vp == NULL)
                        continue;
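                /*
                 * VREG covers regular file data (data_enable), VCHR
                 * covers the block device vnodes filesystems use for
                 * meta-data (meta_enable).
                 */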
                switch(vp->v_type) {
                case VREG:
                        if (vm_swapcache_data_enable == 0)
                                continue;
                        break;
                case VCHR:
                        if (vm_swapcache_meta_enable == 0)
                                continue;
                        break;
                default:
                        continue;
                }

                /*
                 * Ok, move the marker and soft-busy the page.
                 */
                TAILQ_REMOVE(INACTIVE_LIST, marker, pageq);
                TAILQ_INSERT_AFTER(INACTIVE_LIST, m, marker, pageq);

                /*
                 * Assign swap and initiate I/O
                 */
                vm_swapcached_flush(m);

                /*
                 * Setup for next loop using marker.
                 */
                m = marker;
        }

        /*
         * Cleanup marker position.  If we hit the end of the
         * list the marker is placed at the tail.  Newly deactivated
         * pages will be placed after it.
         *
         * Earlier inactive pages that were dirty and have become clean
         * are typically moved to the end of PQ_INACTIVE by virtue
         * of vfs_vmio_release() when they become unwired from the
         * buffer cache.
         */
        TAILQ_REMOVE(INACTIVE_LIST, marker, pageq);
        if (m)
                TAILQ_INSERT_BEFORE(m, marker, pageq);
        else
                TAILQ_INSERT_TAIL(INACTIVE_LIST, marker, pageq);
}

/*
 * Flush the specified page using the swap_pager.
 */
static
void
vm_swapcached_flush(vm_page_t m)
{
        vm_object_t object;
        int rtvals;

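        /*
         * Soft-busy the page and downgrade any mappings to read-only
         * for the duration of the swap write.
         */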
        vm_page_io_start(m);
        vm_page_protect(m, VM_PROT_READ);

        object = m->object;
        vm_object_pip_add(object, 1);
        swap_pager_putpages(object, &m, 1, FALSE, &rtvals);
        vm_swapcache_write_count += PAGE_SIZE;
        vm_swapcache_curburst -= PAGE_SIZE;

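        /*
         * If the pager did not queue the I/O asynchronously it has
         * already completed (or failed), so drop the paging-in-progress
         * count and the soft-busy here; otherwise the asynchronous
         * completion path takes care of it.
         */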
        if (rtvals != VM_PAGER_PEND) {
                vm_object_pip_wakeup(object);
                vm_page_io_finish(m);
        }
}

static
void
vm_swapcache_cleaning(vm_object_t marker)
{
        vm_object_t object;
        struct vnode *vp;
        int count;
        int n;

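        /*
         * The marker's backing_object field remembers the vm_object we
         * were last working on and its size field is reused as the
         * swblock index to resume the condfree scan at.
         */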
        object = marker;
        count = vm_swapcache_maxlaunder;

        /*
         * Look for vnode objects
         */
        while ((object = TAILQ_NEXT(object, object_list)) != NULL && count--) {
                if (object->type != OBJT_VNODE)
                        continue;
                if ((object->flags & OBJ_DEAD) || object->swblock_count == 0)
                        continue;
                if ((vp = object->handle) == NULL)
                        continue;
                if (vp->v_type != VREG && vp->v_type != VCHR)
                        continue;

                /*
                 * Adjust the iterator, restarting the index scan if we
                 * have moved on to a different object.
                 */
                if (marker->backing_object != object)
                        marker->size = 0;

                /*
                 * Move the marker so we can work on the VM object
                 */
                TAILQ_REMOVE(&vm_object_list, marker, object_list);
                TAILQ_INSERT_AFTER(&vm_object_list, object,
                                   marker, object_list);

                /*
                 * Look for swblocks starting at our iterator.
                 *
                 * The swap_pager_condfree() function attempts to free
                 * swap space starting at the specified index.  The index
                 * will be updated on return.  The function will return
                 * a scan factor (NOT the number of blocks freed).
                 *
                 * If it must cut its scan of the object short due to an
                 * excessive number of swblocks, or is able to free the
                 * requested number of blocks, it will return n >= count
                 * and we break and pick it back up on a future attempt.
                 */
                n = swap_pager_condfree(object, &marker->size, count);
                count -= n;
                if (count < 0)
                        break;

                /*
                 * Setup for loop.
                 */
                marker->size = 0;
                object = marker;
        }

        /*
         * Adjust marker so we continue the scan from where we left off.
         * When we reach the end we start back at the beginning.
         */
        TAILQ_REMOVE(&vm_object_list, marker, object_list);
        if (object)
                TAILQ_INSERT_BEFORE(object, marker, object_list);
        else
                TAILQ_INSERT_HEAD(&vm_object_list, marker, object_list);
        marker->backing_object = object;
}