/*
 * Copyright (c) 2010 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Implement the swapcache daemon.  When enabled, swap is assumed to be
 * configured on a fast storage device such as an SSD.  Swap is assigned
 * to clean vnode-backed pages in the inactive queue, clustered by object
 * if possible, and written out.  The swap assignment sticks around even
 * after the underlying pages have been recycled.
 *
 * The daemon manages write bandwidth based on sysctl settings to control
 * wear on the SSD.
 *
 * The vnode strategy code checks for the swap assignments and diverts
 * reads to the swap device.
 *
 * This operates on both regular files and the block device vnodes used by
 * filesystems to manage meta-data.
 */
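
/*
 * Illustrative usage sketch (not part of this file): given the sysctl
 * knobs declared below under vm.swapcache, an administrator could enable
 * the cache and bound SSD wear from userland roughly as follows.  The
 * numeric values are arbitrary examples, not tuned recommendations:
 *
 *	sysctl vm.swapcache.data_enable=1	# cache regular file data
 *	sysctl vm.swapcache.meta_enable=1	# cache filesystem meta-data
 *	sysctl vm.swapcache.accrate=1000000	# ~1MB/sec sustained writes
 *	sysctl vm.swapcache.maxburst=100000000	# ~100MB burst allowance
 */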

#include "opt_vm.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>

#include <sys/thread2.h>
#include <vm/vm_page2.h>

#define INACTIVE_LIST	(&vm_page_queues[PQ_INACTIVE].pl)

/* the kernel thread "swapcached" */
static void vm_swapcached (void);
static void vm_swapcached_flush (vm_page_t m);
struct thread *swapcached_thread;

static struct kproc_desc swpc_kp = {
	"swapcached",
	vm_swapcached,
	&swapcached_thread
};
SYSINIT(swapcached, SI_SUB_KTHREAD_PAGE, SI_ORDER_SECOND, kproc_start, &swpc_kp);

SYSCTL_NODE(_vm, OID_AUTO, swapcache, CTLFLAG_RW, NULL, NULL);

int vm_swapcache_read_enable;
static int vm_swapcache_sleep;
static int vm_swapcache_maxlaunder = 128;
static int vm_swapcache_data_enable = 0;
static int vm_swapcache_meta_enable = 0;
static int64_t vm_swapcache_curburst = 1000000000LL;
static int64_t vm_swapcache_maxburst = 1000000000LL;
static int64_t vm_swapcache_accrate = 1000000LL;
static int64_t vm_swapcache_write_count;

SYSCTL_INT(_vm_swapcache, OID_AUTO, maxlaunder,
	CTLFLAG_RW, &vm_swapcache_maxlaunder, 0,
	"Maximum number of pages to examine per poll");

SYSCTL_INT(_vm_swapcache, OID_AUTO, data_enable,
	CTLFLAG_RW, &vm_swapcache_data_enable, 0,
	"Enable swapcache of regular file data");
SYSCTL_INT(_vm_swapcache, OID_AUTO, meta_enable,
	CTLFLAG_RW, &vm_swapcache_meta_enable, 0,
	"Enable swapcache of filesystem meta-data");
SYSCTL_INT(_vm_swapcache, OID_AUTO, read_enable,
	CTLFLAG_RW, &vm_swapcache_read_enable, 0,
	"Enable reads from the swapcache");

SYSCTL_QUAD(_vm_swapcache, OID_AUTO, curburst,
	CTLFLAG_RW, &vm_swapcache_curburst, 0,
	"Remaining write burst allowance (bytes)");
SYSCTL_QUAD(_vm_swapcache, OID_AUTO, maxburst,
	CTLFLAG_RW, &vm_swapcache_maxburst, 0,
	"Maximum write burst allowance (bytes)");
SYSCTL_QUAD(_vm_swapcache, OID_AUTO, accrate,
	CTLFLAG_RW, &vm_swapcache_accrate, 0,
	"Write burst accumulation rate (bytes/sec)");
SYSCTL_QUAD(_vm_swapcache, OID_AUTO, write_count,
	CTLFLAG_RW, &vm_swapcache_write_count, 0,
	"Total bytes written to the swapcache");

/*
 * vm_swapcached() is the main loop for the swapcache daemon.
 */
static void
vm_swapcached(void)
{
	struct vm_page marker;
	vm_object_t object;
	struct vnode *vp;
	vm_page_t m;
	int count;

	/*
	 * Thread setup
	 */
	curthread->td_flags |= TDF_SYSTHREAD;

	/*
	 * Initialize our marker
	 */
	bzero(&marker, sizeof(marker));
	marker.flags = PG_BUSY | PG_FICTITIOUS | PG_MARKER;
	marker.queue = PQ_INACTIVE;
	marker.wire_count = 1;

	crit_enter();
	TAILQ_INSERT_HEAD(INACTIVE_LIST, &marker, pageq);

	for (;;) {
		/*
		 * While disabled just sleep (up to five seconds at a
		 * time) and then re-check the enables.
		 */
		if (vm_swapcache_data_enable == 0 &&
		    vm_swapcache_meta_enable == 0) {
			tsleep(&vm_swapcache_sleep, 0, "csleep", hz * 5);
			continue;
		}

		/*
		 * The polling rate when enabled is 10 Hz.  Deal with the
		 * write bandwidth limits here.
		 *
		 * We don't want to nickel-and-dime the scan as that will
		 * create unnecessary fragmentation.
		 */
		tsleep(&vm_swapcache_sleep, 0, "csleep", hz / 10);
		vm_swapcache_curburst += vm_swapcache_accrate / 10;
		if (vm_swapcache_curburst > vm_swapcache_maxburst)
			vm_swapcache_curburst = vm_swapcache_maxburst;
		if (vm_swapcache_curburst < vm_swapcache_accrate)
			continue;
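
		/*
		 * Worked example of the accounting above, using the
		 * defaults declared earlier (illustrative only): accrate
		 * is 1,000,000 bytes/sec, so each 1/10 second poll adds
		 * 100,000 bytes to curburst, capped at maxburst.  Each
		 * page flushed by vm_swapcached_flush() debits PAGE_SIZE,
		 * so sustained writing converges on roughly accrate
		 * bytes/sec while idle periods bank up to maxburst bytes
		 * of burst capacity.
		 */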

		/*
		 * Don't load any more into the cache once we have exceeded
		 * 2/3 of available swap space.  XXX need to start cleaning
		 * it out, though vnode recycling will accomplish that to
		 * some degree.
		 */
		if (vm_swap_cache_use > vm_swap_size * 2 / 3)
			continue;
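
		/*
		 * Example (illustrative): with 32GB of swap configured,
		 * new cache loading stops once the swapcache is using
		 * more than about 21.3GB (2/3 of the total).  The units
		 * cancel, so the same ratio holds for any swap size.
		 */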

		/*
		 * Calculate the number of pages to test.  We don't want
		 * to get into a cpu-bound loop.
		 */
		count = vmstats.v_inactive_count;
		if (count > vm_swapcache_maxlaunder)
			count = vm_swapcache_maxlaunder;

		/*
		 * Scan the inactive queue from our marker to locate
		 * suitable pages to push to the swap cache.
		 *
		 * We are looking for clean vnode-backed pages.
		 */
		m = &marker;
		while ((m = TAILQ_NEXT(m, pageq)) != NULL && count--) {
			if (m->flags & PG_MARKER) {
				++count;	/* markers don't count */
				continue;
			}
			if (vm_swapcache_curburst < 0)
				break;
			if (m->flags & (PG_SWAPPED | PG_BUSY | PG_UNMANAGED))
				continue;
			if (m->busy || m->hold_count || m->wire_count)
				continue;
			if (m->valid != VM_PAGE_BITS_ALL)
				continue;
			if (m->dirty & m->valid)
				continue;
			if ((object = m->object) == NULL)
				continue;
			if (object->type != OBJT_VNODE ||
			    (object->flags & OBJ_DEAD)) {
				continue;
			}

			/*
			 * Re-test the dirty bits against the pmap before
			 * committing to the page.
			 */
			vm_page_test_dirty(m);
			if (m->dirty & m->valid)
				continue;
			vp = object->handle;
			if (vp == NULL)
				continue;

			switch(vp->v_type) {
			case VREG:
				if (vm_swapcache_data_enable == 0)
					continue;
				break;
			case VCHR:
				if (vm_swapcache_meta_enable == 0)
					continue;
				break;
			default:
				continue;
			}

			/*
			 * Ok, move the marker and soft-busy the page.
			 */
			TAILQ_REMOVE(INACTIVE_LIST, &marker, pageq);
			TAILQ_INSERT_AFTER(INACTIVE_LIST, m, &marker, pageq);

			/*
			 * Assign swap and initiate I/O
			 */
			vm_swapcached_flush(m);

			/*
			 * Setup for next loop using marker.
			 */
			m = &marker;
		}

		/*
		 * Reposition the marker for the next pass.  If we ran
		 * off the end of the queue, restart from the head.
		 */
		TAILQ_REMOVE(INACTIVE_LIST, &marker, pageq);
		if (m)
			TAILQ_INSERT_BEFORE(m, &marker, pageq);
		else
			TAILQ_INSERT_HEAD(INACTIVE_LIST, &marker, pageq);

	}
	/* NOTREACHED */
	TAILQ_REMOVE(INACTIVE_LIST, &marker, pageq);
	crit_exit();
}

/*
 * Flush the specified page using the swap_pager.  The page is soft-busied
 * for the duration of the I/O.  If the pager completes synchronously the
 * busy state and pip count are cleared here, otherwise the I/O completion
 * code takes care of it.
 */
static
void
vm_swapcached_flush(vm_page_t m)
{
	vm_object_t object;
	int rtvals;

	vm_page_io_start(m);
	vm_page_protect(m, VM_PROT_READ);

	object = m->object;
	vm_object_pip_add(object, 1);
	swap_pager_putpages(object, &m, 1, FALSE, &rtvals);
	vm_swapcache_write_count += PAGE_SIZE;
	vm_swapcache_curburst -= PAGE_SIZE;

	if (rtvals != VM_PAGER_PEND) {
		vm_object_pip_wakeup(object);
		vm_page_io_finish(m);
	}
}
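
/*
 * Illustrative userland sketch (not part of this file): the accounting
 * sysctls above can be sampled with sysctlbyname(3) to watch SSD write
 * load over time.  A minimal monitor, assuming only the standard
 * sysctlbyname() interface:
 *
 *	#include <sys/types.h>
 *	#include <sys/sysctl.h>
 *	#include <stdio.h>
 *	#include <stdint.h>
 *
 *	int
 *	main(void)
 *	{
 *		int64_t wc;
 *		size_t len = sizeof(wc);
 *
 *		if (sysctlbyname("vm.swapcache.write_count",
 *				 &wc, &len, NULL, 0) == 0)
 *			printf("swapcache bytes written: %jd\n",
 *			       (intmax_t)wc);
 *		return (0);
 *	}
 */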