kernel - Add shutdown method for vm.swapcache
[dragonfly.git] / sys / vm / vm_swapcache.c
/*
 * (MPSAFE)
 *
 * Copyright (c) 2010 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Implement the swapcache daemon.  When enabled swap is assumed to be
 * configured on a fast storage device such as a SSD.  Swap is assigned
 * to clean vnode-backed pages in the inactive queue, clustered by object
 * if possible, and written out.  The swap assignment sticks around even
 * after the underlying pages have been recycled.
 *
 * The daemon manages write bandwidth based on sysctl settings to control
 * wear on the SSD.
 *
 * The vnode strategy code will check for the swap assignments and divert
 * reads to the swap device when the data is present in the swapcache.
 *
 * This operates on both regular files and the block device vnodes used by
 * filesystems to manage meta-data.
 */

#include "opt_vm.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>
#include <sys/eventhandler.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>

#include <sys/thread2.h>
#include <vm/vm_page2.h>

#define INACTIVE_LIST	(&vm_page_queues[PQ_INACTIVE].pl)

/* the kernel process "vm_pageout" */
static int vm_swapcached_flush (vm_page_t m, int isblkdev);
static int vm_swapcache_test(vm_page_t m);
static void vm_swapcache_writing(vm_page_t marker);
static void vm_swapcache_cleaning(vm_object_t marker);
struct thread *swapcached_thread;

SYSCTL_NODE(_vm, OID_AUTO, swapcache, CTLFLAG_RW, NULL, NULL);

int vm_swapcache_read_enable;
int vm_swapcache_inactive_heuristic;
static int vm_swapcache_sleep;
static int vm_swapcache_maxlaunder = 256;
static int vm_swapcache_data_enable = 0;
static int vm_swapcache_meta_enable = 0;
static int vm_swapcache_maxswappct = 75;
static int vm_swapcache_hysteresis;
int vm_swapcache_use_chflags = 1;	/* require chflags cache */
static int64_t vm_swapcache_minburst = 10000000LL;	/* 10MB */
static int64_t vm_swapcache_curburst = 4000000000LL;	/* 4G after boot */
static int64_t vm_swapcache_maxburst = 2000000000LL;	/* 2G nominal max */
static int64_t vm_swapcache_accrate = 100000LL;		/* 100K/s */
static int64_t vm_swapcache_write_count;
static int64_t vm_swapcache_maxfilesize;

SYSCTL_INT(_vm_swapcache, OID_AUTO, maxlaunder,
	CTLFLAG_RW, &vm_swapcache_maxlaunder, 0, "");

SYSCTL_INT(_vm_swapcache, OID_AUTO, data_enable,
	CTLFLAG_RW, &vm_swapcache_data_enable, 0, "");
SYSCTL_INT(_vm_swapcache, OID_AUTO, meta_enable,
	CTLFLAG_RW, &vm_swapcache_meta_enable, 0, "");
SYSCTL_INT(_vm_swapcache, OID_AUTO, read_enable,
	CTLFLAG_RW, &vm_swapcache_read_enable, 0, "");
SYSCTL_INT(_vm_swapcache, OID_AUTO, maxswappct,
	CTLFLAG_RW, &vm_swapcache_maxswappct, 0, "");
SYSCTL_INT(_vm_swapcache, OID_AUTO, hysteresis,
	CTLFLAG_RW, &vm_swapcache_hysteresis, 0, "");
SYSCTL_INT(_vm_swapcache, OID_AUTO, use_chflags,
	CTLFLAG_RW, &vm_swapcache_use_chflags, 0, "");

SYSCTL_QUAD(_vm_swapcache, OID_AUTO, minburst,
	CTLFLAG_RW, &vm_swapcache_minburst, 0, "");
SYSCTL_QUAD(_vm_swapcache, OID_AUTO, curburst,
	CTLFLAG_RW, &vm_swapcache_curburst, 0, "");
SYSCTL_QUAD(_vm_swapcache, OID_AUTO, maxburst,
	CTLFLAG_RW, &vm_swapcache_maxburst, 0, "");
SYSCTL_QUAD(_vm_swapcache, OID_AUTO, maxfilesize,
	CTLFLAG_RW, &vm_swapcache_maxfilesize, 0, "");
SYSCTL_QUAD(_vm_swapcache, OID_AUTO, accrate,
	CTLFLAG_RW, &vm_swapcache_accrate, 0, "");
SYSCTL_QUAD(_vm_swapcache, OID_AUTO, write_count,
	CTLFLAG_RW, &vm_swapcache_write_count, 0, "");

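/*
 * The knobs above appear under vm.swapcache.*.  As a usage sketch (not
 * part of this file), an administrator would typically enable caching of
 * file data and filesystem meta-data at runtime with something like:
 *
 *	sysctl vm.swapcache.data_enable=1
 *	sysctl vm.swapcache.meta_enable=1
 *
 * and tune write bandwidth via vm.swapcache.accrate / vm.swapcache.maxburst.
 */
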
#define SWAPMAX(adj)	\
	((int64_t)vm_swap_max * (vm_swapcache_maxswappct + (adj)) / 100)

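/*
 * Worked example with the default vm_swapcache_maxswappct of 75:
 * SWAPMAX(0) is 75% of vm_swap_max and SWAPMAX(-5) is 70%, giving the
 * 75%/70% write/clean hysteresis described in the main loop below.
 */
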
/*
 * When shutting down the machine we want to stop swapcache operation
 * immediately so swap is not accessed after devices have been shuttered.
 */
static void
shutdown_swapcache(void *arg __unused)
{
	vm_swapcache_read_enable = 0;
	vm_swapcache_data_enable = 0;
	vm_swapcache_meta_enable = 0;
	wakeup(&vm_swapcache_sleep); /* shortcut 5-second wait */
}
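
/*
 * Note: this handler is registered on the shutdown_pre_sync event from
 * vm_swapcached_thread() below, alongside shutdown_kproc() which stops
 * the daemon itself; the wakeup() above merely shortcuts the daemon's
 * 5-second poll so the disable takes effect promptly.
 */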

/*
 * vm_swapcached is the high level pageout daemon.
 *
 * No requirements.
 */
static void
vm_swapcached_thread(void)
{
	enum { SWAPC_WRITING, SWAPC_CLEANING } state = SWAPC_WRITING;
	enum { SWAPB_BURSTING, SWAPB_RECOVERING } burst = SWAPB_BURSTING;
	struct vm_page page_marker;
	struct vm_object object_marker;

	/*
	 * Thread setup
	 */
	curthread->td_flags |= TDF_SYSTHREAD;
	EVENTHANDLER_REGISTER(shutdown_pre_sync, shutdown_kproc,
			      swapcached_thread, SHUTDOWN_PRI_FIRST);
	EVENTHANDLER_REGISTER(shutdown_pre_sync, shutdown_swapcache,
			      NULL, SHUTDOWN_PRI_SECOND);
	lwkt_gettoken(&vm_token);
	crit_enter();

	/*
	 * Initialize our marker for the inactive scan (SWAPC_WRITING)
	 */
	bzero(&page_marker, sizeof(page_marker));
	page_marker.flags = PG_BUSY | PG_FICTITIOUS | PG_MARKER;
	page_marker.queue = PQ_INACTIVE;
	page_marker.wire_count = 1;
	TAILQ_INSERT_HEAD(INACTIVE_LIST, &page_marker, pageq);
	vm_swapcache_hysteresis = vmstats.v_inactive_target / 2;
	vm_swapcache_inactive_heuristic = -vm_swapcache_hysteresis;

	/*
	 * Initialize our marker for the vm_object scan (SWAPC_CLEANING)
	 */
	bzero(&object_marker, sizeof(object_marker));
	object_marker.type = OBJT_MARKER;
	lwkt_gettoken(&vmobj_token);
	TAILQ_INSERT_HEAD(&vm_object_list, &object_marker, object_list);
	lwkt_reltoken(&vmobj_token);

	for (;;) {
		/*
		 * Handle shutdown
		 */
		kproc_suspend_loop();

		/*
		 * Check every 5 seconds when not enabled or if no swap
		 * is present.
		 */
		if ((vm_swapcache_data_enable == 0 &&
		     vm_swapcache_meta_enable == 0) ||
		    vm_swap_max == 0) {
			tsleep(&vm_swapcache_sleep, 0, "csleep", hz * 5);
			continue;
		}

		/*
		 * Polling rate when enabled is approximately 10 hz.
		 */
		tsleep(&vm_swapcache_sleep, 0, "csleep", hz / 10);

		/*
		 * State hysteresis.  Generate write activity up to 75% of
		 * swap, then clean out swap assignments down to 70%, then
		 * repeat.
		 */
		if (state == SWAPC_WRITING) {
			if (vm_swap_cache_use > SWAPMAX(0))
				state = SWAPC_CLEANING;
		} else {
			if (vm_swap_cache_use < SWAPMAX(-5))
				state = SWAPC_WRITING;
		}

		/*
		 * We are allowed to continue accumulating burst value
		 * in either state.  Allow the user to set curburst > maxburst
		 * for the initial load-in.
		 */
		if (vm_swapcache_curburst < vm_swapcache_maxburst) {
			vm_swapcache_curburst += vm_swapcache_accrate / 10;
			if (vm_swapcache_curburst > vm_swapcache_maxburst)
				vm_swapcache_curburst = vm_swapcache_maxburst;
		}
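
		/*
		 * (With the default accrate of 100000 bytes/sec and the
		 *  ~10hz poll above, each pass adds roughly 10000 bytes
		 *  of write budget to curburst.)
		 */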

		/*
		 * We don't want to nickel-and-dime the scan as that will
		 * create unnecessary fragmentation.  The minimum burst
		 * is one second's worth of accumulation.
		 */
		if (state == SWAPC_WRITING) {
			if (vm_swapcache_curburst >= vm_swapcache_accrate) {
				if (burst == SWAPB_BURSTING) {
					vm_swapcache_writing(&page_marker);
					if (vm_swapcache_curburst <= 0)
						burst = SWAPB_RECOVERING;
				} else if (vm_swapcache_curburst >
					   vm_swapcache_minburst) {
					vm_swapcache_writing(&page_marker);
					burst = SWAPB_BURSTING;
				}
			}
		} else {
			vm_swapcache_cleaning(&object_marker);
		}
	}

	/*
	 * Cleanup (NOT REACHED)
	 */
	TAILQ_REMOVE(INACTIVE_LIST, &page_marker, pageq);
	crit_exit();
	lwkt_reltoken(&vm_token);

	lwkt_gettoken(&vmobj_token);
	TAILQ_REMOVE(&vm_object_list, &object_marker, object_list);
	lwkt_reltoken(&vmobj_token);
}

static struct kproc_desc swpc_kp = {
	"swapcached",
	vm_swapcached_thread,
	&swapcached_thread
};
SYSINIT(swapcached, SI_SUB_KTHREAD_PAGE, SI_ORDER_SECOND, kproc_start, &swpc_kp)

/*
 * The caller must hold vm_token.
 */
static void
vm_swapcache_writing(vm_page_t marker)
{
	vm_object_t object;
	struct vnode *vp;
	vm_page_t m;
	int count;
	int isblkdev;

	/*
	 * Deal with an overflow of the heuristic counter or if the user
	 * manually changes the hysteresis.
	 *
	 * Try to avoid small incremental pageouts by waiting for enough
	 * pages to build up in the inactive queue to hopefully get a good
	 * burst in.  This heuristic is bumped by the VM system and reset
	 * when our scan hits the end of the queue.
	 */
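	/*
	 * Example with the defaults set up in vm_swapcached_thread():
	 * vm_swapcache_hysteresis is v_inactive_target / 2 and the
	 * heuristic starts at -hysteresis, so writing only resumes after
	 * roughly that many pages have built up in the inactive queue
	 * (the VM system bumps the counter as that happens).
	 */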
	if (vm_swapcache_inactive_heuristic < -vm_swapcache_hysteresis)
		vm_swapcache_inactive_heuristic = -vm_swapcache_hysteresis;
	if (vm_swapcache_inactive_heuristic < 0)
		return;

	/*
	 * Scan the inactive queue from our marker to locate
	 * suitable pages to push to the swap cache.
	 *
	 * We are looking for clean vnode-backed pages.
	 *
	 * NOTE: PG_SWAPPED pages in particular are not part of
	 *	 our count because once the cache stabilizes we
	 *	 can end up with a very high datarate of VM pages
	 *	 cycling from it.
	 */
	m = marker;
	count = vm_swapcache_maxlaunder;

	while ((m = TAILQ_NEXT(m, pageq)) != NULL && count--) {
		if (m->flags & (PG_MARKER | PG_SWAPPED)) {
			++count;
			continue;
		}
		if (vm_swapcache_curburst < 0)
			break;
		if (vm_swapcache_test(m))
			continue;
		object = m->object;
		vp = object->handle;
		if (vp == NULL)
			continue;

		switch(vp->v_type) {
		case VREG:
			/*
			 * PG_NOTMETA generically means 'don't swapcache this',
			 * and HAMMER will set this for regular data buffers
			 * (and leave it unset for meta-data buffers) as
			 * appropriate when double buffering is enabled.
			 */
			if (m->flags & PG_NOTMETA)
				continue;

			/*
			 * If data_enable is 0 do not try to swapcache data.
			 * If use_chflags is set then only swapcache data for
			 * VSWAPCACHE marked vnodes, otherwise any vnode.
			 */
			if (vm_swapcache_data_enable == 0 ||
			    ((vp->v_flag & VSWAPCACHE) == 0 &&
			     vm_swapcache_use_chflags)) {
				continue;
			}
			if (vm_swapcache_maxfilesize &&
			    object->size >
			    (vm_swapcache_maxfilesize >> PAGE_SHIFT)) {
				continue;
			}
			isblkdev = 0;
			break;
		case VCHR:
			/*
			 * PG_NOTMETA generically means 'don't swapcache this',
			 * and HAMMER will set this for regular data buffers
			 * (and leave it unset for meta-data buffers) as
			 * appropriate when double buffering is enabled.
			 */
			if (m->flags & PG_NOTMETA)
				continue;
			if (vm_swapcache_meta_enable == 0)
				continue;
			isblkdev = 1;
			break;
		default:
			continue;
		}

		/*
		 * Ok, move the marker and soft-busy the page.
		 */
		TAILQ_REMOVE(INACTIVE_LIST, marker, pageq);
		TAILQ_INSERT_AFTER(INACTIVE_LIST, m, marker, pageq);

		/*
		 * Assign swap and initiate I/O.
		 *
		 * (adjust for the --count which also occurs in the loop)
		 */
		count -= vm_swapcached_flush(m, isblkdev) - 1;

		/*
		 * Setup for next loop using marker.
		 */
		m = marker;
	}

	/*
	 * Cleanup marker position.  If we hit the end of the
	 * list the marker is placed at the tail.  Newly deactivated
	 * pages will be placed after it.
	 *
	 * Earlier inactive pages that were dirty and become clean
	 * are typically moved to the end of PQ_INACTIVE by virtue
	 * of vfs_vmio_release() when they become unwired from the
	 * buffer cache.
	 */
	TAILQ_REMOVE(INACTIVE_LIST, marker, pageq);
	if (m) {
		TAILQ_INSERT_BEFORE(m, marker, pageq);
	} else {
		TAILQ_INSERT_TAIL(INACTIVE_LIST, marker, pageq);
		vm_swapcache_inactive_heuristic = -vm_swapcache_hysteresis;
	}
}

/*
 * Flush the specified page using the swap_pager.
 *
 * Try to collect surrounding pages, including pages which may
 * have already been assigned swap.  Try to cluster within a
 * contiguous aligned SWAP_META_PAGES (typ 16 x PAGE_SIZE) block
 * to match what swap_pager_putpages() can do.
 *
 * We also want to try to match against the buffer cache blocksize
 * but we don't really know what it is here.  Since the buffer cache
 * wires and unwires pages in groups the fact that we skip wired pages
 * should be sufficient.
 *
 * Returns a count of pages we might have flushed (minimum 1).
 *
 * The caller must hold vm_token.
 */
static
int
vm_swapcached_flush(vm_page_t m, int isblkdev)
{
	vm_object_t object;
	vm_page_t marray[SWAP_META_PAGES];
	vm_pindex_t basei;
	int rtvals[SWAP_META_PAGES];
	int x;
	int i;
	int j;
	int count;

	vm_page_io_start(m);
	vm_page_protect(m, VM_PROT_READ);
	object = m->object;

	/*
	 * Try to cluster around (m), keeping in mind that the swap pager
	 * can only do SWAP_META_PAGES worth of contiguous write.
	 */
	x = (int)m->pindex & SWAP_META_MASK;
	marray[x] = m;
	basei = m->pindex;

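	/*
	 * Example (assuming SWAP_META_PAGES is 16, so SWAP_META_MASK is
	 * 15): for pindex 37, x = 37 & 15 = 5 and the two scans below
	 * try to fill marray[] with the pages of the aligned block
	 * pindex 32..47, i.e. basei - x + 0 through basei - x + 15.
	 */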
	for (i = x - 1; i >= 0; --i) {
		m = vm_page_lookup(object, basei - x + i);
		if (m == NULL)
			break;
		if (vm_swapcache_test(m))
			break;
		if (isblkdev && (m->flags & PG_NOTMETA))
			break;
		vm_page_io_start(m);
		vm_page_protect(m, VM_PROT_READ);
		if (m->queue - m->pc == PQ_CACHE) {
			vm_page_unqueue_nowakeup(m);
			vm_page_deactivate(m);
		}
		marray[i] = m;
	}
	++i;

	for (j = x + 1; j < SWAP_META_PAGES; ++j) {
		m = vm_page_lookup(object, basei - x + j);
		if (m == NULL)
			break;
		if (vm_swapcache_test(m))
			break;
		if (isblkdev && (m->flags & PG_NOTMETA))
			break;
		vm_page_io_start(m);
		vm_page_protect(m, VM_PROT_READ);
		if (m->queue - m->pc == PQ_CACHE) {
			vm_page_unqueue_nowakeup(m);
			vm_page_deactivate(m);
		}
		marray[j] = m;
	}

	count = j - i;
	vm_object_pip_add(object, count);
	swap_pager_putpages(object, marray + i, count, FALSE, rtvals + i);
	vm_swapcache_write_count += count * PAGE_SIZE;
	vm_swapcache_curburst -= count * PAGE_SIZE;

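	/*
	 * Writes that did not return VM_PAGER_PEND finished (or failed)
	 * synchronously, so their busy state and pip reference are
	 * released here; pending pages are presumably completed by the
	 * swap pager's asynchronous completion path.
	 */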
	while (i < j) {
		if (rtvals[i] != VM_PAGER_PEND) {
			vm_page_io_finish(marray[i]);
			vm_object_pip_wakeup(object);
		}
		++i;
	}
	return(count);
}

/*
 * Test whether a VM page is suitable for writing to the swapcache.
 * Does not test m->queue, PG_MARKER, or PG_SWAPPED.
 *
 * Returns 0 on success, 1 on failure
 *
 * The caller must hold vm_token.
 */
static int
vm_swapcache_test(vm_page_t m)
{
	vm_object_t object;

	if (m->flags & (PG_BUSY | PG_UNMANAGED))
		return(1);
	if (m->busy || m->hold_count || m->wire_count)
		return(1);
	if (m->valid != VM_PAGE_BITS_ALL)
		return(1);
	if (m->dirty & m->valid)
		return(1);
	if ((object = m->object) == NULL)
		return(1);
	if (object->type != OBJT_VNODE ||
	    (object->flags & OBJ_DEAD)) {
		return(1);
	}
	vm_page_test_dirty(m);
	if (m->dirty & m->valid)
		return(1);
	return(0);
}

/*
 * Cleaning pass
 *
 * The caller must hold vm_token.
 */
static
void
vm_swapcache_cleaning(vm_object_t marker)
{
	vm_object_t object;
	struct vnode *vp;
	int count;
	int n;

	object = marker;
	count = vm_swapcache_maxlaunder;

	/*
	 * Look for vnode objects
	 */
	lwkt_gettoken(&vm_token);
	lwkt_gettoken(&vmobj_token);

	while ((object = TAILQ_NEXT(object, object_list)) != NULL) {
		if (--count <= 0)
			break;
		if (object->type != OBJT_VNODE)
			continue;
		if ((object->flags & OBJ_DEAD) || object->swblock_count == 0)
			continue;
		if ((vp = object->handle) == NULL)
			continue;
		if (vp->v_type != VREG && vp->v_type != VCHR)
			continue;

		/*
		 * Adjust iterator.
		 */
		if (marker->backing_object != object)
			marker->size = 0;

		/*
		 * Move the marker so we can work on the VM object
		 */
		TAILQ_REMOVE(&vm_object_list, marker, object_list);
		TAILQ_INSERT_AFTER(&vm_object_list, object,
				   marker, object_list);

		/*
		 * Look for swblocks starting at our iterator.
		 *
		 * The swap_pager_condfree() function attempts to free
		 * swap space starting at the specified index.  The index
		 * will be updated on return.  The function will return
		 * a scan factor (NOT the number of blocks freed).
		 *
		 * If it must cut its scan of the object short due to an
		 * excessive number of swblocks, or is able to free the
		 * requested number of blocks, it will return n >= count
		 * and we break and pick it back up on a future attempt.
		 */
		n = swap_pager_condfree(object, &marker->size, count);
		count -= n;
		if (count < 0)
			break;

		/*
		 * Setup for loop.
		 */
		marker->size = 0;
		object = marker;
	}

	/*
	 * Adjust marker so we continue the scan from where we left off.
	 * When we reach the end we start back at the beginning.
	 */
	TAILQ_REMOVE(&vm_object_list, marker, object_list);
	if (object)
		TAILQ_INSERT_BEFORE(object, marker, object_list);
	else
		TAILQ_INSERT_HEAD(&vm_object_list, marker, object_list);
	marker->backing_object = object;

	lwkt_reltoken(&vmobj_token);
	lwkt_reltoken(&vm_token);
}