From 1e5196f08d08b182e431f9490abed276ccc7bfea Mon Sep 17 00:00:00 2001 From: Matthew Dillon Date: Fri, 5 Feb 2010 10:13:51 -0800 Subject: [PATCH] kernel - SWAP CACHE part 10/many - Fix swap space usage calculation * The code which limits how much swap space the swap cache uses was broken. It was using the current amount of free swap space instead of the total space, causing it to only use 40% of available swap instead of 66% * Fix the calculation and also make it 3/4 (75%) of configured swap. --- sys/vm/swap_pager.h | 1 + sys/vm/vm_swap.c | 2 ++ sys/vm/vm_swapcache.c | 19 +++++++++++++++---- 3 files changed, 18 insertions(+), 4 deletions(-) diff --git a/sys/vm/swap_pager.h b/sys/vm/swap_pager.h index 25c093d6be..9900d6f687 100644 --- a/sys/vm/swap_pager.h +++ b/sys/vm/swap_pager.h @@ -88,6 +88,7 @@ struct swblock { #ifdef _KERNEL extern int swap_pager_full; extern int vm_swap_size; +extern int vm_swap_max; extern int vm_swap_cache_use; extern int vm_swap_anon_use; extern int vm_swapcache_read_enable; diff --git a/sys/vm/vm_swap.c b/sys/vm/vm_swap.c index 8e73309e4c..f92566cdec 100644 --- a/sys/vm/vm_swap.c +++ b/sys/vm/vm_swap.c @@ -73,6 +73,7 @@ struct swdevt *swdevt = should_be_malloced; /* exported to pstat/systat */ static swblk_t nswap; /* first block after the interleaved devs */ int nswdev = NSWAPDEV; /* exported to pstat/systat */ int vm_swap_size; +int vm_swap_max; static int swapdev_strategy (struct vop_strategy_args *ap); struct vnode *swapdev_vp; @@ -345,6 +346,7 @@ swaponvp(struct thread *td, struct vnode *vp, u_quad_t nblks) vsbase = index * dmmax + dvbase * nswdev; blist_free(swapblist, vsbase, blk); vm_swap_size += blk; + vm_swap_max += blk; } swap_pager_newswap(); diff --git a/sys/vm/vm_swapcache.c b/sys/vm/vm_swapcache.c index 82786a6098..24d5302c5f 100644 --- a/sys/vm/vm_swapcache.c +++ b/sys/vm/vm_swapcache.c @@ -93,7 +93,7 @@ SYSCTL_NODE(_vm, OID_AUTO, swapcache, CTLFLAG_RW, NULL, NULL); int vm_swapcache_read_enable; static int 
vm_swapcache_sleep; -static int vm_swapcache_maxlaunder = 128; +static int vm_swapcache_maxlaunder = 256; static int vm_swapcache_data_enable = 0; static int vm_swapcache_meta_enable = 0; static int64_t vm_swapcache_curburst = 1000000000LL; @@ -174,11 +174,11 @@ vm_swapcached(void) /* * Don't load any more into the cache once we have exceeded - * 2/3 of available swap space. XXX need to start cleaning + * 3/4 of available swap space. XXX need to start cleaning * it out, though vnode recycling will accomplish that to * some degree. */ - if (vm_swap_cache_use > vm_swap_size * 2 / 3) + if (vm_swap_cache_use > vm_swap_max * 3 / 4) continue; /* @@ -252,11 +252,22 @@ vm_swapcached(void) */ m = TAILQ_NEXT(&marker, pageq); } + + /* + * Cleanup marker position. If we hit the end of the + * list the marker is placed at the tail. Newly deactivated + * pages will be placed after it. + * + * Earlier inactive pages that were dirty and become clean + * are typically moved to the end of PQ_INACTIVE by virtue + * of vfs_vmio_release() when they become unwired from the + * buffer cache. + */ TAILQ_REMOVE(INACTIVE_LIST, &marker, pageq); if (m) TAILQ_INSERT_BEFORE(m, &marker, pageq); else - TAILQ_INSERT_HEAD(INACTIVE_LIST, &marker, pageq); + TAILQ_INSERT_TAIL(INACTIVE_LIST, &marker, pageq); } TAILQ_REMOVE(INACTIVE_LIST, &marker, pageq); -- 2.41.0