From e527fb6b9426fc6203263a24c0d8a5c030bc4288 Mon Sep 17 00:00:00 2001
From: Matthew Dillon
Date: Sun, 21 Feb 2010 11:41:03 -0800
Subject: [PATCH] kernel - TMPFS - Stabilization pass, fix assertion in nrmdir
 (again)

* Oops, fix bug in last commit.  vp was garbage where it was being tested.
---
 sys/vfs/tmpfs/tmpfs_vnops.c | 14 ++++++++------
 sys/vm/swap_pager.h         |  1 +
 sys/vm/vm_page.c            | 13 +++++++++----
 sys/vm/vm_pageout.c         |  4 ++++
 sys/vm/vm_swapcache.c       | 21 +++++++++++++++++++--
 5 files changed, 41 insertions(+), 12 deletions(-)

diff --git a/sys/vfs/tmpfs/tmpfs_vnops.c b/sys/vfs/tmpfs/tmpfs_vnops.c
index 76e4598d00..8147c35dce 100644
--- a/sys/vfs/tmpfs/tmpfs_vnops.c
+++ b/sys/vfs/tmpfs/tmpfs_vnops.c
@@ -1068,12 +1068,6 @@ tmpfs_nrmdir(struct vop_nrmdir_args *v)
 	struct tmpfs_node *node;
 	int error;
 
-	/*
-	 * Prevalidate so we don't hit an assertion later
-	 */
-	if (vp->v_type != VDIR)
-		return ENOTDIR;
-
 	/*
 	 * We have to acquire the vp from v->a_nch because
 	 * we will likely unresolve the namecache entry, and
@@ -1083,6 +1077,14 @@ tmpfs_nrmdir(struct vop_nrmdir_args *v)
 	error = cache_vref(v->a_nch, v->a_cred, &vp);
 	KKASSERT(error == 0);
 
+	/*
+	 * Prevalidate so we don't hit an assertion later
+	 */
+	if (vp->v_type != VDIR) {
+		error = ENOTDIR;
+		goto out;
+	}
+
 	tmp = VFS_TO_TMPFS(dvp->v_mount);
 	dnode = VP_TO_TMPFS_DIR(dvp);
 	node = VP_TO_TMPFS_DIR(vp);
diff --git a/sys/vm/swap_pager.h b/sys/vm/swap_pager.h
index a50f4b9b71..ab28cd0614 100644
--- a/sys/vm/swap_pager.h
+++ b/sys/vm/swap_pager.h
@@ -92,6 +92,7 @@ extern int vm_swap_max;
 extern int vm_swap_cache_use;
 extern int vm_swap_anon_use;
 extern int vm_swapcache_read_enable;
+extern int vm_swapcache_inactive_heuristic;
 extern struct blist *swapblist;
 
 void swap_pager_putpages (vm_object_t, struct vm_page **, int, boolean_t, int *);
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index f905bc1eb3..09c8382ca0 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -1148,6 +1148,7 @@ vm_page_unwire(vm_page_t m, int activate)
 			m->queue = PQ_INACTIVE;
 			vm_page_queues[PQ_INACTIVE].lcnt++;
 			vmstats.v_inactive_count++;
+			++vm_swapcache_inactive_heuristic;
 		}
 	}
 }
@@ -1179,10 +1180,14 @@ _vm_page_deactivate(vm_page_t m, int athead)
 		mycpu->gd_cnt.v_reactivated++;
 		vm_page_flag_clear(m, PG_WINATCFLS);
 		vm_page_unqueue(m);
-		if (athead)
-			TAILQ_INSERT_HEAD(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
-		else
-			TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
+		if (athead) {
+			TAILQ_INSERT_HEAD(&vm_page_queues[PQ_INACTIVE].pl,
+					  m, pageq);
+		} else {
+			TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE].pl,
+					  m, pageq);
+			++vm_swapcache_inactive_heuristic;
+		}
 		m->queue = PQ_INACTIVE;
 		vm_page_queues[PQ_INACTIVE].lcnt++;
 		vmstats.v_inactive_count++;
diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c
index eaa9170b61..eb13c92f27 100644
--- a/sys/vm/vm_pageout.c
+++ b/sys/vm/vm_pageout.c
@@ -794,6 +794,7 @@ rescan0:
 		if (m->hold_count) {
 			TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
 			TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
+			++vm_swapcache_inactive_heuristic;
 			continue;
 		}
 
@@ -893,6 +894,7 @@ rescan0:
 			vm_page_flag_set(m, PG_WINATCFLS);
 			TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
 			TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
+			++vm_swapcache_inactive_heuristic;
 		} else if (maxlaunder > 0) {
 			/*
 			 * We always want to try to flush some dirty pages if
@@ -922,6 +924,7 @@ rescan0:
 			if (!swap_pageouts_ok || (object->flags & OBJ_DEAD)) {
 				TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
 				TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
+				++vm_swapcache_inactive_heuristic;
 				continue;
 			}
 
@@ -989,6 +992,7 @@ rescan0:
 			if (m->hold_count) {
 				TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
 				TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
+				++vm_swapcache_inactive_heuristic;
 				if (object->flags & OBJ_MIGHTBEDIRTY)
 					vnodes_skipped++;
 				vput(vp);
diff --git a/sys/vm/vm_swapcache.c b/sys/vm/vm_swapcache.c
index 3cf5d45101..790f029356 100644
--- a/sys/vm/vm_swapcache.c
+++ b/sys/vm/vm_swapcache.c
@@ -95,11 +95,13 @@ SYSINIT(swapcached, SI_SUB_KTHREAD_PAGE, SI_ORDER_SECOND, kproc_start, &swpc_kp)
 SYSCTL_NODE(_vm, OID_AUTO, swapcache, CTLFLAG_RW, NULL, NULL);
 
 int vm_swapcache_read_enable;
+int vm_swapcache_inactive_heuristic;
 static int vm_swapcache_sleep;
 static int vm_swapcache_maxlaunder = 256;
 static int vm_swapcache_data_enable = 0;
 static int vm_swapcache_meta_enable = 0;
 static int vm_swapcache_maxswappct = 75;
+static int vm_swapcache_hysteresis;
 static int vm_swapcache_use_chflags = 1;	/* require chflags cache */
 static int64_t vm_swapcache_minburst = 10000000LL;	/* 10MB */
 static int64_t vm_swapcache_curburst = 4000000000LL;	/* 4G after boot */
@@ -119,6 +121,8 @@ SYSCTL_INT(_vm_swapcache, OID_AUTO, read_enable, CTLFLAG_RW,
 	&vm_swapcache_read_enable, 0, "");
 SYSCTL_INT(_vm_swapcache, OID_AUTO, maxswappct, CTLFLAG_RW,
 	&vm_swapcache_maxswappct, 0, "");
+SYSCTL_INT(_vm_swapcache, OID_AUTO, hysteresis,
+	CTLFLAG_RW, &vm_swapcache_hysteresis, 0, "");
 SYSCTL_INT(_vm_swapcache, OID_AUTO, use_chflags, CTLFLAG_RW,
 	&vm_swapcache_use_chflags, 0, "");
 
@@ -163,6 +167,8 @@ vm_swapcached(void)
 	page_marker.queue = PQ_INACTIVE;
 	page_marker.wire_count = 1;
 	TAILQ_INSERT_HEAD(INACTIVE_LIST, &page_marker, pageq);
+	vm_swapcache_hysteresis = vmstats.v_inactive_target / 2;
+	vm_swapcache_inactive_heuristic = -vm_swapcache_hysteresis;
 
 	/*
 	 * Initialize our marker for the vm_object scan (SWAPC_CLEANING)
@@ -246,6 +252,15 @@ vm_swapcache_writing(vm_page_t marker)
 	vm_page_t m;
 	int count;
 
+	/*
+	 * Try to avoid small incremental pageouts by waiting for enough
+	 * pages to buildup in the inactive queue to hopefully get a good
+	 * burst in.  This heuristic is bumped by the VM system and reset
+	 * when our scan hits the end of the queue.
+	 */
+	if (vm_swapcache_inactive_heuristic < 0)
+		return;
+
 	/*
 	 * Scan the inactive queue from our marker to locate
 	 * suitable pages to push to the swap cache.
@@ -330,10 +345,12 @@
 	 * buffer cache.
 	 */
 	TAILQ_REMOVE(INACTIVE_LIST, marker, pageq);
-	if (m)
+	if (m) {
 		TAILQ_INSERT_BEFORE(m, marker, pageq);
-	else
+	} else {
 		TAILQ_INSERT_TAIL(INACTIVE_LIST, marker, pageq);
+		vm_swapcache_inactive_heuristic = -vm_swapcache_hysteresis;
+	}
 }
 
 /*
-- 
2.41.0
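
The tmpfs_nrmdir() bug is worth spelling out: in the previous commit the
v_type prevalidation ran before cache_vref() had assigned vp, so the test
dereferenced an uninitialized pointer. The fix moves the check after the
vnode is acquired and routes the failure through the function's common
cleanup label so the new reference is released. Below is a minimal userland
sketch of that corrected ordering; struct vnode, fake_cache_vref(), and
nrmdir_like() are hypothetical stand-ins that only model the control flow,
not the real kernel APIs.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for the kernel types and calls involved. */
struct vnode { int v_type; };
enum { VREG, VDIR };
#define ENOTDIR 20

static int
fake_cache_vref(struct vnode **vpp)	/* plays the role of cache_vref() */
{
	*vpp = malloc(sizeof(**vpp));
	if (*vpp == NULL)
		return 1;
	(*vpp)->v_type = VREG;		/* pretend the target is not a dir */
	return 0;
}

static int
nrmdir_like(void)
{
	struct vnode *vp;	/* uninitialized until fake_cache_vref() */
	int error;

	/*
	 * The previous commit tested vp->v_type at this point, reading
	 * through an uninitialized pointer.  The fix defers the check
	 * until after the vnode has actually been acquired.
	 */
	error = fake_cache_vref(&vp);
	if (error)
		return error;

	if (vp->v_type != VDIR) {	/* safe: vp is valid here */
		error = ENOTDIR;
		goto out;		/* shared cleanup, as in the patch */
	}
	/* ... the directory removal proper would go here ... */
	error = 0;
out:
	free(vp);			/* models the release on the out path */
	return error;
}

int
main(void)
{
	printf("nrmdir_like() -> %d (expect ENOTDIR = 20)\n", nrmdir_like());
	return 0;
}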
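The vm_swapcache changes implement a simple hysteresis: each page entering
the tail of the inactive queue bumps vm_swapcache_inactive_heuristic,
vm_swapcache_writing() declines to run while the counter is still negative,
and the counter is re-armed to -vm_swapcache_hysteresis when the writer's
marker wraps to the end of the queue, so pageouts happen in large bursts
rather than small dribbles. The following is a deliberately simplified,
self-contained model of that counter, not kernel code: it conflates the
end-of-queue reset with a per-burst reset, and the constant 500 merely
stands in for v_inactive_target / 2.

#include <stdio.h>

static int vm_swapcache_hysteresis = 500;	/* ~ v_inactive_target / 2 */
static int vm_swapcache_inactive_heuristic;

static void
page_deactivated(void)
{
	/* Bumped by the VM system, as in vm_page.c / vm_pageout.c. */
	++vm_swapcache_inactive_heuristic;
}

static int
swapcache_writing(void)
{
	if (vm_swapcache_inactive_heuristic < 0)
		return 0;		/* not enough buildup yet; skip */
	/* ... push a burst of inactive pages to the swap cache ... */
	vm_swapcache_inactive_heuristic = -vm_swapcache_hysteresis;
	return 1;			/* wrote one burst, re-armed */
}

int
main(void)
{
	int bursts = 0;

	/* Initial arming, as done in vm_swapcached(). */
	vm_swapcache_inactive_heuristic = -vm_swapcache_hysteresis;
	for (int i = 0; i < 2000; ++i) {	/* simulate deactivations */
		page_deactivated();
		bursts += swapcache_writing();
	}
	printf("bursts: %d (expect 4: one per 500 deactivations)\n", bursts);
	return 0;
}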