kernel - TMPFS - Stabilization pass, fix assertion in nrmdir (again)
author: Matthew Dillon <dillon@apollo.backplane.com>
Sun, 21 Feb 2010 19:41:03 +0000 (11:41 -0800)
committer: Matthew Dillon <dillon@apollo.backplane.com>
Sun, 21 Feb 2010 19:41:03 +0000 (11:41 -0800)
* Oops, fix bug in last commit.  vp was garbage where it was being tested.

sys/vfs/tmpfs/tmpfs_vnops.c
sys/vm/swap_pager.h
sys/vm/vm_page.c
sys/vm/vm_pageout.c
sys/vm/vm_swapcache.c

index 76e4598..8147c35 100644 (file)
@@ -1069,12 +1069,6 @@ tmpfs_nrmdir(struct vop_nrmdir_args *v)
        int error;
 
        /*
-        * Prevalidate so we don't hit an assertion later
-        */
-       if (vp->v_type != VDIR)
-               return ENOTDIR;
-
-       /*
         * We have to acquire the vp from v->a_nch because
         * we will likely unresolve the namecache entry, and
         * a vrele is needed to trigger the tmpfs_inactive/tmpfs_reclaim
@@ -1083,6 +1077,14 @@ tmpfs_nrmdir(struct vop_nrmdir_args *v)
        error = cache_vref(v->a_nch, v->a_cred, &vp);
        KKASSERT(error == 0);
 
+       /*
+        * Prevalidate so we don't hit an assertion later
+        */
+       if (vp->v_type != VDIR) {
+               error = ENOTDIR;
+               goto out;
+       }
+
        tmp = VFS_TO_TMPFS(dvp->v_mount);
        dnode = VP_TO_TMPFS_DIR(dvp);
        node = VP_TO_TMPFS_DIR(vp);
index a50f4b9..ab28cd0 100644 (file)
@@ -92,6 +92,7 @@ extern int vm_swap_max;
 extern int vm_swap_cache_use;
 extern int vm_swap_anon_use;
 extern int vm_swapcache_read_enable;
+extern int vm_swapcache_inactive_heuristic;
 extern struct blist *swapblist;
 
 void swap_pager_putpages (vm_object_t, struct vm_page **, int, boolean_t, int *);
index f905bc1..09c8382 100644 (file)
@@ -1148,6 +1148,7 @@ vm_page_unwire(vm_page_t m, int activate)
                                m->queue = PQ_INACTIVE;
                                vm_page_queues[PQ_INACTIVE].lcnt++;
                                vmstats.v_inactive_count++;
+                               ++vm_swapcache_inactive_heuristic;
                        }
                }
        }
@@ -1179,10 +1180,14 @@ _vm_page_deactivate(vm_page_t m, int athead)
                        mycpu->gd_cnt.v_reactivated++;
                vm_page_flag_clear(m, PG_WINATCFLS);
                vm_page_unqueue(m);
-               if (athead)
-                       TAILQ_INSERT_HEAD(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
-               else
-                       TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
+               if (athead) {
+                       TAILQ_INSERT_HEAD(&vm_page_queues[PQ_INACTIVE].pl,
+                                         m, pageq);
+               } else {
+                       TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE].pl,
+                                         m, pageq);
+                       ++vm_swapcache_inactive_heuristic;
+               }
                m->queue = PQ_INACTIVE;
                vm_page_queues[PQ_INACTIVE].lcnt++;
                vmstats.v_inactive_count++;
index eaa9170..eb13c92 100644 (file)
@@ -794,6 +794,7 @@ rescan0:
                if (m->hold_count) {
                        TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
                        TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
+                       ++vm_swapcache_inactive_heuristic;
                        continue;
                }
 
@@ -893,6 +894,7 @@ rescan0:
                        vm_page_flag_set(m, PG_WINATCFLS);
                        TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
                        TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
+                       ++vm_swapcache_inactive_heuristic;
                } else if (maxlaunder > 0) {
                        /*
                         * We always want to try to flush some dirty pages if
@@ -922,6 +924,7 @@ rescan0:
                        if (!swap_pageouts_ok || (object->flags & OBJ_DEAD)) {
                                TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
                                TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
+                               ++vm_swapcache_inactive_heuristic;
                                continue;
                        }
 
@@ -989,6 +992,7 @@ rescan0:
                                if (m->hold_count) {
                                        TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
                                        TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
+                                       ++vm_swapcache_inactive_heuristic;
                                        if (object->flags & OBJ_MIGHTBEDIRTY)
                                                vnodes_skipped++;
                                        vput(vp);
index 3cf5d45..790f029 100644 (file)
@@ -95,11 +95,13 @@ SYSINIT(swapcached, SI_SUB_KTHREAD_PAGE, SI_ORDER_SECOND, kproc_start, &swpc_kp)
 SYSCTL_NODE(_vm, OID_AUTO, swapcache, CTLFLAG_RW, NULL, NULL);
 
 int vm_swapcache_read_enable;
+int vm_swapcache_inactive_heuristic;
 static int vm_swapcache_sleep;
 static int vm_swapcache_maxlaunder = 256;
 static int vm_swapcache_data_enable = 0;
 static int vm_swapcache_meta_enable = 0;
 static int vm_swapcache_maxswappct = 75;
+static int vm_swapcache_hysteresis;
 static int vm_swapcache_use_chflags = 1;       /* require chflags cache */
 static int64_t vm_swapcache_minburst = 10000000LL;     /* 10MB */
 static int64_t vm_swapcache_curburst = 4000000000LL;   /* 4G after boot */
@@ -119,6 +121,8 @@ SYSCTL_INT(_vm_swapcache, OID_AUTO, read_enable,
        CTLFLAG_RW, &vm_swapcache_read_enable, 0, "");
 SYSCTL_INT(_vm_swapcache, OID_AUTO, maxswappct,
        CTLFLAG_RW, &vm_swapcache_maxswappct, 0, "");
+SYSCTL_INT(_vm_swapcache, OID_AUTO, hysteresis,
+       CTLFLAG_RW, &vm_swapcache_hysteresis, 0, "");
 SYSCTL_INT(_vm_swapcache, OID_AUTO, use_chflags,
        CTLFLAG_RW, &vm_swapcache_use_chflags, 0, "");
 
@@ -163,6 +167,8 @@ vm_swapcached(void)
        page_marker.queue = PQ_INACTIVE;
        page_marker.wire_count = 1;
        TAILQ_INSERT_HEAD(INACTIVE_LIST, &page_marker, pageq);
+       vm_swapcache_hysteresis = vmstats.v_inactive_target / 2;
+       vm_swapcache_inactive_heuristic = -vm_swapcache_hysteresis;
 
        /*
         * Initialize our marker for the vm_object scan (SWAPC_CLEANING)
@@ -247,6 +253,15 @@ vm_swapcache_writing(vm_page_t marker)
        int count;
 
        /*
+        * Try to avoid small incremental pageouts by waiting for enough
+        * pages to buildup in the inactive queue to hopefully get a good
+        * burst in.  This heuristic is bumped by the VM system and reset
+        * when our scan hits the end of the queue.
+        */
+       if (vm_swapcache_inactive_heuristic < 0)
+               return;
+
+       /*
         * Scan the inactive queue from our marker to locate
         * suitable pages to push to the swap cache.
         *
@@ -330,10 +345,12 @@ vm_swapcache_writing(vm_page_t marker)
         * buffer cache.
         */
        TAILQ_REMOVE(INACTIVE_LIST, marker, pageq);
-       if (m)
+       if (m) {
                TAILQ_INSERT_BEFORE(m, marker, pageq);
-       else
+       } else {
                TAILQ_INSERT_TAIL(INACTIVE_LIST, marker, pageq);
+               vm_swapcache_inactive_heuristic = -vm_swapcache_hysteresis;
+       }
 }
 
 /*