HAMMER - Add vfs.hammer.yield_check, yield in cpu-intensive loops
author      Matthew Dillon <dillon@apollo.backplane.com>
            Sun, 16 Aug 2009 23:18:11 +0000 (16:18 -0700)
committer   Matthew Dillon <dillon@apollo.backplane.com>
            Sun, 16 Aug 2009 23:18:11 +0000 (16:18 -0700)
* When running in the kernel, HAMMER can wind up cpu-bound.  This code
  allows it to yield the cpu to other processes during such periods.  It
  is a bit of a hack and may undergo further development.

* By default, check for a yield every 16 B-tree iterations (the same
  counter is also bumped per inode in the flusher loop); the limit is
  tunable via the new vfs.hammer.yield_check sysctl.  The check pattern
  is sketched below.
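
  A minimal sketch of the check, mirroring the hunks below (hmp is the
  per-mount hammer_mount_t, hammer_yield_check the sysctl'd limit):

	/* bump the per-mount counter; yield once the limit is exceeded */
	if (++hmp->check_yield > hammer_yield_check) {
		hmp->check_yield = 0;		/* restart the interval */
		lwkt_user_yield();		/* give other threads a chance to run */
	}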

sys/vfs/hammer/hammer.h
sys/vfs/hammer/hammer_btree.c
sys/vfs/hammer/hammer_flusher.c
sys/vfs/hammer/hammer_vfsops.c

sys/vfs/hammer/hammer.h
index d0c7952..a2b198a 100644
@@ -750,6 +750,7 @@ struct hammer_mount {
        struct hammer_flusher flusher;
 
        u_int   check_interrupt;
+       u_int   check_yield;
        uuid_t  fsid;
        struct hammer_io_list volu_list;        /* dirty undo buffers */
        struct hammer_io_list undo_list;        /* dirty undo buffers */
@@ -862,6 +863,7 @@ extern int hammer_bio_count;
 extern int hammer_verify_zone;
 extern int hammer_verify_data;
 extern int hammer_write_mode;
+extern int hammer_yield_check;
 extern int hammer_autoflush;
 extern int64_t hammer_contention_count;
 
sys/vfs/hammer/hammer_btree.c
index 9305e19..2c05e12 100644
@@ -117,6 +117,7 @@ hammer_btree_iterate(hammer_cursor_t cursor)
 {
        hammer_node_ondisk_t node;
        hammer_btree_elm_t elm;
+       hammer_mount_t hmp;
        int error = 0;
        int r;
        int s;
@@ -124,6 +125,7 @@ hammer_btree_iterate(hammer_cursor_t cursor)
        /*
         * Skip past the current record
         */
+       hmp = cursor->trans->hmp;
        node = cursor->node->ondisk;
        if (node == NULL)
                return(ENOENT);
@@ -133,6 +135,15 @@ hammer_btree_iterate(hammer_cursor_t cursor)
        }
 
        /*
+        * HAMMER can wind up being cpu-bound.
+        */
+       if (++hmp->check_yield > hammer_yield_check) {
+               hmp->check_yield = 0;
+               lwkt_user_yield();
+       }
+
+
+       /*
         * Loop until an element is found or we are done.
         */
        for (;;) {
@@ -150,7 +161,7 @@ hammer_btree_iterate(hammer_cursor_t cursor)
                 * up our scan.
                 */
                ++hammer_stats_btree_iterations;
-               hammer_flusher_clean_loose_ios(cursor->trans->hmp);
+               hammer_flusher_clean_loose_ios(hmp);
 
                if (cursor->index == node->count) {
                        if (hammer_debug_btree) {
sys/vfs/hammer/hammer_flusher.c
index f6a7fef..28cbe3b 100644
@@ -319,6 +319,11 @@ hammer_flusher_flush(hammer_mount_t hmp)
                while ((ip = next_ip) != NULL) {
                        next_ip = TAILQ_NEXT(ip, flush_entry);
 
+                       if (++hmp->check_yield > hammer_yield_check) {
+                               hmp->check_yield = 0;
+                               lwkt_user_yield();
+                       }
+
                        /*
                         * Add ip to the slave's work array.  The slave is
                         * not currently running.
sys/vfs/hammer/hammer_vfsops.c
index ebcd0be..aa30526 100644
@@ -100,6 +100,7 @@ int hammer_bio_count;
 int hammer_verify_zone;
 int hammer_verify_data = 1;
 int hammer_write_mode;
+int hammer_yield_check = 16;
 int64_t hammer_contention_count;
 int64_t hammer_zone_limit;
 
@@ -213,6 +214,8 @@ SYSCTL_INT(_vfs_hammer, OID_AUTO, verify_data, CTLFLAG_RW,
           &hammer_verify_data, 0, "");
 SYSCTL_INT(_vfs_hammer, OID_AUTO, write_mode, CTLFLAG_RW,
           &hammer_write_mode, 0, "");
+SYSCTL_INT(_vfs_hammer, OID_AUTO, yield_check, CTLFLAG_RW,
+          &hammer_yield_check, 0, "");
 
 KTR_INFO_MASTER(hammer);
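
The new vfs.hammer.yield_check sysctl is registered CTLFLAG_RW, so the limit
can be adjusted on a live system, e.g. "sysctl vfs.hammer.yield_check=32"
(32 is just an example value); larger values yield less often, smaller
values yield more aggressively.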