HAMMER 59D/Many: Sync with buffer cache changes in HEAD.
author     Matthew Dillon <dillon@dragonflybsd.org>
           Sat, 28 Jun 2008 23:50:37 +0000 (23:50 +0000)
committer  Matthew Dillon <dillon@dragonflybsd.org>
           Sat, 28 Jun 2008 23:50:37 +0000 (23:50 +0000)
* Adjust hammer to limit dirty meta-data buffers based on total bytes
  rather than total buffers.

* Limit to 1/4 of the buffer cache limit (for now)... a better solution
  is needed.

sys/vfs/hammer/hammer.h
sys/vfs/hammer/hammer_flusher.c
sys/vfs/hammer/hammer_io.c
sys/vfs/hammer/hammer_mirror.c
sys/vfs/hammer/hammer_recover.c
sys/vfs/hammer/hammer_vfsops.c

diff --git a/sys/vfs/hammer/hammer.h b/sys/vfs/hammer/hammer.h
index 17d335f..9676e8f 100644
@@ -31,7 +31,7 @@
  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  * 
- * $DragonFly: src/sys/vfs/hammer/hammer.h,v 1.95 2008/06/28 18:10:55 dillon Exp $
+ * $DragonFly: src/sys/vfs/hammer/hammer.h,v 1.96 2008/06/28 23:50:37 dillon Exp $
  */
 /*
  * This header file contains structures used internally by the HAMMERFS
@@ -657,8 +657,8 @@ struct hammer_mount {
        struct hammer_io_list alt_data_list;    /* dirty data buffers */
        struct hammer_io_list meta_list;        /* dirty meta bufs    */
        struct hammer_io_list lose_list;        /* loose buffers      */
-       int     locked_dirty_count;             /* meta/volu count    */
-       int     io_running_count;
+       int     locked_dirty_space;             /* meta/volu count    */
+       int     io_running_space;
        int     objid_cache_count;
        hammer_tid_t    asof;                   /* snapshot mount */
        hammer_off_t    next_tid;
@@ -724,13 +724,13 @@ extern int64_t hammer_stats_btree_elements;
 extern int64_t hammer_stats_btree_splits;
 extern int64_t hammer_stats_btree_iterations;
 extern int64_t hammer_stats_record_iterations;
-extern int hammer_count_dirtybufs;
+extern int hammer_count_dirtybufspace;
 extern int hammer_count_refedbufs;
 extern int hammer_count_reservations;
 extern int hammer_count_io_running_read;
 extern int hammer_count_io_running_write;
 extern int hammer_count_io_locked;
-extern int hammer_limit_dirtybufs;
+extern int hammer_limit_dirtybufspace;
 extern int hammer_limit_iqueued;
 extern int hammer_limit_recs;
 extern int hammer_bio_count;
diff --git a/sys/vfs/hammer/hammer_flusher.c b/sys/vfs/hammer/hammer_flusher.c
index 6765f44..6403885 100644
@@ -31,7 +31,7 @@
  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  * 
- * $DragonFly: src/sys/vfs/hammer/hammer_flusher.c,v 1.30 2008/06/27 20:56:59 dillon Exp $
+ * $DragonFly: src/sys/vfs/hammer/hammer_flusher.c,v 1.31 2008/06/28 23:50:37 dillon Exp $
  */
 /*
  * HAMMER dependancy flusher thread
@@ -582,8 +582,8 @@ hammer_flusher_finalize(hammer_transaction_t trans, int final)
 int
 hammer_flusher_meta_limit(hammer_mount_t hmp)
 {
-       if (hmp->locked_dirty_count + hmp->io_running_count >
-           hammer_limit_dirtybufs) {
+       if (hmp->locked_dirty_space + hmp->io_running_space >
+           hammer_limit_dirtybufspace) {
                return(1);
        }
        return(0);
diff --git a/sys/vfs/hammer/hammer_io.c b/sys/vfs/hammer/hammer_io.c
index 64292ce..afd16e1 100644
@@ -31,7 +31,7 @@
  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  * 
- * $DragonFly: src/sys/vfs/hammer/hammer_io.c,v 1.46 2008/06/23 07:31:14 dillon Exp $
+ * $DragonFly: src/sys/vfs/hammer/hammer_io.c,v 1.47 2008/06/28 23:50:37 dillon Exp $
  */
 /*
  * IO Primitives and buffer cache management
@@ -154,8 +154,8 @@ void
 hammer_io_wait_all(hammer_mount_t hmp, const char *ident)
 {
        crit_enter();
-       while (hmp->io_running_count)
-               tsleep(&hmp->io_running_count, 0, ident, 0);
+       while (hmp->io_running_space)
+               tsleep(&hmp->io_running_space, 0, ident, 0);
        crit_exit();
 }
 
@@ -180,7 +180,7 @@ hammer_io_read(struct vnode *devvp, struct hammer_io *io, hammer_off_t limit)
        int   error;
 
        if ((bp = io->bp) == NULL) {
-               ++hammer_count_io_running_read;
+               hammer_count_io_running_read += io->bytes;
 #if 1
                error = cluster_read(devvp, limit, io->offset, io->bytes,
                                     HAMMER_CLUSTER_SIZE,
@@ -188,7 +188,7 @@ hammer_io_read(struct vnode *devvp, struct hammer_io *io, hammer_off_t limit)
 #else
                error = bread(devvp, io->offset, io->bytes, &io->bp);
 #endif
-               --hammer_count_io_running_read;
+               hammer_count_io_running_read -= io->bytes;
                if (error == 0) {
                        bp = io->bp;
                        bp->b_ops = &hammer_bioops;
@@ -466,8 +466,8 @@ hammer_io_flush(struct hammer_io *io)
         * Transfer ownership to the kernel and initiate I/O.
         */
        io->running = 1;
-       ++io->hmp->io_running_count;
-       ++hammer_count_io_running_write;
+       io->hmp->io_running_space += io->bytes;
+       hammer_count_io_running_write += io->bytes;
        bawrite(bp);
 }
 
@@ -516,13 +516,13 @@ hammer_io_modify(hammer_io_t io, int count)
                switch(io->type) {
                case HAMMER_STRUCTURE_VOLUME:
                        io->mod_list = &hmp->volu_list;
-                       ++hmp->locked_dirty_count;
-                       ++hammer_count_dirtybufs;
+                       hmp->locked_dirty_space += io->bytes;
+                       hammer_count_dirtybufspace += io->bytes;
                        break;
                case HAMMER_STRUCTURE_META_BUFFER:
                        io->mod_list = &hmp->meta_list;
-                       ++hmp->locked_dirty_count;
-                       ++hammer_count_dirtybufs;
+                       hmp->locked_dirty_space += io->bytes;
+                       hammer_count_dirtybufspace += io->bytes;
                        break;
                case HAMMER_STRUCTURE_UNDO_BUFFER:
                        io->mod_list = &hmp->undo_list;
@@ -654,8 +654,8 @@ hammer_io_clear_modify(struct hammer_io *io, int inval)
        KKASSERT(io->mod_list != NULL);
        if (io->mod_list == &io->hmp->volu_list ||
            io->mod_list == &io->hmp->meta_list) {
-               --io->hmp->locked_dirty_count;
-               --hammer_count_dirtybufs;
+               io->hmp->locked_dirty_space -= io->bytes;
+               hammer_count_dirtybufspace -= io->bytes;
        }
        TAILQ_REMOVE(io->mod_list, io, mod_entry);
        io->mod_list = NULL;
@@ -742,10 +742,11 @@ hammer_io_complete(struct buf *bp)
         * Deal with people waiting for I/O to drain
         */
        if (iou->io.running) {
-               --hammer_count_io_running_write;
-               if (--iou->io.hmp->io_running_count == 0)
-                       wakeup(&iou->io.hmp->io_running_count);
-               KKASSERT(iou->io.hmp->io_running_count >= 0);
+               hammer_count_io_running_write -= iou->io.bytes;
+               iou->io.hmp->io_running_space -= iou->io.bytes;
+               if (iou->io.hmp->io_running_space == 0)
+                       wakeup(&iou->io.hmp->io_running_space);
+               KKASSERT(iou->io.hmp->io_running_space >= 0);
                iou->io.running = 0;
        }
 
@@ -884,8 +885,8 @@ hammer_io_checkwrite(struct buf *bp)
         */
        KKASSERT(io->running == 0);
        io->running = 1;
-       ++io->hmp->io_running_count;
-       ++hammer_count_io_running_write;
+       io->hmp->io_running_space += io->bytes;
+       hammer_count_io_running_write += io->bytes;
        return(0);
 }
 
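
  Taken together, the hammer_io.c changes keep the same start / complete /
  drain protocol; only the counter is now a byte total.  A rough sketch with
  hypothetical helper names (the real code lives in hammer_io_flush(),
  hammer_io_checkwrite(), hammer_io_complete() and hammer_io_wait_all());
  locking and error handling are elided:

    static void
    io_write_start(hammer_mount_t hmp, hammer_io_t io)
    {
            io->running = 1;
            hmp->io_running_space += io->bytes;     /* bytes, not a count */
    }

    static void
    io_write_complete(hammer_mount_t hmp, hammer_io_t io)
    {
            hmp->io_running_space -= io->bytes;
            if (hmp->io_running_space == 0)
                    wakeup(&hmp->io_running_space); /* releases waiters */
            io->running = 0;
    }

    static void
    io_wait_all(hammer_mount_t hmp, const char *ident)
    {
            while (hmp->io_running_space > 0)
                    tsleep(&hmp->io_running_space, 0, ident, 0);
    }
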
diff --git a/sys/vfs/hammer/hammer_mirror.c b/sys/vfs/hammer/hammer_mirror.c
index 654a8d2..5413830 100644
@@ -31,7 +31,7 @@
  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  * 
- * $DragonFly: src/sys/vfs/hammer/hammer_mirror.c,v 1.3 2008/06/27 20:56:59 dillon Exp $
+ * $DragonFly: src/sys/vfs/hammer/hammer_mirror.c,v 1.4 2008/06/28 23:50:37 dillon Exp $
  */
 /*
  * HAMMER mirroring ioctls - serialize and deserialize modifications made
@@ -129,8 +129,8 @@ retry:
                if (trans->hmp->sync_lock.wanted) {
                        tsleep(trans, 0, "hmrslo", hz / 10);
                }
-               if (trans->hmp->locked_dirty_count +
-                   trans->hmp->io_running_count > hammer_limit_dirtybufs) {
+               if (trans->hmp->locked_dirty_space +
+                   trans->hmp->io_running_space > hammer_limit_dirtybufspace) {
                        hammer_flusher_async(trans->hmp);
                        tsleep(trans, 0, "hmrslo", hz / 10);
                }
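
  The write-side throttle pattern used here, kick the flusher and nap until
  the byte totals drop back under the limit, boils down to the following
  sketch (hypothetical helper; the real code performs the test inline, once
  per pass through its retry loop):

    static void
    throttle_dirty_space(hammer_mount_t hmp)
    {
            while (hmp->locked_dirty_space + hmp->io_running_space >
                   hammer_limit_dirtybufspace) {
                    hammer_flusher_async(hmp);          /* start a flush */
                    tsleep(hmp, 0, "hmrslo", hz / 10);  /* timed nap */
            }
    }
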
diff --git a/sys/vfs/hammer/hammer_recover.c b/sys/vfs/hammer/hammer_recover.c
index a0364f9..63efc7b 100644
@@ -31,7 +31,7 @@
  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  * 
- * $DragonFly: src/sys/vfs/hammer/hammer_recover.c,v 1.26 2008/06/27 20:56:59 dillon Exp $
+ * $DragonFly: src/sys/vfs/hammer/hammer_recover.c,v 1.27 2008/06/28 23:50:37 dillon Exp $
  */
 
 #include "hammer.h"
@@ -468,8 +468,8 @@ hammer_recover_flush_buffers(hammer_mount_t hmp, hammer_volume_t root_volume,
         */
        if (root_volume->io.recovered && final) {
                crit_enter();
-               while (hmp->io_running_count)
-                       tsleep(&hmp->io_running_count, 0, "hmrflx", 0);
+               while (hmp->io_running_space > 0)
+                       tsleep(&hmp->io_running_space, 0, "hmrflx", 0);
                crit_exit();
                root_volume->io.recovered = 0;
                hammer_io_flush(&root_volume->io);
diff --git a/sys/vfs/hammer/hammer_vfsops.c b/sys/vfs/hammer/hammer_vfsops.c
index 4e859a3..3d50110 100644
@@ -31,7 +31,7 @@
  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  * 
- * $DragonFly: src/sys/vfs/hammer/hammer_vfsops.c,v 1.55 2008/06/27 20:56:59 dillon Exp $
+ * $DragonFly: src/sys/vfs/hammer/hammer_vfsops.c,v 1.56 2008/06/28 23:50:37 dillon Exp $
  */
 
 #include <sys/param.h>
@@ -73,13 +73,13 @@ int64_t hammer_stats_btree_elements;
 int64_t hammer_stats_btree_splits;
 int64_t hammer_stats_btree_iterations;
 int64_t hammer_stats_record_iterations;
-int hammer_count_dirtybufs;            /* global */
+int hammer_count_dirtybufspace;                /* global */
 int hammer_count_refedbufs;            /* global */
 int hammer_count_reservations;
 int hammer_count_io_running_read;
 int hammer_count_io_running_write;
 int hammer_count_io_locked;
-int hammer_limit_dirtybufs;            /* per-mount */
+int hammer_limit_dirtybufspace;                /* per-mount */
 int hammer_limit_recs;                 /* as a whole XXX */
 int hammer_limit_iqueued;              /* per-mount */
 int hammer_bio_count;
@@ -110,8 +110,8 @@ SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_recover_faults, CTLFLAG_RW,
 SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_cluster_enable, CTLFLAG_RW,
           &hammer_debug_cluster_enable, 0, "");
 
-SYSCTL_INT(_vfs_hammer, OID_AUTO, limit_dirtybufs, CTLFLAG_RW,
-          &hammer_limit_dirtybufs, 0, "");
+SYSCTL_INT(_vfs_hammer, OID_AUTO, limit_dirtybufspace, CTLFLAG_RW,
+          &hammer_limit_dirtybufspace, 0, "");
 SYSCTL_INT(_vfs_hammer, OID_AUTO, limit_recs, CTLFLAG_RW,
           &hammer_limit_recs, 0, "");
 SYSCTL_INT(_vfs_hammer, OID_AUTO, limit_iqueued, CTLFLAG_RW,
@@ -149,8 +149,8 @@ SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_iterations, CTLFLAG_RD,
           &hammer_stats_btree_iterations, 0, "");
 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_record_iterations, CTLFLAG_RD,
           &hammer_stats_record_iterations, 0, "");
-SYSCTL_INT(_vfs_hammer, OID_AUTO, count_dirtybufs, CTLFLAG_RD,
-          &hammer_count_dirtybufs, 0, "");
+SYSCTL_INT(_vfs_hammer, OID_AUTO, count_dirtybufspace, CTLFLAG_RD,
+          &hammer_count_dirtybufspace, 0, "");
 SYSCTL_INT(_vfs_hammer, OID_AUTO, count_refedbufs, CTLFLAG_RD,
           &hammer_count_refedbufs, 0, "");
 SYSCTL_INT(_vfs_hammer, OID_AUTO, count_reservations, CTLFLAG_RD,
@@ -220,10 +220,10 @@ hammer_vfs_init(struct vfsconf *conf)
 {
        if (hammer_limit_recs == 0)             /* XXX TODO */
                hammer_limit_recs = nbuf * 25;
-       if (hammer_limit_dirtybufs == 0) {
-               hammer_limit_dirtybufs = hidirtybuffers / 2;
-               if (hammer_limit_dirtybufs < 100)
-                       hammer_limit_dirtybufs = 100;
+       if (hammer_limit_dirtybufspace == 0) {
+               hammer_limit_dirtybufspace = hidirtybufspace / 2;
+               if (hammer_limit_dirtybufspace < 100)
+                       hammer_limit_dirtybufspace = 100;
        }
        if (hammer_limit_iqueued == 0)
                hammer_limit_iqueued = desiredvnodes / 5;
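
  With the sysctl nodes renamed, the byte-based knobs show up in userland as
  vfs.hammer.limit_dirtybufspace (read-write) and
  vfs.hammer.count_dirtybufspace (read-only).  A small userland sketch, not
  part of the commit, for reading them with sysctlbyname(3):

    #include <sys/types.h>
    #include <sys/sysctl.h>
    #include <stdio.h>

    int
    main(void)
    {
            int val;
            size_t len;

            len = sizeof(val);
            if (sysctlbyname("vfs.hammer.limit_dirtybufspace",
                             &val, &len, NULL, 0) == 0)
                    printf("limit_dirtybufspace: %d bytes\n", val);

            len = sizeof(val);
            if (sysctlbyname("vfs.hammer.count_dirtybufspace",
                             &val, &len, NULL, 0) == 0)
                    printf("count_dirtybufspace: %d bytes\n", val);
            return (0);
    }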