HAMMER 40F/Many: UNDO cleanup & stabilization.
author Matthew Dillon <dillon@dragonflybsd.org>
Sun, 4 May 2008 09:06:45 +0000 (09:06 +0000)
committer Matthew Dillon <dillon@dragonflybsd.org>
Sun, 4 May 2008 09:06:45 +0000 (09:06 +0000)
* Properly classify UNDO zone buffers so they are flushed at the correct
  point in time.

* Minor rewrite of the code tracking the UNDO demark for the next flush.

* Introduce a considerably better backend flushing activation algorithm
  to avoid single-buffer flushes.

* Put a lock around the freemap allocator.

sys/vfs/hammer/hammer.h
sys/vfs/hammer/hammer_btree.c
sys/vfs/hammer/hammer_flusher.c
sys/vfs/hammer/hammer_freemap.c
sys/vfs/hammer/hammer_inode.c
sys/vfs/hammer/hammer_io.c
sys/vfs/hammer/hammer_recover.c
sys/vfs/hammer/hammer_undo.c
sys/vfs/hammer/hammer_vfsops.c
sys/vfs/hammer/hammer_vnops.c

index 914e269..b3504ff 100644 (file)
@@ -31,7 +31,7 @@
  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  * 
- * $DragonFly: src/sys/vfs/hammer/hammer.h,v 1.59 2008/05/03 20:21:20 dillon Exp $
+ * $DragonFly: src/sys/vfs/hammer/hammer.h,v 1.60 2008/05/04 09:06:45 dillon Exp $
  */
 /*
  * This header file contains structures used internally by the HAMMERFS
@@ -544,6 +544,7 @@ struct hammer_mount {
        int     flusher_next;   /* next flush group */
        int     flusher_lock;   /* lock sequencing of the next flush */
        int     flusher_exiting;
+       hammer_off_t flusher_undo_start; /* UNDO window for flushes */
        int     reclaim_count;
        thread_t flusher_td;
        u_int   check_interrupt;
@@ -563,6 +564,7 @@ struct hammer_mount {
        hammer_off_t zone_limits[HAMMER_MAX_ZONES];
        struct netexport export;
        struct hammer_lock sync_lock;
+       struct hammer_lock free_lock;
        struct lock blockmap_lock;
        struct hammer_blockmap  blockmap[HAMMER_MAX_ZONES];
        struct hammer_holes     holes[HAMMER_MAX_ZONES];
@@ -767,6 +769,7 @@ hammer_off_t hammer_blockmap_lookup(hammer_mount_t hmp, hammer_off_t bmap_off,
                        int *errorp);
 hammer_off_t hammer_undo_lookup(hammer_mount_t hmp, hammer_off_t bmap_off,
                        int *errorp);
+int64_t hammer_undo_used(hammer_mount_t hmp);
 int64_t hammer_undo_space(hammer_mount_t hmp);
 int64_t hammer_undo_max(hammer_mount_t hmp);
 
@@ -895,4 +898,8 @@ hammer_modify_record_done(hammer_buffer_t buffer)
        hammer_modify_volume(trans, vol, &(vol)->ondisk->field, \
                             sizeof((vol)->ondisk->field))
 
+#define hammer_modify_node_field(trans, node, field)           \
+       hammer_modify_node(trans, node, &(node)->ondisk->field, \
+                            sizeof((node)->ondisk->field))
+
 
index 8812691..4918b44 100644 (file)
@@ -31,7 +31,7 @@
  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  * 
- * $DragonFly: src/sys/vfs/hammer/hammer_btree.c,v 1.41 2008/05/03 20:21:20 dillon Exp $
+ * $DragonFly: src/sys/vfs/hammer/hammer_btree.c,v 1.42 2008/05/04 09:06:45 dillon Exp $
  */
 
 /*
@@ -1009,9 +1009,8 @@ re_search:
                        if ((error = hammer_cursor_upgrade(cursor)) != 0)
                                return(error);
                        KKASSERT(cursor->flags & HAMMER_CURSOR_BACKEND);
-                       hammer_modify_node(cursor->trans, cursor->node,
-                                          &node->elms[0],
-                                          sizeof(node->elms[0]));
+                       hammer_modify_node_field(cursor->trans, cursor->node,
+                                                elms[0]);
                        save = node->elms[0].base.btype;
                        node->elms[0].base = *cursor->left_bound;
                        node->elms[0].base.btype = save;
@@ -2089,9 +2088,7 @@ btree_set_parent(hammer_transaction_t trans, hammer_node_t node,
                child = hammer_get_node(node->hmp,
                                        elm->internal.subtree_offset, &error);
                if (error == 0) {
-                       hammer_modify_node(trans, child,
-                                          &child->ondisk->parent,
-                                          sizeof(child->ondisk->parent));
+                       hammer_modify_node_field(trans, child, parent);
                        child->ondisk->parent = node->node_offset;
                        hammer_modify_node_done(child);
                        hammer_rel_node(child);
index 11a7bd3..6f8a666 100644 (file)
@@ -31,7 +31,7 @@
  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  * 
- * $DragonFly: src/sys/vfs/hammer/hammer_flusher.c,v 1.11 2008/05/03 20:21:20 dillon Exp $
+ * $DragonFly: src/sys/vfs/hammer/hammer_flusher.c,v 1.12 2008/05/04 09:06:45 dillon Exp $
  */
 /*
  * HAMMER dependancy flusher thread
@@ -46,8 +46,9 @@ static void hammer_flusher_thread(void *arg);
 static void hammer_flusher_clean_loose_ios(hammer_mount_t hmp);
 static void hammer_flusher_flush(hammer_mount_t hmp);
 static int hammer_must_finalize_undo(hammer_mount_t hmp);
-static void hammer_flusher_finalize(hammer_transaction_t trans,
-                   hammer_off_t start_offset);
+static void hammer_flusher_finalize(hammer_transaction_t trans);
+
+#define HAMMER_FLUSHER_IMMEDIATE       16
 
 void
 hammer_flusher_sync(hammer_mount_t hmp)
@@ -57,7 +58,7 @@ hammer_flusher_sync(hammer_mount_t hmp)
        if (hmp->flusher_td) {
                seq = hmp->flusher_next;
                if (hmp->flusher_signal == 0) {
-                       hmp->flusher_signal = 1;
+                       hmp->flusher_signal = HAMMER_FLUSHER_IMMEDIATE;
                        wakeup(&hmp->flusher_signal);
                }
                while ((int)(seq - hmp->flusher_done) > 0)
@@ -69,10 +70,8 @@ void
 hammer_flusher_async(hammer_mount_t hmp)
 {
        if (hmp->flusher_td) {
-               if (hmp->flusher_signal == 0) {
-                       hmp->flusher_signal = 1;
+               if (hmp->flusher_signal++ == 0)
                        wakeup(&hmp->flusher_signal);
-               }
        }
 }
 
@@ -93,7 +92,7 @@ hammer_flusher_destroy(hammer_mount_t hmp)
        if (hmp->flusher_td) {
                hmp->flusher_exiting = 1;
                while (hmp->flusher_td) {
-                       hmp->flusher_signal = 1;
+                       hmp->flusher_signal = HAMMER_FLUSHER_IMMEDIATE;
                        wakeup(&hmp->flusher_signal);
                        tsleep(&hmp->flusher_exiting, 0, "hmrwex", 0);
                }
@@ -125,11 +124,21 @@ hammer_flusher_thread(void *arg)
                        break;
                kprintf("E");
 
-               while (hmp->flusher_signal == 0 &&
-                      TAILQ_EMPTY(&hmp->flush_list)) {
-                       tsleep(&hmp->flusher_signal, 0, "hmrwwa", 0);
+               /*
+                * This is a hack until we can dispose of frontend buffer
+                * cache buffers on the frontend.
+                */
+               if (hmp->flusher_signal &&
+                   hmp->flusher_signal < HAMMER_FLUSHER_IMMEDIATE) {
+                       --hmp->flusher_signal;
+                       tsleep(&hmp->flusher_signal, 0, "hmrqwk", hz / 10);
+               } else {
+                       while (hmp->flusher_signal == 0 &&
+                              TAILQ_EMPTY(&hmp->flush_list)) {
+                               tsleep(&hmp->flusher_signal, 0, "hmrwwa", 0);
+                       }
+                       hmp->flusher_signal = 0;
                }
-               hmp->flusher_signal = 0;
        }
        hmp->flusher_td = NULL;
        wakeup(&hmp->flusher_exiting);
@@ -166,11 +175,9 @@ hammer_flusher_flush(hammer_mount_t hmp)
        struct hammer_transaction trans;
        hammer_blockmap_t rootmap;
        hammer_inode_t ip;
-       hammer_off_t start_offset;
 
        hammer_start_transaction_fls(&trans, hmp);
        rootmap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
-       start_offset = rootmap->next_offset;
 
        while ((ip = TAILQ_FIRST(&hmp->flush_list)) != NULL) {
                /*
@@ -192,11 +199,10 @@ hammer_flusher_flush(hammer_mount_t hmp)
                 */
                if (hammer_must_finalize_undo(hmp)) {
                        Debugger("Too many undos!!");
-                       hammer_flusher_finalize(&trans, start_offset);
-                       start_offset = rootmap->next_offset;
+                       hammer_flusher_finalize(&trans);
                }
        }
-       hammer_flusher_finalize(&trans, start_offset);
+       hammer_flusher_finalize(&trans);
        hammer_done_transaction(&trans);
 }
 
@@ -233,84 +239,106 @@ hammer_must_finalize_undo(hammer_mount_t hmp)
  */
 static
 void
-hammer_flusher_finalize(hammer_transaction_t trans, hammer_off_t start_offset)
+hammer_flusher_finalize(hammer_transaction_t trans)
 {
        hammer_mount_t hmp = trans->hmp;
        hammer_volume_t root_volume = trans->rootvol;
        hammer_blockmap_t rootmap;
+       const int bmsize = sizeof(root_volume->ondisk->vol0_blockmap);
        hammer_io_t io;
+       int count;
 
        hammer_lock_ex(&hmp->sync_lock);
+       rootmap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
 
        /*
         * Sync the blockmap to the root volume ondisk buffer and generate
         * the appropriate undo record.  We have to generate the UNDO even
         * though we flush the volume header along with the UNDO fifo update
         * because the meta-data (including the volume header) is flushed
-        * after the fifo update, not before.
+        * after the fifo update, not before, and may have to be undone.
+        *
+        * No UNDOs can be created after this point until we finish the
+        * flush.
         */
-       if (root_volume->io.modified) {
+       if (root_volume->io.modified &&
+           bcmp(hmp->blockmap, root_volume->ondisk->vol0_blockmap, bmsize)) {
                hammer_modify_volume(trans, root_volume,
-                                   &root_volume->ondisk->vol0_blockmap,
-                                   sizeof(root_volume->ondisk->vol0_blockmap));
+                           &root_volume->ondisk->vol0_blockmap,
+                           bmsize);
                bcopy(hmp->blockmap, root_volume->ondisk->vol0_blockmap,
-                     sizeof(root_volume->ondisk->vol0_blockmap));
+                     bmsize);
                hammer_modify_volume_done(root_volume);
        }
 
        /*
-        * Flush undo bufs
+        * Flush the undo bufs, clear the undo cache.
         */
-
        hammer_clear_undo_history(hmp);
 
+       count = 0;
        while ((io = TAILQ_FIRST(&hmp->undo_list)) != NULL) {
                KKASSERT(io->modify_refs == 0);
                hammer_ref(&io->lock);
                KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
                hammer_io_flush(io);
                hammer_rel_buffer((hammer_buffer_t)io, 1);
+               ++count;
        }
+       if (count)
+               kprintf("X%d", count);
 
        /*
         * Flush data bufs
         */
+       count = 0;
        while ((io = TAILQ_FIRST(&hmp->data_list)) != NULL) {
                KKASSERT(io->modify_refs == 0);
                hammer_ref(&io->lock);
                KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
                hammer_io_flush(io);
                hammer_rel_buffer((hammer_buffer_t)io, 1);
+               ++count;
        }
+       if (count)
+               kprintf("Y%d", count);
 
        /*
         * Wait for I/O to complete
         */
        crit_enter();
-       while (hmp->io_running_count) {
-               kprintf("W[%d]", hmp->io_running_count);
+       while (hmp->io_running_count)
                tsleep(&hmp->io_running_count, 0, "hmrfl1", 0);
-       }
        crit_exit();
 
        /*
-        * Move the undo FIFO's markers and flush the root volume header.
-        *
-        * If a crash occurs while the root volume header is being written
-        * we just have to hope that the undo range has been updated.  It
-        * should be done in one I/O but XXX this won't be perfect.
+        * Update the root volume's next_tid field.  This field is updated
+        * without any related undo.
         */
-       rootmap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
-       if (rootmap->first_offset != start_offset) {
+       if (root_volume->ondisk->vol0_next_tid != hmp->next_tid) {
                hammer_modify_volume(NULL, root_volume, NULL, 0);
-               rootmap->first_offset = start_offset;
+               root_volume->ondisk->vol0_next_tid = hmp->next_tid;
                hammer_modify_volume_done(root_volume);
        }
-       if (root_volume->ondisk->vol0_next_tid != hmp->next_tid) {
+
+       /*
+        * Update the UNDO FIFO's first_offset.  Same deal.
+        */
+       if (rootmap->first_offset != hmp->flusher_undo_start) {
                hammer_modify_volume(NULL, root_volume, NULL, 0);
-               root_volume->ondisk->vol0_next_tid = hmp->next_tid;
+               rootmap->first_offset = hmp->flusher_undo_start;
+               root_volume->ondisk->vol0_blockmap[HAMMER_ZONE_UNDO_INDEX].first_offset = rootmap->first_offset;
                hammer_modify_volume_done(root_volume);
        }
+       trans->hmp->flusher_undo_start = rootmap->next_offset;
+
+       /*
+        * Flush the root volume header.
+        *
+        * If a crash occurs while the root volume header is being written
+        * we just have to hope that the undo range has been updated.  It
+        * should be done in one I/O but XXX this won't be perfect.
+        */
        if (root_volume->io.modified)
                hammer_io_flush(&root_volume->io);
 
@@ -318,22 +346,25 @@ hammer_flusher_finalize(hammer_transaction_t trans, hammer_off_t start_offset)
         * Wait for I/O to complete
         */
        crit_enter();
-       while (hmp->io_running_count) {
+       while (hmp->io_running_count)
                tsleep(&hmp->io_running_count, 0, "hmrfl2", 0);
-       }
        crit_exit();
 
        /*
         * Flush meta-data.  The meta-data will be undone if we crash
         * so we can safely flush it asynchronously.
         */
+       count = 0;
        while ((io = TAILQ_FIRST(&hmp->meta_list)) != NULL) {
                KKASSERT(io->modify_refs == 0);
                hammer_ref(&io->lock);
                KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
                hammer_io_flush(io);
                hammer_rel_buffer((hammer_buffer_t)io, 1);
+               ++count;
        }
        hammer_unlock(&hmp->sync_lock);
+       if (count)
+               kprintf("Z%d", count);
 }
 
index 780dd5c..270a611 100644 (file)
@@ -31,7 +31,7 @@
  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  * 
- * $DragonFly: src/sys/vfs/hammer/hammer_freemap.c,v 1.9 2008/05/03 20:21:20 dillon Exp $
+ * $DragonFly: src/sys/vfs/hammer/hammer_freemap.c,v 1.10 2008/05/04 09:06:45 dillon Exp $
  */
 
 /*
@@ -65,6 +65,8 @@ hammer_freemap_alloc(hammer_transaction_t trans, hammer_off_t owner,
        *errorp = 0;
        ondisk = trans->rootvol->ondisk;
 
+       hammer_lock_ex(&trans->hmp->free_lock);
+
        blockmap = &trans->hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];
        result_offset = blockmap->next_offset;
        vol_no = HAMMER_VOL_DECODE(result_offset);
@@ -129,6 +131,7 @@ new_volume:
        blockmap->next_offset = result_offset + HAMMER_LARGEBLOCK_SIZE;
        hammer_modify_volume_done(trans->rootvol);
 done:
+       hammer_unlock(&trans->hmp->free_lock);
        if (buffer1)
                hammer_rel_buffer(buffer1, 0);
        if (buffer2)
index f049032..385ab1b 100644 (file)
@@ -31,7 +31,7 @@
  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  * 
- * $DragonFly: src/sys/vfs/hammer/hammer_inode.c,v 1.49 2008/05/03 20:21:20 dillon Exp $
+ * $DragonFly: src/sys/vfs/hammer/hammer_inode.c,v 1.50 2008/05/04 09:06:45 dillon Exp $
  */
 
 #include "hammer.h"
@@ -281,6 +281,9 @@ retry:
                }
                ip->flags |= HAMMER_INODE_ONDISK;
        } else {
+               kprintf("hammer_get_inode: failed ip %p cursor %p error %d\n",
+                       ip, &cursor, *errorp);
+               /*Debugger("x");*/
                --hammer_count_inodes;
                kfree(ip, M_HAMMER);
                ip = NULL;
index 12e64da..c73e7f7 100644 (file)
@@ -31,7 +31,7 @@
  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  * 
- * $DragonFly: src/sys/vfs/hammer/hammer_io.c,v 1.28 2008/04/27 00:45:37 dillon Exp $
+ * $DragonFly: src/sys/vfs/hammer/hammer_io.c,v 1.29 2008/05/04 09:06:45 dillon Exp $
  */
 /*
  * IO Primitives and buffer cache management
@@ -683,6 +683,9 @@ hammer_io_checkwrite(struct buf *bp)
 {
        hammer_io_t io = (void *)LIST_FIRST(&bp->b_dep);
 
+       KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME &&
+                io->type != HAMMER_STRUCTURE_META_BUFFER);
+
        /*
         * We can only clear the modified bit if the IO is not currently
         * undergoing modification.  Otherwise we may miss changes.
index 6cab2d4..4c08d11 100644 (file)
@@ -31,7 +31,7 @@
  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  * 
- * $DragonFly: src/sys/vfs/hammer/hammer_recover.c,v 1.14 2008/05/03 07:59:06 dillon Exp $
+ * $DragonFly: src/sys/vfs/hammer/hammer_recover.c,v 1.15 2008/05/04 09:06:45 dillon Exp $
  */
 
 #include "hammer.h"
@@ -72,16 +72,23 @@ hammer_recover(hammer_mount_t hmp, hammer_volume_t root_volume)
         * root volume's ondisk buffer directly.
         */
        rootmap = &root_volume->ondisk->vol0_blockmap[HAMMER_ZONE_UNDO_INDEX];
+       hmp->flusher_undo_start = rootmap->next_offset;
+
        if (rootmap->first_offset == rootmap->next_offset)
                return(0);
 
-       if (rootmap->next_offset < rootmap->first_offset)
+       if (rootmap->next_offset >= rootmap->first_offset) {
+               bytes = rootmap->next_offset - rootmap->first_offset;
+       } else {
                bytes = rootmap->alloc_offset - rootmap->first_offset +
-                       rootmap->next_offset;
-       else
-               bytes = (rootmap->next_offset - rootmap->first_offset);
+                       (rootmap->next_offset & HAMMER_OFF_LONG_MASK);
+       }
        kprintf("HAMMER(%s) Start Recovery (%lld bytes of UNDO)\n",
                root_volume->ondisk->vol_name, bytes);
+       if (bytes > (rootmap->alloc_offset & HAMMER_OFF_LONG_MASK)) {
+               kprintf("Undo size is absurd, unable to mount\n");
+               return(EIO);
+       }
 
        /*
         * Scan the UNDOs backwards.
@@ -93,7 +100,7 @@ hammer_recover(hammer_mount_t hmp, hammer_volume_t root_volume)
                        root_volume->ondisk->vol_name,
                        scan_offset);
                error = EIO;
-               goto failed;
+               goto done;
        }
 
        while ((int64_t)bytes > 0) {
@@ -109,7 +116,6 @@ hammer_recover(hammer_mount_t hmp, hammer_volume_t root_volume)
                                "underflow\n",
                                root_volume->ondisk->vol_name,
                                scan_offset);
-                       Debugger("FUBAR");
                        error = EIO;
                        break;
                }
@@ -145,7 +151,11 @@ hammer_recover(hammer_mount_t hmp, hammer_volume_t root_volume)
                scan_offset -= tail->tail_size;
                bytes -= tail->tail_size;
        }
-failed:
+done:
+       /*
+        * Reload flusher_undo_start to kick off the UNDO sequencing.
+        */
+       hmp->flusher_undo_start = rootmap->next_offset;
        if (buffer)
                hammer_rel_buffer(buffer, 0);
        return (error);
index 9466ba1..334b6f8 100644 (file)
@@ -31,7 +31,7 @@
  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  * 
- * $DragonFly: src/sys/vfs/hammer/hammer_undo.c,v 1.10 2008/05/03 20:21:20 dillon Exp $
+ * $DragonFly: src/sys/vfs/hammer/hammer_undo.c,v 1.11 2008/05/04 09:06:45 dillon Exp $
  */
 
 /*
@@ -90,12 +90,9 @@ hammer_generate_undo(hammer_transaction_t trans, hammer_io_t io,
        hammer_volume_ondisk_t ondisk;
        hammer_blockmap_t undomap;
        hammer_buffer_t buffer = NULL;
-       struct hammer_blockmap_layer2 *layer2;
        hammer_fifo_undo_t undo;
        hammer_fifo_tail_t tail;
        hammer_off_t next_offset;
-       hammer_off_t result_offset;
-       int i;
        int error;
        int bytes;
 
@@ -103,9 +100,8 @@ hammer_generate_undo(hammer_transaction_t trans, hammer_io_t io,
         * Enter the offset into our undo history.  If there is an existing
         * undo we do not have to generate a new one.
         */
-       if (hammer_enter_undo_history(trans->hmp, zone_off, len) == EALREADY) {
+       if (hammer_enter_undo_history(trans->hmp, zone_off, len) == EALREADY)
                return(0);
-       }
 
        root_volume = trans->rootvol;
        ondisk = root_volume->ondisk;
@@ -135,12 +131,7 @@ again:
                kprintf("undo zone's next_offset wrapped\n");
        }
 
-       i = (next_offset & HAMMER_OFF_SHORT_MASK) / HAMMER_LARGEBLOCK_SIZE;
-       layer2 = &root_volume->ondisk->vol0_undo_array[i];
-       result_offset = layer2->u.phys_offset +
-                       (next_offset & HAMMER_LARGEBLOCK_MASK64);
-
-       undo = hammer_bread(trans->hmp, result_offset, &error, &buffer);
+       undo = hammer_bread(trans->hmp, next_offset, &error, &buffer);
 
        /*
         * We raced another thread, try again.
@@ -150,6 +141,9 @@ again:
 
        hammer_modify_buffer(NULL, buffer, NULL, 0);
 
+       /* XXX eventually goto again again, but for now catch it */
+       KKASSERT(undomap->next_offset == next_offset);
+
        /*
         * The FIFO entry would cross a buffer boundary, PAD to the end
         * of the buffer and try again.  Due to our data alignment, the
@@ -157,7 +151,7 @@ again:
         * populate the first 8 bytes of hammer_fifo_head and the tail may
         * be at the same offset as the head.
         */
-       if ((result_offset ^ (result_offset + bytes)) & ~HAMMER_BUFMASK64) {
+       if ((next_offset ^ (next_offset + bytes)) & ~HAMMER_BUFMASK64) {
                bytes = HAMMER_BUFSIZE - ((int)next_offset & HAMMER_BUFMASK);
                tail = (void *)((char *)undo + bytes - sizeof(*tail));
                if ((void *)undo != (void *)tail) {
@@ -173,7 +167,7 @@ again:
                goto again;
        }
        if (hammer_debug_general & 0x0080)
-               kprintf("undo %016llx %d %d\n", result_offset, bytes, len);
+               kprintf("undo %016llx %d %d\n", next_offset, bytes, len);
 
        /*
         * We're good, create the entry.
@@ -193,13 +187,9 @@ again:
        tail->tail_size = bytes;
 
        undo->head.hdr_crc = crc32(undo, bytes);
-       hammer_modify_buffer_done(buffer);
-
-       /*
-        * Update the undo offset space in the IO XXX
-        */
-
        undomap->next_offset += bytes;
+
+       hammer_modify_buffer_done(buffer);
        hammer_modify_volume_done(root_volume);
 
        if (buffer)
@@ -260,22 +250,34 @@ hammer_clear_undo_history(hammer_mount_t hmp)
  * Misc helper routines.  Return available space and total space.
  */
 int64_t
-hammer_undo_space(hammer_mount_t hmp)
+hammer_undo_used(hammer_mount_t hmp)
 {
        hammer_blockmap_t rootmap;
-       int64_t bytes;
        int64_t max_bytes;
+       int64_t bytes;
 
        rootmap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
 
        if (rootmap->first_offset <= rootmap->next_offset) {
-               bytes = (int)(rootmap->next_offset - rootmap->first_offset);
+               bytes = rootmap->next_offset - rootmap->first_offset;
        } else {
-               bytes = (int)(rootmap->alloc_offset - rootmap->first_offset +
-                             rootmap->next_offset);
+               bytes = rootmap->alloc_offset - rootmap->first_offset +
+                       (rootmap->next_offset & HAMMER_OFF_LONG_MASK);
        }
-       max_bytes = (int)(rootmap->alloc_offset & HAMMER_OFF_SHORT_MASK);
-       return(max_bytes - bytes);
+       max_bytes = rootmap->alloc_offset & HAMMER_OFF_SHORT_MASK;
+       KKASSERT(bytes <= max_bytes);
+       return(bytes);
+}
+
+int64_t
+hammer_undo_space(hammer_mount_t hmp)
+{
+       hammer_blockmap_t rootmap;
+       int64_t max_bytes;
+
+       rootmap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
+       max_bytes = rootmap->alloc_offset & HAMMER_OFF_SHORT_MASK;
+       return(max_bytes - hammer_undo_used(hmp));
 }
 
 int64_t
@@ -285,7 +287,7 @@ hammer_undo_max(hammer_mount_t hmp)
        int64_t max_bytes;
 
        rootmap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
-       max_bytes = (int)(rootmap->alloc_offset & HAMMER_OFF_SHORT_MASK);
+       max_bytes = rootmap->alloc_offset & HAMMER_OFF_SHORT_MASK;
 
        return(max_bytes);
 }
index a21e30c..2de7e77 100644 (file)
@@ -31,7 +31,7 @@
  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  * 
- * $DragonFly: src/sys/vfs/hammer/hammer_vfsops.c,v 1.32 2008/05/03 20:21:20 dillon Exp $
+ * $DragonFly: src/sys/vfs/hammer/hammer_vfsops.c,v 1.33 2008/05/04 09:06:45 dillon Exp $
  */
 
 #include <sys/param.h>
@@ -199,6 +199,7 @@ hammer_vfs_mount(struct mount *mp, char *mntpt, caddr_t data,
                lockinit(&hmp->blockmap_lock, "blkmap", 0, 0);
 
                hmp->sync_lock.refs = 1;
+               hmp->free_lock.refs = 1;
 
                TAILQ_INIT(&hmp->flush_list);
                TAILQ_INIT(&hmp->objid_cache_list);
@@ -316,9 +317,12 @@ hammer_vfs_mount(struct mount *mp, char *mntpt, caddr_t data,
         * Perform any necessary UNDO operations.  The recover code does
         * call hammer_undo_lookup() so we have to pre-cache the blockmap,
         * and then re-copy it again after recovery is complete.
+        *
+        * The recover code will load hmp->flusher_undo_start.
         */
        bcopy(rootvol->ondisk->vol0_blockmap, hmp->blockmap,
              sizeof(hmp->blockmap));
+
        error = hammer_recover(hmp, rootvol);
        if (error) {
                kprintf("Failed to recover HAMMER filesystem on mount\n");
@@ -340,6 +344,8 @@ hammer_vfs_mount(struct mount *mp, char *mntpt, caddr_t data,
         * Certain often-modified fields in the root volume are cached in
         * the hammer_mount structure so we do not have to generate lots
         * of little UNDO structures for them.
+        *
+        * Recopy after recovery.
         */
        hmp->next_tid = rootvol->ondisk->vol0_next_tid;
        bcopy(rootvol->ondisk->vol0_blockmap, hmp->blockmap,
index e0551cb..807ca16 100644 (file)
@@ -31,7 +31,7 @@
  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  * 
- * $DragonFly: src/sys/vfs/hammer/hammer_vnops.c,v 1.46 2008/05/03 20:21:20 dillon Exp $
+ * $DragonFly: src/sys/vfs/hammer/hammer_vnops.c,v 1.47 2008/05/04 09:06:45 dillon Exp $
  */
 
 #include <sys/param.h>
@@ -703,11 +703,6 @@ hammer_vop_nresolve(struct vop_nresolve_args *ap)
                        error = hammer_get_vnode(ip, &vp);
                        hammer_rel_inode(ip, 0);
                } else {
-                       kprintf("nresolve: lookup %s failed dip %p (%016llx) on"
-                               " inode %016llx error %d\n",
-                               ncp->nc_name,
-                               dip, dip->obj_id, obj_id, error);
-                       Debugger("x");
                        vp = NULL;
                }
                if (error == 0) {