HAMMER: Mirroring work
author     Matthew Dillon <dillon@dragonflybsd.org>
           Thu, 31 Jul 2008 22:30:33 +0000 (22:30 +0000)
committer  Matthew Dillon <dillon@dragonflybsd.org>
           Thu, 31 Jul 2008 22:30:33 +0000 (22:30 +0000)
* Fix an invalidation race that can be triggered by the mirroring or
  reblocking code.  The invalidation was being made before the direct IO
  completed rather than after.
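
  As a sketch of the corrected ordering (mirroring the hammer_io_direct_wait()
  hunk further down, with error paths trimmed): the alias invalidation is
  deferred until after the wait for the direct I/O, rather than being done
  up front.

      /* wait for the related direct I/O to finish first */
      if (record->flags & HAMMER_RECF_DIRECT_IO) {
              crit_enter();
              while (record->flags & HAMMER_RECF_DIRECT_IO) {
                      record->flags |= HAMMER_RECF_DIRECT_WAIT;
                      tsleep(&record->flags, 0, "hmdiow", 0);
              }
              crit_exit();
      }
      /* only now is it safe to throw away aliased buffers */
      if (record->flags & HAMMER_RECF_DIRECT_INVAL) {
              hammer_del_buffers(record->ip->hmp,
                                 record->leaf.data_offset,
                                 record->zone2_offset,
                                 record->leaf.data_len);
              record->flags &= ~HAMMER_RECF_DIRECT_INVAL;
      }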

* Fix an invalidation race.  hammer_io_inval() was cleaning out any
  pre-existing buffer cache buffer aliases but was not cleaning out the
  VM backing store, resulting in CRC assertions (but no on-media corruption)
  by the mirroring code.
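
  The gist of the hammer_io_inval() change (simplified sketch; the real code
  below also hands HAMMER-managed buffers back via hammer_io_deallocate()) is
  to always instantiate the buffer with getblk(), even when findblk() misses,
  so that B_NOCACHE|B_RELBUF on brelse() destroys the backing VM pages as well
  as the buffer itself:

      if ((bp = findblk(volume->devvp, phys_offset)) != NULL)
              bp = getblk(volume->devvp, phys_offset, bp->b_bufsize, 0, 0);
      else
              bp = getblk(volume->devvp, phys_offset, HAMMER_BUFSIZE, 0, 0);
      bundirty(bp);
      bp->b_flags |= B_NOCACHE | B_RELBUF;  /* toss buffer and VM pages */
      brelse(bp);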

* Change the bulk-record sequencing to avoid adding the record to
  the inode's record list until after the direct-io has been initiated.
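
  Sketched against the hammer_io_direct_write() hunk below (argument lists
  abbreviated), the frontend now hands the not-yet-added record to the
  direct-write path, which makes it visible only once the BIO has been set
  up, or destroys it on failure:

      record = hammer_ip_add_bulk(...);          /* built, not yet added */
      hammer_io_direct_write(hmp, record, bio);

      /* ...inside hammer_io_direct_write(), after issuing the BIO: */
      if (error == 0) {
              error = hammer_mem_add(record);    /* now visible to lookups */
              KKASSERT(error == 0);
      } else {
              record->flags |= HAMMER_RECF_DELETED_FE;
              hammer_rel_mem_record(record);     /* undo and drop the record */
      }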

* Change the mirror_read code to generate PASS records for deleted records
  whose create_tid is out of bounds, so we do not have to transport the
  data for deleted data records.  This greatly reduces the mirror bandwidth
  needed to mirror deletions.

  The mirror_write code similarly will issue delete_tid updates as
  appropriate when presented with a PASS record.
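
  On the read side the decision reduces to a create_tid range check (sketch
  of the hammer_mirror.c hunk below):

      if (elm->base.create_tid < mirror->tid_beg ||
          elm->base.create_tid > mirror->tid_end) {
              mrec.head.type = HAMMER_MREC_TYPE_PASS;  /* no data payload */
              ...
      }

  and on the write side a PASS record may only adjust an existing record's
  delete_tid, never create a new record:

      error = hammer_btree_lookup(cursor);
      if (error == 0) {
              if (hammer_mirror_check(cursor, mrec))
                      error = hammer_mirror_update(cursor, mrec);
              cursor->flags |= HAMMER_CURSOR_ATEDISK;
      }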

* Mirror targets no longer strip deleted records that have not yet been
  created on the target.  The record is now created so snapshot state is
  retained.
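
  Sketch of the corresponding hammer_ioc_mirror_write_rec() logic from the
  hunk below: a record missing on the target is created as long as its
  create_tid falls within the mirroring range, even if it carries a
  delete_tid, so the target keeps the same snapshot history:

      if (error == 0 && hammer_mirror_check(cursor, mrec)) {
              error = hammer_mirror_update(cursor, mrec);
      } else if (error == ENOENT) {
              if (mrec->leaf.base.create_tid >= mirror->tid_beg)
                      error = hammer_mirror_write(cursor, mrec, uptr);
              else
                      error = 0;  /* too old; target already pruned it */
      }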

sys/vfs/hammer/hammer.h
sys/vfs/hammer/hammer_blockmap.c
sys/vfs/hammer/hammer_btree.c
sys/vfs/hammer/hammer_io.c
sys/vfs/hammer/hammer_mirror.c
sys/vfs/hammer/hammer_object.c
sys/vfs/hammer/hammer_ondisk.c
sys/vfs/hammer/hammer_vnops.c

index 305993c..c4b90ad 100644 (file)
@@ -31,7 +31,7 @@
  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  * 
- * $DragonFly: src/sys/vfs/hammer/hammer.h,v 1.124 2008/07/31 04:42:04 dillon Exp $
+ * $DragonFly: src/sys/vfs/hammer/hammer.h,v 1.125 2008/07/31 22:30:33 dillon Exp $
  */
 /*
  * This header file contains structures used internally by the HAMMERFS
@@ -398,6 +398,7 @@ struct hammer_record {
        struct hammer_btree_leaf_elm    leaf;
        union hammer_data_ondisk        *data;
        int                             flags;
+       hammer_off_t                    zone2_offset;   /* direct-write only */
 };
 
 typedef struct hammer_record *hammer_record_t;
@@ -416,6 +417,7 @@ typedef struct hammer_record *hammer_record_t;
 #define HAMMER_RECF_CONVERT_DELETE     0x0100  /* special case */
 #define HAMMER_RECF_DIRECT_IO          0x0200  /* related direct I/O running*/
 #define HAMMER_RECF_DIRECT_WAIT                0x0400  /* related direct I/O running*/
+#define HAMMER_RECF_DIRECT_INVAL       0x0800  /* buffer alias invalidation */
 
 /*
  * hammer_delete_at_cursor() flags
@@ -846,6 +848,7 @@ int hammer_install_volume(hammer_mount_t hmp, const char *volname,
                        struct vnode *devvp);
 int    hammer_mountcheck_volumes(hammer_mount_t hmp);
 
+int    hammer_mem_add(hammer_record_t record);
 int    hammer_ip_lookup(hammer_cursor_t cursor);
 int    hammer_ip_first(hammer_cursor_t cursor);
 int    hammer_ip_next(hammer_cursor_t cursor);
index f64f73b..004d0e6 100644 (file)
@@ -31,7 +31,7 @@
  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  * 
- * $DragonFly: src/sys/vfs/hammer/hammer_blockmap.c,v 1.26 2008/07/18 00:19:53 dillon Exp $
+ * $DragonFly: src/sys/vfs/hammer/hammer_blockmap.c,v 1.27 2008/07/31 22:30:33 dillon Exp $
  */
 
 /*
@@ -573,7 +573,11 @@ hammer_blockmap_reserve_complete(hammer_mount_t hmp, hammer_reserve_t resv)
                /*
                 * If we are releasing a zone and all of its reservations
                 * were undone we have to clean out all hammer and device
-                * buffers associated with the big block.
+                * buffers associated with the big block.  We do this
+                * primarily because the large-block may be reallocated
+                * from non-large-data to large-data or vice-versa, resulting
+                * in a different mix of 16K and 64K buffer cache buffers.
+                * XXX - this isn't fun and needs to be redone.
                 *
                 * Any direct allocations will cause this test to fail
                 * (bytes_freed will never reach append_off), which is
index f9a1a80..ad558a5 100644 (file)
@@ -31,7 +31,7 @@
  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  * 
- * $DragonFly: src/sys/vfs/hammer/hammer_btree.c,v 1.74 2008/07/19 18:44:49 dillon Exp $
+ * $DragonFly: src/sys/vfs/hammer/hammer_btree.c,v 1.75 2008/07/31 22:30:33 dillon Exp $
  */
 
 /*
@@ -704,8 +704,11 @@ hammer_btree_extract(hammer_cursor_t cursor, int flags)
        KKASSERT(data_len >= 0 && data_len <= HAMMER_XBUFSIZE);
        cursor->data = hammer_bread_ext(hmp, data_off, data_len,
                                        &error, &cursor->data_buffer);
-       if (hammer_crc_test_leaf(cursor->data, &elm->leaf) == 0)
+       if (hammer_crc_test_leaf(cursor->data, &elm->leaf) == 0) {
+               kprintf("CRC DATA @ %016llx/%d FAILED\n",
+                       elm->leaf.data_offset, elm->leaf.data_len);
                Debugger("CRC FAILED: DATA");
+       }
        return(error);
 }
 
index b397052..fdbb3a5 100644 (file)
@@ -31,7 +31,7 @@
  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  * 
- * $DragonFly: src/sys/vfs/hammer/hammer_io.c,v 1.51 2008/07/18 00:19:53 dillon Exp $
+ * $DragonFly: src/sys/vfs/hammer/hammer_io.c,v 1.52 2008/07/31 22:30:33 dillon Exp $
  */
 /*
  * IO Primitives and buffer cache management
@@ -248,7 +248,11 @@ hammer_io_new(struct vnode *devvp, struct hammer_io *io)
 
 /*
  * Remove potential device level aliases against buffers managed by high level
- * vnodes.
+ * vnodes.  Aliases can also be created due to mixed buffer sizes.
+ *
+ * This is nasty because the buffers are also VMIO-backed.  Even if a buffer
+ * does not exist its backing VM pages might, and we have to invalidate
+ * those as well or a getblk() will reinstate them.
  */
 void
 hammer_io_inval(hammer_volume_t volume, hammer_off_t zone2_offset)
@@ -260,20 +264,21 @@ hammer_io_inval(hammer_volume_t volume, hammer_off_t zone2_offset)
        phys_offset = volume->ondisk->vol_buf_beg +
                      (zone2_offset & HAMMER_OFF_SHORT_MASK);
        crit_enter();
-       if ((bp = findblk(volume->devvp, phys_offset)) != NULL) {
+       if ((bp = findblk(volume->devvp, phys_offset)) != NULL)
                bp = getblk(volume->devvp, phys_offset, bp->b_bufsize, 0, 0);
-               if ((iou = (void *)LIST_FIRST(&bp->b_dep)) != NULL) {
-                       hammer_io_clear_modify(&iou->io, 1);
-                       bundirty(bp);
-                       iou->io.reclaim = 1;
-                       hammer_io_deallocate(bp);
-               } else {
-                       KKASSERT((bp->b_flags & B_LOCKED) == 0);
-                       bundirty(bp);
-                       bp->b_flags |= B_NOCACHE|B_RELBUF;
-               }
-               brelse(bp);
+       else
+               bp = getblk(volume->devvp, phys_offset, HAMMER_BUFSIZE, 0, 0);
+       if ((iou = (void *)LIST_FIRST(&bp->b_dep)) != NULL) {
+               hammer_io_clear_modify(&iou->io, 1);
+               bundirty(bp);
+               iou->io.reclaim = 1;
+               hammer_io_deallocate(bp);
+       } else {
+               KKASSERT((bp->b_flags & B_LOCKED) == 0);
+               bundirty(bp);
+               bp->b_flags |= B_NOCACHE|B_RELBUF;
        }
+       brelse(bp);
        crit_exit();
 }
 
@@ -995,9 +1000,6 @@ struct bio_ops hammer_bioops = {
  * disk media.  The bio may be issued asynchronously.  If leaf is non-NULL
  * we validate the CRC.
  *
- * A second-level bio already resolved to a zone-2 offset (typically by
- * the BMAP code, or by a previous hammer_io_direct_write()), is passed. 
- *
  * We must check for the presence of a HAMMER buffer to handle the case
  * where the reblocker has rewritten the data (which it does via the HAMMER
  * buffer system, not via the high-level vnode buffer cache), but not yet
@@ -1048,11 +1050,12 @@ hammer_io_direct_read(hammer_mount_t hmp, struct bio *bio,
                error = EIO;
 
        if (error == 0) {
-               zone2_offset &= HAMMER_OFF_SHORT_MASK;
-
+               /*
+                * 3rd level bio
+                */
                nbio = push_bio(bio);
                nbio->bio_offset = volume->ondisk->vol_buf_beg +
-                                  zone2_offset;
+                                  (zone2_offset & HAMMER_OFF_SHORT_MASK);
 #if 0
                /*
                 * XXX disabled - our CRC check doesn't work if the OS
@@ -1110,7 +1113,7 @@ hammer_io_direct_read_complete(struct bio *nbio)
  * disk media.  The bio may be issued asynchronously.
  *
  * The BIO is associated with the specified record and RECF_DIRECT_IO
- * is set.
+ * is set.  The record is added to its object.
  */
 int
 hammer_io_direct_write(hammer_mount_t hmp, hammer_record_t record,
@@ -1148,8 +1151,10 @@ hammer_io_direct_write(hammer_mount_t hmp, hammer_record_t record,
                if (error == 0) {
                        bp = bio->bio_buf;
                        KKASSERT((bp->b_bufsize & HAMMER_BUFMASK) == 0);
+                       /*
                        hammer_del_buffers(hmp, buf_offset,
                                           zone2_offset, bp->b_bufsize);
+                       */
 
                        /*
                         * Second level bio - cached zone2 offset.
@@ -1161,7 +1166,9 @@ hammer_io_direct_write(hammer_mount_t hmp, hammer_record_t record,
                        nbio->bio_offset = zone2_offset;
                        nbio->bio_done = hammer_io_direct_write_complete;
                        nbio->bio_caller_info1.ptr = record;
-                       record->flags |= HAMMER_RECF_DIRECT_IO;
+                       record->zone2_offset = zone2_offset;
+                       record->flags |= HAMMER_RECF_DIRECT_IO |
+                                        HAMMER_RECF_DIRECT_INVAL;
 
                        /*
                         * Third level bio - raw offset specific to the
@@ -1195,7 +1202,17 @@ hammer_io_direct_write(hammer_mount_t hmp, hammer_record_t record,
                        biodone(bio);
                }
        }
-       if (error) {
+       if (error == 0) {
+               /*
+                * The record is all set up now, add it.  Potential conflicts
+                * have already been dealt with.
+                */
+               error = hammer_mem_add(record);
+               KKASSERT(error == 0);
+       } else {
+               /*
+                * Major suckage occurred.
+                */
                kprintf("hammer_direct_write: failed @ %016llx\n",
                        leaf->data_offset);
                bp = bio->bio_buf;
@@ -1203,6 +1220,8 @@ hammer_io_direct_write(hammer_mount_t hmp, hammer_record_t record,
                bp->b_error = EIO;
                bp->b_flags |= B_ERROR;
                biodone(bio);
+               record->flags |= HAMMER_RECF_DELETED_FE;
+               hammer_rel_mem_record(record);
        }
        return(error);
 }
@@ -1220,17 +1239,21 @@ void
 hammer_io_direct_write_complete(struct bio *nbio)
 {
        struct bio *obio;
+       struct buf *bp;
        hammer_record_t record = nbio->bio_caller_info1.ptr;
 
+       bp = nbio->bio_buf;
        obio = pop_bio(nbio);
-       if (obio->bio_buf->b_flags & B_ERROR) {
+       if (bp->b_flags & B_ERROR) {
                hammer_critical_error(record->ip->hmp, record->ip,
-                                     obio->bio_buf->b_error,
+                                     bp->b_error,
                                      "while writing bulk data");
-               obio->bio_buf->b_flags |= B_INVAL;
+               bp->b_flags |= B_INVAL;
        }
        biodone(obio);
-       KKASSERT(record != NULL && (record->flags & HAMMER_RECF_DIRECT_IO));
+
+       KKASSERT(record != NULL);
+       KKASSERT(record->flags & HAMMER_RECF_DIRECT_IO);
        record->flags &= ~HAMMER_RECF_DIRECT_IO;
        if (record->flags & HAMMER_RECF_DIRECT_WAIT) {
                record->flags &= ~HAMMER_RECF_DIRECT_WAIT;
@@ -1241,22 +1264,40 @@ hammer_io_direct_write_complete(struct bio *nbio)
 
 /*
  * This is called before a record is either committed to the B-Tree
- * or destroyed, to resolve any associated direct-IO.  We must
- * ensure that the data is available on-media to other consumers
- * such as the reblocker or mirroring code.
+ * or destroyed, to resolve any associated direct-IO. 
  *
- * Note that other consumers might access the data via the block
- * device's buffer cache and not the high level vnode's buffer cache.
+ * (1) We must wait for any direct-IO related to the record to complete.
+ *
+ * (2) We must remove any buffer cache aliases for data accessed via
+ *     leaf->data_offset or zone2_offset so non-direct-IO consumers  
+ *     (the mirroring and reblocking code) do not see stale data.
  */
 void
 hammer_io_direct_wait(hammer_record_t record)
 {
-       crit_enter();
-       while (record->flags & HAMMER_RECF_DIRECT_IO) {
-               record->flags |= HAMMER_RECF_DIRECT_WAIT;
-               tsleep(&record->flags, 0, "hmdiow", 0);
+       /*
+        * Wait for I/O to complete
+        */
+       if (record->flags & HAMMER_RECF_DIRECT_IO) {
+               crit_enter();
+               while (record->flags & HAMMER_RECF_DIRECT_IO) {
+                       record->flags |= HAMMER_RECF_DIRECT_WAIT;
+                       tsleep(&record->flags, 0, "hmdiow", 0);
+               }
+               crit_exit();
+       }
+
+       /*
+        * Invalidate any related buffer cache aliases.
+        */
+       if (record->flags & HAMMER_RECF_DIRECT_INVAL) {
+               KKASSERT(record->leaf.data_offset);
+               hammer_del_buffers(record->ip->hmp,
+                                  record->leaf.data_offset,
+                                  record->zone2_offset,
+                                  record->leaf.data_len);
+               record->flags &= ~HAMMER_RECF_DIRECT_INVAL;
        }
-       crit_exit();
 }
 
 /*
index f752db2..81789ca 100644 (file)
@@ -31,7 +31,7 @@
  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  * 
- * $DragonFly: src/sys/vfs/hammer/hammer_mirror.c,v 1.16 2008/07/31 04:42:04 dillon Exp $
+ * $DragonFly: src/sys/vfs/hammer/hammer_mirror.c,v 1.17 2008/07/31 22:30:33 dillon Exp $
  */
 /*
  * HAMMER mirroring ioctls - serialize and deserialize modifications made
@@ -186,10 +186,21 @@ retry:
                elm = &cursor.node->ondisk->elms[cursor.index].leaf;
                mirror->key_cur = elm->base;
 
-               if ((elm->base.create_tid < mirror->tid_beg ||
-                   elm->base.create_tid > mirror->tid_end) &&
-                   (elm->base.delete_tid < mirror->tid_beg ||
-                   elm->base.delete_tid > mirror->tid_end)) {
+               /*
+                * Determine if we should generate a PASS or a REC.  PASS
+                * records are records without any data payload.  Such
+                * records will be generated if the target is already expected
+                * to have the record, allowing it to delete the gaps.
+                *
+                * A PASS record is also used to perform deletions on the
+                * target.
+                *
+                * Such deletions are needed if the master or files on the
+                * master are no-history, or if the slave is so far behind
+                * that the master has already been pruned.
+                */
+               if (elm->base.create_tid < mirror->tid_beg ||
+                   elm->base.create_tid > mirror->tid_end) {
                        bytes = sizeof(mrec.rec);
                        if (mirror->count + HAMMER_HEAD_DOALIGN(bytes) >
                            mirror->size) {
@@ -197,15 +208,7 @@ retry:
                        }
 
                        /*
-                        * Fill mrec.  PASS records are records which are
-                        * outside the TID range needed for the mirror
-                        * update.  They are sent without any data payload
-                        * because the mirroring target must still compare
-                        * records that fall outside the SKIP ranges to
-                        * determine what might need to be deleted.  Such
-                        * deletions are needed if the master or files on
-                        * the master are no-history, or if the slave is
-                        * so far behind the master has already been pruned.
+                        * Fill mrec.
                         */
                        mrec.head.signature = HAMMER_IOC_MIRROR_SIGNATURE;
                        mrec.head.type = HAMMER_MREC_TYPE_PASS;
@@ -561,10 +564,12 @@ hammer_ioc_mirror_write_rec(hammer_cursor_t cursor,
         *
         * If the record exists only the delete_tid may be updated.
         *
-        * If the record does not exist we create it.  For now we
-        * ignore records with a non-zero delete_tid.  Note that
-        * mirror operations are effective an as-of operation and
-        * delete_tid can be 0 for mirroring purposes even if it is
+        * If the record does not exist we can create it only if the
+        * create_tid is not too old.  If the create_tid is too old
+        * it may have already been destroyed on the slave from pruning.
+        *
+        * Note that mirror operations are effectively as-of operations
+        * and delete_tid can be 0 for mirroring purposes even if it is
         * not actually 0 at the originator.
         *
         * These functions can return EDEADLK
@@ -576,10 +581,11 @@ hammer_ioc_mirror_write_rec(hammer_cursor_t cursor,
 
        if (error == 0 && hammer_mirror_check(cursor, mrec)) {
                error = hammer_mirror_update(cursor, mrec);
-       } else if (error == ENOENT && mrec->leaf.base.delete_tid == 0) {
-               error = hammer_mirror_write(cursor, mrec, uptr);
        } else if (error == ENOENT) {
-               error = 0;
+               if (mrec->leaf.base.create_tid >= mirror->tid_beg)
+                       error = hammer_mirror_write(cursor, mrec, uptr);
+               else
+                       error = 0;
        }
        if (error == 0 || error == EALREADY)
                mirror->key_cur = mrec->leaf.base;
@@ -630,7 +636,9 @@ hammer_ioc_mirror_write_pass(hammer_cursor_t cursor,
        error = hammer_mirror_delete_to(cursor, mirror);
 
        /*
-        * Locate the record and get past it by setting ATEDISK.
+        * Locate the record and get past it by setting ATEDISK.  Perform
+        * any necessary deletions.  We have no data payload and cannot
+        * create a new record.
         */
        if (error == 0) {
                mirror->key_cur = mrec->leaf.base;
@@ -638,10 +646,13 @@ hammer_ioc_mirror_write_pass(hammer_cursor_t cursor,
                cursor->flags |= HAMMER_CURSOR_BACKEND;
                cursor->flags &= ~HAMMER_CURSOR_INSERT;
                error = hammer_btree_lookup(cursor);
-               if (error == 0)
+               if (error == 0) {
+                       if (hammer_mirror_check(cursor, mrec))
+                               error = hammer_mirror_update(cursor, mrec);
                        cursor->flags |= HAMMER_CURSOR_ATEDISK;
-               else
+               } else {
                        cursor->flags &= ~HAMMER_CURSOR_ATEDISK;
+               }
                if (error == ENOENT)
                        error = 0;
        }
index 2f33e74..48f4e06 100644 (file)
  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  * 
- * $DragonFly: src/sys/vfs/hammer/hammer_object.c,v 1.92 2008/07/19 04:49:39 dillon Exp $
+ * $DragonFly: src/sys/vfs/hammer/hammer_object.c,v 1.93 2008/07/31 22:30:33 dillon Exp $
  */
 
 #include "hammer.h"
 
-static int hammer_mem_add(hammer_record_t record);
 static int hammer_mem_lookup(hammer_cursor_t cursor);
 static int hammer_mem_first(hammer_cursor_t cursor);
 static int hammer_frontend_trunc_callback(hammer_record_t record,
@@ -399,10 +398,13 @@ hammer_rel_mem_record(struct hammer_record *record)
 
                        /*
                         * We must wait for any direct-IO to complete before
-                        * we can destroy the record.
+                        * we can destroy the record because the bio may
+                        * have a reference to it.
                         */
-                       if (record->flags & HAMMER_RECF_DIRECT_IO)
+                       if (record->flags & 
+                          (HAMMER_RECF_DIRECT_IO | HAMMER_RECF_DIRECT_INVAL)) {
                                hammer_io_direct_wait(record);
+                       }
 
 
                        /*
@@ -843,6 +845,8 @@ hammer_ip_get_bulk(hammer_inode_t ip, off_t file_offset, int bytes)
  * flush a buffer cache buffer.  The frontend has locked the related buffer
  * cache buffers and we should be able to manipulate any overlapping
  * in-memory records.
+ *
+ * The caller is responsible for adding the returned record.
  */
 hammer_record_t
 hammer_ip_add_bulk(hammer_inode_t ip, off_t file_offset, void *data, int bytes,
@@ -851,7 +855,6 @@ hammer_ip_add_bulk(hammer_inode_t ip, off_t file_offset, void *data, int bytes,
        hammer_record_t record;
        hammer_record_t conflict;
        int zone;
-       int flags;
 
        /*
         * Deal with conflicting in-memory records.  We cannot have multiple
@@ -903,30 +906,8 @@ hammer_ip_add_bulk(hammer_inode_t ip, off_t file_offset, void *data, int bytes,
                                         HAMMER_LOCALIZE_MISC;
        record->leaf.data_len = bytes;
        hammer_crc_set_leaf(data, &record->leaf);
-       flags = record->flags;
-
-       hammer_ref(&record->lock);      /* mem_add eats a reference */
-       *errorp = hammer_mem_add(record);
-       if (*errorp) {
-               conflict = hammer_ip_get_bulk(ip, file_offset, bytes);
-               kprintf("hammer_ip_add_bulk: error %d conflict %p file_offset %lld bytes %d\n",
-                       *errorp, conflict, file_offset, bytes);
-               if (conflict)
-                       kprintf("conflict %lld %d\n", conflict->leaf.base.key, conflict->leaf.data_len);
-               if (conflict)
-                       hammer_rel_mem_record(conflict);
-       }
        KKASSERT(*errorp == 0);
-       conflict = hammer_ip_get_bulk(ip, file_offset, bytes);
-       if (conflict != record) {
-               kprintf("conflict mismatch %p %p %08x\n", conflict, record, record->flags);
-               if (conflict)
-                   kprintf("conflict mismatch %lld/%d %lld/%d\n", conflict->leaf.base.key, conflict->leaf.data_len, record->leaf.base.key, record->leaf.data_len);
-       }
-       KKASSERT(conflict == record);
-       hammer_rel_mem_record(conflict);
-
-       return (record);
+       return(record);
 }
 
 /*
@@ -1017,6 +998,13 @@ hammer_ip_sync_record_cursor(hammer_cursor_t cursor, hammer_record_t record)
        KKASSERT(record->flags & HAMMER_RECF_INTERLOCK_BE);
        KKASSERT(record->leaf.base.localization != 0);
 
+       /*
+        * Any direct-write related to the record must complete before we
+        * can sync the record to the on-disk media.
+        */
+       if (record->flags & (HAMMER_RECF_DIRECT_IO | HAMMER_RECF_DIRECT_INVAL))
+               hammer_io_direct_wait(record);
+
        /*
         * If this is a bulk-data record placemarker there may be an existing
         * record on-disk, indicating a data overwrite.  If there is the
@@ -1164,13 +1152,6 @@ hammer_ip_sync_record_cursor(hammer_cursor_t cursor, hammer_record_t record)
                record->leaf.data_crc = 0;
        }
 
-       /*
-        * If the record's data was direct-written we cannot insert
-        * it until the direct-IO has completed.
-        */
-       if (record->flags & HAMMER_RECF_DIRECT_IO)
-               hammer_io_direct_wait(record);
-
        error = hammer_btree_insert(cursor, &record->leaf, &doprop);
        if (hammer_debug_inode && error)
                kprintf("BTREE INSERT error %d @ %016llx:%d key %016llx\n", error, cursor->node->node_offset, cursor->index, record->leaf.base.key);
@@ -1224,7 +1205,6 @@ done:
  * A copy of the temporary record->data pointer provided by the caller
  * will be made.
  */
-static
 int
 hammer_mem_add(hammer_record_t record)
 {
index 44f267e..1c0f645 100644 (file)
@@ -31,7 +31,7 @@
  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  * 
- * $DragonFly: src/sys/vfs/hammer/hammer_ondisk.c,v 1.73 2008/07/31 04:42:04 dillon Exp $
+ * $DragonFly: src/sys/vfs/hammer/hammer_ondisk.c,v 1.74 2008/07/31 22:30:33 dillon Exp $
  */
 /*
  * Manage HAMMER's on-disk structures.  These routines are primarily
@@ -526,7 +526,7 @@ again:
                hammer_ref(&buffer->io.lock);
 
                /*
-                * Onced refed the ondisk field will not be cleared by
+                * Once refed the ondisk field will not be cleared by
                 * any other action.
                 */
                if (buffer->ondisk && buffer->io.loading == 0) {
index 31fa543..b83c40c 100644 (file)
@@ -31,7 +31,7 @@
  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  * 
- * $DragonFly: src/sys/vfs/hammer/hammer_vnops.c,v 1.93 2008/07/19 04:49:39 dillon Exp $
+ * $DragonFly: src/sys/vfs/hammer/hammer_vnops.c,v 1.94 2008/07/31 22:30:33 dillon Exp $
  */
 
 #include <sys/param.h>
@@ -2610,7 +2610,6 @@ hammer_vop_strategy_write(struct vop_strategy_args *ap)
                                    bytes, &error);
        if (record) {
                hammer_io_direct_write(hmp, record, bio);
-               hammer_rel_mem_record(record);
                if (ip->rsv_recs > 1 && hmp->rsv_recs > hammer_limit_recs)
                        hammer_flush_inode(ip, 0);
        } else {