HAMMER 38D/Many: Undo/Synchronization and crash recovery
sys/vfs/hammer/hammer_ondisk.c
/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_ondisk.c,v 1.39 2008/04/26 02:54:00 dillon Exp $
 */
/*
 * Manage HAMMER's on-disk structures.  These routines are primarily
 * responsible for interfacing with the kernel's I/O subsystem and for
 * managing in-memory structures.
 */

#include "hammer.h"
#include <sys/fcntl.h>
#include <sys/nlookup.h>
#include <sys/buf.h>
#include <sys/buf2.h>

static void hammer_free_volume(hammer_volume_t volume);
static int hammer_load_volume(hammer_volume_t volume);
static int hammer_load_buffer(hammer_buffer_t buffer, int isnew);
static int hammer_load_node(hammer_node_t node);

/*
 * Red-Black tree support for various structures
 */
static int
hammer_ino_rb_compare(hammer_inode_t ip1, hammer_inode_t ip2)
{
        if (ip1->obj_id < ip2->obj_id)
                return(-1);
        if (ip1->obj_id > ip2->obj_id)
                return(1);
        if (ip1->obj_asof < ip2->obj_asof)
                return(-1);
        if (ip1->obj_asof > ip2->obj_asof)
                return(1);
        return(0);
}

static int
hammer_inode_info_cmp(hammer_inode_info_t info, hammer_inode_t ip)
{
        if (info->obj_id < ip->obj_id)
                return(-1);
        if (info->obj_id > ip->obj_id)
                return(1);
        if (info->obj_asof < ip->obj_asof)
                return(-1);
        if (info->obj_asof > ip->obj_asof)
                return(1);
        return(0);
}

static int
hammer_vol_rb_compare(hammer_volume_t vol1, hammer_volume_t vol2)
{
        if (vol1->vol_no < vol2->vol_no)
                return(-1);
        if (vol1->vol_no > vol2->vol_no)
                return(1);
        return(0);
}

static int
hammer_buf_rb_compare(hammer_buffer_t buf1, hammer_buffer_t buf2)
{
        if (buf1->zone2_offset < buf2->zone2_offset)
                return(-1);
        if (buf1->zone2_offset > buf2->zone2_offset)
                return(1);
        return(0);
}

static int
hammer_nod_rb_compare(hammer_node_t node1, hammer_node_t node2)
{
        if (node1->node_offset < node2->node_offset)
                return(-1);
        if (node1->node_offset > node2->node_offset)
                return(1);
        return(0);
}

/*
 * Note: The lookup function for hammer_ino_rb_tree winds up being named
 * hammer_ino_rb_tree_RB_LOOKUP_INFO(root, info).  The other lookup
 * functions are normal, e.g. hammer_buf_rb_tree_RB_LOOKUP(root, zone2_offset).
 */
RB_GENERATE(hammer_ino_rb_tree, hammer_inode, rb_node, hammer_ino_rb_compare);
RB_GENERATE_XLOOKUP(hammer_ino_rb_tree, INFO, hammer_inode, rb_node,
                hammer_inode_info_cmp, hammer_inode_info_t);
RB_GENERATE2(hammer_vol_rb_tree, hammer_volume, rb_node,
             hammer_vol_rb_compare, int32_t, vol_no);
RB_GENERATE2(hammer_buf_rb_tree, hammer_buffer, rb_node,
             hammer_buf_rb_compare, hammer_off_t, zone2_offset);
RB_GENERATE2(hammer_nod_rb_tree, hammer_node, rb_node,
             hammer_nod_rb_compare, hammer_off_t, node_offset);
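
/*
 * Illustrative sketch (guarded out, not part of the driver): how the
 * generated lookup functions are typically invoked.  The example_ names
 * are hypothetical; rb_inos_root is assumed to be the inode tree root
 * declared in hammer.h.
 */
#if 0
static hammer_inode_t
example_ino_lookup(hammer_mount_t hmp, hammer_inode_info_t iinfo)
{
        /* INFO-style lookup keyed on (obj_id, obj_asof) */
        return(hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, iinfo));
}

static hammer_buffer_t
example_buf_lookup(hammer_volume_t volume, hammer_off_t zone2_offset)
{
        /* normal keyed lookup on the zone-2 buffer offset */
        return(hammer_buf_rb_tree_RB_LOOKUP(&volume->rb_bufs_root,
                                            zone2_offset));
}
#endif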

/************************************************************************
 *                              VOLUMES                                 *
 ************************************************************************
 *
 * Load a HAMMER volume by name.  Returns 0 on success or a positive error
 * code on failure.  Volumes must be loaded at mount time, get_volume() will
 * not load a new volume.
 *
 * Calls made to hammer_load_volume() are single-threaded.
 */
int
hammer_install_volume(struct hammer_mount *hmp, const char *volname)
{
        struct mount *mp;
        hammer_volume_t volume;
        struct hammer_volume_ondisk *ondisk;
        struct nlookupdata nd;
        struct buf *bp = NULL;
        int error;
        int ronly;
        int setmp = 0;

        mp = hmp->mp;
        ronly = ((mp->mnt_flag & MNT_RDONLY) ? 1 : 0);

        /*
         * Allocate a volume structure
         */
        ++hammer_count_volumes;
        volume = kmalloc(sizeof(*volume), M_HAMMER, M_WAITOK|M_ZERO);
        volume->vol_name = kstrdup(volname, M_HAMMER);
        hammer_io_init(&volume->io, hmp, HAMMER_STRUCTURE_VOLUME);
        volume->io.offset = 0LL;

        /*
         * Get the device vnode
         */
        error = nlookup_init(&nd, volume->vol_name, UIO_SYSSPACE, NLC_FOLLOW);
        if (error == 0)
                error = nlookup(&nd);
        if (error == 0)
                error = cache_vref(&nd.nl_nch, nd.nl_cred, &volume->devvp);
        nlookup_done(&nd);
        if (error == 0) {
                if (vn_isdisk(volume->devvp, &error)) {
                        error = vfs_mountedon(volume->devvp);
                }
        }
        if (error == 0 &&
            count_udev(volume->devvp->v_umajor, volume->devvp->v_uminor) > 0) {
                error = EBUSY;
        }
        if (error == 0) {
                vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
                error = vinvalbuf(volume->devvp, V_SAVE, 0, 0);
                if (error == 0) {
                        error = VOP_OPEN(volume->devvp,
                                         (ronly ? FREAD : FREAD|FWRITE),
                                         FSCRED, NULL);
                }
                vn_unlock(volume->devvp);
        }
        if (error) {
                hammer_free_volume(volume);
                return(error);
        }
        volume->devvp->v_rdev->si_mountpoint = mp;
        setmp = 1;

        /*
         * Extract the volume number from the volume header and do various
         * sanity checks.
         */
        error = bread(volume->devvp, 0LL, HAMMER_BUFSIZE, &bp);
        if (error)
                goto late_failure;
        ondisk = (void *)bp->b_data;
        if (ondisk->vol_signature != HAMMER_FSBUF_VOLUME) {
                kprintf("hammer_mount: volume %s has an invalid header\n",
                        volume->vol_name);
                error = EFTYPE;
                goto late_failure;
        }
        volume->vol_no = ondisk->vol_no;
        volume->buffer_base = ondisk->vol_buf_beg;
        volume->vol_flags = ondisk->vol_flags;
        volume->nblocks = ondisk->vol_nblocks;
        volume->maxbuf_off = HAMMER_ENCODE_RAW_BUFFER(volume->vol_no,
                                    ondisk->vol_buf_end - ondisk->vol_buf_beg);
        RB_INIT(&volume->rb_bufs_root);

        hmp->mp->mnt_stat.f_blocks += volume->nblocks;

        if (RB_EMPTY(&hmp->rb_vols_root)) {
                hmp->fsid = ondisk->vol_fsid;
        } else if (bcmp(&hmp->fsid, &ondisk->vol_fsid, sizeof(uuid_t))) {
                kprintf("hammer_mount: volume %s's fsid does not match "
                        "other volumes\n", volume->vol_name);
                error = EFTYPE;
                goto late_failure;
        }

        /*
         * Insert the volume structure into the red-black tree.
         */
        if (RB_INSERT(hammer_vol_rb_tree, &hmp->rb_vols_root, volume)) {
                kprintf("hammer_mount: volume %s has a duplicate vol_no %d\n",
                        volume->vol_name, volume->vol_no);
                error = EEXIST;
        }

        /*
         * Set the root volume.  HAMMER special-cases rootvol in the
         * structure.  We do not hold a ref because this would prevent
         * related I/O from being flushed.
         */
        if (error == 0 && ondisk->vol_rootvol == ondisk->vol_no) {
                hmp->rootvol = volume;
                if (bp) {
                        brelse(bp);
                        bp = NULL;
                }
                hmp->fsid_udev = dev2udev(vn_todev(volume->devvp));
        }
late_failure:
        if (bp)
                brelse(bp);
        if (error) {
                /*vinvalbuf(volume->devvp, V_SAVE, 0, 0);*/
                if (setmp)
                        volume->devvp->v_rdev->si_mountpoint = NULL;
                VOP_CLOSE(volume->devvp, ronly ? FREAD : FREAD|FWRITE);
                hammer_free_volume(volume);
        }
        return (error);
}

/*
 * Unload and free a HAMMER volume.  Must return >= 0 to continue the
 * scan, so -1 is returned on failure.
 */
int
hammer_unload_volume(hammer_volume_t volume, void *data __unused)
{
        struct hammer_mount *hmp = volume->io.hmp;
        int ronly = ((hmp->mp->mnt_flag & MNT_RDONLY) ? 1 : 0);

        /*
         * Sync clusters, sync volume
         */

        hmp->mp->mnt_stat.f_blocks -= volume->nblocks;

        /*
         * Clean up the root volume pointer, which is held unlocked in hmp.
         */
        if (hmp->rootvol == volume)
                hmp->rootvol = NULL;

        /*
         * Unload buffers.
         */
        RB_SCAN(hammer_buf_rb_tree, &volume->rb_bufs_root, NULL,
                        hammer_unload_buffer, NULL);

        /*
         * Release our buffer and flush anything left in the buffer cache.
         */
        volume->io.flush = 1;
        volume->io.waitdep = 1;
        hammer_io_release(&volume->io);

        /*
         * There should be no references on the volume, no clusters, and
         * no super-clusters.
         */
        KKASSERT(volume->io.lock.refs == 0);
        KKASSERT(RB_EMPTY(&volume->rb_bufs_root));

        volume->ondisk = NULL;
        if (volume->devvp) {
                if (volume->devvp->v_rdev &&
                    volume->devvp->v_rdev->si_mountpoint == hmp->mp
                ) {
                        volume->devvp->v_rdev->si_mountpoint = NULL;
                }
                if (ronly) {
                        vinvalbuf(volume->devvp, 0, 0, 0);
                        VOP_CLOSE(volume->devvp, FREAD);
                } else {
                        vinvalbuf(volume->devvp, V_SAVE, 0, 0);
                        VOP_CLOSE(volume->devvp, FREAD|FWRITE);
                }
        }

        /*
         * Destroy the structure
         */
        RB_REMOVE(hammer_vol_rb_tree, &hmp->rb_vols_root, volume);
        hammer_free_volume(volume);
        return(0);
}

static
void
hammer_free_volume(hammer_volume_t volume)
{
        if (volume->vol_name) {
                kfree(volume->vol_name, M_HAMMER);
                volume->vol_name = NULL;
        }
        if (volume->devvp) {
                vrele(volume->devvp);
                volume->devvp = NULL;
        }
        --hammer_count_volumes;
        kfree(volume, M_HAMMER);
}

/*
 * Get a HAMMER volume.  The volume must already exist.
 */
hammer_volume_t
hammer_get_volume(struct hammer_mount *hmp, int32_t vol_no, int *errorp)
{
        struct hammer_volume *volume;

        /*
         * Locate the volume structure
         */
        volume = RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, vol_no);
        if (volume == NULL) {
                *errorp = ENOENT;
                return(NULL);
        }
        hammer_ref(&volume->io.lock);

        /*
         * Deal with on-disk info
         */
        if (volume->ondisk == NULL || volume->io.loading) {
                *errorp = hammer_load_volume(volume);
                if (*errorp) {
                        hammer_rel_volume(volume, 1);
                        volume = NULL;
                }
        } else {
                *errorp = 0;
        }
        return(volume);
}

int
hammer_ref_volume(hammer_volume_t volume)
{
        int error;

        hammer_ref(&volume->io.lock);

        /*
         * Deal with on-disk info
         */
        if (volume->ondisk == NULL || volume->io.loading) {
                error = hammer_load_volume(volume);
                if (error)
                        hammer_rel_volume(volume, 1);
        } else {
                error = 0;
        }
        return (error);
}

hammer_volume_t
hammer_get_root_volume(struct hammer_mount *hmp, int *errorp)
{
        hammer_volume_t volume;

        volume = hmp->rootvol;
        KKASSERT(volume != NULL);
        hammer_ref(&volume->io.lock);

        /*
         * Deal with on-disk info
         */
        if (volume->ondisk == NULL || volume->io.loading) {
                *errorp = hammer_load_volume(volume);
                if (*errorp) {
                        hammer_rel_volume(volume, 1);
                        volume = NULL;
                }
        } else {
                *errorp = 0;
        }
        return (volume);
}

/*
 * Load a volume's on-disk information.  The volume must be referenced and
 * not locked.  We temporarily acquire an exclusive lock to interlock
 * against releases or multiple get's.
 */
static int
hammer_load_volume(hammer_volume_t volume)
{
        int error;

        ++volume->io.loading;
        hammer_lock_ex(&volume->io.lock);

        if (volume->ondisk == NULL) {
                error = hammer_io_read(volume->devvp, &volume->io);
                if (error == 0)
                        volume->ondisk = (void *)volume->io.bp->b_data;
        } else {
                error = 0;
        }
        --volume->io.loading;
        hammer_unlock(&volume->io.lock);
        return(error);
}

/*
 * Release a volume.  Call hammer_io_release on the last reference.  We have
 * to acquire an exclusive lock to interlock against volume->ondisk tests
 * in hammer_load_volume(), and hammer_io_release() also expects an exclusive
 * lock to be held.
 *
 * Volumes are not unloaded from memory during normal operation.
 */
void
hammer_rel_volume(hammer_volume_t volume, int flush)
{
        if (flush)
                volume->io.flush = 1;
        crit_enter();
        if (volume->io.lock.refs == 1) {
                ++volume->io.loading;
                hammer_lock_ex(&volume->io.lock);
                if (volume->io.lock.refs == 1) {
                        volume->ondisk = NULL;
                        hammer_io_release(&volume->io);
                }
                --volume->io.loading;
                hammer_unlock(&volume->io.lock);
        }
        hammer_unref(&volume->io.lock);
        crit_exit();
}
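
/*
 * Illustrative sketch (guarded out, hypothetical example_ helper):
 * typical get/release pairing for a volume.  volume->ondisk remains
 * valid only while the reference is held.
 */
#if 0
static void
example_volume_usage(hammer_mount_t hmp, int32_t vol_no)
{
        hammer_volume_t volume;
        int error;

        volume = hammer_get_volume(hmp, vol_no, &error);
        if (volume == NULL)
                return;                 /* error holds e.g. ENOENT */
        /* ... inspect volume->ondisk ... */
        hammer_rel_volume(volume, 0);
}
#endif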

/************************************************************************
 *                              BUFFERS                                 *
 ************************************************************************
 *
 * Manage buffers.  Currently all blockmap-backed zones are translated
 * to zone-2 buffer offsets.
 */
hammer_buffer_t
hammer_get_buffer(hammer_mount_t hmp, hammer_off_t buf_offset,
                  int isnew, int *errorp)
{
        hammer_buffer_t buffer;
        hammer_volume_t volume;
        hammer_off_t    zoneX_offset;
        hammer_io_type_t iotype;
        int vol_no;
        int zone;

        zoneX_offset = buf_offset;
        zone = HAMMER_ZONE_DECODE(buf_offset);

        /*
         * What is the buffer class?
         */
        switch(zone) {
        case HAMMER_ZONE_LARGE_DATA_INDEX:
        case HAMMER_ZONE_SMALL_DATA_INDEX:
                iotype = HAMMER_STRUCTURE_DATA_BUFFER;
                break;
        case HAMMER_ZONE_UNDO_INDEX:
                iotype = HAMMER_STRUCTURE_UNDO_BUFFER;
                break;
        default:
                iotype = HAMMER_STRUCTURE_META_BUFFER;
                break;
        }

        /*
         * Handle blockmap offset translations
         */
        if (zone >= HAMMER_ZONE_BTREE_INDEX) {
                buf_offset = hammer_blockmap_lookup(hmp, buf_offset, errorp);
                KKASSERT(*errorp == 0);
        } else if (zone == HAMMER_ZONE_UNDO_INDEX) {
                buf_offset = hammer_undo_lookup(hmp, buf_offset, errorp);
                KKASSERT(*errorp == 0);
        }

        /*
         * Locate the buffer given its zone-2 offset.
         */
        buf_offset &= ~HAMMER_BUFMASK64;
        KKASSERT((buf_offset & HAMMER_ZONE_RAW_BUFFER) ==
                 HAMMER_ZONE_RAW_BUFFER);
        vol_no = HAMMER_VOL_DECODE(buf_offset);
        volume = hammer_get_volume(hmp, vol_no, errorp);
        if (volume == NULL)
                return(NULL);

        /*
         * NOTE: buf_offset and maxbuf_off are both full offset
         * specifications.
         */
        KKASSERT(buf_offset < volume->maxbuf_off);

        /*
         * Locate and lock the buffer structure, creating one if necessary.
         */
again:
        buffer = RB_LOOKUP(hammer_buf_rb_tree, &volume->rb_bufs_root,
                           buf_offset);
        if (buffer == NULL) {
                ++hammer_count_buffers;
                buffer = kmalloc(sizeof(*buffer), M_HAMMER, M_WAITOK|M_ZERO);
                buffer->zone2_offset = buf_offset;
                buffer->volume = volume;

                hammer_io_init(&buffer->io, hmp, iotype);
                buffer->io.offset = volume->ondisk->vol_buf_beg +
                                    (buf_offset & HAMMER_OFF_SHORT_MASK);
                TAILQ_INIT(&buffer->clist);
                hammer_ref(&buffer->io.lock);

                /*
                 * Insert the buffer into the RB tree and handle late
                 * collisions.
                 */
                if (RB_INSERT(hammer_buf_rb_tree, &volume->rb_bufs_root, buffer)) {
                        hammer_unref(&buffer->io.lock);
                        --hammer_count_buffers;
                        kfree(buffer, M_HAMMER);
                        goto again;
                }
                hammer_ref(&volume->io.lock);
        } else {
                hammer_ref(&buffer->io.lock);

                /*
                 * The buffer is no longer loose if it has a ref.
                 */
                if (buffer->io.mod_list == &hmp->lose_list) {
                        TAILQ_REMOVE(buffer->io.mod_list, &buffer->io,
                                     mod_entry);
                        buffer->io.mod_list = NULL;
                }
                if (buffer->io.lock.refs == 1)
                        hammer_io_reinit(&buffer->io, iotype);
                else
                        KKASSERT(buffer->io.type == iotype);
        }

        /*
         * Cache the blockmap translation
         */
        if ((zoneX_offset & HAMMER_ZONE_RAW_BUFFER) != HAMMER_ZONE_RAW_BUFFER)
                buffer->zoneX_offset = zoneX_offset;

        /*
         * Deal with on-disk info
         */
        if (buffer->ondisk == NULL || buffer->io.loading) {
                *errorp = hammer_load_buffer(buffer, isnew);
                if (*errorp) {
                        hammer_rel_buffer(buffer, 1);
                        buffer = NULL;
                }
        } else {
                *errorp = 0;
        }
        hammer_rel_volume(volume, 0);
        return(buffer);
}

static int
hammer_load_buffer(hammer_buffer_t buffer, int isnew)
{
        hammer_volume_t volume;
        int error;

        /*
         * Load the buffer's on-disk info
         */
        volume = buffer->volume;
        ++buffer->io.loading;
        hammer_lock_ex(&buffer->io.lock);

        if (buffer->ondisk == NULL) {
                if (isnew) {
                        error = hammer_io_new(volume->devvp, &buffer->io);
                } else {
                        error = hammer_io_read(volume->devvp, &buffer->io);
                }
                if (error == 0)
                        buffer->ondisk = (void *)buffer->io.bp->b_data;
        } else if (isnew) {
                error = hammer_io_new(volume->devvp, &buffer->io);
        } else {
                error = 0;
        }
        --buffer->io.loading;
        hammer_unlock(&buffer->io.lock);
        return (error);
}

/*
 * NOTE: Called from RB_SCAN, must return >= 0 for scan to continue.
 */
int
hammer_unload_buffer(hammer_buffer_t buffer, void *data __unused)
{
        hammer_ref(&buffer->io.lock);
        hammer_flush_buffer_nodes(buffer);
        KKASSERT(buffer->io.lock.refs == 1);
        hammer_rel_buffer(buffer, 2);
        return(0);
}

/*
 * Reference a buffer that is either already referenced or via a specially
 * handled pointer (aka cursor->buffer).
 */
int
hammer_ref_buffer(hammer_buffer_t buffer)
{
        int error;

        hammer_ref(&buffer->io.lock);

        /*
         * No longer loose
         */
        if (buffer->io.mod_list == &buffer->io.hmp->lose_list) {
                TAILQ_REMOVE(buffer->io.mod_list, &buffer->io, mod_entry);
                buffer->io.mod_list = NULL;
        }

        if (buffer->ondisk == NULL || buffer->io.loading) {
                error = hammer_load_buffer(buffer, 0);
                if (error) {
                        hammer_rel_buffer(buffer, 1);
                        /*
                         * NOTE: buffer pointer can become stale after
                         * the above release.
                         */
                }
        } else {
                error = 0;
        }
        return(error);
}

/*
 * Release a buffer.  We have to deal with several places where
 * another thread can ref the buffer.
 *
 * Only destroy the structure itself if the related buffer cache buffer
 * was disassociated from it.  This ties the management of the structure
 * to the buffer cache subsystem.  buffer->ondisk determines whether the
 * embedded io is referenced or not.
 */
void
hammer_rel_buffer(hammer_buffer_t buffer, int flush)
{
        hammer_volume_t volume;
        int freeme = 0;

        if (flush)
                buffer->io.flush = 1;
        crit_enter();
        if (buffer->io.lock.refs == 1) {
                ++buffer->io.loading;   /* force interlock check */
                hammer_lock_ex(&buffer->io.lock);
                if (buffer->io.lock.refs == 1) {
                        hammer_io_release(&buffer->io);
                        hammer_flush_buffer_nodes(buffer);
                        KKASSERT(TAILQ_EMPTY(&buffer->clist));

                        if (buffer->io.bp == NULL &&
                            buffer->io.lock.refs == 1) {
                                /*
                                 * Final cleanup
                                 */
                                volume = buffer->volume;
                                RB_REMOVE(hammer_buf_rb_tree,
                                          &volume->rb_bufs_root, buffer);
                                buffer->volume = NULL; /* sanity */
                                hammer_rel_volume(volume, 0);
                                freeme = 1;
                        }
                }
                --buffer->io.loading;
                hammer_unlock(&buffer->io.lock);
        }
        hammer_unref(&buffer->io.lock);
        crit_exit();
        if (freeme) {
                KKASSERT(buffer->io.mod_list == NULL);
                --hammer_count_buffers;
                kfree(buffer, M_HAMMER);
        }
}

/*
 * Remove the zoneX translation cache for a buffer given its zone-2 offset.
 */
void
hammer_uncache_buffer(hammer_mount_t hmp, hammer_off_t buf_offset)
{
        hammer_volume_t volume;
        hammer_buffer_t buffer;
        int vol_no;
        int error;

        buf_offset &= ~HAMMER_BUFMASK64;
        KKASSERT((buf_offset & HAMMER_ZONE_RAW_BUFFER) ==
                 HAMMER_ZONE_RAW_BUFFER);
        vol_no = HAMMER_VOL_DECODE(buf_offset);
        volume = hammer_get_volume(hmp, vol_no, &error);
        KKASSERT(volume != 0);
        KKASSERT(buf_offset < volume->maxbuf_off);

        buffer = RB_LOOKUP(hammer_buf_rb_tree, &volume->rb_bufs_root,
                           buf_offset);
        if (buffer)
                buffer->zoneX_offset = 0;
        hammer_rel_volume(volume, 0);
}

/*
 * Access the filesystem buffer containing the specified hammer offset.
 * buf_offset is a conglomeration of the volume number and vol_buf_beg
 * relative buffer offset.  It must also have bit 55 set to be valid.
 * (see hammer_off_t in hammer_disk.h).
 *
 * Any prior buffer in *bufferp will be released and replaced by the
 * requested buffer.
 */
void *
hammer_bread(hammer_mount_t hmp, hammer_off_t buf_offset, int *errorp,
             struct hammer_buffer **bufferp)
{
        hammer_buffer_t buffer;
        int32_t xoff = (int32_t)buf_offset & HAMMER_BUFMASK;

        buf_offset &= ~HAMMER_BUFMASK64;
        KKASSERT((buf_offset & HAMMER_OFF_ZONE_MASK) != 0);

        buffer = *bufferp;
        if (buffer == NULL || (buffer->zone2_offset != buf_offset &&
                               buffer->zoneX_offset != buf_offset)) {
                if (buffer)
                        hammer_rel_buffer(buffer, 0);
                buffer = hammer_get_buffer(hmp, buf_offset, 0, errorp);
                *bufferp = buffer;
        } else {
                *errorp = 0;
        }

        /*
         * Return a pointer to the buffer data.
         */
        if (buffer == NULL)
                return(NULL);
        else
                return((char *)buffer->ondisk + xoff);
}
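
/*
 * Illustrative sketch (guarded out, hypothetical example_ helper): the
 * *bufferp caching convention.  The same buffer pointer may be passed
 * across successive calls; the old buffer is only released when the
 * underlying buffer changes, and the caller releases the final buffer.
 */
#if 0
static void
example_bread_usage(hammer_mount_t hmp, hammer_off_t off1, hammer_off_t off2)
{
        hammer_buffer_t buffer = NULL;
        void *data;
        int error;

        data = hammer_bread(hmp, off1, &error, &buffer);
        /* ... use data if error == 0 ... */
        data = hammer_bread(hmp, off2, &error, &buffer);
        /* ... use data if error == 0 ... */
        if (buffer)
                hammer_rel_buffer(buffer, 0);
}
#endif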

/*
 * Access the filesystem buffer containing the specified hammer offset.
 * No disk read operation occurs.  The result buffer may contain garbage.
 *
 * Any prior buffer in *bufferp will be released and replaced by the
 * requested buffer.
 *
 * This function marks the buffer dirty but does not increment its
 * modify_refs count.
 */
void *
hammer_bnew(hammer_mount_t hmp, hammer_off_t buf_offset, int *errorp,
             struct hammer_buffer **bufferp)
{
        hammer_buffer_t buffer;
        int32_t xoff = (int32_t)buf_offset & HAMMER_BUFMASK;

        buf_offset &= ~HAMMER_BUFMASK64;

        buffer = *bufferp;
        if (buffer == NULL || (buffer->zone2_offset != buf_offset &&
                               buffer->zoneX_offset != buf_offset)) {
                if (buffer)
                        hammer_rel_buffer(buffer, 0);
                buffer = hammer_get_buffer(hmp, buf_offset, 1, errorp);
                *bufferp = buffer;
        } else {
                *errorp = 0;
        }

        /*
         * Return a pointer to the buffer data.
         */
        if (buffer == NULL)
                return(NULL);
        else
                return((char *)buffer->ondisk + xoff);
}

/************************************************************************
 *                              NODES                                   *
 ************************************************************************
 *
 * Manage B-Tree nodes.  B-Tree nodes represent the primary indexing
 * method used by the HAMMER filesystem.
 *
 * Unlike other HAMMER structures, a hammer_node can be PASSIVELY
 * associated with its buffer, and will only reference the buffer while
 * the node itself is referenced.
 *
 * A hammer_node can also be passively associated with other HAMMER
 * structures, such as inodes, while retaining 0 references.  These
 * associations can be cleared backwards using a pointer-to-pointer in
 * the hammer_node.
 *
 * This allows the HAMMER implementation to cache hammer_nodes long-term
 * and short-cut a great deal of the infrastructure's complexity.  In
 * most cases a cached node can be reacquired without having to dip into
 * either the buffer or cluster management code.
 *
 * The caller must pass a referenced cluster on call and will retain
 * ownership of the reference on return.  The node will acquire its own
 * additional references, if necessary.
 */
hammer_node_t
hammer_get_node(hammer_mount_t hmp, hammer_off_t node_offset, int *errorp)
{
        hammer_node_t node;

        KKASSERT((node_offset & HAMMER_OFF_ZONE_MASK) == HAMMER_ZONE_BTREE);

        /*
         * Locate the structure, allocating one if necessary.
         */
again:
        node = RB_LOOKUP(hammer_nod_rb_tree, &hmp->rb_nods_root, node_offset);
        if (node == NULL) {
                ++hammer_count_nodes;
                node = kmalloc(sizeof(*node), M_HAMMER, M_WAITOK|M_ZERO);
                node->node_offset = node_offset;
                node->hmp = hmp;
                if (RB_INSERT(hammer_nod_rb_tree, &hmp->rb_nods_root, node)) {
                        --hammer_count_nodes;
                        kfree(node, M_HAMMER);
                        goto again;
                }
        }
        hammer_ref(&node->lock);
        if (node->ondisk)
                *errorp = 0;
        else
                *errorp = hammer_load_node(node);
        if (*errorp) {
                hammer_rel_node(node);
                node = NULL;
        }
        return(node);
}
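
/*
 * Illustrative sketch (guarded out, hypothetical example_ helper):
 * acquiring and releasing a B-Tree node by its zone-encoded offset.
 */
#if 0
static void
example_node_usage(hammer_mount_t hmp, hammer_off_t node_offset)
{
        hammer_node_t node;
        int error;

        node = hammer_get_node(hmp, node_offset, &error);
        if (node == NULL)
                return;
        /* ... node->ondisk is valid while the ref is held ... */
        hammer_rel_node(node);
}
#endif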

/*
 * Reference an already-referenced node.
 */
void
hammer_ref_node(hammer_node_t node)
{
        KKASSERT(node->lock.refs > 0 && node->ondisk != NULL);
        hammer_ref(&node->lock);
}

/*
 * Load a node's on-disk data reference.
 */
static int
hammer_load_node(hammer_node_t node)
{
        hammer_buffer_t buffer;
        int error;

        error = 0;
        ++node->loading;
        hammer_lock_ex(&node->lock);
        if (node->ondisk == NULL) {
                /*
                 * This is a little confusing but the gist is that
                 * node->buffer determines whether the node is on
                 * the buffer's clist and node->ondisk determines
                 * whether the buffer is referenced.
                 *
                 * We could be racing a buffer release, in which case
                 * node->buffer may become NULL while we are blocked
                 * referencing the buffer.
                 */
                if ((buffer = node->buffer) != NULL) {
                        error = hammer_ref_buffer(buffer);
                        if (error == 0 && node->buffer == NULL) {
                                TAILQ_INSERT_TAIL(&buffer->clist,
                                                  node, entry);
                                node->buffer = buffer;
                        }
                } else {
                        buffer = hammer_get_buffer(node->hmp,
                                                   node->node_offset, 0,
                                                   &error);
                        if (buffer) {
                                KKASSERT(error == 0);
                                TAILQ_INSERT_TAIL(&buffer->clist,
                                                  node, entry);
                                node->buffer = buffer;
                        }
                }
                if (error == 0) {
                        node->ondisk = (void *)((char *)buffer->ondisk +
                               (node->node_offset & HAMMER_BUFMASK));
                }
        }
        --node->loading;
        hammer_unlock(&node->lock);
        return (error);
}

/*
 * Safely reference a node, interlock against flushes via the IO subsystem.
 */
hammer_node_t
hammer_ref_node_safe(struct hammer_mount *hmp, struct hammer_node **cache,
                     int *errorp)
{
        hammer_node_t node;

        node = *cache;
        if (node != NULL) {
                hammer_ref(&node->lock);
                if (node->ondisk)
                        *errorp = 0;
                else
                        *errorp = hammer_load_node(node);
                if (*errorp) {
                        hammer_rel_node(node);
                        node = NULL;
                }
        } else {
                *errorp = ENOENT;
        }
        return(node);
}

/*
 * Release a hammer_node.  On the last release the node dereferences
 * its underlying buffer and may or may not be destroyed.
 */
void
hammer_rel_node(hammer_node_t node)
{
        hammer_buffer_t buffer;

        /*
         * If this isn't the last ref just decrement the ref count and
         * return.
         */
        if (node->lock.refs > 1) {
                hammer_unref(&node->lock);
                return;
        }

        /*
         * If there is no ondisk info or no buffer the node failed to load,
         * remove the last reference and destroy the node.
         */
        if (node->ondisk == NULL) {
                hammer_unref(&node->lock);
                hammer_flush_node(node);
                /* node is stale now */
                return;
        }

        /*
         * Do final cleanups and then either destroy the node or leave it
         * passively cached.  The buffer reference is removed regardless.
         */
        buffer = node->buffer;
        node->ondisk = NULL;

        if ((node->flags & HAMMER_NODE_FLUSH) == 0) {
                hammer_unref(&node->lock);
                hammer_rel_buffer(buffer, 0);
                return;
        }

        /*
         * Destroy the node.
         */
        hammer_unref(&node->lock);
        hammer_flush_node(node);
        /* node is stale */
        hammer_rel_buffer(buffer, 0);
}

/*
 * Mark a B-Tree node as deleted and release its on-media storage
 * back to the blockmap.
 */
void
hammer_delete_node(hammer_transaction_t trans, hammer_node_t node)
{
        node->flags |= HAMMER_NODE_DELETED;
        hammer_blockmap_free(trans, node->node_offset, sizeof(*node->ondisk));
}

/*
 * Passively cache a referenced hammer_node in *cache.  The caller may
 * release the node on return.
 */
void
hammer_cache_node(hammer_node_t node, struct hammer_node **cache)
{
        hammer_node_t old;

        /*
         * If the node is being deleted, don't cache it!
         */
        if (node->flags & HAMMER_NODE_DELETED)
                return;

        /*
         * Cache the node.  If we previously cached a different node we
         * have to give HAMMER a chance to destroy it.
         */
again:
        if (node->cache1 != cache) {
                if (node->cache2 != cache) {
                        if ((old = *cache) != NULL) {
                                KKASSERT(node->lock.refs != 0);
                                hammer_uncache_node(cache);
                                goto again;
                        }
                        if (node->cache2)
                                *node->cache2 = NULL;
                        node->cache2 = node->cache1;
                        node->cache1 = cache;
                        *cache = node;
                } else {
                        struct hammer_node **tmp;
                        tmp = node->cache1;
                        node->cache1 = node->cache2;
                        node->cache2 = tmp;
                }
        }
}

void
hammer_uncache_node(struct hammer_node **cache)
{
        hammer_node_t node;

        if ((node = *cache) != NULL) {
                *cache = NULL;
                if (node->cache1 == cache) {
                        node->cache1 = node->cache2;
                        node->cache2 = NULL;
                } else if (node->cache2 == cache) {
                        node->cache2 = NULL;
                } else {
                        panic("hammer_uncache_node: missing cache linkage");
                }
                if (node->cache1 == NULL && node->cache2 == NULL)
                        hammer_flush_node(node);
        }
}
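
/*
 * Illustrative sketch (guarded out, hypothetical example_ helper): the
 * passive caching pattern.  The cache argument stands in for a field
 * embedded in another structure, e.g. an inode's node cache pointer.
 */
#if 0
static void
example_node_cache(hammer_mount_t hmp, hammer_node_t node,
                   struct hammer_node **cache)
{
        int error;

        hammer_cache_node(node, cache);         /* passive association */
        hammer_rel_node(node);                  /* may drop to 0 refs */

        node = hammer_ref_node_safe(hmp, cache, &error);
        if (node) {
                /* reacquired without a new B-Tree lookup */
                hammer_rel_node(node);
        }
}
#endif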

/*
 * Remove a node's cache references and destroy the node if it has no
 * other references or backing store.
 */
void
hammer_flush_node(hammer_node_t node)
{
        hammer_buffer_t buffer;

        if (node->cache1)
                *node->cache1 = NULL;
        if (node->cache2)
                *node->cache2 = NULL;
        if (node->lock.refs == 0 && node->ondisk == NULL) {
                RB_REMOVE(hammer_nod_rb_tree, &node->hmp->rb_nods_root, node);
                if ((buffer = node->buffer) != NULL) {
                        node->buffer = NULL;
                        TAILQ_REMOVE(&buffer->clist, node, entry);
                        /* buffer is unreferenced because ondisk is NULL */
                }
                --hammer_count_nodes;
                kfree(node, M_HAMMER);
        }
}

/*
 * Flush passively cached B-Tree nodes associated with this buffer.
 * This is only called when the buffer is about to be destroyed, so
 * none of the nodes should have any references.  The buffer is locked.
 *
 * We may be interlocked with the buffer.
 */
void
hammer_flush_buffer_nodes(hammer_buffer_t buffer)
{
        hammer_node_t node;

        while ((node = TAILQ_FIRST(&buffer->clist)) != NULL) {
                KKASSERT(node->ondisk == NULL);

                if (node->lock.refs == 0) {
                        hammer_ref(&node->lock);
                        node->flags |= HAMMER_NODE_FLUSH;
                        hammer_rel_node(node);
                } else {
                        KKASSERT(node->loading != 0);
                        KKASSERT(node->buffer != NULL);
                        buffer = node->buffer;
                        node->buffer = NULL;
                        TAILQ_REMOVE(&buffer->clist, node, entry);
                        /* buffer is unreferenced because ondisk is NULL */
                }
        }
}

/************************************************************************
 *                              ALLOCATORS                              *
 ************************************************************************/

/*
 * Allocate a B-Tree node.
 */
hammer_node_t
hammer_alloc_btree(hammer_transaction_t trans, int *errorp)
{
        hammer_buffer_t buffer = NULL;
        hammer_node_t node = NULL;
        hammer_off_t node_offset;

        node_offset = hammer_blockmap_alloc(trans, HAMMER_ZONE_BTREE_INDEX,
                                            sizeof(struct hammer_node_ondisk),
                                            errorp);
        if (*errorp == 0) {
                node = hammer_get_node(trans->hmp, node_offset, errorp);
                hammer_modify_node_noundo(trans, node);
                bzero(node->ondisk, sizeof(*node->ondisk));
                hammer_modify_node_done(node);
        }
        if (buffer)
                hammer_rel_buffer(buffer, 0);
        return(node);
}

/*
 * The returned buffers are already appropriately marked as being modified.
 * If the caller marks them again unnecessary undo records may be generated.
 *
 * In-band data is indicated by data_bufferp == NULL.  Pass a data_len of 0
 * for zero-fill (caller modifies data_len afterwards).
 *
 * The caller is responsible for calling hammer_modify_*() prior to making
 * any additional modifications to either the returned record buffer or the
 * returned data buffer.
 */
void *
hammer_alloc_record(hammer_transaction_t trans,
                    hammer_off_t *rec_offp, u_int16_t rec_type,
                    struct hammer_buffer **rec_bufferp,
                    int32_t data_len, void **datap,
                    struct hammer_buffer **data_bufferp, int *errorp)
{
        hammer_record_ondisk_t rec;
        hammer_off_t rec_offset;
        hammer_off_t data_offset;
        int32_t reclen;

        if (datap)
                *datap = NULL;

        /*
         * Allocate the record
         */
        rec_offset = hammer_blockmap_alloc(trans, HAMMER_ZONE_RECORD_INDEX,
                                           HAMMER_RECORD_SIZE, errorp);
        if (*errorp)
                return(NULL);

        /*
         * Allocate data
         */
        if (data_len) {
                if (data_bufferp == NULL) {
                        switch(rec_type) {
                        case HAMMER_RECTYPE_DATA:
                                reclen = offsetof(struct hammer_data_record,
                                                  data[0]);
                                break;
                        case HAMMER_RECTYPE_DIRENTRY:
                                reclen = offsetof(struct hammer_entry_record,
                                                  name[0]);
                                break;
                        default:
                                panic("hammer_alloc_record: illegal "
                                      "in-band data");
                                /* NOT REACHED */
                                reclen = 0;
                                break;
                        }
                        KKASSERT(reclen + data_len <= HAMMER_RECORD_SIZE);
                        data_offset = rec_offset + reclen;
                } else if (data_len < HAMMER_BUFSIZE) {
                        data_offset = hammer_blockmap_alloc(trans,
                                                HAMMER_ZONE_SMALL_DATA_INDEX,
                                                data_len, errorp);
                } else {
                        data_offset = hammer_blockmap_alloc(trans,
                                                HAMMER_ZONE_LARGE_DATA_INDEX,
                                                data_len, errorp);
                }
        } else {
                data_offset = 0;
        }
        if (*errorp) {
                hammer_blockmap_free(trans, rec_offset, HAMMER_RECORD_SIZE);
                return(NULL);
        }

        /*
         * Basic return values.
         *
         * Note that because this is a 'new' buffer, there is no need to
         * generate UNDO records for it.
         */
        *rec_offp = rec_offset;
        rec = hammer_bread(trans->hmp, rec_offset, errorp, rec_bufferp);
        hammer_modify_buffer(trans, *rec_bufferp, NULL, 0);
        bzero(rec, sizeof(*rec));
        KKASSERT(*errorp == 0);
        rec->base.data_off = data_offset;
        rec->base.data_len = data_len;
        hammer_modify_buffer_done(*rec_bufferp);

        if (data_bufferp) {
                if (data_len) {
                        *datap = hammer_bread(trans->hmp, data_offset, errorp,
                                              data_bufferp);
                        KKASSERT(*errorp == 0);
                } else {
                        *datap = NULL;
                }
        } else if (data_len) {
                KKASSERT(data_offset + data_len - rec_offset <=
                         HAMMER_RECORD_SIZE);
                if (datap) {
                        *datap = (void *)((char *)rec +
                                          (int32_t)(data_offset - rec_offset));
                }
        } else {
                KKASSERT(datap == NULL);
        }
        KKASSERT(*errorp == 0);
        return(rec);
}
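
/*
 * Illustrative sketch (guarded out, hypothetical example_ helper):
 * allocating a record with out-of-band data.  The caller owns the
 * returned buffer references and must release them when finished.
 */
#if 0
static void
example_alloc_record(hammer_transaction_t trans, int32_t data_len)
{
        struct hammer_buffer *rec_buffer = NULL;
        struct hammer_buffer *data_buffer = NULL;
        hammer_record_ondisk_t rec;
        hammer_off_t rec_offset;
        void *data;
        int error;

        rec = hammer_alloc_record(trans, &rec_offset, HAMMER_RECTYPE_DATA,
                                  &rec_buffer, data_len, &data,
                                  &data_buffer, &error);
        if (rec) {
                /* rec and data are already marked modified */
                if (rec_buffer)
                        hammer_rel_buffer(rec_buffer, 0);
                if (data_buffer)
                        hammer_rel_buffer(data_buffer, 0);
        }
}
#endif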

/*
 * Allocate data.  If the address of a data buffer is supplied then
 * any prior non-NULL *data_bufferp will be released and *data_bufferp
 * will be set to the related buffer.  The caller must release it when
 * finally done.  The initial *data_bufferp should be set to NULL by
 * the caller.
 *
 * The caller is responsible for making hammer_modify*() calls on the
 * *data_bufferp.
 */
void *
hammer_alloc_data(hammer_transaction_t trans, int32_t data_len,
                  hammer_off_t *data_offsetp,
                  struct hammer_buffer **data_bufferp, int *errorp)
{
        void *data;

        /*
         * Allocate data
         */
        if (data_len) {
                if (data_len < HAMMER_BUFSIZE) {
                        *data_offsetp = hammer_blockmap_alloc(trans,
                                                HAMMER_ZONE_SMALL_DATA_INDEX,
                                                data_len, errorp);
                } else {
                        *data_offsetp = hammer_blockmap_alloc(trans,
                                                HAMMER_ZONE_LARGE_DATA_INDEX,
                                                data_len, errorp);
                }
        } else {
                *data_offsetp = 0;
        }
        if (*errorp == 0 && data_bufferp) {
                if (data_len) {
                        data = hammer_bread(trans->hmp, *data_offsetp, errorp,
                                            data_bufferp);
                        KKASSERT(*errorp == 0);
                } else {
                        data = NULL;
                }
        } else {
                data = NULL;
        }
        KKASSERT(*errorp == 0);
        return(data);
}
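
/*
 * Illustrative sketch (guarded out, hypothetical example_ helper): the
 * small/large zone split at HAMMER_BUFSIZE happens inside
 * hammer_alloc_data(); the caller only supplies a length.
 */
#if 0
static void
example_alloc_data(hammer_transaction_t trans, int32_t data_len)
{
        struct hammer_buffer *data_buffer = NULL;
        hammer_off_t data_offset;
        void *data;
        int error;

        data = hammer_alloc_data(trans, data_len, &data_offset,
                                 &data_buffer, &error);
        if (data) {
                /* caller must make hammer_modify_*() calls before writing */
                if (data_buffer)
                        hammer_rel_buffer(data_buffer, 0);
        }
}
#endif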

/*
 * Sync dirty buffers to the media and clean-up any loose ends.
 */
static int hammer_sync_scan1(struct mount *mp, struct vnode *vp, void *data);
static int hammer_sync_scan2(struct mount *mp, struct vnode *vp, void *data);

int
hammer_sync_hmp(hammer_mount_t hmp, int waitfor)
{
        struct hammer_sync_info info;

        info.error = 0;
        info.waitfor = waitfor;

        vmntvnodescan(hmp->mp, VMSC_GETVP|VMSC_NOWAIT,
                      hammer_sync_scan1, hammer_sync_scan2, &info);
        if (waitfor == MNT_WAIT)
                hammer_flusher_sync(hmp);
        else
                hammer_flusher_async(hmp);

        return(info.error);
}

static int
hammer_sync_scan1(struct mount *mp, struct vnode *vp, void *data)
{
        struct hammer_inode *ip;

        ip = VTOI(vp);
        if (vp->v_type == VNON || ip == NULL ||
            ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
             RB_EMPTY(&vp->v_rbdirty_tree))) {
                return(-1);
        }
        return(0);
}

static int
hammer_sync_scan2(struct mount *mp, struct vnode *vp, void *data)
{
        struct hammer_sync_info *info = data;
        struct hammer_inode *ip;
        int error;

        ip = VTOI(vp);
        if (ip == NULL || vp->v_type == VNON || vp->v_type == VBAD ||
            ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
             RB_EMPTY(&vp->v_rbdirty_tree))) {
                return(0);
        }
        error = VOP_FSYNC(vp, info->waitfor);
        if (error)
                info->error = error;
        return(0);
}
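
/*
 * Illustrative sketch (guarded out, hypothetical caller): a VFS sync
 * entry point would invoke hammer_sync_hmp() roughly like this,
 * assuming the hammer_mount is hung off mp->mnt_data.
 */
#if 0
static int
example_vfs_sync(struct mount *mp, int waitfor)
{
        hammer_mount_t hmp = (void *)mp->mnt_data;

        return(hammer_sync_hmp(hmp, waitfor));
}
#endif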
1413