/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_ondisk.c,v 1.45 2008/05/15 03:36:40 dillon Exp $
 */
/*
 * Manage HAMMER's on-disk structures.  These routines are primarily
 * responsible for interfacing with the kernel's I/O subsystem and for
 * managing in-memory structures.
 */

#include "hammer.h"
#include <sys/fcntl.h>
#include <sys/nlookup.h>
#include <sys/buf.h>
#include <sys/buf2.h>

static void hammer_free_volume(hammer_volume_t volume);
static int hammer_load_volume(hammer_volume_t volume);
static int hammer_load_buffer(hammer_buffer_t buffer, int isnew);
static int hammer_load_node(hammer_node_t node, int isnew);

/*
 * Red-Black tree support for various structures
 */
static int
hammer_ino_rb_compare(hammer_inode_t ip1, hammer_inode_t ip2)
{
	if (ip1->obj_id < ip2->obj_id)
		return(-1);
	if (ip1->obj_id > ip2->obj_id)
		return(1);
	if (ip1->obj_asof < ip2->obj_asof)
		return(-1);
	if (ip1->obj_asof > ip2->obj_asof)
		return(1);
	return(0);
}

static int
hammer_inode_info_cmp(hammer_inode_info_t info, hammer_inode_t ip)
{
	if (info->obj_id < ip->obj_id)
		return(-1);
	if (info->obj_id > ip->obj_id)
		return(1);
	if (info->obj_asof < ip->obj_asof)
		return(-1);
	if (info->obj_asof > ip->obj_asof)
		return(1);
	return(0);
}

static int
hammer_vol_rb_compare(hammer_volume_t vol1, hammer_volume_t vol2)
{
	if (vol1->vol_no < vol2->vol_no)
		return(-1);
	if (vol1->vol_no > vol2->vol_no)
		return(1);
	return(0);
}

static int
hammer_buf_rb_compare(hammer_buffer_t buf1, hammer_buffer_t buf2)
{
	if (buf1->zone2_offset < buf2->zone2_offset)
		return(-1);
	if (buf1->zone2_offset > buf2->zone2_offset)
		return(1);
	return(0);
}

static int
hammer_nod_rb_compare(hammer_node_t node1, hammer_node_t node2)
{
	if (node1->node_offset < node2->node_offset)
		return(-1);
	if (node1->node_offset > node2->node_offset)
		return(1);
	return(0);
}

/*
 * Note: The lookup function for hammer_ino_rb_tree winds up being named
 * hammer_ino_rb_tree_RB_LOOKUP_INFO(root, info).  The other lookup
 * functions are normal, e.g. hammer_buf_rb_tree_RB_LOOKUP(root, zone2_offset).
 */
RB_GENERATE(hammer_ino_rb_tree, hammer_inode, rb_node, hammer_ino_rb_compare);
RB_GENERATE_XLOOKUP(hammer_ino_rb_tree, INFO, hammer_inode, rb_node,
		hammer_inode_info_cmp, hammer_inode_info_t);
RB_GENERATE2(hammer_vol_rb_tree, hammer_volume, rb_node,
	     hammer_vol_rb_compare, int32_t, vol_no);
RB_GENERATE2(hammer_buf_rb_tree, hammer_buffer, rb_node,
	     hammer_buf_rb_compare, hammer_off_t, zone2_offset);
RB_GENERATE2(hammer_nod_rb_tree, hammer_node, rb_node,
	     hammer_nod_rb_compare, hammer_off_t, node_offset);
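
/*
 * Usage sketch (#if 0, not compiled): the macros above expand into typed
 * lookup functions.  A hypothetical caller might use them as follows; the
 * tree root field names (rb_vols_root, rb_inos_root) are those used by
 * struct hammer_mount in hammer.h.
 */
#if 0
static hammer_volume_t
example_vol_lookup(hammer_mount_t hmp, int32_t vol_no)
{
	/* same as hammer_vol_rb_tree_RB_LOOKUP(&hmp->rb_vols_root, vol_no) */
	return(RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, vol_no));
}

static hammer_inode_t
example_ino_lookup(hammer_mount_t hmp, hammer_inode_info_t info)
{
	/* the INFO variant keys on the (obj_id, obj_asof) pair in info */
	return(hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, info));
}
#endif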

/************************************************************************
 *                              VOLUMES                                 *
 ************************************************************************
 *
 * Load a HAMMER volume by name.  Returns 0 on success or a positive error
 * code on failure.  Volumes must be loaded at mount time; get_volume()
 * will not load a new volume.
 *
 * Calls made to hammer_install_volume() are single-threaded (mount time).
 */
int
hammer_install_volume(struct hammer_mount *hmp, const char *volname)
{
	struct mount *mp;
	hammer_volume_t volume;
	struct hammer_volume_ondisk *ondisk;
	struct nlookupdata nd;
	struct buf *bp = NULL;
	int error;
	int ronly;
	int setmp = 0;

	mp = hmp->mp;
	ronly = ((mp->mnt_flag & MNT_RDONLY) ? 1 : 0);

	/*
	 * Allocate a volume structure
	 */
	++hammer_count_volumes;
	volume = kmalloc(sizeof(*volume), M_HAMMER, M_WAITOK|M_ZERO);
	volume->vol_name = kstrdup(volname, M_HAMMER);
	hammer_io_init(&volume->io, hmp, HAMMER_STRUCTURE_VOLUME);
	volume->io.offset = 0LL;

	/*
	 * Get the device vnode
	 */
	error = nlookup_init(&nd, volume->vol_name, UIO_SYSSPACE, NLC_FOLLOW);
	if (error == 0)
		error = nlookup(&nd);
	if (error == 0)
		error = cache_vref(&nd.nl_nch, nd.nl_cred, &volume->devvp);
	nlookup_done(&nd);
	if (error == 0) {
		if (vn_isdisk(volume->devvp, &error)) {
			error = vfs_mountedon(volume->devvp);
		}
	}
	if (error == 0 &&
	    count_udev(volume->devvp->v_umajor, volume->devvp->v_uminor) > 0) {
		error = EBUSY;
	}
	if (error == 0) {
		vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
		error = vinvalbuf(volume->devvp, V_SAVE, 0, 0);
		if (error == 0) {
			error = VOP_OPEN(volume->devvp,
					 (ronly ? FREAD : FREAD|FWRITE),
					 FSCRED, NULL);
		}
		vn_unlock(volume->devvp);
	}
	if (error) {
		hammer_free_volume(volume);
		return(error);
	}
	volume->devvp->v_rdev->si_mountpoint = mp;
	setmp = 1;

	/*
	 * Extract the volume number from the volume header and do various
	 * sanity checks.
	 */
	error = bread(volume->devvp, 0LL, HAMMER_BUFSIZE, &bp);
	if (error)
		goto late_failure;
	ondisk = (void *)bp->b_data;
	if (ondisk->vol_signature != HAMMER_FSBUF_VOLUME) {
		kprintf("hammer_mount: volume %s has an invalid header\n",
			volume->vol_name);
		error = EFTYPE;
		goto late_failure;
	}
	volume->vol_no = ondisk->vol_no;
	volume->buffer_base = ondisk->vol_buf_beg;
	volume->vol_flags = ondisk->vol_flags;
	volume->nblocks = ondisk->vol_nblocks;
	volume->maxbuf_off = HAMMER_ENCODE_RAW_BUFFER(volume->vol_no,
				    ondisk->vol_buf_end - ondisk->vol_buf_beg);
	RB_INIT(&volume->rb_bufs_root);

	if (RB_EMPTY(&hmp->rb_vols_root)) {
		hmp->fsid = ondisk->vol_fsid;
	} else if (bcmp(&hmp->fsid, &ondisk->vol_fsid, sizeof(uuid_t))) {
		kprintf("hammer_mount: volume %s's fsid does not match "
			"other volumes\n", volume->vol_name);
		error = EFTYPE;
		goto late_failure;
	}

	/*
	 * Insert the volume structure into the red-black tree.
	 */
	if (RB_INSERT(hammer_vol_rb_tree, &hmp->rb_vols_root, volume)) {
		kprintf("hammer_mount: volume %s has a duplicate vol_no %d\n",
			volume->vol_name, volume->vol_no);
		error = EEXIST;
	}

	/*
	 * Set the root volume.  HAMMER special-cases the root volume
	 * structure.  We do not hold a ref because this would prevent
	 * related I/O from being flushed.
	 */
	if (error == 0 && ondisk->vol_rootvol == ondisk->vol_no) {
		hmp->rootvol = volume;
		if (bp) {
			brelse(bp);
			bp = NULL;
		}
		hmp->fsid_udev = dev2udev(vn_todev(volume->devvp));
		hmp->mp->mnt_stat.f_blocks += ondisk->vol0_stat_bigblocks *
			(HAMMER_LARGEBLOCK_SIZE / HAMMER_BUFSIZE);
	}
late_failure:
	if (bp)
		brelse(bp);
	if (error) {
		/*vinvalbuf(volume->devvp, V_SAVE, 0, 0);*/
		if (setmp)
			volume->devvp->v_rdev->si_mountpoint = NULL;
		VOP_CLOSE(volume->devvp, ronly ? FREAD : FREAD|FWRITE);
		hammer_free_volume(volume);
	}
	return (error);
}

/*
 * Unload and free a HAMMER volume.  Called via RB_SCAN; the callback must
 * return >= 0 for the scan to continue, so a failure would return -1 to
 * abort the scan.
 */
int
hammer_unload_volume(hammer_volume_t volume, void *data __unused)
{
	struct hammer_mount *hmp = volume->io.hmp;
	int ronly = ((hmp->mp->mnt_flag & MNT_RDONLY) ? 1 : 0);

	/*
	 * Clean up the root volume pointer, which is held unlocked in hmp.
	 */
	if (hmp->rootvol == volume)
		hmp->rootvol = NULL;

	/*
	 * Unload buffers.
	 */
	RB_SCAN(hammer_buf_rb_tree, &volume->rb_bufs_root, NULL,
			hammer_unload_buffer, NULL);

	/*
	 * Release our buffer and flush anything left in the buffer cache.
	 */
	volume->io.waitdep = 1;
	hammer_io_release(&volume->io, 1);

	/*
	 * There should be no references on the volume and no in-memory
	 * buffers remaining.
	 */
	KKASSERT(volume->io.lock.refs == 0);
	KKASSERT(RB_EMPTY(&volume->rb_bufs_root));

	volume->ondisk = NULL;
	if (volume->devvp) {
		if (volume->devvp->v_rdev &&
		    volume->devvp->v_rdev->si_mountpoint == hmp->mp
		) {
			volume->devvp->v_rdev->si_mountpoint = NULL;
		}
		if (ronly) {
			vinvalbuf(volume->devvp, 0, 0, 0);
			VOP_CLOSE(volume->devvp, FREAD);
		} else {
			vinvalbuf(volume->devvp, V_SAVE, 0, 0);
			VOP_CLOSE(volume->devvp, FREAD|FWRITE);
		}
	}

	/*
	 * Destroy the structure
	 */
	RB_REMOVE(hammer_vol_rb_tree, &hmp->rb_vols_root, volume);
	hammer_free_volume(volume);
	return(0);
}

static
void
hammer_free_volume(hammer_volume_t volume)
{
	if (volume->vol_name) {
		kfree(volume->vol_name, M_HAMMER);
		volume->vol_name = NULL;
	}
	if (volume->devvp) {
		vrele(volume->devvp);
		volume->devvp = NULL;
	}
	--hammer_count_volumes;
	kfree(volume, M_HAMMER);
}

/*
 * Get a HAMMER volume.  The volume must already exist.
 */
hammer_volume_t
hammer_get_volume(struct hammer_mount *hmp, int32_t vol_no, int *errorp)
{
	struct hammer_volume *volume;

	/*
	 * Locate the volume structure
	 */
	volume = RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, vol_no);
	if (volume == NULL) {
		*errorp = ENOENT;
		return(NULL);
	}
	hammer_ref(&volume->io.lock);

	/*
	 * Deal with on-disk info
	 */
	if (volume->ondisk == NULL || volume->io.loading) {
		*errorp = hammer_load_volume(volume);
		if (*errorp) {
			hammer_rel_volume(volume, 1);
			volume = NULL;
		}
	} else {
		*errorp = 0;
	}
	return(volume);
}

int
hammer_ref_volume(hammer_volume_t volume)
{
	int error;

	hammer_ref(&volume->io.lock);

	/*
	 * Deal with on-disk info
	 */
	if (volume->ondisk == NULL || volume->io.loading) {
		error = hammer_load_volume(volume);
		if (error)
			hammer_rel_volume(volume, 1);
	} else {
		error = 0;
	}
	return (error);
}

hammer_volume_t
hammer_get_root_volume(struct hammer_mount *hmp, int *errorp)
{
	hammer_volume_t volume;

	volume = hmp->rootvol;
	KKASSERT(volume != NULL);
	hammer_ref(&volume->io.lock);

	/*
	 * Deal with on-disk info
	 */
	if (volume->ondisk == NULL || volume->io.loading) {
		*errorp = hammer_load_volume(volume);
		if (*errorp) {
			hammer_rel_volume(volume, 1);
			volume = NULL;
		}
	} else {
		*errorp = 0;
	}
	return (volume);
}

/*
 * Load a volume's on-disk information.  The volume must be referenced and
 * not locked.  We temporarily acquire an exclusive lock to interlock
 * against releases or multiple gets.
 */
static int
hammer_load_volume(hammer_volume_t volume)
{
	int error;

	++volume->io.loading;
	hammer_lock_ex(&volume->io.lock);

	if (volume->ondisk == NULL) {
		error = hammer_io_read(volume->devvp, &volume->io);
		if (error == 0)
			volume->ondisk = (void *)volume->io.bp->b_data;
	} else {
		error = 0;
	}
	--volume->io.loading;
	hammer_unlock(&volume->io.lock);
	return(error);
}

/*
 * Release a volume.  Call hammer_io_release on the last reference.  We have
 * to acquire an exclusive lock to interlock against volume->ondisk tests
 * in hammer_load_volume(), and hammer_io_release() also expects an exclusive
 * lock to be held.
 *
 * Volumes are not unloaded from memory during normal operation.
 */
void
hammer_rel_volume(hammer_volume_t volume, int flush)
{
	crit_enter();
	if (volume->io.lock.refs == 1) {
		++volume->io.loading;
		hammer_lock_ex(&volume->io.lock);
		if (volume->io.lock.refs == 1) {
			volume->ondisk = NULL;
			hammer_io_release(&volume->io, flush);
		}
		--volume->io.loading;
		hammer_unlock(&volume->io.lock);
	}
	hammer_unref(&volume->io.lock);
	crit_exit();
}
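
/*
 * Usage sketch (#if 0, not compiled): the typical volume reference
 * pattern.  hammer_get_volume() returns a referenced volume with its
 * ondisk header loaded; the caller balances it with hammer_rel_volume().
 * The vol_no of 0 is illustrative only.
 */
#if 0
static void
example_volume_access(hammer_mount_t hmp)
{
	hammer_volume_t volume;
	int error;

	volume = hammer_get_volume(hmp, 0, &error);
	if (volume == NULL)
		return;			/* error is ENOENT or an I/O error */
	/* ... inspect volume->ondisk ... */
	hammer_rel_volume(volume, 0);	/* flush == 0: leave I/O cached */
}
#endif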

/************************************************************************
 *                              BUFFERS                                 *
 ************************************************************************
 *
 * Manage buffers.  Currently all blockmap-backed zones are translated
 * to zone-2 buffer offsets.
 */
hammer_buffer_t
hammer_get_buffer(hammer_mount_t hmp, hammer_off_t buf_offset,
		  int isnew, int *errorp)
{
	hammer_buffer_t buffer;
	hammer_volume_t volume;
	hammer_off_t	zoneX_offset;
	hammer_io_type_t iotype;
	int vol_no;
	int zone;

	zoneX_offset = buf_offset;
	zone = HAMMER_ZONE_DECODE(buf_offset);

	/*
	 * What is the buffer class?
	 */
	switch(zone) {
	case HAMMER_ZONE_LARGE_DATA_INDEX:
	case HAMMER_ZONE_SMALL_DATA_INDEX:
		iotype = HAMMER_STRUCTURE_DATA_BUFFER;
		break;
	case HAMMER_ZONE_UNDO_INDEX:
		iotype = HAMMER_STRUCTURE_UNDO_BUFFER;
		break;
	default:
		iotype = HAMMER_STRUCTURE_META_BUFFER;
		break;
	}

	/*
	 * Handle blockmap offset translations
	 */
	if (zone >= HAMMER_ZONE_BTREE_INDEX) {
		buf_offset = hammer_blockmap_lookup(hmp, buf_offset, errorp);
		KKASSERT(*errorp == 0);
	} else if (zone == HAMMER_ZONE_UNDO_INDEX) {
		buf_offset = hammer_undo_lookup(hmp, buf_offset, errorp);
		KKASSERT(*errorp == 0);
	}

	/*
	 * Locate the buffer given its zone-2 offset.
	 */
	buf_offset &= ~HAMMER_BUFMASK64;
	KKASSERT((buf_offset & HAMMER_ZONE_RAW_BUFFER) ==
		 HAMMER_ZONE_RAW_BUFFER);
	vol_no = HAMMER_VOL_DECODE(buf_offset);
	volume = hammer_get_volume(hmp, vol_no, errorp);
	if (volume == NULL)
		return(NULL);

	/*
	 * NOTE: buf_offset and maxbuf_off are both full offset
	 * specifications.
	 */
	KKASSERT(buf_offset < volume->maxbuf_off);

	/*
	 * Locate and lock the buffer structure, creating one if necessary.
	 */
again:
	buffer = RB_LOOKUP(hammer_buf_rb_tree, &volume->rb_bufs_root,
			   buf_offset);
	if (buffer == NULL) {
		++hammer_count_buffers;
		buffer = kmalloc(sizeof(*buffer), M_HAMMER, M_WAITOK|M_ZERO);
		buffer->zone2_offset = buf_offset;
		buffer->volume = volume;

		hammer_io_init(&buffer->io, hmp, iotype);
		buffer->io.offset = volume->ondisk->vol_buf_beg +
				    (buf_offset & HAMMER_OFF_SHORT_MASK);
		TAILQ_INIT(&buffer->clist);
		hammer_ref(&buffer->io.lock);

		/*
		 * Insert the buffer into the RB tree and handle late
		 * collisions.
		 */
		if (RB_INSERT(hammer_buf_rb_tree, &volume->rb_bufs_root, buffer)) {
			hammer_unref(&buffer->io.lock);
			--hammer_count_buffers;
			kfree(buffer, M_HAMMER);
			goto again;
		}
		hammer_ref(&volume->io.lock);
	} else {
		hammer_ref(&buffer->io.lock);

		/*
		 * The buffer is no longer loose if it has a ref.
		 */
		if (buffer->io.mod_list == &hmp->lose_list) {
			TAILQ_REMOVE(buffer->io.mod_list, &buffer->io,
				     mod_entry);
			buffer->io.mod_list = NULL;
		}
		if (buffer->io.lock.refs == 1)
			hammer_io_reinit(&buffer->io, iotype);
		else
			KKASSERT(buffer->io.type == iotype);
	}

	/*
	 * Cache the blockmap translation
	 */
	if ((zoneX_offset & HAMMER_ZONE_RAW_BUFFER) != HAMMER_ZONE_RAW_BUFFER)
		buffer->zoneX_offset = zoneX_offset;

	/*
	 * Deal with on-disk info
	 */
	if (buffer->ondisk == NULL || buffer->io.loading) {
		*errorp = hammer_load_buffer(buffer, isnew);
		if (*errorp) {
			hammer_rel_buffer(buffer, 1);
			buffer = NULL;
		}
	} else {
		*errorp = 0;
	}
	hammer_rel_volume(volume, 0);
	return(buffer);
}
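
/*
 * Usage sketch (#if 0, not compiled): hammer_get_buffer() accepts either
 * a blockmap-backed zone-X offset or a raw zone-2 offset, translates it
 * as shown above, and returns a referenced buffer with its ondisk data
 * loaded.  The caller must balance the reference with hammer_rel_buffer().
 */
#if 0
static void
example_buffer_access(hammer_mount_t hmp, hammer_off_t buf_offset)
{
	hammer_buffer_t buffer;
	int error;

	buffer = hammer_get_buffer(hmp, buf_offset, 0, &error);
	if (buffer == NULL)
		return;
	/* ... access buffer->ondisk ... */
	hammer_rel_buffer(buffer, 0);
}
#endif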

static int
hammer_load_buffer(hammer_buffer_t buffer, int isnew)
{
	hammer_volume_t volume;
	int error;

	/*
	 * Load the buffer's on-disk info
	 */
	volume = buffer->volume;
	++buffer->io.loading;
	hammer_lock_ex(&buffer->io.lock);

	if (buffer->ondisk == NULL) {
		if (isnew) {
			error = hammer_io_new(volume->devvp, &buffer->io);
		} else {
			error = hammer_io_read(volume->devvp, &buffer->io);
		}
		if (error == 0)
			buffer->ondisk = (void *)buffer->io.bp->b_data;
	} else if (isnew) {
		error = hammer_io_new(volume->devvp, &buffer->io);
	} else {
		error = 0;
	}
	--buffer->io.loading;
	hammer_unlock(&buffer->io.lock);
	return (error);
}

/*
 * NOTE: Called from RB_SCAN, must return >= 0 for scan to continue.
 */
int
hammer_unload_buffer(hammer_buffer_t buffer, void *data __unused)
{
	hammer_ref(&buffer->io.lock);
	hammer_flush_buffer_nodes(buffer);
	KKASSERT(buffer->io.lock.refs == 1);
	hammer_rel_buffer(buffer, 2);
	return(0);
}

/*
 * Reference a buffer that is either already referenced or reached via a
 * specially handled pointer (aka cursor->buffer).
 */
int
hammer_ref_buffer(hammer_buffer_t buffer)
{
	int error;

	hammer_ref(&buffer->io.lock);

	/*
	 * No longer loose
	 */
	if (buffer->io.mod_list == &buffer->io.hmp->lose_list) {
		TAILQ_REMOVE(buffer->io.mod_list, &buffer->io, mod_entry);
		buffer->io.mod_list = NULL;
	}

	if (buffer->ondisk == NULL || buffer->io.loading) {
		error = hammer_load_buffer(buffer, 0);
		if (error) {
			hammer_rel_buffer(buffer, 1);
			/*
			 * NOTE: buffer pointer can become stale after
			 * the above release.
			 */
		}
	} else {
		error = 0;
	}
	return(error);
}

/*
 * Release a buffer.  We have to deal with several places where
 * another thread can ref the buffer.
 *
 * Only destroy the structure itself if the related buffer cache buffer
 * was disassociated from it.  This ties the management of the structure
 * to the buffer cache subsystem.  buffer->ondisk determines whether the
 * embedded io is referenced or not.
 */
void
hammer_rel_buffer(hammer_buffer_t buffer, int flush)
{
	hammer_volume_t volume;
	int freeme = 0;

	crit_enter();
	if (buffer->io.lock.refs == 1) {
		++buffer->io.loading;	/* force interlock check */
		hammer_lock_ex(&buffer->io.lock);
		if (buffer->io.lock.refs == 1) {
			hammer_io_release(&buffer->io, flush);
			hammer_flush_buffer_nodes(buffer);
			KKASSERT(TAILQ_EMPTY(&buffer->clist));

			if (buffer->io.bp == NULL &&
			    buffer->io.lock.refs == 1) {
				/*
				 * Final cleanup
				 */
				volume = buffer->volume;
				RB_REMOVE(hammer_buf_rb_tree,
					  &volume->rb_bufs_root, buffer);
				buffer->volume = NULL; /* sanity */
				hammer_rel_volume(volume, 0);
				freeme = 1;
			}
		}
		--buffer->io.loading;
		hammer_unlock(&buffer->io.lock);
	}
	hammer_unref(&buffer->io.lock);
	crit_exit();
	if (freeme) {
		KKASSERT(buffer->io.mod_list == NULL);
		--hammer_count_buffers;
		kfree(buffer, M_HAMMER);
	}
}

/*
 * Remove the zoneX translation cache for a buffer given its zone-2 offset.
 */
void
hammer_uncache_buffer(hammer_mount_t hmp, hammer_off_t buf_offset)
{
	hammer_volume_t volume;
	hammer_buffer_t buffer;
	int vol_no;
	int error;

	buf_offset &= ~HAMMER_BUFMASK64;
	KKASSERT((buf_offset & HAMMER_ZONE_RAW_BUFFER) ==
		 HAMMER_ZONE_RAW_BUFFER);
	vol_no = HAMMER_VOL_DECODE(buf_offset);
	volume = hammer_get_volume(hmp, vol_no, &error);
	KKASSERT(volume != NULL);
	KKASSERT(buf_offset < volume->maxbuf_off);

	buffer = RB_LOOKUP(hammer_buf_rb_tree, &volume->rb_bufs_root,
			   buf_offset);
	if (buffer)
		buffer->zoneX_offset = 0;
	hammer_rel_volume(volume, 0);
}

/*
 * Access the filesystem buffer containing the specified hammer offset.
 * buf_offset is a conglomeration of the volume number and vol_buf_beg
 * relative buffer offset.  It must also have bit 55 set to be valid.
 * (see hammer_off_t in hammer_disk.h).
 *
 * Any prior buffer in *bufferp will be released and replaced by the
 * requested buffer.
 */
void *
hammer_bread(hammer_mount_t hmp, hammer_off_t buf_offset, int *errorp,
	     struct hammer_buffer **bufferp)
{
	hammer_buffer_t buffer;
	int32_t xoff = (int32_t)buf_offset & HAMMER_BUFMASK;

	buf_offset &= ~HAMMER_BUFMASK64;
	KKASSERT((buf_offset & HAMMER_OFF_ZONE_MASK) != 0);

	buffer = *bufferp;
	if (buffer == NULL || (buffer->zone2_offset != buf_offset &&
			       buffer->zoneX_offset != buf_offset)) {
		if (buffer)
			hammer_rel_buffer(buffer, 0);
		buffer = hammer_get_buffer(hmp, buf_offset, 0, errorp);
		*bufferp = buffer;
	} else {
		*errorp = 0;
	}

	/*
	 * Return a pointer to the buffer data.
	 */
	if (buffer == NULL)
		return(NULL);
	else
		return((char *)buffer->ondisk + xoff);
}
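
/*
 * Usage sketch (#if 0, not compiled): callers usually thread a single
 * hammer_buffer_t cursor through repeated hammer_bread() calls.  The
 * cursor starts NULL, is transparently swapped as offsets cross buffer
 * boundaries, and is released once at the end.  'count' and the offset
 * stride are illustrative.
 */
#if 0
static void
example_bread_loop(hammer_mount_t hmp, hammer_off_t base, int count)
{
	struct hammer_buffer *buffer = NULL;
	void *data;
	int error;
	int i;

	for (i = 0; i < count; ++i) {
		data = hammer_bread(hmp,
				    base + (hammer_off_t)i * HAMMER_BUFSIZE,
				    &error, &buffer);
		if (data == NULL)
			break;		/* error holds the reason */
		/* ... consume data up to the end of the buffer ... */
	}
	if (buffer)
		hammer_rel_buffer(buffer, 0);
}
#endif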

/*
 * Access the filesystem buffer containing the specified hammer offset.
 * No disk read operation occurs.  The result buffer may contain garbage.
 *
 * Any prior buffer in *bufferp will be released and replaced by the
 * requested buffer.
 *
 * This function marks the buffer dirty but does not increment its
 * modify_refs count.
 */
void *
hammer_bnew(hammer_mount_t hmp, hammer_off_t buf_offset, int *errorp,
	     struct hammer_buffer **bufferp)
{
	hammer_buffer_t buffer;
	int32_t xoff = (int32_t)buf_offset & HAMMER_BUFMASK;

	buf_offset &= ~HAMMER_BUFMASK64;

	buffer = *bufferp;
	if (buffer == NULL || (buffer->zone2_offset != buf_offset &&
			       buffer->zoneX_offset != buf_offset)) {
		if (buffer)
			hammer_rel_buffer(buffer, 0);
		buffer = hammer_get_buffer(hmp, buf_offset, 1, errorp);
		*bufferp = buffer;
	} else {
		*errorp = 0;
	}

	/*
	 * Return a pointer to the buffer data.
	 */
	if (buffer == NULL)
		return(NULL);
	else
		return((char *)buffer->ondisk + xoff);
}

/************************************************************************
 *                              NODES                                   *
 ************************************************************************
 *
 * Manage B-Tree nodes.  B-Tree nodes represent the primary indexing
 * method used by the HAMMER filesystem.
 *
 * Unlike other HAMMER structures, a hammer_node can be PASSIVELY
 * associated with its buffer, and will only reference the buffer while
 * the node itself is referenced.
 *
 * A hammer_node can also be passively associated with other HAMMER
 * structures, such as inodes, while retaining 0 references.  These
 * associations can be cleared backwards using a pointer-to-pointer in
 * the hammer_node.
 *
 * This allows the HAMMER implementation to cache hammer_nodes long-term
 * and short-cut a great deal of the infrastructure's complexity.  In
 * most cases a cached node can be reacquired without having to dip into
 * either the buffer or cluster management code.
 *
 * The caller must pass a referenced cluster on call and will retain
 * ownership of the reference on return.  The node will acquire its own
 * additional references, if necessary.
 */
hammer_node_t
hammer_get_node(hammer_mount_t hmp, hammer_off_t node_offset,
		int isnew, int *errorp)
{
	hammer_node_t node;

	KKASSERT((node_offset & HAMMER_OFF_ZONE_MASK) == HAMMER_ZONE_BTREE);

	/*
	 * Locate the structure, allocating one if necessary.
	 */
again:
	node = RB_LOOKUP(hammer_nod_rb_tree, &hmp->rb_nods_root, node_offset);
	if (node == NULL) {
		++hammer_count_nodes;
		node = kmalloc(sizeof(*node), M_HAMMER, M_WAITOK|M_ZERO);
		node->node_offset = node_offset;
		node->hmp = hmp;
		if (RB_INSERT(hammer_nod_rb_tree, &hmp->rb_nods_root, node)) {
			--hammer_count_nodes;
			kfree(node, M_HAMMER);
			goto again;
		}
	}
	hammer_ref(&node->lock);
	if (node->ondisk)
		*errorp = 0;
	else
		*errorp = hammer_load_node(node, isnew);
	if (*errorp) {
		hammer_rel_node(node);
		node = NULL;
	}
	return(node);
}
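
/*
 * Usage sketch (#if 0, not compiled): acquire a B-Tree node by its
 * zone-8 (HAMMER_ZONE_BTREE) offset, use its ondisk data, and release
 * it.  Real callers obtain node_offset from a parent node or the
 * volume header.
 */
#if 0
static void
example_node_access(hammer_mount_t hmp, hammer_off_t node_offset)
{
	hammer_node_t node;
	int error;

	node = hammer_get_node(hmp, node_offset, 0, &error);
	if (node == NULL)
		return;
	/* ... examine node->ondisk ... */
	hammer_rel_node(node);
}
#endif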

/*
 * Reference an already-referenced node.
 */
void
hammer_ref_node(hammer_node_t node)
{
	KKASSERT(node->lock.refs > 0 && node->ondisk != NULL);
	hammer_ref(&node->lock);
}

/*
 * Load a node's on-disk data reference.
 */
static int
hammer_load_node(hammer_node_t node, int isnew)
{
	hammer_buffer_t buffer;
	int error;

	error = 0;
	++node->loading;
	hammer_lock_ex(&node->lock);
	if (node->ondisk == NULL) {
		/*
		 * This is a little confusing but the gist is that
		 * node->buffer determines whether the node is on
		 * the buffer's clist and node->ondisk determines
		 * whether the buffer is referenced.
		 *
		 * We could be racing a buffer release, in which case
		 * node->buffer may become NULL while we are blocked
		 * referencing the buffer.
		 */
		if ((buffer = node->buffer) != NULL) {
			error = hammer_ref_buffer(buffer);
			if (error == 0 && node->buffer == NULL) {
				TAILQ_INSERT_TAIL(&buffer->clist,
						  node, entry);
				node->buffer = buffer;
			}
		} else {
			buffer = hammer_get_buffer(node->hmp,
						   node->node_offset, 0,
						   &error);
			if (buffer) {
				KKASSERT(error == 0);
				TAILQ_INSERT_TAIL(&buffer->clist,
						  node, entry);
				node->buffer = buffer;
			}
		}
		if (error == 0) {
			node->ondisk = (void *)((char *)buffer->ondisk +
			       (node->node_offset & HAMMER_BUFMASK));
			if (isnew == 0 &&
			    hammer_crc_test_btree(node->ondisk) == 0) {
				Debugger("CRC FAILED: B-TREE NODE");
			}
		}
	}
	--node->loading;
	hammer_unlock(&node->lock);
	return (error);
}

/*
 * Safely reference a node, interlock against flushes via the IO subsystem.
 */
hammer_node_t
hammer_ref_node_safe(struct hammer_mount *hmp, struct hammer_node **cache,
		     int *errorp)
{
	hammer_node_t node;

	node = *cache;
	if (node != NULL) {
		hammer_ref(&node->lock);
		if (node->ondisk)
			*errorp = 0;
		else
			*errorp = hammer_load_node(node, 0);
		if (*errorp) {
			hammer_rel_node(node);
			node = NULL;
		}
	} else {
		*errorp = ENOENT;
	}
	return(node);
}

/*
 * Release a hammer_node.  On the last release the node dereferences
 * its underlying buffer and may or may not be destroyed.
 */
void
hammer_rel_node(hammer_node_t node)
{
	hammer_buffer_t buffer;

	/*
	 * If this isn't the last ref just decrement the ref count and
	 * return.
	 */
	if (node->lock.refs > 1) {
		hammer_unref(&node->lock);
		return;
	}

	/*
	 * If there is no ondisk info or no buffer the node failed to load,
	 * remove the last reference and destroy the node.
	 */
	if (node->ondisk == NULL) {
		hammer_unref(&node->lock);
		hammer_flush_node(node);
		/* node is stale now */
		return;
	}

	/*
	 * Do final cleanups and then either destroy the node or leave it
	 * passively cached.  The buffer reference is removed regardless.
	 */
	buffer = node->buffer;
	node->ondisk = NULL;

	if ((node->flags & HAMMER_NODE_FLUSH) == 0) {
		hammer_unref(&node->lock);
		hammer_rel_buffer(buffer, 0);
		return;
	}

	/*
	 * Destroy the node.
	 */
	hammer_unref(&node->lock);
	hammer_flush_node(node);
	/* node is stale */
	hammer_rel_buffer(buffer, 0);
}

/*
 * Free space on-media associated with a B-Tree node.
 */
void
hammer_delete_node(hammer_transaction_t trans, hammer_node_t node)
{
	KKASSERT((node->flags & HAMMER_NODE_DELETED) == 0);
	node->flags |= HAMMER_NODE_DELETED;
	hammer_blockmap_free(trans, node->node_offset, sizeof(*node->ondisk));
}

/*
 * Passively cache a referenced hammer_node in *cache.  The caller may
 * release the node on return.
 */
void
hammer_cache_node(hammer_node_t node, struct hammer_node **cache)
{
	hammer_node_t old;

	/*
	 * If the node is being deleted, don't cache it!
	 */
	if (node->flags & HAMMER_NODE_DELETED)
		return;

	/*
	 * Cache the node.  If we previously cached a different node we
	 * have to give HAMMER a chance to destroy it.
	 */
again:
	if (node->cache1 != cache) {
		if (node->cache2 != cache) {
			if ((old = *cache) != NULL) {
				KKASSERT(node->lock.refs != 0);
				hammer_uncache_node(cache);
				goto again;
			}
			if (node->cache2)
				*node->cache2 = NULL;
			node->cache2 = node->cache1;
			node->cache1 = cache;
			*cache = node;
		} else {
			struct hammer_node **tmp;
			tmp = node->cache1;
			node->cache1 = node->cache2;
			node->cache2 = tmp;
		}
	}
}
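
/*
 * Usage sketch (#if 0, not compiled): the passive cache lifecycle.  A
 * structure that wants to remember a node without holding a reference
 * caches it in a pointer slot, releases its reference, and later
 * reacquires the node safely.  'cache' is a hypothetical caller-owned
 * slot (e.g. embedded in an inode).
 */
#if 0
static void
example_node_caching(hammer_mount_t hmp, hammer_node_t node,
		     struct hammer_node **cache)
{
	hammer_node_t node2;
	int error;

	hammer_cache_node(node, cache);	/* node is referenced by caller */
	hammer_rel_node(node);		/* *cache retains a passive link */

	node2 = hammer_ref_node_safe(hmp, cache, &error);
	if (node2) {
		/* ... use node2->ondisk ... */
		hammer_rel_node(node2);
	}
	hammer_uncache_node(cache);	/* tear down the passive link */
}
#endif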

void
hammer_uncache_node(struct hammer_node **cache)
{
	hammer_node_t node;

	if ((node = *cache) != NULL) {
		*cache = NULL;
		if (node->cache1 == cache) {
			node->cache1 = node->cache2;
			node->cache2 = NULL;
		} else if (node->cache2 == cache) {
			node->cache2 = NULL;
		} else {
			panic("hammer_uncache_node: missing cache linkage");
		}
		if (node->cache1 == NULL && node->cache2 == NULL)
			hammer_flush_node(node);
	}
}

/*
 * Remove a node's cache references and destroy the node if it has no
 * other references or backing store.
 */
void
hammer_flush_node(hammer_node_t node)
{
	hammer_buffer_t buffer;

	if (node->cache1)
		*node->cache1 = NULL;
	if (node->cache2)
		*node->cache2 = NULL;
	if (node->lock.refs == 0 && node->ondisk == NULL) {
		RB_REMOVE(hammer_nod_rb_tree, &node->hmp->rb_nods_root, node);
		if ((buffer = node->buffer) != NULL) {
			node->buffer = NULL;
			TAILQ_REMOVE(&buffer->clist, node, entry);
			/* buffer is unreferenced because ondisk is NULL */
		}
		--hammer_count_nodes;
		kfree(node, M_HAMMER);
	}
}

1134  * Flush passively cached B-Tree nodes associated with this buffer.
1135  * This is only called when the buffer is about to be destroyed, so
1136  * none of the nodes should have any references.  The buffer is locked.
1137  *
1138  * We may be interlocked with the buffer.
1139  */
1140 void
1141 hammer_flush_buffer_nodes(hammer_buffer_t buffer)
1142 {
1143         hammer_node_t node;
1144
1145         while ((node = TAILQ_FIRST(&buffer->clist)) != NULL) {
1146                 KKASSERT(node->ondisk == NULL);
1147
1148                 if (node->lock.refs == 0) {
1149                         hammer_ref(&node->lock);
1150                         node->flags |= HAMMER_NODE_FLUSH;
1151                         hammer_rel_node(node);
1152                 } else {
1153                         KKASSERT(node->loading != 0);
1154                         KKASSERT(node->buffer != NULL);
1155                         buffer = node->buffer;
1156                         node->buffer = NULL;
1157                         TAILQ_REMOVE(&buffer->clist, node, entry);
1158                         /* buffer is unreferenced because ondisk is NULL */
1159                 }
1160         }
1161 }

/************************************************************************
 *                              ALLOCATORS                              *
 ************************************************************************/

/*
 * Allocate a B-Tree node.
 */
hammer_node_t
hammer_alloc_btree(hammer_transaction_t trans, int *errorp)
{
	hammer_buffer_t buffer = NULL;
	hammer_node_t node = NULL;
	hammer_off_t node_offset;

	node_offset = hammer_blockmap_alloc(trans, HAMMER_ZONE_BTREE_INDEX,
					    sizeof(struct hammer_node_ondisk),
					    errorp);
	if (*errorp == 0) {
		node = hammer_get_node(trans->hmp, node_offset, 1, errorp);
		hammer_modify_node_noundo(trans, node);
		bzero(node->ondisk, sizeof(*node->ondisk));
		hammer_modify_node_done(node);
	}
	if (buffer)
		hammer_rel_buffer(buffer, 0);
	return(node);
}

#if 0

/*
 * The returned buffers are already appropriately marked as being modified.
 * If the caller marks them again unnecessary undo records may be generated.
 *
 * In-band data is indicated by data_bufferp == NULL.  Pass a data_len of 0
 * for zero-fill (caller modifies data_len afterwards).
 *
 * The caller is responsible for calling hammer_modify_*() prior to making
 * any additional modifications to either the returned record buffer or the
 * returned data buffer.
 */
void *
hammer_alloc_record(hammer_transaction_t trans,
		    hammer_off_t *rec_offp, u_int16_t rec_type,
		    struct hammer_buffer **rec_bufferp,
		    int32_t data_len, void **datap,
		    hammer_off_t *data_offp,
		    struct hammer_buffer **data_bufferp, int *errorp)
{
	hammer_record_ondisk_t rec;
	hammer_off_t rec_offset;
	hammer_off_t data_offset;
	int32_t reclen;

	if (datap)
		*datap = NULL;

	/*
	 * Allocate the record
	 */
	rec_offset = hammer_blockmap_alloc(trans, HAMMER_ZONE_RECORD_INDEX,
					   HAMMER_RECORD_SIZE, errorp);
	if (*errorp)
		return(NULL);
	if (data_offp)
		*data_offp = 0;

	/*
	 * Allocate data
	 */
	if (data_len) {
		if (data_bufferp == NULL) {
			switch(rec_type) {
			case HAMMER_RECTYPE_DATA:
				reclen = offsetof(struct hammer_data_record,
						  data[0]);
				break;
			case HAMMER_RECTYPE_DIRENTRY:
				reclen = offsetof(struct hammer_entry_record,
						  name[0]);
				break;
			default:
				panic("hammer_alloc_record: illegal "
				      "in-band data");
				/* NOT REACHED */
				reclen = 0;
				break;
			}
			KKASSERT(reclen + data_len <= HAMMER_RECORD_SIZE);
			data_offset = rec_offset + reclen;
		} else if (data_len < HAMMER_BUFSIZE) {
			data_offset = hammer_blockmap_alloc(trans,
						HAMMER_ZONE_SMALL_DATA_INDEX,
						data_len, errorp);
			*data_offp = data_offset;
		} else {
			data_offset = hammer_blockmap_alloc(trans,
						HAMMER_ZONE_LARGE_DATA_INDEX,
						data_len, errorp);
			*data_offp = data_offset;
		}
	} else {
		data_offset = 0;
	}
	if (*errorp) {
		hammer_blockmap_free(trans, rec_offset, HAMMER_RECORD_SIZE);
		return(NULL);
	}

	/*
	 * Basic return values.
	 *
	 * Note that because this is a 'new' buffer, there is no need to
	 * generate UNDO records for it.
	 */
	*rec_offp = rec_offset;
	rec = hammer_bread(trans->hmp, rec_offset, errorp, rec_bufferp);
	hammer_modify_buffer(trans, *rec_bufferp, NULL, 0);
	bzero(rec, sizeof(*rec));
	KKASSERT(*errorp == 0);
	rec->base.data_off = data_offset;
	rec->base.data_len = data_len;
	hammer_modify_buffer_done(*rec_bufferp);

	if (data_bufferp) {
		if (data_len) {
			*datap = hammer_bread(trans->hmp, data_offset, errorp,
					      data_bufferp);
			KKASSERT(*errorp == 0);
		} else {
			*datap = NULL;
		}
	} else if (data_len) {
		KKASSERT(data_offset + data_len - rec_offset <=
			 HAMMER_RECORD_SIZE);
		if (datap) {
			*datap = (void *)((char *)rec +
					  (int32_t)(data_offset - rec_offset));
		}
	} else {
		KKASSERT(datap == NULL);
	}
	KKASSERT(*errorp == 0);
	return(rec);
}

#endif

/*
 * Allocate data.  If the address of a data buffer is supplied then
 * any prior non-NULL *data_bufferp will be released and *data_bufferp
 * will be set to the related buffer.  The caller must release it when
 * finally done.  The initial *data_bufferp should be set to NULL by
 * the caller.
 *
 * The caller is responsible for making hammer_modify*() calls on the
 * *data_bufferp.
 */
void *
hammer_alloc_data(hammer_transaction_t trans, int32_t data_len,
		  hammer_off_t *data_offsetp,
		  struct hammer_buffer **data_bufferp, int *errorp)
{
	void *data;

	/*
	 * Allocate data
	 */
	if (data_len) {
		if (data_len < HAMMER_BUFSIZE) {
			*data_offsetp = hammer_blockmap_alloc(trans,
						HAMMER_ZONE_SMALL_DATA_INDEX,
						data_len, errorp);
		} else {
			*data_offsetp = hammer_blockmap_alloc(trans,
						HAMMER_ZONE_LARGE_DATA_INDEX,
						data_len, errorp);
		}
	} else {
		*data_offsetp = 0;
	}
	if (*errorp == 0 && data_bufferp) {
		if (data_len) {
			data = hammer_bread(trans->hmp, *data_offsetp, errorp,
					    data_bufferp);
			KKASSERT(*errorp == 0);
		} else {
			data = NULL;
		}
	} else {
		data = NULL;
	}
	KKASSERT(*errorp == 0);
	return(data);
}
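
/*
 * Usage sketch (#if 0, not compiled): allocate data, copy into it using
 * the hammer_modify_buffer() bracketing seen elsewhere in this file,
 * and release the supporting buffer.  'src' and 'len' are illustrative.
 */
#if 0
static hammer_off_t
example_alloc_and_copy(hammer_transaction_t trans, void *src, int32_t len,
		       int *errorp)
{
	struct hammer_buffer *data_buffer = NULL;
	hammer_off_t data_offset;
	void *data;

	data = hammer_alloc_data(trans, len, &data_offset,
				 &data_buffer, errorp);
	if (data) {
		hammer_modify_buffer(trans, data_buffer, NULL, 0);
		bcopy(src, data, len);
		hammer_modify_buffer_done(data_buffer);
	}
	if (data_buffer)
		hammer_rel_buffer(data_buffer, 0);
	return(data_offset);
}
#endif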

/*
 * Sync dirty buffers to the media and clean up any loose ends.
 */
static int hammer_sync_scan1(struct mount *mp, struct vnode *vp, void *data);
static int hammer_sync_scan2(struct mount *mp, struct vnode *vp, void *data);

int
hammer_queue_inodes_flusher(hammer_mount_t hmp, int waitfor)
{
	struct hammer_sync_info info;

	info.error = 0;
	info.waitfor = waitfor;
	if (waitfor == MNT_WAIT) {
		vmntvnodescan(hmp->mp, VMSC_GETVP,
			      hammer_sync_scan1, hammer_sync_scan2, &info);
	} else {
		vmntvnodescan(hmp->mp, VMSC_GETVP|VMSC_NOWAIT,
			      hammer_sync_scan1, hammer_sync_scan2, &info);
	}
	return(info.error);
}

int
hammer_sync_hmp(hammer_mount_t hmp, int waitfor)
{
	struct hammer_sync_info info;

	info.error = 0;
	info.waitfor = waitfor;

	vmntvnodescan(hmp->mp, VMSC_GETVP|VMSC_NOWAIT,
		      hammer_sync_scan1, hammer_sync_scan2, &info);
	if (waitfor == MNT_WAIT)
		hammer_flusher_sync(hmp);
	else
		hammer_flusher_async(hmp);

	return(info.error);
}

static int
hammer_sync_scan1(struct mount *mp, struct vnode *vp, void *data)
{
	struct hammer_inode *ip;

	ip = VTOI(vp);
	if (vp->v_type == VNON || ip == NULL ||
	    ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
	     RB_EMPTY(&vp->v_rbdirty_tree))) {
		return(-1);
	}
	return(0);
}

static int
hammer_sync_scan2(struct mount *mp, struct vnode *vp, void *data)
{
	struct hammer_sync_info *info = data;
	struct hammer_inode *ip;
	int error;

	ip = VTOI(vp);
	if (vp->v_type == VNON || vp->v_type == VBAD ||
	    ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
	     RB_EMPTY(&vp->v_rbdirty_tree))) {
		return(0);
	}
	error = VOP_FSYNC(vp, info->waitfor);
	if (error)
		info->error = error;
	return(0);
}