1 /*
2  * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
3  * 
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@backplane.com>
6  * 
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in
15  *    the documentation and/or other materials provided with the
16  *    distribution.
17  * 3. Neither the name of The DragonFly Project nor the names of its
18  *    contributors may be used to endorse or promote products derived
19  *    from this software without specific, prior written permission.
20  * 
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
25  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  * 
34  * $DragonFly: src/sys/vfs/hammer/hammer_ondisk.c,v 1.60 2008/06/20 05:38:26 dillon Exp $
35  */
36 /*
37  * Manage HAMMER's on-disk structures.  These routines are primarily
38  * responsible for interfacing with the kernel's I/O subsystem and for
39  * managing in-memory structures.
40  */
41
42 #include "hammer.h"
43 #include <sys/fcntl.h>
44 #include <sys/nlookup.h>
45 #include <sys/buf.h>
46 #include <sys/buf2.h>
47
48 static void hammer_free_volume(hammer_volume_t volume);
49 static int hammer_load_volume(hammer_volume_t volume);
50 static int hammer_load_buffer(hammer_buffer_t buffer, int isnew);
51 static int hammer_load_node(hammer_node_t node, int isnew);
52
53 /*
54  * Red-Black tree support for various structures
55  */
56 int
57 hammer_ino_rb_compare(hammer_inode_t ip1, hammer_inode_t ip2)
58 {
59         if (ip1->obj_id < ip2->obj_id)
60                 return(-1);
61         if (ip1->obj_id > ip2->obj_id)
62                 return(1);
63         if (ip1->obj_asof < ip2->obj_asof)
64                 return(-1);
65         if (ip1->obj_asof > ip2->obj_asof)
66                 return(1);
67         return(0);
68 }
69
70 static int
71 hammer_inode_info_cmp(hammer_inode_info_t info, hammer_inode_t ip)
72 {
73         if (info->obj_id < ip->obj_id)
74                 return(-1);
75         if (info->obj_id > ip->obj_id)
76                 return(1);
77         if (info->obj_asof < ip->obj_asof)
78                 return(-1);
79         if (info->obj_asof > ip->obj_asof)
80                 return(1);
81         return(0);
82 }
83
84 static int
85 hammer_vol_rb_compare(hammer_volume_t vol1, hammer_volume_t vol2)
86 {
87         if (vol1->vol_no < vol2->vol_no)
88                 return(-1);
89         if (vol1->vol_no > vol2->vol_no)
90                 return(1);
91         return(0);
92 }
93
94 static int
95 hammer_buf_rb_compare(hammer_buffer_t buf1, hammer_buffer_t buf2)
96 {
97         if (buf1->zoneX_offset < buf2->zoneX_offset)
98                 return(-1);
99         if (buf1->zoneX_offset > buf2->zoneX_offset)
100                 return(1);
101         return(0);
102 }
103
104 static int
105 hammer_nod_rb_compare(hammer_node_t node1, hammer_node_t node2)
106 {
107         if (node1->node_offset < node2->node_offset)
108                 return(-1);
109         if (node1->node_offset > node2->node_offset)
110                 return(1);
111         return(0);
112 }
113
114 /*
115  * Note: The lookup function for hammer_ino_rb_tree winds up being named
116  * hammer_ino_rb_tree_RB_LOOKUP_INFO(root, info).  The other lookup
117  * functions are normal, e.g. hammer_buf_rb_tree_RB_LOOKUP(root, zone2_offset).
118  */
119 RB_GENERATE(hammer_ino_rb_tree, hammer_inode, rb_node, hammer_ino_rb_compare);
120 RB_GENERATE_XLOOKUP(hammer_ino_rb_tree, INFO, hammer_inode, rb_node,
121                 hammer_inode_info_cmp, hammer_inode_info_t);
122 RB_GENERATE2(hammer_vol_rb_tree, hammer_volume, rb_node,
123              hammer_vol_rb_compare, int32_t, vol_no);
124 RB_GENERATE2(hammer_buf_rb_tree, hammer_buffer, rb_node,
125              hammer_buf_rb_compare, hammer_off_t, zoneX_offset);
126 RB_GENERATE2(hammer_nod_rb_tree, hammer_node, rb_node,
127              hammer_nod_rb_compare, hammer_off_t, node_offset);
128
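/*
 * Illustrative sketch, not compiled: how the generated lookup functions
 * above are typically called.  The RB_GENERATE2() trees are searched via
 * the generic RB_LOOKUP() wrapper keyed on the declared field; the inode
 * tree uses the INFO xlookup named in the comment above.  The function
 * name is hypothetical and the rb_inos_root field is assumed from hammer.h.
 */
#if 0
static void
hammer_example_rb_lookups(hammer_mount_t hmp, int32_t vol_no,
			  hammer_off_t zoneX_offset,
			  hammer_inode_info_t info)
{
	hammer_volume_t volume;
	hammer_buffer_t buffer;
	hammer_inode_t ip;

	volume = RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, vol_no);
	buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root, zoneX_offset);
	ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, info);
}
#endif
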
129 /************************************************************************
130  *                              VOLUMES                                 *
131  ************************************************************************
132  *
133  * Load a HAMMER volume by name.  Returns 0 on success or a positive error
134  * code on failure.  Volumes must be loaded at mount time; get_volume() will
135  * not load a new volume.
136  *
137  * Calls made to hammer_load_volume() are single-threaded.
138  */
139 int
140 hammer_install_volume(struct hammer_mount *hmp, const char *volname)
141 {
142         struct mount *mp;
143         hammer_volume_t volume;
144         struct hammer_volume_ondisk *ondisk;
145         struct nlookupdata nd;
146         struct buf *bp = NULL;
147         int error;
148         int ronly;
149         int setmp = 0;
150
151         mp = hmp->mp;
152         ronly = ((mp->mnt_flag & MNT_RDONLY) ? 1 : 0);
153
154         /*
155          * Allocate a volume structure
156          */
157         ++hammer_count_volumes;
158         volume = kmalloc(sizeof(*volume), M_HAMMER, M_WAITOK|M_ZERO);
159         volume->vol_name = kstrdup(volname, M_HAMMER);
160         hammer_io_init(&volume->io, hmp, HAMMER_STRUCTURE_VOLUME);
161         volume->io.offset = 0LL;
162         volume->io.bytes = HAMMER_BUFSIZE;
163
164         /*
165          * Get the device vnode
166          */
167         error = nlookup_init(&nd, volume->vol_name, UIO_SYSSPACE, NLC_FOLLOW);
168         if (error == 0)
169                 error = nlookup(&nd);
170         if (error == 0)
171                 error = cache_vref(&nd.nl_nch, nd.nl_cred, &volume->devvp);
172         nlookup_done(&nd);
173         if (error == 0) {
174                 if (vn_isdisk(volume->devvp, &error)) {
175                         error = vfs_mountedon(volume->devvp);
176                 }
177         }
178         if (error == 0 &&
179             count_udev(volume->devvp->v_umajor, volume->devvp->v_uminor) > 0) {
180                 error = EBUSY;
181         }
182         if (error == 0) {
183                 vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
184                 error = vinvalbuf(volume->devvp, V_SAVE, 0, 0);
185                 if (error == 0) {
186                         error = VOP_OPEN(volume->devvp, 
187                                          (ronly ? FREAD : FREAD|FWRITE),
188                                          FSCRED, NULL);
189                 }
190                 vn_unlock(volume->devvp);
191         }
192         if (error) {
193                 hammer_free_volume(volume);
194                 return(error);
195         }
196         volume->devvp->v_rdev->si_mountpoint = mp;
197         setmp = 1;
198
199         /*
200          * Extract the volume number from the volume header and do various
201          * sanity checks.
202          */
203         error = bread(volume->devvp, 0LL, HAMMER_BUFSIZE, &bp);
204         if (error)
205                 goto late_failure;
206         ondisk = (void *)bp->b_data;
207         if (ondisk->vol_signature != HAMMER_FSBUF_VOLUME) {
208                 kprintf("hammer_mount: volume %s has an invalid header\n",
209                         volume->vol_name);
210                 error = EFTYPE;
211                 goto late_failure;
212         }
213         volume->vol_no = ondisk->vol_no;
214         volume->buffer_base = ondisk->vol_buf_beg;
215         volume->vol_flags = ondisk->vol_flags;
216         volume->nblocks = ondisk->vol_nblocks; 
217         volume->maxbuf_off = HAMMER_ENCODE_RAW_BUFFER(volume->vol_no,
218                                     ondisk->vol_buf_end - ondisk->vol_buf_beg);
219         volume->maxraw_off = ondisk->vol_buf_end;
220
221         if (RB_EMPTY(&hmp->rb_vols_root)) {
222                 hmp->fsid = ondisk->vol_fsid;
223         } else if (bcmp(&hmp->fsid, &ondisk->vol_fsid, sizeof(uuid_t))) {
224                 kprintf("hammer_mount: volume %s's fsid does not match "
225                         "other volumes\n", volume->vol_name);
226                 error = EFTYPE;
227                 goto late_failure;
228         }
229
230         /*
231          * Insert the volume structure into the red-black tree.
232          */
233         if (RB_INSERT(hammer_vol_rb_tree, &hmp->rb_vols_root, volume)) {
234                 kprintf("hammer_mount: volume %s has a duplicate vol_no %d\n",
235                         volume->vol_name, volume->vol_no);
236                 error = EEXIST;
237         }
238
239         /*
240          * Set the root volume.  HAMMER special-cases the rootvol structure.
241          * We do not hold a ref because this would prevent related I/O
242          * from being flushed.
243          */
244         if (error == 0 && ondisk->vol_rootvol == ondisk->vol_no) {
245                 hmp->rootvol = volume;
246                 if (bp) {
247                         brelse(bp);
248                         bp = NULL;
249                 }
250                 hmp->fsid_udev = dev2udev(vn_todev(volume->devvp));
251                 hmp->mp->mnt_stat.f_blocks += ondisk->vol0_stat_bigblocks *
252                         (HAMMER_LARGEBLOCK_SIZE / HAMMER_BUFSIZE);
253                 hmp->mp->mnt_vstat.f_blocks += ondisk->vol0_stat_bigblocks *
254                         (HAMMER_LARGEBLOCK_SIZE / HAMMER_BUFSIZE);
255         }
256 late_failure:
257         if (bp)
258                 brelse(bp);
259         if (error) {
260                 /*vinvalbuf(volume->devvp, V_SAVE, 0, 0);*/
261                 if (setmp)
262                         volume->devvp->v_rdev->si_mountpoint = NULL;
263                 VOP_CLOSE(volume->devvp, ronly ? FREAD : FREAD|FWRITE);
264                 hammer_free_volume(volume);
265         }
266         return (error);
267 }
268
269 /*
270  * This is called for each volume when updating the mount point from
271  * read-write to read-only or vice-versa.
272  */
273 int
274 hammer_adjust_volume_mode(hammer_volume_t volume, void *data __unused)
275 {
276         if (volume->devvp) {
277                 vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
278                 if (volume->io.hmp->ronly) {
279                         /* do not call vinvalbuf */
280                         VOP_OPEN(volume->devvp, FREAD, FSCRED, NULL);
281                         VOP_CLOSE(volume->devvp, FREAD|FWRITE);
282                 } else {
283                         /* do not call vinvalbuf */
284                         VOP_OPEN(volume->devvp, FREAD|FWRITE, FSCRED, NULL);
285                         VOP_CLOSE(volume->devvp, FREAD);
286                 }
287                 vn_unlock(volume->devvp);
288         }
289         return(0);
290 }
291
292 /*
293  * Unload and free a HAMMER volume.  Must return >= 0 for the RB_SCAN to
294  * continue, so errors are not propagated back through the return value.
295  */
296 int
297 hammer_unload_volume(hammer_volume_t volume, void *data __unused)
298 {
299         struct hammer_mount *hmp = volume->io.hmp;
300         int ronly = ((hmp->mp->mnt_flag & MNT_RDONLY) ? 1 : 0);
301
302         /*
303          * Clean up the root volume pointer, which is held unlocked in hmp.
304          */
305         if (hmp->rootvol == volume)
306                 hmp->rootvol = NULL;
307
308         /*
309          * Release our buffer and flush anything left in the buffer cache.
310          */
311         volume->io.waitdep = 1;
312         hammer_io_release(&volume->io, 1);
313         hammer_io_clear_modlist(&volume->io);
314
315         /*
316          * There should be no references on the volume, no clusters, and
317          * no super-clusters.
318          */
319         KKASSERT(volume->io.lock.refs == 0);
320
321         volume->ondisk = NULL;
322         if (volume->devvp) {
323                 if (volume->devvp->v_rdev &&
324                     volume->devvp->v_rdev->si_mountpoint == hmp->mp
325                 ) {
326                         volume->devvp->v_rdev->si_mountpoint = NULL;
327                 }
328                 if (ronly) {
329                         vinvalbuf(volume->devvp, 0, 0, 0);
330                         VOP_CLOSE(volume->devvp, FREAD);
331                 } else {
332                         vinvalbuf(volume->devvp, V_SAVE, 0, 0);
333                         VOP_CLOSE(volume->devvp, FREAD|FWRITE);
334                 }
335         }
336
337         /*
338          * Destroy the structure
339          */
340         RB_REMOVE(hammer_vol_rb_tree, &hmp->rb_vols_root, volume);
341         hammer_free_volume(volume);
342         return(0);
343 }
344
345 static
346 void
347 hammer_free_volume(hammer_volume_t volume)
348 {
349         if (volume->vol_name) {
350                 kfree(volume->vol_name, M_HAMMER);
351                 volume->vol_name = NULL;
352         }
353         if (volume->devvp) {
354                 vrele(volume->devvp);
355                 volume->devvp = NULL;
356         }
357         --hammer_count_volumes;
358         kfree(volume, M_HAMMER);
359 }
360
361 /*
362  * Get a HAMMER volume.  The volume must already exist.
363  */
364 hammer_volume_t
365 hammer_get_volume(struct hammer_mount *hmp, int32_t vol_no, int *errorp)
366 {
367         struct hammer_volume *volume;
368
369         /*
370          * Locate the volume structure
371          */
372         volume = RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, vol_no);
373         if (volume == NULL) {
374                 *errorp = ENOENT;
375                 return(NULL);
376         }
377         hammer_ref(&volume->io.lock);
378
379         /*
380          * Deal with on-disk info
381          */
382         if (volume->ondisk == NULL || volume->io.loading) {
383                 *errorp = hammer_load_volume(volume);
384                 if (*errorp) {
385                         hammer_rel_volume(volume, 1);
386                         volume = NULL;
387                 }
388         } else {
389                 *errorp = 0;
390         }
391         return(volume);
392 }
393
394 int
395 hammer_ref_volume(hammer_volume_t volume)
396 {
397         int error;
398
399         hammer_ref(&volume->io.lock);
400
401         /*
402          * Deal with on-disk info
403          */
404         if (volume->ondisk == NULL || volume->io.loading) {
405                 error = hammer_load_volume(volume);
406                 if (error)
407                         hammer_rel_volume(volume, 1);
408         } else {
409                 error = 0;
410         }
411         return (error);
412 }
413
414 hammer_volume_t
415 hammer_get_root_volume(struct hammer_mount *hmp, int *errorp)
416 {
417         hammer_volume_t volume;
418
419         volume = hmp->rootvol;
420         KKASSERT(volume != NULL);
421         hammer_ref(&volume->io.lock);
422
423         /*
424          * Deal with on-disk info
425          */
426         if (volume->ondisk == NULL || volume->io.loading) {
427                 *errorp = hammer_load_volume(volume);
428                 if (*errorp) {
429                         hammer_rel_volume(volume, 1);
430                         volume = NULL;
431                 }
432         } else {
433                 *errorp = 0;
434         }
435         return (volume);
436 }
437
438 /*
439  * Load a volume's on-disk information.  The volume must be referenced and
440  * not locked.  We temporarily acquire an exclusive lock to interlock
441  * against releases or multiple get's.
442  */
443 static int
444 hammer_load_volume(hammer_volume_t volume)
445 {
446         int error;
447
448         ++volume->io.loading;
449         hammer_lock_ex(&volume->io.lock);
450
451         if (volume->ondisk == NULL) {
452                 error = hammer_io_read(volume->devvp, &volume->io,
453                                        volume->maxraw_off);
454                 if (error == 0)
455                         volume->ondisk = (void *)volume->io.bp->b_data;
456         } else {
457                 error = 0;
458         }
459         --volume->io.loading;
460         hammer_unlock(&volume->io.lock);
461         return(error);
462 }
463
464 /*
465  * Release a volume.  Call hammer_io_release on the last reference.  We have
466  * to acquire an exclusive lock to interlock against volume->ondisk tests
467  * in hammer_load_volume(), and hammer_io_release() also expects an exclusive
468  * lock to be held.
469  *
470  * Volumes are not unloaded from memory during normal operation.
471  */
472 void
473 hammer_rel_volume(hammer_volume_t volume, int flush)
474 {
475         crit_enter();
476         if (volume->io.lock.refs == 1) {
477                 ++volume->io.loading;
478                 hammer_lock_ex(&volume->io.lock);
479                 if (volume->io.lock.refs == 1) {
480                         volume->ondisk = NULL;
481                         hammer_io_release(&volume->io, flush);
482                 }
483                 --volume->io.loading;
484                 hammer_unlock(&volume->io.lock);
485         }
486         hammer_unref(&volume->io.lock);
487         crit_exit();
488 }
489
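/*
 * Illustrative sketch, not compiled: the volume reference discipline
 * described above.  hammer_get_volume() returns a referenced volume with
 * ->ondisk valid and hammer_rel_volume() drops the reference.  The
 * function name is hypothetical.
 */
#if 0
static void
hammer_example_volume_access(hammer_mount_t hmp, int32_t vol_no)
{
	hammer_volume_t volume;
	int error;

	volume = hammer_get_volume(hmp, vol_no, &error);
	if (volume) {
		/* ... inspect volume->ondisk fields ... */
		hammer_rel_volume(volume, 0);
	}
}
#endif
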
490 /************************************************************************
491  *                              BUFFERS                                 *
492  ************************************************************************
493  *
494  * Manage buffers.  Currently all blockmap-backed zones are translated
495  * to zone-2 buffer offsets.
496  */
497 hammer_buffer_t
498 hammer_get_buffer(hammer_mount_t hmp, hammer_off_t buf_offset,
499                   int bytes, int isnew, int *errorp)
500 {
501         hammer_buffer_t buffer;
502         hammer_volume_t volume;
503         hammer_off_t    zone2_offset;
504         hammer_io_type_t iotype;
505         int vol_no;
506         int zone;
507
508         buf_offset &= ~HAMMER_BUFMASK64;
509 again:
510         /*
511          * Shortcut if the buffer is already cached
512          */
513         buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root, buf_offset);
514         if (buffer) {
515                 if (buffer->io.lock.refs == 0)
516                         ++hammer_count_refedbufs;
517                 hammer_ref(&buffer->io.lock);
518
519                 /*
520                  * Once refed, the ondisk field will not be cleared by
521                  * any other action.
522                  */
523                 if (buffer->ondisk && buffer->io.loading == 0) {
524                         *errorp = 0;
525                         return(buffer);
526                 }
527
528                 /*
529                  * The buffer is no longer loose if it has a ref, and
530                  * cannot become loose once it gains a ref.  Loose
531                  * buffers will never be in a modified state.  This should
532                  * only occur on the 0->1 transition of refs.
533                  *
534                  * lose_list can be modified via a biodone() interrupt.
535                  */
536                 if (buffer->io.mod_list == &hmp->lose_list) {
537                         crit_enter();   /* biodone race against list */
538                         TAILQ_REMOVE(buffer->io.mod_list, &buffer->io,
539                                      mod_entry);
540                         crit_exit();
541                         buffer->io.mod_list = NULL;
542                         KKASSERT(buffer->io.modified == 0);
543                 }
544                 goto found;
545         }
546
547         /*
548          * What is the buffer class?
549          */
550         zone = HAMMER_ZONE_DECODE(buf_offset);
551
552         switch(zone) {
553         case HAMMER_ZONE_LARGE_DATA_INDEX:
554         case HAMMER_ZONE_SMALL_DATA_INDEX:
555                 iotype = HAMMER_STRUCTURE_DATA_BUFFER;
556                 break;
557         case HAMMER_ZONE_UNDO_INDEX:
558                 iotype = HAMMER_STRUCTURE_UNDO_BUFFER;
559                 break;
560         case HAMMER_ZONE_META_INDEX:
561         default:
562                 /*
563                  * NOTE: inode data and directory entries are placed in this
564                  * zone.  inode atime/mtime is updated in-place and thus
565                  * buffers containing inodes must be synchronized as
566                  * meta-buffers, same as buffers containing B-Tree info.
567                  */
568                 iotype = HAMMER_STRUCTURE_META_BUFFER;
569                 break;
570         }
571
572         /*
573          * Handle blockmap offset translations
574          */
575         if (zone >= HAMMER_ZONE_BTREE_INDEX) {
576                 zone2_offset = hammer_blockmap_lookup(hmp, buf_offset, errorp);
577         } else if (zone == HAMMER_ZONE_UNDO_INDEX) {
578                 zone2_offset = hammer_undo_lookup(hmp, buf_offset, errorp);
579         } else {
580                 KKASSERT(zone == HAMMER_ZONE_RAW_BUFFER_INDEX);
581                 zone2_offset = buf_offset;
582                 *errorp = 0;
583         }
584         if (*errorp)
585                 return(NULL);
586
587         /*
588          * NOTE: zone2_offset and maxbuf_off are both full zone-2 offset
589          * specifications.
590          */
591         KKASSERT((zone2_offset & HAMMER_OFF_ZONE_MASK) ==
592                  HAMMER_ZONE_RAW_BUFFER);
593         vol_no = HAMMER_VOL_DECODE(zone2_offset);
594         volume = hammer_get_volume(hmp, vol_no, errorp);
595         if (volume == NULL)
596                 return(NULL);
597
598         KKASSERT(zone2_offset < volume->maxbuf_off);
599
600         /*
601          * Allocate a new buffer structure.  We will check for races later.
602          */
603         ++hammer_count_buffers;
604         buffer = kmalloc(sizeof(*buffer), M_HAMMER, M_WAITOK|M_ZERO);
605         buffer->zone2_offset = zone2_offset;
606         buffer->zoneX_offset = buf_offset;
607         buffer->volume = volume;
608
609         hammer_io_init(&buffer->io, hmp, iotype);
610         buffer->io.offset = volume->ondisk->vol_buf_beg +
611                             (zone2_offset & HAMMER_OFF_SHORT_MASK);
612         buffer->io.bytes = bytes;
613         TAILQ_INIT(&buffer->clist);
614         hammer_ref(&buffer->io.lock);
615
616         /*
617          * Insert the buffer into the RB tree and handle late collisions.
618          */
619         if (RB_INSERT(hammer_buf_rb_tree, &hmp->rb_bufs_root, buffer)) {
620                 hammer_unref(&buffer->io.lock);
621                 --hammer_count_buffers;
622                 kfree(buffer, M_HAMMER);
623                 goto again;
624         }
625         ++hammer_count_refedbufs;
626 found:
627
628         /*
629          * Deal with on-disk info and loading races.
630          */
631         if (buffer->ondisk == NULL || buffer->io.loading) {
632                 *errorp = hammer_load_buffer(buffer, isnew);
633                 if (*errorp) {
634                         hammer_rel_buffer(buffer, 1);
635                         buffer = NULL;
636                 }
637         } else {
638                 *errorp = 0;
639         }
640         return(buffer);
641 }
642
643 /*
644  * Destroy all buffers covering the specified zoneX offset range.  This
645  * is called when the related blockmap layer2 entry is freed or when
646  * a direct write bypasses our buffer/buffer-cache subsystem.
647  *
648  * The buffers may be referenced by the caller itself.  Setting reclaim
649  * will cause the buffer to be destroyed when its ref count reaches zero.
650  */
651 void
652 hammer_del_buffers(hammer_mount_t hmp, hammer_off_t base_offset,
653                    hammer_off_t zone2_offset, int bytes)
654 {
655         hammer_buffer_t buffer;
656         hammer_volume_t volume;
657         int vol_no;
658         int error;
659
660         vol_no = HAMMER_VOL_DECODE(zone2_offset);
661         volume = hammer_get_volume(hmp, vol_no, &error);
662         KKASSERT(error == 0);
663
664         while (bytes > 0) {
665                 buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root,
666                                    base_offset);
667                 if (buffer) {
668                         KKASSERT(buffer->zone2_offset == zone2_offset);
669                         hammer_io_clear_modify(&buffer->io, 1);
670                         buffer->io.reclaim = 1;
671                         KKASSERT(buffer->volume == volume);
672                         if (buffer->io.lock.refs == 0)
673                                 hammer_unload_buffer(buffer, NULL);
674                 } else {
675                         hammer_io_inval(volume, zone2_offset);
676                 }
677                 base_offset += HAMMER_BUFSIZE;
678                 zone2_offset += HAMMER_BUFSIZE;
679                 bytes -= HAMMER_BUFSIZE;
680         }
681         hammer_rel_volume(volume, 0);
682 }
683
684 static int
685 hammer_load_buffer(hammer_buffer_t buffer, int isnew)
686 {
687         hammer_volume_t volume;
688         int error;
689
690         /*
691          * Load the buffer's on-disk info
692          */
693         volume = buffer->volume;
694         ++buffer->io.loading;
695         hammer_lock_ex(&buffer->io.lock);
696
697         if (hammer_debug_io & 0x0001) {
698                 kprintf("load_buffer %016llx %016llx isnew=%d od=%p\n",
699                         buffer->zoneX_offset, buffer->zone2_offset, isnew,
700                         buffer->ondisk);
701         }
702
703         if (buffer->ondisk == NULL) {
704                 if (isnew) {
705                         error = hammer_io_new(volume->devvp, &buffer->io);
706                 } else {
707                         error = hammer_io_read(volume->devvp, &buffer->io,
708                                                volume->maxraw_off);
709                 }
710                 if (error == 0)
711                         buffer->ondisk = (void *)buffer->io.bp->b_data;
712         } else if (isnew) {
713                 error = hammer_io_new(volume->devvp, &buffer->io);
714         } else {
715                 error = 0;
716         }
717         --buffer->io.loading;
718         hammer_unlock(&buffer->io.lock);
719         return (error);
720 }
721
722 /*
723  * NOTE: Called from RB_SCAN, must return >= 0 for scan to continue.
724  */
725 int
726 hammer_unload_buffer(hammer_buffer_t buffer, void *data __unused)
727 {
728         ++hammer_count_refedbufs;
729         hammer_ref(&buffer->io.lock);
730         hammer_flush_buffer_nodes(buffer);
731         KKASSERT(buffer->io.lock.refs == 1);
732         hammer_rel_buffer(buffer, 2);
733         return(0);
734 }
735
736 /*
737  * Reference a buffer that is either already referenced or via a specially
738  * handled pointer (aka cursor->buffer).
739  */
740 int
741 hammer_ref_buffer(hammer_buffer_t buffer)
742 {
743         int error;
744
745         if (buffer->io.lock.refs == 0)
746                 ++hammer_count_refedbufs;
747         hammer_ref(&buffer->io.lock);
748
749         /*
750          * At this point a biodone() will not touch the buffer other than
751          * incidental bits.  However, lose_list can be modified via
752          * a biodone() interrupt.
753          *
754          * No longer loose
755          */
756         if (buffer->io.mod_list == &buffer->io.hmp->lose_list) {
757                 crit_enter();
758                 TAILQ_REMOVE(buffer->io.mod_list, &buffer->io, mod_entry);
759                 buffer->io.mod_list = NULL;
760                 crit_exit();
761         }
762
763         if (buffer->ondisk == NULL || buffer->io.loading) {
764                 error = hammer_load_buffer(buffer, 0);
765                 if (error) {
766                         hammer_rel_buffer(buffer, 1);
767                         /*
768                          * NOTE: buffer pointer can become stale after
769                          * the above release.
770                          */
771                 }
772         } else {
773                 error = 0;
774         }
775         return(error);
776 }
777
778 /*
779  * Release a buffer.  We have to deal with several places where
780  * another thread can ref the buffer.
781  *
782  * Only destroy the structure itself if the related buffer cache buffer
783  * was disassociated from it.  This ties the management of the structure
784  * to the buffer cache subsystem.  buffer->ondisk determines whether the
785  * embedded io is referenced or not.
786  */
787 void
788 hammer_rel_buffer(hammer_buffer_t buffer, int flush)
789 {
790         hammer_volume_t volume;
791         int freeme = 0;
792
793         crit_enter();
794         if (buffer->io.lock.refs == 1) {
795                 ++buffer->io.loading;   /* force interlock check */
796                 hammer_lock_ex(&buffer->io.lock);
797                 if (buffer->io.lock.refs == 1) {
798                         hammer_io_release(&buffer->io, flush);
799
800                         if (buffer->io.lock.refs == 1)
801                                 --hammer_count_refedbufs;
802
803                         if (buffer->io.bp == NULL &&
804                             buffer->io.lock.refs == 1) {
805                                 /*
806                                  * Final cleanup
807                                  *
808                                  * NOTE: It is impossible for any associated
809                                  * B-Tree nodes to have refs if the buffer
810                                  * has no additional refs.
811                                  */
812                                 RB_REMOVE(hammer_buf_rb_tree,
813                                           &buffer->io.hmp->rb_bufs_root,
814                                           buffer);
815                                 volume = buffer->volume;
816                                 buffer->volume = NULL; /* sanity */
817                                 hammer_rel_volume(volume, 0);
818                                 hammer_io_clear_modlist(&buffer->io);
819                                 hammer_flush_buffer_nodes(buffer);
820                                 KKASSERT(TAILQ_EMPTY(&buffer->clist));
821                                 freeme = 1;
822                         }
823                 }
824                 --buffer->io.loading;
825                 hammer_unlock(&buffer->io.lock);
826         }
827         hammer_unref(&buffer->io.lock);
828         crit_exit();
829         if (freeme) {
830                 --hammer_count_buffers;
831                 kfree(buffer, M_HAMMER);
832         }
833 }
834
835 /*
836  * Access the filesystem buffer containing the specified hammer offset.
837  * buf_offset is a conglomeration of the volume number and vol_buf_beg
838  * relative buffer offset.  It must also have bit 55 set to be valid.
839  * (see hammer_off_t in hammer_disk.h).
840  *
841  * Any prior buffer in *bufferp will be released and replaced by the
842  * requested buffer.
843  */
844 static __inline
845 void *
846 _hammer_bread(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
847              int *errorp, struct hammer_buffer **bufferp)
848 {
849         hammer_buffer_t buffer;
850         int32_t xoff = (int32_t)buf_offset & HAMMER_BUFMASK;
851
852         buf_offset &= ~HAMMER_BUFMASK64;
853         KKASSERT((buf_offset & HAMMER_OFF_ZONE_MASK) != 0);
854
855         buffer = *bufferp;
856         if (buffer == NULL || (buffer->zone2_offset != buf_offset &&
857                                buffer->zoneX_offset != buf_offset)) {
858                 if (buffer)
859                         hammer_rel_buffer(buffer, 0);
860                 buffer = hammer_get_buffer(hmp, buf_offset, bytes, 0, errorp);
861                 *bufferp = buffer;
862         } else {
863                 *errorp = 0;
864         }
865
866         /*
867          * Return a pointer to the buffer data.
868          */
869         if (buffer == NULL)
870                 return(NULL);
871         else
872                 return((char *)buffer->ondisk + xoff);
873 }
874
875 void *
876 hammer_bread(hammer_mount_t hmp, hammer_off_t buf_offset,
877              int *errorp, struct hammer_buffer **bufferp)
878 {
879         return(_hammer_bread(hmp, buf_offset, HAMMER_BUFSIZE, errorp, bufferp));
880 }
881
882 void *
883 hammer_bread_ext(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
884                  int *errorp, struct hammer_buffer **bufferp)
885 {
886         bytes = (bytes + HAMMER_BUFMASK) & ~HAMMER_BUFMASK;
887         return(_hammer_bread(hmp, buf_offset, bytes, errorp, bufferp));
888 }
889
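/*
 * Illustrative sketch, not compiled: the *bufferp convention used by
 * hammer_bread().  The cached buffer pointer starts NULL, is transparently
 * released and replaced as different buffers are accessed, and is released
 * exactly once by the caller when it is done.  The function name is
 * hypothetical.
 */
#if 0
static void
hammer_example_bread(hammer_mount_t hmp, hammer_off_t data_offset)
{
	hammer_buffer_t buffer = NULL;
	void *data;
	int error;

	data = hammer_bread(hmp, data_offset, &error, &buffer);
	if (data) {
		/* ... examine the on-media bytes at data_offset ... */
	}
	if (buffer)
		hammer_rel_buffer(buffer, 0);
}
#endif
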
890 /*
891  * Access the filesystem buffer containing the specified hammer offset.
892  * No disk read operation occurs.  The result buffer may contain garbage.
893  *
894  * Any prior buffer in *bufferp will be released and replaced by the
895  * requested buffer.
896  *
897  * This function marks the buffer dirty but does not increment its
898  * modify_refs count.
899  */
900 static __inline
901 void *
902 _hammer_bnew(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
903              int *errorp, struct hammer_buffer **bufferp)
904 {
905         hammer_buffer_t buffer;
906         int32_t xoff = (int32_t)buf_offset & HAMMER_BUFMASK;
907
908         buf_offset &= ~HAMMER_BUFMASK64;
909
910         buffer = *bufferp;
911         if (buffer == NULL || (buffer->zone2_offset != buf_offset &&
912                                buffer->zoneX_offset != buf_offset)) {
913                 if (buffer)
914                         hammer_rel_buffer(buffer, 0);
915                 buffer = hammer_get_buffer(hmp, buf_offset, bytes, 1, errorp);
916                 *bufferp = buffer;
917         } else {
918                 *errorp = 0;
919         }
920
921         /*
922          * Return a pointer to the buffer data.
923          */
924         if (buffer == NULL)
925                 return(NULL);
926         else
927                 return((char *)buffer->ondisk + xoff);
928 }
929
930 void *
931 hammer_bnew(hammer_mount_t hmp, hammer_off_t buf_offset,
932              int *errorp, struct hammer_buffer **bufferp)
933 {
934         return(_hammer_bnew(hmp, buf_offset, HAMMER_BUFSIZE, errorp, bufferp));
935 }
936
937 void *
938 hammer_bnew_ext(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
939                 int *errorp, struct hammer_buffer **bufferp)
940 {
941         bytes = (bytes + HAMMER_BUFMASK) & ~HAMMER_BUFMASK;
942         return(_hammer_bnew(hmp, buf_offset, bytes, errorp, bufferp));
943 }
944
945 /************************************************************************
946  *                              NODES                                   *
947  ************************************************************************
948  *
949  * Manage B-Tree nodes.  B-Tree nodes represent the primary indexing
950  * method used by the HAMMER filesystem.
951  *
952  * Unlike other HAMMER structures, a hammer_node can be PASSIVELY
953  * associated with its buffer, and will only reference the buffer while
954  * the node itself is referenced.
955  *
956  * A hammer_node can also be passively associated with other HAMMER
957  * structures, such as inodes, while retaining 0 references.  These
958  * associations can be cleared backwards using a pointer-to-pointer in
959  * the hammer_node.
960  *
961  * This allows the HAMMER implementation to cache hammer_nodes long-term
962  * and short-cut a great deal of the infrastructure's complexity.  In
963  * most cases a cached node can be reacquired without having to dip into
964  * either the buffer or cluster management code.
965  *
966  * The caller must pass a referenced cluster on call and will retain
967  * ownership of the reference on return.  The node will acquire its own
968  * additional references, if necessary.
969  */
970 hammer_node_t
971 hammer_get_node(hammer_mount_t hmp, hammer_off_t node_offset,
972                 int isnew, int *errorp)
973 {
974         hammer_node_t node;
975
976         KKASSERT((node_offset & HAMMER_OFF_ZONE_MASK) == HAMMER_ZONE_BTREE);
977
978         /*
979          * Locate the structure, allocating one if necessary.
980          */
981 again:
982         node = RB_LOOKUP(hammer_nod_rb_tree, &hmp->rb_nods_root, node_offset);
983         if (node == NULL) {
984                 ++hammer_count_nodes;
985                 node = kmalloc(sizeof(*node), M_HAMMER, M_WAITOK|M_ZERO);
986                 node->node_offset = node_offset;
987                 node->hmp = hmp;
988                 TAILQ_INIT(&node->cache_list);
989                 if (RB_INSERT(hammer_nod_rb_tree, &hmp->rb_nods_root, node)) {
990                         --hammer_count_nodes;
991                         kfree(node, M_HAMMER);
992                         goto again;
993                 }
994         }
995         hammer_ref(&node->lock);
996         if (node->ondisk)
997                 *errorp = 0;
998         else
999                 *errorp = hammer_load_node(node, isnew);
1000         if (*errorp) {
1001                 hammer_rel_node(node);
1002                 node = NULL;
1003         }
1004         return(node);
1005 }
1006
1007 /*
1008  * Reference an already-referenced node.
1009  */
1010 void
1011 hammer_ref_node(hammer_node_t node)
1012 {
1013         KKASSERT(node->lock.refs > 0 && node->ondisk != NULL);
1014         hammer_ref(&node->lock);
1015 }
1016
1017 /*
1018  * Load a node's on-disk data reference.
1019  */
1020 static int
1021 hammer_load_node(hammer_node_t node, int isnew)
1022 {
1023         hammer_buffer_t buffer;
1024         hammer_off_t buf_offset;
1025         int error;
1026
1027         error = 0;
1028         ++node->loading;
1029         hammer_lock_ex(&node->lock);
1030         if (node->ondisk == NULL) {
1031                 /*
1032                  * This is a little confusing but the gist is that
1033                  * node->buffer determines whether the node is on
1034                  * the buffer's clist and node->ondisk determines
1035                  * whether the buffer is referenced.
1036                  *
1037                  * We could be racing a buffer release, in which case
1038                  * node->buffer may become NULL while we are blocked
1039                  * referencing the buffer.
1040                  */
1041                 if ((buffer = node->buffer) != NULL) {
1042                         error = hammer_ref_buffer(buffer);
1043                         if (error == 0 && node->buffer == NULL) {
1044                                 TAILQ_INSERT_TAIL(&buffer->clist,
1045                                                   node, entry);
1046                                 node->buffer = buffer;
1047                         }
1048                 } else {
1049                         buf_offset = node->node_offset & ~HAMMER_BUFMASK64;
1050                         buffer = hammer_get_buffer(node->hmp, buf_offset,
1051                                                    HAMMER_BUFSIZE, 0, &error);
1052                         if (buffer) {
1053                                 KKASSERT(error == 0);
1054                                 TAILQ_INSERT_TAIL(&buffer->clist,
1055                                                   node, entry);
1056                                 node->buffer = buffer;
1057                         }
1058                 }
1059                 if (error)
1060                         goto failed;
1061                 node->ondisk = (void *)((char *)buffer->ondisk +
1062                                         (node->node_offset & HAMMER_BUFMASK));
1063                 if (isnew == 0 && 
1064                     (node->flags & HAMMER_NODE_CRCGOOD) == 0) {
1065                         if (hammer_crc_test_btree(node->ondisk) == 0)
1066                                 Debugger("CRC FAILED: B-TREE NODE");
1067                         node->flags |= HAMMER_NODE_CRCGOOD;
1068                 }
1069         }
1070 failed:
1071         --node->loading;
1072         hammer_unlock(&node->lock);
1073         return (error);
1074 }
1075
1076 /*
1077  * Safely reference a node, interlock against flushes via the IO subsystem.
1078  */
1079 hammer_node_t
1080 hammer_ref_node_safe(struct hammer_mount *hmp, hammer_node_cache_t cache,
1081                      int *errorp)
1082 {
1083         hammer_node_t node;
1084
1085         node = cache->node;
1086         if (node != NULL) {
1087                 hammer_ref(&node->lock);
1088                 if (node->ondisk)
1089                         *errorp = 0;
1090                 else
1091                         *errorp = hammer_load_node(node, 0);
1092                 if (*errorp) {
1093                         hammer_rel_node(node);
1094                         node = NULL;
1095                 }
1096         } else {
1097                 *errorp = ENOENT;
1098         }
1099         return(node);
1100 }
1101
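/*
 * Illustrative sketch, not compiled: the passive caching pattern described
 * in the NODES comment above.  A referenced node is recorded in a
 * hammer_node_cache, the reference is dropped, and the node is later
 * reacquired cheaply via hammer_ref_node_safe().  The function name is
 * hypothetical and struct hammer_node_cache is assumed from hammer.h;
 * real callers embed the cache in an inode or cursor rather than zeroing
 * a local one.
 */
#if 0
static void
hammer_example_node_cache(hammer_mount_t hmp, hammer_off_t node_offset)
{
	struct hammer_node_cache cache;
	hammer_node_t node;
	int error;

	bzero(&cache, sizeof(cache));
	node = hammer_get_node(hmp, node_offset, 0, &error);
	if (node) {
		hammer_cache_node(&cache, node);
		hammer_rel_node(node);	/* the cache holds no reference */
	}
	/* ... much later ... */
	node = hammer_ref_node_safe(hmp, &cache, &error);
	if (node) {
		/* ... use node->ondisk ... */
		hammer_rel_node(node);
	}
	hammer_uncache_node(&cache);
}
#endif
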
1102 /*
1103  * Release a hammer_node.  On the last release the node dereferences
1104  * its underlying buffer and may or may not be destroyed.
1105  */
1106 void
1107 hammer_rel_node(hammer_node_t node)
1108 {
1109         hammer_buffer_t buffer;
1110
1111         /*
1112          * If this isn't the last ref just decrement the ref count and
1113          * return.
1114          */
1115         if (node->lock.refs > 1) {
1116                 hammer_unref(&node->lock);
1117                 return;
1118         }
1119
1120         /*
1121          * If there is no ondisk info or no buffer the node failed to load,
1122          * remove the last reference and destroy the node.
1123          */
1124         if (node->ondisk == NULL) {
1125                 hammer_unref(&node->lock);
1126                 hammer_flush_node(node);
1127                 /* node is stale now */
1128                 return;
1129         }
1130
1131         /*
1132          * Do not disassociate the node from the buffer if it represents
1133          * a modified B-Tree node that still needs its crc to be generated.
1134          */
1135         if (node->flags & HAMMER_NODE_NEEDSCRC)
1136                 return;
1137
1138         /*
1139          * Do final cleanups and then either destroy the node or leave it
1140          * passively cached.  The buffer reference is removed regardless.
1141          */
1142         buffer = node->buffer;
1143         node->ondisk = NULL;
1144
1145         if ((node->flags & HAMMER_NODE_FLUSH) == 0) {
1146                 hammer_unref(&node->lock);
1147                 hammer_rel_buffer(buffer, 0);
1148                 return;
1149         }
1150
1151         /*
1152          * Destroy the node.
1153          */
1154         hammer_unref(&node->lock);
1155         hammer_flush_node(node);
1156         /* node is stale */
1157         hammer_rel_buffer(buffer, 0);
1158 }
1159
1160 /*
1161  * Free space on-media associated with a B-Tree node.
1162  */
1163 void
1164 hammer_delete_node(hammer_transaction_t trans, hammer_node_t node)
1165 {
1166         KKASSERT((node->flags & HAMMER_NODE_DELETED) == 0);
1167         node->flags |= HAMMER_NODE_DELETED;
1168         hammer_blockmap_free(trans, node->node_offset, sizeof(*node->ondisk));
1169 }
1170
1171 /*
1172  * Passively cache a referenced hammer_node.  The caller may release
1173  * the node on return.
1174  */
1175 void
1176 hammer_cache_node(hammer_node_cache_t cache, hammer_node_t node)
1177 {
1178         /*
1179          * If the node is being deleted, don't cache it!
1180          */
1181         if (node->flags & HAMMER_NODE_DELETED)
1182                 return;
1183         if (cache->node == node)
1184                 return;
1185         while (cache->node)
1186                 hammer_uncache_node(cache);
1187         if (node->flags & HAMMER_NODE_DELETED)
1188                 return;
1189         cache->node = node;
1190         TAILQ_INSERT_TAIL(&node->cache_list, cache, entry);
1191 }
1192
1193 void
1194 hammer_uncache_node(hammer_node_cache_t cache)
1195 {
1196         hammer_node_t node;
1197
1198         if ((node = cache->node) != NULL) {
1199                 TAILQ_REMOVE(&node->cache_list, cache, entry);
1200                 cache->node = NULL;
1201                 if (TAILQ_EMPTY(&node->cache_list))
1202                         hammer_flush_node(node);
1203         }
1204 }
1205
1206 /*
1207  * Remove a node's cache references and destroy the node if it has no
1208  * other references or backing store.
1209  */
1210 void
1211 hammer_flush_node(hammer_node_t node)
1212 {
1213         hammer_node_cache_t cache;
1214         hammer_buffer_t buffer;
1215
1216         while ((cache = TAILQ_FIRST(&node->cache_list)) != NULL) {
1217                 TAILQ_REMOVE(&node->cache_list, cache, entry);
1218                 cache->node = NULL;
1219         }
1220         if (node->lock.refs == 0 && node->ondisk == NULL) {
1221                 KKASSERT((node->flags & HAMMER_NODE_NEEDSCRC) == 0);
1222                 RB_REMOVE(hammer_nod_rb_tree, &node->hmp->rb_nods_root, node);
1223                 if ((buffer = node->buffer) != NULL) {
1224                         node->buffer = NULL;
1225                         TAILQ_REMOVE(&buffer->clist, node, entry);
1226                         /* buffer is unreferenced because ondisk is NULL */
1227                 }
1228                 --hammer_count_nodes;
1229                 kfree(node, M_HAMMER);
1230         }
1231 }
1232
1233 /*
1234  * Flush passively cached B-Tree nodes associated with this buffer.
1235  * This is only called when the buffer is about to be destroyed, so
1236  * none of the nodes should have any references.  The buffer is locked.
1237  *
1238  * We may be interlocked with the buffer.
1239  */
1240 void
1241 hammer_flush_buffer_nodes(hammer_buffer_t buffer)
1242 {
1243         hammer_node_t node;
1244
1245         while ((node = TAILQ_FIRST(&buffer->clist)) != NULL) {
1246                 KKASSERT(node->ondisk == NULL);
1247                 KKASSERT((node->flags & HAMMER_NODE_NEEDSCRC) == 0);
1248
1249                 if (node->lock.refs == 0) {
1250                         hammer_ref(&node->lock);
1251                         node->flags |= HAMMER_NODE_FLUSH;
1252                         hammer_rel_node(node);
1253                 } else {
1254                         KKASSERT(node->loading != 0);
1255                         KKASSERT(node->buffer != NULL);
1256                         buffer = node->buffer;
1257                         node->buffer = NULL;
1258                         TAILQ_REMOVE(&buffer->clist, node, entry);
1259                         /* buffer is unreferenced because ondisk is NULL */
1260                 }
1261         }
1262 }
1263
1264
1265 /************************************************************************
1266  *                              ALLOCATORS                              *
1267  ************************************************************************/
1268
1269 /*
1270  * Allocate a B-Tree node.
1271  */
1272 hammer_node_t
1273 hammer_alloc_btree(hammer_transaction_t trans, int *errorp)
1274 {
1275         hammer_buffer_t buffer = NULL;
1276         hammer_node_t node = NULL;
1277         hammer_off_t node_offset;
1278
1279         node_offset = hammer_blockmap_alloc(trans, HAMMER_ZONE_BTREE_INDEX,
1280                                             sizeof(struct hammer_node_ondisk),
1281                                             errorp);
1282         if (*errorp == 0) {
1283                 node = hammer_get_node(trans->hmp, node_offset, 1, errorp);
1284                 hammer_modify_node_noundo(trans, node);
1285                 bzero(node->ondisk, sizeof(*node->ondisk));
1286                 hammer_modify_node_done(node);
1287         }
1288         if (buffer)
1289                 hammer_rel_buffer(buffer, 0);
1290         return(node);
1291 }
1292
1293 /*
1294  * Allocate data.  If the address of a data buffer is supplied then
1295  * any prior non-NULL *data_bufferp will be released and *data_bufferp
1296  * will be set to the related buffer.  The caller must release it when
1297  * finally done.  The initial *data_bufferp should be set to NULL by
1298  * the caller.
1299  *
1300  * The caller is responsible for making hammer_modify*() calls on the
1301  * *data_bufferp.
1302  */
1303 void *
1304 hammer_alloc_data(hammer_transaction_t trans, int32_t data_len, 
1305                   u_int16_t rec_type, hammer_off_t *data_offsetp,
1306                   struct hammer_buffer **data_bufferp, int *errorp)
1307 {
1308         void *data;
1309         int zone;
1310
1311         /*
1312          * Allocate data
1313          */
1314         if (data_len) {
1315                 switch(rec_type) {
1316                 case HAMMER_RECTYPE_INODE:
1317                 case HAMMER_RECTYPE_PSEUDO_INODE:
1318                 case HAMMER_RECTYPE_DIRENTRY:
1319                 case HAMMER_RECTYPE_EXT:
1320                 case HAMMER_RECTYPE_FIX:
1321                         zone = HAMMER_ZONE_META_INDEX;
1322                         break;
1323                 case HAMMER_RECTYPE_DATA:
1324                 case HAMMER_RECTYPE_DB:
1325                         if (data_len <= HAMMER_BUFSIZE / 2) {
1326                                 zone = HAMMER_ZONE_SMALL_DATA_INDEX;
1327                         } else {
1328                                 data_len = (data_len + HAMMER_BUFMASK) &
1329                                            ~HAMMER_BUFMASK;
1330                                 zone = HAMMER_ZONE_LARGE_DATA_INDEX;
1331                         }
1332                         break;
1333                 default:
1334                         panic("hammer_alloc_data: rec_type %04x unknown",
1335                               rec_type);
1336                         zone = 0;       /* NOT REACHED */
1337                         break;
1338                 }
1339                 *data_offsetp = hammer_blockmap_alloc(trans, zone,
1340                                                       data_len, errorp);
1341         } else {
1342                 *data_offsetp = 0;
1343         }
1344         if (*errorp == 0 && data_bufferp) {
1345                 if (data_len) {
1346                         data = hammer_bread_ext(trans->hmp, *data_offsetp,
1347                                                 data_len, errorp, data_bufferp);
1348                         KKASSERT(*errorp == 0);
1349                 } else {
1350                         data = NULL;
1351                 }
1352         } else {
1353                 data = NULL;
1354         }
1355         KKASSERT(*errorp == 0);
1356         return(data);
1357 }
1358
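/*
 * Illustrative sketch, not compiled: allocating record data as described
 * above.  The data_buffer pointer starts NULL, the returned data is
 * dirtied under the hammer_modify_buffer() interface used elsewhere in
 * HAMMER, and the buffer is released by the caller.  The function name
 * and copy source are hypothetical.
 */
#if 0
static void
hammer_example_alloc_data(hammer_transaction_t trans, void *src, int32_t len)
{
	hammer_buffer_t data_buffer = NULL;
	hammer_off_t data_offset;
	void *data;
	int error;

	data = hammer_alloc_data(trans, len, HAMMER_RECTYPE_DATA,
				 &data_offset, &data_buffer, &error);
	if (error == 0 && data) {
		hammer_modify_buffer(trans, data_buffer, NULL, 0);
		bcopy(src, data, len);
		hammer_modify_buffer_done(data_buffer);
	}
	if (data_buffer)
		hammer_rel_buffer(data_buffer, 0);
}
#endif
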
1359 /*
1360  * Sync dirty buffers to the media and clean-up any loose ends.
1361  */
1362 static int hammer_sync_scan1(struct mount *mp, struct vnode *vp, void *data);
1363 static int hammer_sync_scan2(struct mount *mp, struct vnode *vp, void *data);
1364
1365 int
1366 hammer_queue_inodes_flusher(hammer_mount_t hmp, int waitfor)
1367 {
1368         struct hammer_sync_info info;
1369
1370         info.error = 0;
1371         info.waitfor = waitfor;
1372         if (waitfor == MNT_WAIT) {
1373                 vmntvnodescan(hmp->mp, VMSC_GETVP|VMSC_ONEPASS,
1374                               hammer_sync_scan1, hammer_sync_scan2, &info);
1375         } else {
1376                 vmntvnodescan(hmp->mp, VMSC_GETVP|VMSC_ONEPASS|VMSC_NOWAIT,
1377                               hammer_sync_scan1, hammer_sync_scan2, &info);
1378         }
1379         return(info.error);
1380 }
1381
1382 int
1383 hammer_sync_hmp(hammer_mount_t hmp, int waitfor)
1384 {
1385         struct hammer_sync_info info;
1386
1387         info.error = 0;
1388         info.waitfor = waitfor;
1389
1390         vmntvnodescan(hmp->mp, VMSC_GETVP|VMSC_NOWAIT,
1391                       hammer_sync_scan1, hammer_sync_scan2, &info);
1392         if (waitfor == MNT_WAIT)
1393                 hammer_flusher_sync(hmp);
1394         else
1395                 hammer_flusher_async(hmp);
1396
1397         return(info.error);
1398 }
1399
1400 static int
1401 hammer_sync_scan1(struct mount *mp, struct vnode *vp, void *data)
1402 {
1403         struct hammer_inode *ip;
1404
1405         ip = VTOI(vp);
1406         if (vp->v_type == VNON || ip == NULL ||
1407             ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
1408              RB_EMPTY(&vp->v_rbdirty_tree))) {
1409                 return(-1);
1410         }
1411         return(0);
1412 }
1413
1414 static int
1415 hammer_sync_scan2(struct mount *mp, struct vnode *vp, void *data)
1416 {
1417         struct hammer_sync_info *info = data;
1418         struct hammer_inode *ip;
1419         int error;
1420
1421         ip = VTOI(vp);
1422         if (vp->v_type == VNON || vp->v_type == VBAD ||
1423             ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
1424              RB_EMPTY(&vp->v_rbdirty_tree))) {
1425                 return(0);
1426         }
1427         error = VOP_FSYNC(vp, info->waitfor);
1428         if (error)
1429                 info->error = error;
1430         return(0);
1431 }
1432
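/*
 * Illustrative sketch, not compiled: how a VFS sync entry point might
 * drive the helpers above, assuming the hammer_mount is reachable through
 * mp->mnt_data as it is for HAMMER mounts.  The function name is
 * hypothetical.
 */
#if 0
static int
hammer_example_vfs_sync(struct mount *mp, int waitfor)
{
	hammer_mount_t hmp = (void *)mp->mnt_data;

	/*
	 * Flush dirty vnodes via the scan1/scan2 callbacks above, then run
	 * the flusher synchronously or asynchronously depending on waitfor.
	 */
	return (hammer_sync_hmp(hmp, waitfor));
}
#endif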