HAMMER 62/Many: Stabilization, performance, and cleanup
/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_ondisk.c,v 1.70 2008/07/16 18:30:59 dillon Exp $
 */
/*
 * Manage HAMMER's on-disk structures.  These routines are primarily
 * responsible for interfacing with the kernel's I/O subsystem and for
 * managing in-memory structures.
 */

#include "hammer.h"
#include <sys/fcntl.h>
#include <sys/nlookup.h>
#include <sys/buf.h>
#include <sys/buf2.h>

static void hammer_free_volume(hammer_volume_t volume);
static int hammer_load_volume(hammer_volume_t volume);
static int hammer_load_buffer(hammer_buffer_t buffer, int isnew);
static int hammer_load_node(hammer_node_t node, int isnew);

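/*
 * RB-tree comparison helpers: volumes sort by volume number, buffers
 * by zone-X offset, and B-Tree nodes by node offset.
 */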
static int
hammer_vol_rb_compare(hammer_volume_t vol1, hammer_volume_t vol2)
{
        if (vol1->vol_no < vol2->vol_no)
                return(-1);
        if (vol1->vol_no > vol2->vol_no)
                return(1);
        return(0);
}

static int
hammer_buf_rb_compare(hammer_buffer_t buf1, hammer_buffer_t buf2)
{
        if (buf1->zoneX_offset < buf2->zoneX_offset)
                return(-1);
        if (buf1->zoneX_offset > buf2->zoneX_offset)
                return(1);
        return(0);
}

static int
hammer_nod_rb_compare(hammer_node_t node1, hammer_node_t node2)
{
        if (node1->node_offset < node2->node_offset)
                return(-1);
        if (node1->node_offset > node2->node_offset)
                return(1);
        return(0);
}

RB_GENERATE2(hammer_vol_rb_tree, hammer_volume, rb_node,
             hammer_vol_rb_compare, int32_t, vol_no);
RB_GENERATE2(hammer_buf_rb_tree, hammer_buffer, rb_node,
             hammer_buf_rb_compare, hammer_off_t, zoneX_offset);
RB_GENERATE2(hammer_nod_rb_tree, hammer_node, rb_node,
             hammer_nod_rb_compare, hammer_off_t, node_offset);

/************************************************************************
 *                              VOLUMES                                 *
 ************************************************************************
 *
 * Load a HAMMER volume by name.  Returns 0 on success or a positive error
 * code on failure.  Volumes must be loaded at mount time, get_volume() will
 * not load a new volume.
 *
 * Calls made to hammer_load_volume() are single-threaded.
 */
int
hammer_install_volume(struct hammer_mount *hmp, const char *volname)
{
        struct mount *mp;
        hammer_volume_t volume;
        struct hammer_volume_ondisk *ondisk;
        struct nlookupdata nd;
        struct buf *bp = NULL;
        int error;
        int ronly;
        int setmp = 0;

        mp = hmp->mp;
        ronly = ((mp->mnt_flag & MNT_RDONLY) ? 1 : 0);

        /*
         * Allocate a volume structure
         */
        ++hammer_count_volumes;
        volume = kmalloc(sizeof(*volume), M_HAMMER, M_WAITOK|M_ZERO);
        volume->vol_name = kstrdup(volname, M_HAMMER);
        hammer_io_init(&volume->io, hmp, HAMMER_STRUCTURE_VOLUME);
        volume->io.offset = 0LL;
        volume->io.bytes = HAMMER_BUFSIZE;

        /*
         * Get the device vnode
         */
        error = nlookup_init(&nd, volume->vol_name, UIO_SYSSPACE, NLC_FOLLOW);
        if (error == 0)
                error = nlookup(&nd);
        if (error == 0)
                error = cache_vref(&nd.nl_nch, nd.nl_cred, &volume->devvp);
        nlookup_done(&nd);
        if (error == 0) {
                if (vn_isdisk(volume->devvp, &error)) {
                        error = vfs_mountedon(volume->devvp);
                }
        }
        if (error == 0 &&
            count_udev(volume->devvp->v_umajor, volume->devvp->v_uminor) > 0) {
                error = EBUSY;
        }
        if (error == 0) {
                vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
                error = vinvalbuf(volume->devvp, V_SAVE, 0, 0);
                if (error == 0) {
                        error = VOP_OPEN(volume->devvp,
                                         (ronly ? FREAD : FREAD|FWRITE),
                                         FSCRED, NULL);
                }
                vn_unlock(volume->devvp);
        }
        if (error) {
                hammer_free_volume(volume);
                return(error);
        }
        volume->devvp->v_rdev->si_mountpoint = mp;
        setmp = 1;

        /*
         * Extract the volume number from the volume header and do various
         * sanity checks.
         */
        error = bread(volume->devvp, 0LL, HAMMER_BUFSIZE, &bp);
        if (error)
                goto late_failure;
        ondisk = (void *)bp->b_data;
        if (ondisk->vol_signature != HAMMER_FSBUF_VOLUME) {
                kprintf("hammer_mount: volume %s has an invalid header\n",
                        volume->vol_name);
                error = EFTYPE;
                goto late_failure;
        }
        volume->vol_no = ondisk->vol_no;
        volume->buffer_base = ondisk->vol_buf_beg;
        volume->vol_flags = ondisk->vol_flags;
        volume->nblocks = ondisk->vol_nblocks;
        volume->maxbuf_off = HAMMER_ENCODE_RAW_BUFFER(volume->vol_no,
                                    ondisk->vol_buf_end - ondisk->vol_buf_beg);
        volume->maxraw_off = ondisk->vol_buf_end;

        if (RB_EMPTY(&hmp->rb_vols_root)) {
                hmp->fsid = ondisk->vol_fsid;
        } else if (bcmp(&hmp->fsid, &ondisk->vol_fsid, sizeof(uuid_t))) {
                kprintf("hammer_mount: volume %s's fsid does not match "
                        "other volumes\n", volume->vol_name);
                error = EFTYPE;
                goto late_failure;
        }

        /*
         * Insert the volume structure into the red-black tree.
         */
        if (RB_INSERT(hammer_vol_rb_tree, &hmp->rb_vols_root, volume)) {
                kprintf("hammer_mount: volume %s has a duplicate vol_no %d\n",
                        volume->vol_name, volume->vol_no);
                error = EEXIST;
        }

        /*
         * Set the root volume.  HAMMER special-cases the rootvol structure.
         * We do not hold a ref because this would prevent related I/O
         * from being flushed.
         */
        if (error == 0 && ondisk->vol_rootvol == ondisk->vol_no) {
                hmp->rootvol = volume;
                hmp->nvolumes = ondisk->vol_count;
                if (bp) {
                        brelse(bp);
                        bp = NULL;
                }
                hmp->mp->mnt_stat.f_blocks += ondisk->vol0_stat_bigblocks *
                        (HAMMER_LARGEBLOCK_SIZE / HAMMER_BUFSIZE);
                hmp->mp->mnt_vstat.f_blocks += ondisk->vol0_stat_bigblocks *
                        (HAMMER_LARGEBLOCK_SIZE / HAMMER_BUFSIZE);
        }
late_failure:
        if (bp)
                brelse(bp);
        if (error) {
                /*vinvalbuf(volume->devvp, V_SAVE, 0, 0);*/
                if (setmp)
                        volume->devvp->v_rdev->si_mountpoint = NULL;
                VOP_CLOSE(volume->devvp, ronly ? FREAD : FREAD|FWRITE);
                hammer_free_volume(volume);
        }
        return (error);
}
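
/*
 * Usage sketch (an assumption about the mount path, not copied from it):
 * hammer_vfs_mount() is expected to call hammer_install_volume() once
 * per volume path handed in by mount(2) and then verify coverage, e.g.:
 *
 *      for (i = 0; error == 0 && i < nvolumes; ++i)
 *              error = hammer_install_volume(hmp, volpaths[i]);
 *      if (error == 0)
 *              error = hammer_mountcheck_volumes(hmp);
 *
 * The actual loop lives in hammer_vfsops.c and may differ in detail.
 */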

/*
 * This is called for each volume when updating the mount point from
 * read-write to read-only or vice-versa.
 */
int
hammer_adjust_volume_mode(hammer_volume_t volume, void *data __unused)
{
        if (volume->devvp) {
                vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
                if (volume->io.hmp->ronly) {
                        /* do not call vinvalbuf */
                        VOP_OPEN(volume->devvp, FREAD, FSCRED, NULL);
                        VOP_CLOSE(volume->devvp, FREAD|FWRITE);
                } else {
                        /* do not call vinvalbuf */
                        VOP_OPEN(volume->devvp, FREAD|FWRITE, FSCRED, NULL);
                        VOP_CLOSE(volume->devvp, FREAD);
                }
                vn_unlock(volume->devvp);
        }
        return(0);
}

/*
 * Unload and free a HAMMER volume.  Must return >= 0 to continue the
 * scan, so -1 is returned on failure (aborting the scan).
 */
int
hammer_unload_volume(hammer_volume_t volume, void *data __unused)
{
        struct hammer_mount *hmp = volume->io.hmp;
        int ronly = ((hmp->mp->mnt_flag & MNT_RDONLY) ? 1 : 0);
        struct buf *bp;

        /*
         * Clean up the root volume pointer, which is held unlocked in hmp.
         */
        if (hmp->rootvol == volume)
                hmp->rootvol = NULL;

        /*
         * Release our buffer and flush anything left in the buffer cache.
         */
        volume->io.waitdep = 1;
        bp = hammer_io_release(&volume->io, 1);
        hammer_io_clear_modlist(&volume->io);

        /*
         * There should be no references on the volume, no clusters, and
         * no super-clusters.
         */
        KKASSERT(volume->io.lock.refs == 0);
        if (bp)
                brelse(bp);

        volume->ondisk = NULL;
        if (volume->devvp) {
                if (volume->devvp->v_rdev &&
                    volume->devvp->v_rdev->si_mountpoint == hmp->mp
                ) {
                        volume->devvp->v_rdev->si_mountpoint = NULL;
                }
                if (ronly) {
                        vinvalbuf(volume->devvp, 0, 0, 0);
                        VOP_CLOSE(volume->devvp, FREAD);
                } else {
                        vinvalbuf(volume->devvp, V_SAVE, 0, 0);
                        VOP_CLOSE(volume->devvp, FREAD|FWRITE);
                }
        }

        /*
         * Destroy the structure
         */
        RB_REMOVE(hammer_vol_rb_tree, &hmp->rb_vols_root, volume);
        hammer_free_volume(volume);
        return(0);
}

static
void
hammer_free_volume(hammer_volume_t volume)
{
        if (volume->vol_name) {
                kfree(volume->vol_name, M_HAMMER);
                volume->vol_name = NULL;
        }
        if (volume->devvp) {
                vrele(volume->devvp);
                volume->devvp = NULL;
        }
        --hammer_count_volumes;
        kfree(volume, M_HAMMER);
}

/*
 * Get a HAMMER volume.  The volume must already exist.
 */
hammer_volume_t
hammer_get_volume(struct hammer_mount *hmp, int32_t vol_no, int *errorp)
{
        struct hammer_volume *volume;

        /*
         * Locate the volume structure
         */
        volume = RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, vol_no);
        if (volume == NULL) {
                *errorp = ENOENT;
                return(NULL);
        }
        hammer_ref(&volume->io.lock);

        /*
         * Deal with on-disk info
         */
        if (volume->ondisk == NULL || volume->io.loading) {
                *errorp = hammer_load_volume(volume);
                if (*errorp) {
                        hammer_rel_volume(volume, 1);
                        volume = NULL;
                }
        } else {
                *errorp = 0;
        }
        return(volume);
}

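/*
 * Add a reference to a previously-located volume, loading its on-disk
 * header if it is not already resident.
 */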
int
hammer_ref_volume(hammer_volume_t volume)
{
        int error;

        hammer_ref(&volume->io.lock);

        /*
         * Deal with on-disk info
         */
        if (volume->ondisk == NULL || volume->io.loading) {
                error = hammer_load_volume(volume);
                if (error)
                        hammer_rel_volume(volume, 1);
        } else {
                error = 0;
        }
        return (error);
}

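/*
 * Return a referenced root volume.  The root volume pointer is held
 * unlocked in hmp and must exist once the filesystem is mounted.
 */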
hammer_volume_t
hammer_get_root_volume(struct hammer_mount *hmp, int *errorp)
{
        hammer_volume_t volume;

        volume = hmp->rootvol;
        KKASSERT(volume != NULL);
        hammer_ref(&volume->io.lock);

        /*
         * Deal with on-disk info
         */
        if (volume->ondisk == NULL || volume->io.loading) {
                *errorp = hammer_load_volume(volume);
                if (*errorp) {
                        hammer_rel_volume(volume, 1);
                        volume = NULL;
                }
        } else {
                *errorp = 0;
        }
        return (volume);
}

/*
 * Load a volume's on-disk information.  The volume must be referenced and
 * not locked.  We temporarily acquire an exclusive lock to interlock
 * against releases or multiple get's.
 */
static int
hammer_load_volume(hammer_volume_t volume)
{
        int error;

        ++volume->io.loading;
        hammer_lock_ex(&volume->io.lock);

        if (volume->ondisk == NULL) {
                error = hammer_io_read(volume->devvp, &volume->io,
                                       volume->maxraw_off);
                if (error == 0)
                        volume->ondisk = (void *)volume->io.bp->b_data;
        } else {
                error = 0;
        }
        --volume->io.loading;
        hammer_unlock(&volume->io.lock);
        return(error);
}

/*
 * Release a volume.  Call hammer_io_release on the last reference.  We have
 * to acquire an exclusive lock to interlock against volume->ondisk tests
 * in hammer_load_volume(), and hammer_io_release() also expects an exclusive
 * lock to be held.
 *
 * Volumes are not unloaded from memory during normal operation.
 */
void
hammer_rel_volume(hammer_volume_t volume, int flush)
{
        struct buf *bp = NULL;

        crit_enter();
        if (volume->io.lock.refs == 1) {
                ++volume->io.loading;
                hammer_lock_ex(&volume->io.lock);
                if (volume->io.lock.refs == 1) {
                        volume->ondisk = NULL;
                        bp = hammer_io_release(&volume->io, flush);
                }
                --volume->io.loading;
                hammer_unlock(&volume->io.lock);
        }
        hammer_unref(&volume->io.lock);
        if (bp)
                brelse(bp);
        crit_exit();
}

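/*
 * Verify that every expected volume number (0 through nvolumes-1) was
 * installed at mount time.
 */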
int
hammer_mountcheck_volumes(struct hammer_mount *hmp)
{
        hammer_volume_t vol;
        int i;

        for (i = 0; i < hmp->nvolumes; ++i) {
                vol = RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, i);
                if (vol == NULL)
                        return(EINVAL);
        }
        return(0);
}

/************************************************************************
 *                              BUFFERS                                 *
 ************************************************************************
 *
 * Manage buffers.  Currently all blockmap-backed zones are translated
 * to zone-2 buffer offsets.
 */
hammer_buffer_t
hammer_get_buffer(hammer_mount_t hmp, hammer_off_t buf_offset,
                  int bytes, int isnew, int *errorp)
{
        hammer_buffer_t buffer;
        hammer_volume_t volume;
        hammer_off_t    zone2_offset;
        hammer_io_type_t iotype;
        int vol_no;
        int zone;

        buf_offset &= ~HAMMER_BUFMASK64;
again:
        /*
         * Shortcut if the buffer is already cached
         */
        buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root, buf_offset);
        if (buffer) {
                if (buffer->io.lock.refs == 0)
                        ++hammer_count_refedbufs;
                hammer_ref(&buffer->io.lock);

                /*
                 * Once ref'd the ondisk field will not be cleared by
                 * any other action.
                 */
                if (buffer->ondisk && buffer->io.loading == 0) {
                        *errorp = 0;
                        return(buffer);
                }

                /*
                 * The buffer is no longer loose if it has a ref, and
                 * cannot become loose once it gains a ref.  Loose
                 * buffers will never be in a modified state.  This should
                 * only occur on the 0->1 transition of refs.
                 *
                 * lose_list can be modified via a biodone() interrupt.
                 */
                if (buffer->io.mod_list == &hmp->lose_list) {
                        crit_enter();   /* biodone race against list */
                        TAILQ_REMOVE(buffer->io.mod_list, &buffer->io,
                                     mod_entry);
                        crit_exit();
                        buffer->io.mod_list = NULL;
                        KKASSERT(buffer->io.modified == 0);
                }
                goto found;
        }

        /*
         * What is the buffer class?
         */
        zone = HAMMER_ZONE_DECODE(buf_offset);

        switch(zone) {
        case HAMMER_ZONE_LARGE_DATA_INDEX:
        case HAMMER_ZONE_SMALL_DATA_INDEX:
                iotype = HAMMER_STRUCTURE_DATA_BUFFER;
                break;
        case HAMMER_ZONE_UNDO_INDEX:
                iotype = HAMMER_STRUCTURE_UNDO_BUFFER;
                break;
        case HAMMER_ZONE_META_INDEX:
        default:
                /*
                 * NOTE: inode data and directory entries are placed in this
                 * zone.  inode atime/mtime is updated in-place and thus
                 * buffers containing inodes must be synchronized as
                 * meta-buffers, same as buffers containing B-Tree info.
                 */
                iotype = HAMMER_STRUCTURE_META_BUFFER;
                break;
        }

        /*
         * Handle blockmap offset translations
         */
        if (zone >= HAMMER_ZONE_BTREE_INDEX) {
                zone2_offset = hammer_blockmap_lookup(hmp, buf_offset, errorp);
        } else if (zone == HAMMER_ZONE_UNDO_INDEX) {
                zone2_offset = hammer_undo_lookup(hmp, buf_offset, errorp);
        } else {
                KKASSERT(zone == HAMMER_ZONE_RAW_BUFFER_INDEX);
                zone2_offset = buf_offset;
                *errorp = 0;
        }
        if (*errorp)
                return(NULL);

        /*
         * NOTE: zone2_offset and maxbuf_off are both full zone-2 offset
         * specifications.
         */
        KKASSERT((zone2_offset & HAMMER_OFF_ZONE_MASK) ==
                 HAMMER_ZONE_RAW_BUFFER);
        vol_no = HAMMER_VOL_DECODE(zone2_offset);
        volume = hammer_get_volume(hmp, vol_no, errorp);
        if (volume == NULL)
                return(NULL);

        KKASSERT(zone2_offset < volume->maxbuf_off);

        /*
         * Allocate a new buffer structure.  We will check for races later.
         */
        ++hammer_count_buffers;
        buffer = kmalloc(sizeof(*buffer), M_HAMMER,
                         M_WAITOK|M_ZERO|M_USE_RESERVE);
        buffer->zone2_offset = zone2_offset;
        buffer->zoneX_offset = buf_offset;
        buffer->volume = volume;

        hammer_io_init(&buffer->io, hmp, iotype);
        buffer->io.offset = volume->ondisk->vol_buf_beg +
                            (zone2_offset & HAMMER_OFF_SHORT_MASK);
        buffer->io.bytes = bytes;
        TAILQ_INIT(&buffer->clist);
        hammer_ref(&buffer->io.lock);

        /*
         * Insert the buffer into the RB tree and handle late collisions.
         */
        if (RB_INSERT(hammer_buf_rb_tree, &hmp->rb_bufs_root, buffer)) {
                hammer_unref(&buffer->io.lock);
                --hammer_count_buffers;
                kfree(buffer, M_HAMMER);
                goto again;
        }
        ++hammer_count_refedbufs;
found:

        /*
         * Deal with on-disk info and loading races.
         */
        if (buffer->ondisk == NULL || buffer->io.loading) {
                *errorp = hammer_load_buffer(buffer, isnew);
                if (*errorp) {
                        hammer_rel_buffer(buffer, 1);
                        buffer = NULL;
                }
        } else {
                *errorp = 0;
        }
        return(buffer);
}
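
/*
 * Note on usage: callers pair hammer_get_buffer() with
 * hammer_rel_buffer().  Most code does not call this routine directly
 * but goes through the hammer_bread()/hammer_bnew() wrappers further
 * below, which manage a caller-supplied cached buffer pointer.
 */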

/*
 * This is used by the direct-read code to deal with large-data buffers
 * created by the reblocker and mirror-write code.  The direct-read code
 * bypasses the HAMMER buffer subsystem and so any aliased dirty hammer
 * buffers must be fully synced to disk before we can issue the direct-read.
 *
 * This code path is not considered critical as only the reblocker and
 * mirror-write code will create large-data buffers via the HAMMER buffer
 * subsystem.  They do that because they operate at the B-Tree level and
 * do not access the vnode/inode structures.
 */
void
hammer_sync_buffers(hammer_mount_t hmp, hammer_off_t base_offset, int bytes)
{
        hammer_buffer_t buffer;
        int error;

        KKASSERT((base_offset & HAMMER_OFF_ZONE_MASK) ==
                 HAMMER_ZONE_LARGE_DATA);

        while (bytes > 0) {
                buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root,
                                   base_offset);
                if (buffer && buffer->io.modified) {
                        error = hammer_ref_buffer(buffer);
                        if (error == 0 && buffer->io.modified) {
                                hammer_io_write_interlock(&buffer->io);
                                hammer_io_flush(&buffer->io);
                                hammer_io_done_interlock(&buffer->io);
                                hammer_io_wait(&buffer->io);
                                hammer_rel_buffer(buffer, 0);
                        }
                }
                base_offset += HAMMER_BUFSIZE;
                bytes -= HAMMER_BUFSIZE;
        }
}

/*
 * Destroy all buffers covering the specified zoneX offset range.  This
 * is called when the related blockmap layer2 entry is freed or when
 * a direct write bypasses our buffer/buffer-cache subsystem.
 *
 * The buffers may be referenced by the caller itself.  Setting reclaim
 * will cause the buffer to be destroyed when its ref count reaches zero.
 */
void
hammer_del_buffers(hammer_mount_t hmp, hammer_off_t base_offset,
                   hammer_off_t zone2_offset, int bytes)
{
        hammer_buffer_t buffer;
        hammer_volume_t volume;
        int vol_no;
        int error;

        vol_no = HAMMER_VOL_DECODE(zone2_offset);
        volume = hammer_get_volume(hmp, vol_no, &error);
        KKASSERT(error == 0);

        while (bytes > 0) {
                buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root,
                                   base_offset);
                if (buffer) {
                        error = hammer_ref_buffer(buffer);
                        if (error == 0) {
                                KKASSERT(buffer->zone2_offset == zone2_offset);
                                hammer_io_clear_modify(&buffer->io, 1);
                                buffer->io.reclaim = 1;
                                KKASSERT(buffer->volume == volume);
                                hammer_rel_buffer(buffer, 0);
                        }
                } else {
                        hammer_io_inval(volume, zone2_offset);
                }
                base_offset += HAMMER_BUFSIZE;
                zone2_offset += HAMMER_BUFSIZE;
                bytes -= HAMMER_BUFSIZE;
        }
        hammer_rel_volume(volume, 0);
}

static int
hammer_load_buffer(hammer_buffer_t buffer, int isnew)
{
        hammer_volume_t volume;
        int error;

        /*
         * Load the buffer's on-disk info
         */
        volume = buffer->volume;
        ++buffer->io.loading;
        hammer_lock_ex(&buffer->io.lock);

        if (hammer_debug_io & 0x0001) {
                kprintf("load_buffer %016llx %016llx isnew=%d od=%p\n",
                        buffer->zoneX_offset, buffer->zone2_offset, isnew,
                        buffer->ondisk);
        }

        if (buffer->ondisk == NULL) {
                if (isnew) {
                        error = hammer_io_new(volume->devvp, &buffer->io);
                } else {
                        error = hammer_io_read(volume->devvp, &buffer->io,
                                               volume->maxraw_off);
                }
                if (error == 0)
                        buffer->ondisk = (void *)buffer->io.bp->b_data;
        } else if (isnew) {
                error = hammer_io_new(volume->devvp, &buffer->io);
        } else {
                error = 0;
        }
        --buffer->io.loading;
        hammer_unlock(&buffer->io.lock);
        return (error);
}

/*
 * NOTE: Called from RB_SCAN, must return >= 0 for scan to continue.
 */
int
hammer_unload_buffer(hammer_buffer_t buffer, void *data __unused)
{
        ++hammer_count_refedbufs;
        hammer_ref(&buffer->io.lock);
        hammer_flush_buffer_nodes(buffer);
        KKASSERT(buffer->io.lock.refs == 1);
        hammer_rel_buffer(buffer, 2);
        return(0);
}

/*
 * Reference a buffer that is either already referenced or via a specially
 * handled pointer (aka cursor->buffer).
 */
int
hammer_ref_buffer(hammer_buffer_t buffer)
{
        int error;

        if (buffer->io.lock.refs == 0)
                ++hammer_count_refedbufs;
        hammer_ref(&buffer->io.lock);

        /*
         * At this point a biodone() will not touch the buffer other than
         * incidental bits.  However, lose_list can be modified via
         * a biodone() interrupt.
         *
         * No longer loose
         */
        if (buffer->io.mod_list == &buffer->io.hmp->lose_list) {
                crit_enter();
                TAILQ_REMOVE(buffer->io.mod_list, &buffer->io, mod_entry);
                buffer->io.mod_list = NULL;
                crit_exit();
        }

        if (buffer->ondisk == NULL || buffer->io.loading) {
                error = hammer_load_buffer(buffer, 0);
                if (error) {
                        hammer_rel_buffer(buffer, 1);
                        /*
                         * NOTE: buffer pointer can become stale after
                         * the above release.
                         */
                }
        } else {
                error = 0;
        }
        return(error);
}

/*
 * Release a buffer.  We have to deal with several places where
 * another thread can ref the buffer.
 *
 * Only destroy the structure itself if the related buffer cache buffer
 * was disassociated from it.  This ties the management of the structure
 * to the buffer cache subsystem.  buffer->ondisk determines whether the
 * embedded io is referenced or not.
 */
void
hammer_rel_buffer(hammer_buffer_t buffer, int flush)
{
        hammer_volume_t volume;
        struct buf *bp = NULL;
        int freeme = 0;

        crit_enter();
        if (buffer->io.lock.refs == 1) {
                ++buffer->io.loading;   /* force interlock check */
                hammer_lock_ex(&buffer->io.lock);
                if (buffer->io.lock.refs == 1) {
                        bp = hammer_io_release(&buffer->io, flush);

                        if (buffer->io.lock.refs == 1)
                                --hammer_count_refedbufs;

                        if (buffer->io.bp == NULL &&
                            buffer->io.lock.refs == 1) {
                                /*
                                 * Final cleanup
                                 *
                                 * NOTE: It is impossible for any associated
                                 * B-Tree nodes to have refs if the buffer
                                 * has no additional refs.
                                 */
                                RB_REMOVE(hammer_buf_rb_tree,
                                          &buffer->io.hmp->rb_bufs_root,
                                          buffer);
                                volume = buffer->volume;
                                buffer->volume = NULL; /* sanity */
                                hammer_rel_volume(volume, 0);
                                hammer_io_clear_modlist(&buffer->io);
                                hammer_flush_buffer_nodes(buffer);
                                KKASSERT(TAILQ_EMPTY(&buffer->clist));
                                freeme = 1;
                        }
                }
                --buffer->io.loading;
                hammer_unlock(&buffer->io.lock);
        }
        hammer_unref(&buffer->io.lock);
        crit_exit();
        if (bp)
                brelse(bp);
        if (freeme) {
                --hammer_count_buffers;
                kfree(buffer, M_HAMMER);
        }
}

/*
 * Access the filesystem buffer containing the specified hammer offset.
 * buf_offset is a conglomeration of the volume number and vol_buf_beg
 * relative buffer offset.  It must also have bit 55 set to be valid.
 * (see hammer_off_t in hammer_disk.h).
 *
 * Any prior buffer in *bufferp will be released and replaced by the
 * requested buffer.
 */
static __inline
void *
_hammer_bread(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
             int *errorp, struct hammer_buffer **bufferp)
{
        hammer_buffer_t buffer;
        int32_t xoff = (int32_t)buf_offset & HAMMER_BUFMASK;

        buf_offset &= ~HAMMER_BUFMASK64;
        KKASSERT((buf_offset & HAMMER_OFF_ZONE_MASK) != 0);

        buffer = *bufferp;
        if (buffer == NULL || (buffer->zone2_offset != buf_offset &&
                               buffer->zoneX_offset != buf_offset)) {
                if (buffer)
                        hammer_rel_buffer(buffer, 0);
                buffer = hammer_get_buffer(hmp, buf_offset, bytes, 0, errorp);
                *bufferp = buffer;
        } else {
                *errorp = 0;
        }

        /*
         * Return a pointer to the buffer data.
         */
        if (buffer == NULL)
                return(NULL);
        else
                return((char *)buffer->ondisk + xoff);
}
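
/*
 * Usage sketch (an illustration, not lifted from a specific call site):
 * callers keep the buffer cached across successive hammer_bread() calls
 * and release it when finished, e.g.:
 *
 *      struct hammer_buffer *buffer = NULL;
 *      void *data;
 *      int error;
 *
 *      data = hammer_bread(hmp, buf_offset, &error, &buffer);
 *      ...
 *      if (buffer)
 *              hammer_rel_buffer(buffer, 0);
 */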

void *
hammer_bread(hammer_mount_t hmp, hammer_off_t buf_offset,
             int *errorp, struct hammer_buffer **bufferp)
{
        return(_hammer_bread(hmp, buf_offset, HAMMER_BUFSIZE, errorp, bufferp));
}

void *
hammer_bread_ext(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
                 int *errorp, struct hammer_buffer **bufferp)
{
        bytes = (bytes + HAMMER_BUFMASK) & ~HAMMER_BUFMASK;
        return(_hammer_bread(hmp, buf_offset, bytes, errorp, bufferp));
}

/*
 * Access the filesystem buffer containing the specified hammer offset.
 * No disk read operation occurs.  The result buffer may contain garbage.
 *
 * Any prior buffer in *bufferp will be released and replaced by the
 * requested buffer.
 *
 * This function marks the buffer dirty but does not increment its
 * modify_refs count.
 */
static __inline
void *
_hammer_bnew(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
             int *errorp, struct hammer_buffer **bufferp)
{
        hammer_buffer_t buffer;
        int32_t xoff = (int32_t)buf_offset & HAMMER_BUFMASK;

        buf_offset &= ~HAMMER_BUFMASK64;

        buffer = *bufferp;
        if (buffer == NULL || (buffer->zone2_offset != buf_offset &&
                               buffer->zoneX_offset != buf_offset)) {
                if (buffer)
                        hammer_rel_buffer(buffer, 0);
                buffer = hammer_get_buffer(hmp, buf_offset, bytes, 1, errorp);
                *bufferp = buffer;
        } else {
                *errorp = 0;
        }

        /*
         * Return a pointer to the buffer data.
         */
        if (buffer == NULL)
                return(NULL);
        else
                return((char *)buffer->ondisk + xoff);
}

void *
hammer_bnew(hammer_mount_t hmp, hammer_off_t buf_offset,
             int *errorp, struct hammer_buffer **bufferp)
{
        return(_hammer_bnew(hmp, buf_offset, HAMMER_BUFSIZE, errorp, bufferp));
}

void *
hammer_bnew_ext(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
                int *errorp, struct hammer_buffer **bufferp)
{
        bytes = (bytes + HAMMER_BUFMASK) & ~HAMMER_BUFMASK;
        return(_hammer_bnew(hmp, buf_offset, bytes, errorp, bufferp));
}

/************************************************************************
 *                              NODES                                   *
 ************************************************************************
 *
 * Manage B-Tree nodes.  B-Tree nodes represent the primary indexing
 * method used by the HAMMER filesystem.
 *
 * Unlike other HAMMER structures, a hammer_node can be PASSIVELY
 * associated with its buffer, and will only reference the buffer while
 * the node itself is referenced.
 *
 * A hammer_node can also be passively associated with other HAMMER
 * structures, such as inodes, while retaining 0 references.  These
 * associations can be cleared backwards using a pointer-to-pointer in
 * the hammer_node.
 *
 * This allows the HAMMER implementation to cache hammer_nodes long-term
 * and short-cut a great deal of the infrastructure's complexity.  In
 * most cases a cached node can be reacquired without having to dip into
 * either the buffer or cluster management code.
 *
 * The caller must pass a referenced cluster on call and will retain
 * ownership of the reference on return.  The node will acquire its own
 * additional references, if necessary.
 */
hammer_node_t
hammer_get_node(hammer_mount_t hmp, hammer_off_t node_offset,
                int isnew, int *errorp)
{
        hammer_node_t node;

        KKASSERT((node_offset & HAMMER_OFF_ZONE_MASK) == HAMMER_ZONE_BTREE);

        /*
         * Locate the structure, allocating one if necessary.
         */
again:
        node = RB_LOOKUP(hammer_nod_rb_tree, &hmp->rb_nods_root, node_offset);
        if (node == NULL) {
                ++hammer_count_nodes;
                node = kmalloc(sizeof(*node), M_HAMMER, M_WAITOK|M_ZERO|M_USE_RESERVE);
                node->node_offset = node_offset;
                node->hmp = hmp;
                TAILQ_INIT(&node->cursor_list);
                TAILQ_INIT(&node->cache_list);
                if (RB_INSERT(hammer_nod_rb_tree, &hmp->rb_nods_root, node)) {
                        --hammer_count_nodes;
                        kfree(node, M_HAMMER);
                        goto again;
                }
        }
        hammer_ref(&node->lock);
        if (node->ondisk)
                *errorp = 0;
        else
                *errorp = hammer_load_node(node, isnew);
        if (*errorp) {
                hammer_rel_node(node);
                node = NULL;
        }
        return(node);
}

/*
 * Reference an already-referenced node.
 */
void
hammer_ref_node(hammer_node_t node)
{
        KKASSERT(node->lock.refs > 0 && node->ondisk != NULL);
        hammer_ref(&node->lock);
}

/*
 * Load a node's on-disk data reference.
 */
static int
hammer_load_node(hammer_node_t node, int isnew)
{
        hammer_buffer_t buffer;
        hammer_off_t buf_offset;
        int error;

        error = 0;
        ++node->loading;
        hammer_lock_ex(&node->lock);
        if (node->ondisk == NULL) {
                /*
                 * This is a little confusing but the gist is that
                 * node->buffer determines whether the node is on
                 * the buffer's clist and node->ondisk determines
                 * whether the buffer is referenced.
                 *
                 * We could be racing a buffer release, in which case
                 * node->buffer may become NULL while we are blocked
                 * referencing the buffer.
                 */
                if ((buffer = node->buffer) != NULL) {
                        error = hammer_ref_buffer(buffer);
                        if (error == 0 && node->buffer == NULL) {
                                TAILQ_INSERT_TAIL(&buffer->clist,
                                                  node, entry);
                                node->buffer = buffer;
                        }
                } else {
                        buf_offset = node->node_offset & ~HAMMER_BUFMASK64;
                        buffer = hammer_get_buffer(node->hmp, buf_offset,
                                                   HAMMER_BUFSIZE, 0, &error);
                        if (buffer) {
                                KKASSERT(error == 0);
                                TAILQ_INSERT_TAIL(&buffer->clist,
                                                  node, entry);
                                node->buffer = buffer;
                        }
                }
                if (error)
                        goto failed;
                node->ondisk = (void *)((char *)buffer->ondisk +
                                        (node->node_offset & HAMMER_BUFMASK));
                if (isnew == 0 &&
                    (node->flags & HAMMER_NODE_CRCGOOD) == 0) {
                        if (hammer_crc_test_btree(node->ondisk) == 0)
                                Debugger("CRC FAILED: B-TREE NODE");
                        node->flags |= HAMMER_NODE_CRCGOOD;
                }
        }
failed:
        --node->loading;
        hammer_unlock(&node->lock);
        return (error);
}

/*
 * Safely reference a node, interlock against flushes via the IO subsystem.
 */
hammer_node_t
hammer_ref_node_safe(struct hammer_mount *hmp, hammer_node_cache_t cache,
                     int *errorp)
{
        hammer_node_t node;

        node = cache->node;
        if (node != NULL) {
                hammer_ref(&node->lock);
                if (node->ondisk)
                        *errorp = 0;
                else
                        *errorp = hammer_load_node(node, 0);
                if (*errorp) {
                        hammer_rel_node(node);
                        node = NULL;
                }
        } else {
                *errorp = ENOENT;
        }
        return(node);
}

/*
 * Release a hammer_node.  On the last release the node dereferences
 * its underlying buffer and may or may not be destroyed.
 */
void
hammer_rel_node(hammer_node_t node)
{
        hammer_buffer_t buffer;

        /*
         * If this isn't the last ref just decrement the ref count and
         * return.
         */
        if (node->lock.refs > 1) {
                hammer_unref(&node->lock);
                return;
        }

        /*
         * If there is no ondisk info or no buffer the node failed to load,
         * remove the last reference and destroy the node.
         */
        if (node->ondisk == NULL) {
                hammer_unref(&node->lock);
                hammer_flush_node(node);
                /* node is stale now */
                return;
        }

        /*
         * Do not disassociate the node from the buffer if it represents
         * a modified B-Tree node that still needs its crc to be generated.
         */
        if (node->flags & HAMMER_NODE_NEEDSCRC)
                return;

        /*
         * Do final cleanups and then either destroy the node or leave it
         * passively cached.  The buffer reference is removed regardless.
         */
        buffer = node->buffer;
        node->ondisk = NULL;

        if ((node->flags & HAMMER_NODE_FLUSH) == 0) {
                hammer_unref(&node->lock);
                hammer_rel_buffer(buffer, 0);
                return;
        }

        /*
         * Destroy the node.
         */
        hammer_unref(&node->lock);
        hammer_flush_node(node);
        /* node is stale */
        hammer_rel_buffer(buffer, 0);
}

/*
 * Free space on-media associated with a B-Tree node.
 */
void
hammer_delete_node(hammer_transaction_t trans, hammer_node_t node)
{
        KKASSERT((node->flags & HAMMER_NODE_DELETED) == 0);
        node->flags |= HAMMER_NODE_DELETED;
        hammer_blockmap_free(trans, node->node_offset, sizeof(*node->ondisk));
}

/*
 * Passively cache a referenced hammer_node.  The caller may release
 * the node on return.
 */
void
hammer_cache_node(hammer_node_cache_t cache, hammer_node_t node)
{
        /*
         * If the node is being deleted, don't cache it!
         */
        if (node->flags & HAMMER_NODE_DELETED)
                return;
        if (cache->node == node)
                return;
        while (cache->node)
                hammer_uncache_node(cache);
        if (node->flags & HAMMER_NODE_DELETED)
                return;
        cache->node = node;
        TAILQ_INSERT_TAIL(&node->cache_list, cache, entry);
}
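
/*
 * Note on usage (descriptive; based on how the cache structures are
 * consumed elsewhere in HAMMER): a hammer_node_cache is typically
 * embedded in a longer-lived structure, such as an inode's cache[]
 * entries, so a B-Tree position can be reacquired cheaply later.  The
 * cached pointer holds no reference; hammer_flush_node() clears it
 * when the node is destroyed.
 */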

void
hammer_uncache_node(hammer_node_cache_t cache)
{
        hammer_node_t node;

        if ((node = cache->node) != NULL) {
                TAILQ_REMOVE(&node->cache_list, cache, entry);
                cache->node = NULL;
                if (TAILQ_EMPTY(&node->cache_list))
                        hammer_flush_node(node);
        }
}

/*
 * Remove a node's cache references and destroy the node if it has no
 * other references or backing store.
 */
void
hammer_flush_node(hammer_node_t node)
{
        hammer_node_cache_t cache;
        hammer_buffer_t buffer;

        while ((cache = TAILQ_FIRST(&node->cache_list)) != NULL) {
                TAILQ_REMOVE(&node->cache_list, cache, entry);
                cache->node = NULL;
        }
        if (node->lock.refs == 0 && node->ondisk == NULL) {
                KKASSERT((node->flags & HAMMER_NODE_NEEDSCRC) == 0);
                RB_REMOVE(hammer_nod_rb_tree, &node->hmp->rb_nods_root, node);
                if ((buffer = node->buffer) != NULL) {
                        node->buffer = NULL;
                        TAILQ_REMOVE(&buffer->clist, node, entry);
                        /* buffer is unreferenced because ondisk is NULL */
                }
                --hammer_count_nodes;
                kfree(node, M_HAMMER);
        }
}

/*
 * Flush passively cached B-Tree nodes associated with this buffer.
 * This is only called when the buffer is about to be destroyed, so
 * none of the nodes should have any references.  The buffer is locked.
 *
 * We may be interlocked with the buffer.
 */
void
hammer_flush_buffer_nodes(hammer_buffer_t buffer)
{
        hammer_node_t node;

        while ((node = TAILQ_FIRST(&buffer->clist)) != NULL) {
                KKASSERT(node->ondisk == NULL);
                KKASSERT((node->flags & HAMMER_NODE_NEEDSCRC) == 0);

                if (node->lock.refs == 0) {
                        hammer_ref(&node->lock);
                        node->flags |= HAMMER_NODE_FLUSH;
                        hammer_rel_node(node);
                } else {
                        KKASSERT(node->loading != 0);
                        KKASSERT(node->buffer != NULL);
                        buffer = node->buffer;
                        node->buffer = NULL;
                        TAILQ_REMOVE(&buffer->clist, node, entry);
                        /* buffer is unreferenced because ondisk is NULL */
                }
        }
}


/************************************************************************
 *                              ALLOCATORS                              *
 ************************************************************************/

/*
 * Allocate a B-Tree node.
 */
hammer_node_t
hammer_alloc_btree(hammer_transaction_t trans, int *errorp)
{
        hammer_buffer_t buffer = NULL;
        hammer_node_t node = NULL;
        hammer_off_t node_offset;

        node_offset = hammer_blockmap_alloc(trans, HAMMER_ZONE_BTREE_INDEX,
                                            sizeof(struct hammer_node_ondisk),
                                            errorp);
        if (*errorp == 0) {
                node = hammer_get_node(trans->hmp, node_offset, 1, errorp);
                hammer_modify_node_noundo(trans, node);
                bzero(node->ondisk, sizeof(*node->ondisk));
                hammer_modify_node_done(node);
        }
        if (buffer)
                hammer_rel_buffer(buffer, 0);
        return(node);
}

/*
 * Allocate data.  If the address of a data buffer is supplied then
 * any prior non-NULL *data_bufferp will be released and *data_bufferp
 * will be set to the related buffer.  The caller must release it when
 * finally done.  The initial *data_bufferp should be set to NULL by
 * the caller.
 *
 * The caller is responsible for making hammer_modify*() calls on the
 * *data_bufferp.
 */
void *
hammer_alloc_data(hammer_transaction_t trans, int32_t data_len,
                  u_int16_t rec_type, hammer_off_t *data_offsetp,
                  struct hammer_buffer **data_bufferp, int *errorp)
{
        void *data;
        int zone;

        /*
         * Allocate data
         */
        if (data_len) {
                switch(rec_type) {
                case HAMMER_RECTYPE_INODE:
                case HAMMER_RECTYPE_DIRENTRY:
                case HAMMER_RECTYPE_EXT:
                case HAMMER_RECTYPE_FIX:
                case HAMMER_RECTYPE_PFS:
                        zone = HAMMER_ZONE_META_INDEX;
                        break;
                case HAMMER_RECTYPE_DATA:
                case HAMMER_RECTYPE_DB:
                        if (data_len <= HAMMER_BUFSIZE / 2) {
                                zone = HAMMER_ZONE_SMALL_DATA_INDEX;
                        } else {
                                data_len = (data_len + HAMMER_BUFMASK) &
                                           ~HAMMER_BUFMASK;
                                zone = HAMMER_ZONE_LARGE_DATA_INDEX;
                        }
                        break;
                default:
                        panic("hammer_alloc_data: rec_type %04x unknown",
                              rec_type);
                        zone = 0;       /* NOT REACHED */
                        break;
                }
                *data_offsetp = hammer_blockmap_alloc(trans, zone,
                                                      data_len, errorp);
        } else {
                *data_offsetp = 0;
        }
        if (*errorp == 0 && data_bufferp) {
                if (data_len) {
                        data = hammer_bread_ext(trans->hmp, *data_offsetp,
                                                data_len, errorp, data_bufferp);
                        KKASSERT(*errorp == 0);
                } else {
                        data = NULL;
                }
        } else {
                data = NULL;
        }
        KKASSERT(*errorp == 0);
        return(data);
}
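
/*
 * Usage sketch (an illustration under assumed caller conventions, not
 * copied from a specific call site): allocate record data, copy it in
 * under a modify interlock, then release the cached buffer.
 *
 *      struct hammer_buffer *data_buffer = NULL;
 *      hammer_off_t data_offset;
 *      void *ndata;
 *      int error;
 *
 *      ndata = hammer_alloc_data(trans, len, HAMMER_RECTYPE_DATA,
 *                                &data_offset, &data_buffer, &error);
 *      if (ndata) {
 *              hammer_modify_buffer(trans, data_buffer, NULL, 0);
 *              bcopy(src, ndata, len);
 *              hammer_modify_buffer_done(data_buffer);
 *      }
 *      if (data_buffer)
 *              hammer_rel_buffer(data_buffer, 0);
 */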

/*
 * Sync dirty buffers to the media and clean-up any loose ends.
 *
 * These functions do not start the flusher going, they simply
 * queue everything up to the flusher.
 */
static int hammer_sync_scan1(struct mount *mp, struct vnode *vp, void *data);
static int hammer_sync_scan2(struct mount *mp, struct vnode *vp, void *data);

int
hammer_queue_inodes_flusher(hammer_mount_t hmp, int waitfor)
{
        struct hammer_sync_info info;

        info.error = 0;
        info.waitfor = waitfor;
        if (waitfor == MNT_WAIT) {
                vmntvnodescan(hmp->mp, VMSC_GETVP|VMSC_ONEPASS,
                              hammer_sync_scan1, hammer_sync_scan2, &info);
        } else {
                vmntvnodescan(hmp->mp, VMSC_GETVP|VMSC_ONEPASS|VMSC_NOWAIT,
                              hammer_sync_scan1, hammer_sync_scan2, &info);
        }
        return(info.error);
}

/*
 * Filesystem sync.  If doing a synchronous sync make a second pass on
 * the vnodes in case any were already flushing during the first pass,
 * and activate the flusher twice (the second time brings the UNDO FIFO's
 * start position up to the end position after the first call).
 */
int
hammer_sync_hmp(hammer_mount_t hmp, int waitfor)
{
        struct hammer_sync_info info;

        info.error = 0;
        info.waitfor = MNT_NOWAIT;
        vmntvnodescan(hmp->mp, VMSC_GETVP|VMSC_NOWAIT,
                      hammer_sync_scan1, hammer_sync_scan2, &info);
        if (info.error == 0 && waitfor == MNT_WAIT) {
                info.waitfor = waitfor;
                vmntvnodescan(hmp->mp, VMSC_GETVP,
                              hammer_sync_scan1, hammer_sync_scan2, &info);
        }
        if (waitfor == MNT_WAIT) {
                hammer_flusher_sync(hmp);
                hammer_flusher_sync(hmp);
        } else {
                hammer_flusher_async(hmp, NULL);
        }
        return(info.error);
}

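/*
 * vmntvnodescan() fast-pass filter: a negative return tells the scanner
 * to skip the vnode without acquiring it.  Vnodes with no HAMMER-modified
 * inode state and no dirty buffers are skipped.
 */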
static int
hammer_sync_scan1(struct mount *mp, struct vnode *vp, void *data)
{
        struct hammer_inode *ip;

        ip = VTOI(vp);
        if (vp->v_type == VNON || ip == NULL ||
            ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
             RB_EMPTY(&vp->v_rbdirty_tree))) {
                return(-1);
        }
        return(0);
}

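/*
 * vmntvnodescan() slow-pass worker: fsync any vnode that made it past
 * the fast-pass filter, recording any error in the shared info
 * structure.
 */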
static int
hammer_sync_scan2(struct mount *mp, struct vnode *vp, void *data)
{
        struct hammer_sync_info *info = data;
        struct hammer_inode *ip;
        int error;

        ip = VTOI(vp);
        if (vp->v_type == VNON || vp->v_type == VBAD ||
            ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
             RB_EMPTY(&vp->v_rbdirty_tree))) {
                return(0);
        }
        error = VOP_FSYNC(vp, MNT_NOWAIT);
        if (error)
                info->error = error;
        return(0);
}