/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_ondisk.c,v 1.71 2008/07/18 00:19:53 dillon Exp $
 */
/*
 * Manage HAMMER's on-disk structures.  These routines are primarily
 * responsible for interfacing with the kernel's I/O subsystem and for
 * managing in-memory structures.
 */

#include "hammer.h"
#include <sys/fcntl.h>
#include <sys/nlookup.h>
#include <sys/buf.h>
#include <sys/buf2.h>

static void hammer_free_volume(hammer_volume_t volume);
static int hammer_load_volume(hammer_volume_t volume);
static int hammer_load_buffer(hammer_buffer_t buffer, int isnew);
static int hammer_load_node(hammer_node_t node, int isnew);

static int
hammer_vol_rb_compare(hammer_volume_t vol1, hammer_volume_t vol2)
{
        if (vol1->vol_no < vol2->vol_no)
                return(-1);
        if (vol1->vol_no > vol2->vol_no)
                return(1);
        return(0);
}

static int
hammer_buf_rb_compare(hammer_buffer_t buf1, hammer_buffer_t buf2)
{
        if (buf1->zoneX_offset < buf2->zoneX_offset)
                return(-1);
        if (buf1->zoneX_offset > buf2->zoneX_offset)
                return(1);
        return(0);
}

static int
hammer_nod_rb_compare(hammer_node_t node1, hammer_node_t node2)
{
        if (node1->node_offset < node2->node_offset)
                return(-1);
        if (node1->node_offset > node2->node_offset)
                return(1);
        return(0);
}

RB_GENERATE2(hammer_vol_rb_tree, hammer_volume, rb_node,
             hammer_vol_rb_compare, int32_t, vol_no);
RB_GENERATE2(hammer_buf_rb_tree, hammer_buffer, rb_node,
             hammer_buf_rb_compare, hammer_off_t, zoneX_offset);
RB_GENERATE2(hammer_nod_rb_tree, hammer_node, rb_node,
             hammer_nod_rb_compare, hammer_off_t, node_offset);
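
/*
 * Note that the RB_GENERATE2() forms above also emit keyed lookup
 * functions, so callers can search by vol_no or offset directly instead
 * of filling in a dummy structure.  A minimal usage sketch (hmp is
 * assumed to be a valid hammer_mount):
 *
 *      hammer_volume_t volume;
 *      hammer_buffer_t buffer;
 *
 *      volume = RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, vol_no);
 *      buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root,
 *                         zoneX_offset);
 *
 * A NULL return simply means the element is not cached; the get routines
 * below fall through to allocation or ENOENT handling in that case.
 */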

/************************************************************************
 *                              VOLUMES                                 *
 ************************************************************************
 *
 * Load a HAMMER volume by name.  Returns 0 on success or a positive error
 * code on failure.  Volumes must be loaded at mount time; get_volume() will
 * not load a new volume.
 *
 * Calls made to hammer_load_volume() are single-threaded.
 */
int
hammer_install_volume(struct hammer_mount *hmp, const char *volname)
{
        struct mount *mp;
        hammer_volume_t volume;
        struct hammer_volume_ondisk *ondisk;
        struct nlookupdata nd;
        struct buf *bp = NULL;
        int error;
        int ronly;
        int setmp = 0;

        mp = hmp->mp;
        ronly = ((mp->mnt_flag & MNT_RDONLY) ? 1 : 0);

        /*
         * Allocate a volume structure
         */
        ++hammer_count_volumes;
        volume = kmalloc(sizeof(*volume), M_HAMMER, M_WAITOK|M_ZERO);
        volume->vol_name = kstrdup(volname, M_HAMMER);
        hammer_io_init(&volume->io, hmp, HAMMER_STRUCTURE_VOLUME);
        volume->io.offset = 0LL;
        volume->io.bytes = HAMMER_BUFSIZE;

        /*
         * Get the device vnode
         */
        error = nlookup_init(&nd, volume->vol_name, UIO_SYSSPACE, NLC_FOLLOW);
        if (error == 0)
                error = nlookup(&nd);
        if (error == 0)
                error = cache_vref(&nd.nl_nch, nd.nl_cred, &volume->devvp);
        nlookup_done(&nd);
        if (error == 0) {
                if (vn_isdisk(volume->devvp, &error)) {
                        error = vfs_mountedon(volume->devvp);
                }
        }
        if (error == 0 &&
            count_udev(volume->devvp->v_umajor, volume->devvp->v_uminor) > 0) {
                error = EBUSY;
        }
        if (error == 0) {
                vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
                error = vinvalbuf(volume->devvp, V_SAVE, 0, 0);
                if (error == 0) {
                        error = VOP_OPEN(volume->devvp,
                                         (ronly ? FREAD : FREAD|FWRITE),
                                         FSCRED, NULL);
                }
                vn_unlock(volume->devvp);
        }
        if (error) {
                hammer_free_volume(volume);
                return(error);
        }
        volume->devvp->v_rdev->si_mountpoint = mp;
        setmp = 1;

        /*
         * Extract the volume number from the volume header and do various
         * sanity checks.
         */
        error = bread(volume->devvp, 0LL, HAMMER_BUFSIZE, &bp);
        if (error)
                goto late_failure;
        ondisk = (void *)bp->b_data;
        if (ondisk->vol_signature != HAMMER_FSBUF_VOLUME) {
                kprintf("hammer_mount: volume %s has an invalid header\n",
                        volume->vol_name);
                error = EFTYPE;
                goto late_failure;
        }
        volume->vol_no = ondisk->vol_no;
        volume->buffer_base = ondisk->vol_buf_beg;
        volume->vol_flags = ondisk->vol_flags;
        volume->nblocks = ondisk->vol_nblocks;
        volume->maxbuf_off = HAMMER_ENCODE_RAW_BUFFER(volume->vol_no,
                                    ondisk->vol_buf_end - ondisk->vol_buf_beg);
        volume->maxraw_off = ondisk->vol_buf_end;

        if (RB_EMPTY(&hmp->rb_vols_root)) {
                hmp->fsid = ondisk->vol_fsid;
        } else if (bcmp(&hmp->fsid, &ondisk->vol_fsid, sizeof(uuid_t))) {
                kprintf("hammer_mount: volume %s's fsid does not match "
                        "other volumes\n", volume->vol_name);
                error = EFTYPE;
                goto late_failure;
        }

        /*
         * Insert the volume structure into the red-black tree.
         */
        if (RB_INSERT(hammer_vol_rb_tree, &hmp->rb_vols_root, volume)) {
                kprintf("hammer_mount: volume %s has a duplicate vol_no %d\n",
                        volume->vol_name, volume->vol_no);
                error = EEXIST;
        }

        /*
         * Set the root volume.  HAMMER special-cases the rootvol structure.
         * We do not hold a ref because this would prevent related I/O
         * from being flushed.
         */
        if (error == 0 && ondisk->vol_rootvol == ondisk->vol_no) {
                hmp->rootvol = volume;
                hmp->nvolumes = ondisk->vol_count;
                if (bp) {
                        brelse(bp);
                        bp = NULL;
                }
                hmp->mp->mnt_stat.f_blocks += ondisk->vol0_stat_bigblocks *
                        (HAMMER_LARGEBLOCK_SIZE / HAMMER_BUFSIZE);
                hmp->mp->mnt_vstat.f_blocks += ondisk->vol0_stat_bigblocks *
                        (HAMMER_LARGEBLOCK_SIZE / HAMMER_BUFSIZE);
        }
late_failure:
        if (bp)
                brelse(bp);
        if (error) {
                /*vinvalbuf(volume->devvp, V_SAVE, 0, 0);*/
                if (setmp)
                        volume->devvp->v_rdev->si_mountpoint = NULL;
                VOP_CLOSE(volume->devvp, ronly ? FREAD : FREAD|FWRITE);
                hammer_free_volume(volume);
        }
        return (error);
}
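
/*
 * A rough sketch of how the mount path is expected to drive the routine
 * above (error handling trimmed; the `info' volume-path array stands in
 * for the userland mount argument structure and is an assumption of this
 * example):
 *
 *      for (i = 0; error == 0 && i < info.nvolumes; ++i) {
 *              error = copyinstr(info.volumes[i], path, MAXPATHLEN, NULL);
 *              if (error == 0)
 *                      error = hammer_install_volume(hmp, path);
 *      }
 *      if (error == 0)
 *              error = hammer_mountcheck_volumes(hmp);
 *
 * Each volume is installed exactly once at mount time; later
 * get_volume() calls only look up volumes already present in the
 * RB tree.
 */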

/*
 * This is called for each volume when updating the mount point from
 * read-write to read-only or vice-versa.
 */
int
hammer_adjust_volume_mode(hammer_volume_t volume, void *data __unused)
{
        if (volume->devvp) {
                vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
                if (volume->io.hmp->ronly) {
                        /* do not call vinvalbuf */
                        VOP_OPEN(volume->devvp, FREAD, FSCRED, NULL);
                        VOP_CLOSE(volume->devvp, FREAD|FWRITE);
                } else {
                        /* do not call vinvalbuf */
                        VOP_OPEN(volume->devvp, FREAD|FWRITE, FSCRED, NULL);
                        VOP_CLOSE(volume->devvp, FREAD);
                }
                vn_unlock(volume->devvp);
        }
        return(0);
}

/*
 * Unload and free a HAMMER volume.  Must return >= 0 to continue the
 * scan, so -1 is returned on failure.
 */
int
hammer_unload_volume(hammer_volume_t volume, void *data __unused)
{
        struct hammer_mount *hmp = volume->io.hmp;
        int ronly = ((hmp->mp->mnt_flag & MNT_RDONLY) ? 1 : 0);
        struct buf *bp;

        /*
         * Clean up the root volume pointer, which is held unlocked in hmp.
         */
        if (hmp->rootvol == volume)
                hmp->rootvol = NULL;

        /*
         * We must not flush a dirty buffer to disk on umount.  It should
         * have already been dealt with by the flusher, or we may be in
         * catastrophic failure.
         */
        hammer_io_clear_modify(&volume->io, 1);
        volume->io.waitdep = 1;
        bp = hammer_io_release(&volume->io, 1);

        /*
         * Clean up the persistent ref that an ioerror might have on the volume
         */
        if (volume->io.ioerror) {
                volume->io.ioerror = 0;
                hammer_unref(&volume->io.lock);
        }

        /*
         * There should be no references on the volume, no clusters, and
         * no super-clusters.
         */
        KKASSERT(volume->io.lock.refs == 0);
        if (bp)
                brelse(bp);

        volume->ondisk = NULL;
        if (volume->devvp) {
                if (volume->devvp->v_rdev &&
                    volume->devvp->v_rdev->si_mountpoint == hmp->mp
                ) {
                        volume->devvp->v_rdev->si_mountpoint = NULL;
                }
                if (ronly) {
                        /*
                         * Make sure we don't sync anything to disk if we
                         * are in read-only mode (1) or critically-errored
                         * (2).  Note that there may be dirty buffers in
                         * normal read-only mode from crash recovery.
                         */
                        vinvalbuf(volume->devvp, 0, 0, 0);
                        VOP_CLOSE(volume->devvp, FREAD);
                } else {
                        /*
                         * Normal termination, save any dirty buffers
                         * (XXX there really shouldn't be any).
                         */
                        vinvalbuf(volume->devvp, V_SAVE, 0, 0);
                        VOP_CLOSE(volume->devvp, FREAD|FWRITE);
                }
        }

        /*
         * Destroy the structure
         */
        RB_REMOVE(hammer_vol_rb_tree, &hmp->rb_vols_root, volume);
        hammer_free_volume(volume);
        return(0);
}

static
void
hammer_free_volume(hammer_volume_t volume)
{
        if (volume->vol_name) {
                kfree(volume->vol_name, M_HAMMER);
                volume->vol_name = NULL;
        }
        if (volume->devvp) {
                vrele(volume->devvp);
                volume->devvp = NULL;
        }
        --hammer_count_volumes;
        kfree(volume, M_HAMMER);
}

/*
 * Get a HAMMER volume.  The volume must already exist.
 */
hammer_volume_t
hammer_get_volume(struct hammer_mount *hmp, int32_t vol_no, int *errorp)
{
        struct hammer_volume *volume;

        /*
         * Locate the volume structure
         */
        volume = RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, vol_no);
        if (volume == NULL) {
                *errorp = ENOENT;
                return(NULL);
        }
        hammer_ref(&volume->io.lock);

        /*
         * Deal with on-disk info
         */
        if (volume->ondisk == NULL || volume->io.loading) {
                *errorp = hammer_load_volume(volume);
                if (*errorp) {
                        hammer_rel_volume(volume, 1);
                        volume = NULL;
                }
        } else {
                *errorp = 0;
        }
        return(volume);
}

int
hammer_ref_volume(hammer_volume_t volume)
{
        int error;

        hammer_ref(&volume->io.lock);

        /*
         * Deal with on-disk info
         */
        if (volume->ondisk == NULL || volume->io.loading) {
                error = hammer_load_volume(volume);
                if (error)
                        hammer_rel_volume(volume, 1);
        } else {
                error = 0;
        }
        return (error);
}

hammer_volume_t
hammer_get_root_volume(struct hammer_mount *hmp, int *errorp)
{
        hammer_volume_t volume;

        volume = hmp->rootvol;
        KKASSERT(volume != NULL);
        hammer_ref(&volume->io.lock);

        /*
         * Deal with on-disk info
         */
        if (volume->ondisk == NULL || volume->io.loading) {
                *errorp = hammer_load_volume(volume);
                if (*errorp) {
                        hammer_rel_volume(volume, 1);
                        volume = NULL;
                }
        } else {
                *errorp = 0;
        }
        return (volume);
}

/*
 * Load a volume's on-disk information.  The volume must be referenced and
 * not locked.  We temporarily acquire an exclusive lock to interlock
 * against releases or multiple get's.
 */
static int
hammer_load_volume(hammer_volume_t volume)
{
        int error;

        ++volume->io.loading;
        hammer_lock_ex(&volume->io.lock);

        if (volume->ondisk == NULL) {
                error = hammer_io_read(volume->devvp, &volume->io,
                                       volume->maxraw_off);
                if (error == 0)
                        volume->ondisk = (void *)volume->io.bp->b_data;
        } else {
                error = 0;
        }
        --volume->io.loading;
        hammer_unlock(&volume->io.lock);
        return(error);
}

/*
 * Release a volume.  Call hammer_io_release on the last reference.  We have
 * to acquire an exclusive lock to interlock against volume->ondisk tests
 * in hammer_load_volume(), and hammer_io_release() also expects an exclusive
 * lock to be held.
 *
 * Volumes are not unloaded from memory during normal operation.
 */
void
hammer_rel_volume(hammer_volume_t volume, int flush)
{
        struct buf *bp = NULL;

        crit_enter();
        if (volume->io.lock.refs == 1) {
                ++volume->io.loading;
                hammer_lock_ex(&volume->io.lock);
                if (volume->io.lock.refs == 1) {
                        volume->ondisk = NULL;
                        bp = hammer_io_release(&volume->io, flush);
                }
                --volume->io.loading;
                hammer_unlock(&volume->io.lock);
        }
        hammer_unref(&volume->io.lock);
        if (bp)
                brelse(bp);
        crit_exit();
}
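
/*
 * A minimal sketch of the get/rel pairing used throughout the kernel
 * (this mirrors the pattern in hammer_del_buffers() further below):
 *
 *      int error;
 *      hammer_volume_t volume;
 *
 *      volume = hammer_get_volume(hmp, vol_no, &error);
 *      if (volume) {
 *              ... access volume->ondisk ...
 *              hammer_rel_volume(volume, 0);
 *      }
 *
 * The reference returned by hammer_get_volume() guarantees that
 * volume->ondisk stays mapped until the matching release.
 */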

int
hammer_mountcheck_volumes(struct hammer_mount *hmp)
{
        hammer_volume_t vol;
        int i;

        for (i = 0; i < hmp->nvolumes; ++i) {
                vol = RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, i);
                if (vol == NULL)
                        return(EINVAL);
        }
        return(0);
}

/************************************************************************
 *                              BUFFERS                                 *
 ************************************************************************
 *
 * Manage buffers.  Currently all blockmap-backed zones are translated
 * to zone-2 buffer offsets.
 */
hammer_buffer_t
hammer_get_buffer(hammer_mount_t hmp, hammer_off_t buf_offset,
                  int bytes, int isnew, int *errorp)
{
        hammer_buffer_t buffer;
        hammer_volume_t volume;
        hammer_off_t    zone2_offset;
        hammer_io_type_t iotype;
        int vol_no;
        int zone;

        buf_offset &= ~HAMMER_BUFMASK64;
again:
        /*
         * Shortcut if the buffer is already cached
         */
        buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root, buf_offset);
        if (buffer) {
                if (buffer->io.lock.refs == 0)
                        ++hammer_count_refedbufs;
                hammer_ref(&buffer->io.lock);

                /*
                 * Once refed, the ondisk field will not be cleared by
                 * any other action.
                 */
                if (buffer->ondisk && buffer->io.loading == 0) {
                        *errorp = 0;
                        return(buffer);
                }

                /*
                 * The buffer is no longer loose if it has a ref, and
                 * cannot become loose once it gains a ref.  Loose
                 * buffers will never be in a modified state.  This should
                 * only occur on the 0->1 transition of refs.
                 *
                 * lose_list can be modified via a biodone() interrupt.
                 */
                if (buffer->io.mod_list == &hmp->lose_list) {
                        crit_enter();   /* biodone race against list */
                        TAILQ_REMOVE(buffer->io.mod_list, &buffer->io,
                                     mod_entry);
                        crit_exit();
                        buffer->io.mod_list = NULL;
                        KKASSERT(buffer->io.modified == 0);
                }
                goto found;
        }

        /*
         * What is the buffer class?
         */
        zone = HAMMER_ZONE_DECODE(buf_offset);

        switch(zone) {
        case HAMMER_ZONE_LARGE_DATA_INDEX:
        case HAMMER_ZONE_SMALL_DATA_INDEX:
                iotype = HAMMER_STRUCTURE_DATA_BUFFER;
                break;
        case HAMMER_ZONE_UNDO_INDEX:
                iotype = HAMMER_STRUCTURE_UNDO_BUFFER;
                break;
        case HAMMER_ZONE_META_INDEX:
        default:
                /*
                 * NOTE: inode data and directory entries are placed in this
                 * zone.  inode atime/mtime is updated in-place and thus
                 * buffers containing inodes must be synchronized as
                 * meta-buffers, same as buffers containing B-Tree info.
                 */
                iotype = HAMMER_STRUCTURE_META_BUFFER;
                break;
        }

        /*
         * Handle blockmap offset translations
         */
        if (zone >= HAMMER_ZONE_BTREE_INDEX) {
                zone2_offset = hammer_blockmap_lookup(hmp, buf_offset, errorp);
        } else if (zone == HAMMER_ZONE_UNDO_INDEX) {
                zone2_offset = hammer_undo_lookup(hmp, buf_offset, errorp);
        } else {
                KKASSERT(zone == HAMMER_ZONE_RAW_BUFFER_INDEX);
                zone2_offset = buf_offset;
                *errorp = 0;
        }
        if (*errorp)
                return(NULL);

        /*
         * NOTE: zone2_offset and maxbuf_off are both full zone-2 offset
         * specifications.
         */
        KKASSERT((zone2_offset & HAMMER_OFF_ZONE_MASK) ==
                 HAMMER_ZONE_RAW_BUFFER);
        vol_no = HAMMER_VOL_DECODE(zone2_offset);
        volume = hammer_get_volume(hmp, vol_no, errorp);
        if (volume == NULL)
                return(NULL);

        KKASSERT(zone2_offset < volume->maxbuf_off);

        /*
         * Allocate a new buffer structure.  We will check for races later.
         */
        ++hammer_count_buffers;
        buffer = kmalloc(sizeof(*buffer), M_HAMMER,
                         M_WAITOK|M_ZERO|M_USE_RESERVE);
        buffer->zone2_offset = zone2_offset;
        buffer->zoneX_offset = buf_offset;
        buffer->volume = volume;

        hammer_io_init(&buffer->io, hmp, iotype);
        buffer->io.offset = volume->ondisk->vol_buf_beg +
                            (zone2_offset & HAMMER_OFF_SHORT_MASK);
        buffer->io.bytes = bytes;
        TAILQ_INIT(&buffer->clist);
        hammer_ref(&buffer->io.lock);

        /*
         * Insert the buffer into the RB tree and handle late collisions.
         */
        if (RB_INSERT(hammer_buf_rb_tree, &hmp->rb_bufs_root, buffer)) {
                hammer_unref(&buffer->io.lock);
                --hammer_count_buffers;
                kfree(buffer, M_HAMMER);
                goto again;
        }
        ++hammer_count_refedbufs;
found:

        /*
         * Deal with on-disk info and loading races.
         */
        if (buffer->ondisk == NULL || buffer->io.loading) {
                *errorp = hammer_load_buffer(buffer, isnew);
                if (*errorp) {
                        hammer_rel_buffer(buffer, 1);
                        buffer = NULL;
                }
        } else {
                *errorp = 0;
        }
        return(buffer);
}

/*
 * This is used by the direct-read code to deal with large-data buffers
 * created by the reblocker and mirror-write code.  The direct-read code
 * bypasses the HAMMER buffer subsystem and so any aliased dirty hammer
 * buffers must be fully synced to disk before we can issue the direct-read.
 *
 * This code path is not considered critical as only the reblocker and
 * mirror-write code will create large-data buffers via the HAMMER buffer
 * subsystem.  They do that because they operate at the B-Tree level and
 * do not access the vnode/inode structures.
 */
void
hammer_sync_buffers(hammer_mount_t hmp, hammer_off_t base_offset, int bytes)
{
        hammer_buffer_t buffer;
        int error;

        KKASSERT((base_offset & HAMMER_OFF_ZONE_MASK) ==
                 HAMMER_ZONE_LARGE_DATA);

        while (bytes > 0) {
                buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root,
                                   base_offset);
                if (buffer && buffer->io.modified) {
                        error = hammer_ref_buffer(buffer);
                        if (error == 0 && buffer->io.modified) {
                                hammer_io_write_interlock(&buffer->io);
                                hammer_io_flush(&buffer->io);
                                hammer_io_done_interlock(&buffer->io);
                                hammer_io_wait(&buffer->io);
                                hammer_rel_buffer(buffer, 0);
                        }
                }
                base_offset += HAMMER_BUFSIZE;
                bytes -= HAMMER_BUFSIZE;
        }
}

/*
 * Destroy all buffers covering the specified zoneX offset range.  This
 * is called when the related blockmap layer2 entry is freed or when
 * a direct write bypasses our buffer/buffer-cache subsystem.
 *
 * The buffers may be referenced by the caller itself.  Setting reclaim
 * will cause the buffer to be destroyed when its ref count reaches zero.
 */
void
hammer_del_buffers(hammer_mount_t hmp, hammer_off_t base_offset,
                   hammer_off_t zone2_offset, int bytes)
{
        hammer_buffer_t buffer;
        hammer_volume_t volume;
        int vol_no;
        int error;

        vol_no = HAMMER_VOL_DECODE(zone2_offset);
        volume = hammer_get_volume(hmp, vol_no, &error);
        KKASSERT(error == 0);

        while (bytes > 0) {
                buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root,
                                   base_offset);
                if (buffer) {
                        error = hammer_ref_buffer(buffer);
                        if (error == 0) {
                                KKASSERT(buffer->zone2_offset == zone2_offset);
                                hammer_io_clear_modify(&buffer->io, 1);
                                buffer->io.reclaim = 1;
                                KKASSERT(buffer->volume == volume);
                                hammer_rel_buffer(buffer, 0);
                        }
                } else {
                        hammer_io_inval(volume, zone2_offset);
                }
                base_offset += HAMMER_BUFSIZE;
                zone2_offset += HAMMER_BUFSIZE;
                bytes -= HAMMER_BUFSIZE;
        }
        hammer_rel_volume(volume, 0);
}

static int
hammer_load_buffer(hammer_buffer_t buffer, int isnew)
{
        hammer_volume_t volume;
        int error;

        /*
         * Load the buffer's on-disk info
         */
        volume = buffer->volume;
        ++buffer->io.loading;
        hammer_lock_ex(&buffer->io.lock);

        if (hammer_debug_io & 0x0001) {
                kprintf("load_buffer %016llx %016llx isnew=%d od=%p\n",
                        buffer->zoneX_offset, buffer->zone2_offset, isnew,
                        buffer->ondisk);
        }

        if (buffer->ondisk == NULL) {
                if (isnew) {
                        error = hammer_io_new(volume->devvp, &buffer->io);
                } else {
                        error = hammer_io_read(volume->devvp, &buffer->io,
                                               volume->maxraw_off);
                }
                if (error == 0)
                        buffer->ondisk = (void *)buffer->io.bp->b_data;
        } else if (isnew) {
                error = hammer_io_new(volume->devvp, &buffer->io);
        } else {
                error = 0;
        }
        --buffer->io.loading;
        hammer_unlock(&buffer->io.lock);
        return (error);
}

/*
 * NOTE: Called from RB_SCAN, must return >= 0 for scan to continue.
 * This routine is only called during unmount.
 */
int
hammer_unload_buffer(hammer_buffer_t buffer, void *data __unused)
{
        /*
         * Clean up the persistent ref that an ioerror might have on the buffer
         * and acquire a ref (steal ioerror's if we can).
         */
        if (buffer->io.ioerror) {
                buffer->io.ioerror = 0;
        } else {
                if (buffer->io.lock.refs == 0)
                        ++hammer_count_refedbufs;
                hammer_ref(&buffer->io.lock);
        }

        /*
         * We must not flush a dirty buffer to disk on umount.  It should
         * have already been dealt with by the flusher, or we may be in
         * catastrophic failure.
         */
        hammer_io_clear_modify(&buffer->io, 1);
        hammer_flush_buffer_nodes(buffer);
        KKASSERT(buffer->io.lock.refs == 1);
        hammer_rel_buffer(buffer, 2);
        return(0);
}

/*
 * Reference a buffer that is either already referenced or reached via a
 * specially handled pointer (aka cursor->buffer).
 */
int
hammer_ref_buffer(hammer_buffer_t buffer)
{
        int error;

        if (buffer->io.lock.refs == 0)
                ++hammer_count_refedbufs;
        hammer_ref(&buffer->io.lock);

        /*
         * At this point a biodone() will not touch the buffer other than
         * incidental bits.  However, lose_list can be modified via
         * a biodone() interrupt.
         *
         * No longer loose
         */
        if (buffer->io.mod_list == &buffer->io.hmp->lose_list) {
                crit_enter();
                TAILQ_REMOVE(buffer->io.mod_list, &buffer->io, mod_entry);
                buffer->io.mod_list = NULL;
                crit_exit();
        }

        if (buffer->ondisk == NULL || buffer->io.loading) {
                error = hammer_load_buffer(buffer, 0);
                if (error) {
                        hammer_rel_buffer(buffer, 1);
                        /*
                         * NOTE: buffer pointer can become stale after
                         * the above release.
                         */
                }
        } else {
                error = 0;
        }
        return(error);
}

/*
 * Release a buffer.  We have to deal with several places where
 * another thread can ref the buffer.
 *
 * Only destroy the structure itself if the related buffer cache buffer
 * was disassociated from it.  This ties the management of the structure
 * to the buffer cache subsystem.  buffer->ondisk determines whether the
 * embedded io is referenced or not.
 */
void
hammer_rel_buffer(hammer_buffer_t buffer, int flush)
{
        hammer_volume_t volume;
        struct buf *bp = NULL;
        int freeme = 0;

        crit_enter();
        if (buffer->io.lock.refs == 1) {
                ++buffer->io.loading;   /* force interlock check */
                hammer_lock_ex(&buffer->io.lock);
                if (buffer->io.lock.refs == 1) {
                        bp = hammer_io_release(&buffer->io, flush);

                        if (buffer->io.lock.refs == 1)
                                --hammer_count_refedbufs;

                        if (buffer->io.bp == NULL &&
                            buffer->io.lock.refs == 1) {
                                /*
                                 * Final cleanup
                                 *
                                 * NOTE: It is impossible for any associated
                                 * B-Tree nodes to have refs if the buffer
                                 * has no additional refs.
                                 */
                                RB_REMOVE(hammer_buf_rb_tree,
                                          &buffer->io.hmp->rb_bufs_root,
                                          buffer);
                                volume = buffer->volume;
                                buffer->volume = NULL; /* sanity */
                                hammer_rel_volume(volume, 0);
                                hammer_io_clear_modlist(&buffer->io);
                                hammer_flush_buffer_nodes(buffer);
                                KKASSERT(TAILQ_EMPTY(&buffer->clist));
                                freeme = 1;
                        }
                }
                --buffer->io.loading;
                hammer_unlock(&buffer->io.lock);
        }
        hammer_unref(&buffer->io.lock);
        crit_exit();
        if (bp)
                brelse(bp);
        if (freeme) {
                --hammer_count_buffers;
                kfree(buffer, M_HAMMER);
        }
}

/*
 * Access the filesystem buffer containing the specified hammer offset.
 * buf_offset is a conglomeration of the volume number and vol_buf_beg
 * relative buffer offset.  It must also have bit 55 set to be valid.
 * (see hammer_off_t in hammer_disk.h).
 *
 * Any prior buffer in *bufferp will be released and replaced by the
 * requested buffer.
 */
static __inline
void *
_hammer_bread(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
             int *errorp, struct hammer_buffer **bufferp)
{
        hammer_buffer_t buffer;
        int32_t xoff = (int32_t)buf_offset & HAMMER_BUFMASK;

        buf_offset &= ~HAMMER_BUFMASK64;
        KKASSERT((buf_offset & HAMMER_OFF_ZONE_MASK) != 0);

        buffer = *bufferp;
        if (buffer == NULL || (buffer->zone2_offset != buf_offset &&
                               buffer->zoneX_offset != buf_offset)) {
                if (buffer)
                        hammer_rel_buffer(buffer, 0);
                buffer = hammer_get_buffer(hmp, buf_offset, bytes, 0, errorp);
                *bufferp = buffer;
        } else {
                *errorp = 0;
        }

        /*
         * Return a pointer to the buffer data.
         */
        if (buffer == NULL)
                return(NULL);
        else
                return((char *)buffer->ondisk + xoff);
}

void *
hammer_bread(hammer_mount_t hmp, hammer_off_t buf_offset,
             int *errorp, struct hammer_buffer **bufferp)
{
        return(_hammer_bread(hmp, buf_offset, HAMMER_BUFSIZE, errorp, bufferp));
}

void *
hammer_bread_ext(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
                 int *errorp, struct hammer_buffer **bufferp)
{
        bytes = (bytes + HAMMER_BUFMASK) & ~HAMMER_BUFMASK;
        return(_hammer_bread(hmp, buf_offset, bytes, errorp, bufferp));
}
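
/*
 * Callers normally cache the buffer cookie across consecutive reads so
 * that accesses falling within the same 16K buffer avoid repeated RB
 * tree lookups.  A minimal sketch of the expected calling convention:
 *
 *      hammer_buffer_t buffer = NULL;
 *      void *data;
 *      int error;
 *
 *      data = hammer_bread(hmp, buf_offset, &error, &buffer);
 *      if (data)
 *              ... read the on-media structure ...
 *      if (buffer)
 *              hammer_rel_buffer(buffer, 0);
 *
 * Only a single final hammer_rel_buffer() is required; intervening
 * calls passing the same *bufferp release and replace the prior
 * buffer automatically.
 */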

/*
 * Access the filesystem buffer containing the specified hammer offset.
 * No disk read operation occurs.  The result buffer may contain garbage.
 *
 * Any prior buffer in *bufferp will be released and replaced by the
 * requested buffer.
 *
 * This function marks the buffer dirty but does not increment its
 * modify_refs count.
 */
static __inline
void *
_hammer_bnew(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
             int *errorp, struct hammer_buffer **bufferp)
{
        hammer_buffer_t buffer;
        int32_t xoff = (int32_t)buf_offset & HAMMER_BUFMASK;

        buf_offset &= ~HAMMER_BUFMASK64;

        buffer = *bufferp;
        if (buffer == NULL || (buffer->zone2_offset != buf_offset &&
                               buffer->zoneX_offset != buf_offset)) {
                if (buffer)
                        hammer_rel_buffer(buffer, 0);
                buffer = hammer_get_buffer(hmp, buf_offset, bytes, 1, errorp);
                *bufferp = buffer;
        } else {
                *errorp = 0;
        }

        /*
         * Return a pointer to the buffer data.
         */
        if (buffer == NULL)
                return(NULL);
        else
                return((char *)buffer->ondisk + xoff);
}

void *
hammer_bnew(hammer_mount_t hmp, hammer_off_t buf_offset,
             int *errorp, struct hammer_buffer **bufferp)
{
        return(_hammer_bnew(hmp, buf_offset, HAMMER_BUFSIZE, errorp, bufferp));
}

void *
hammer_bnew_ext(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
                int *errorp, struct hammer_buffer **bufferp)
{
        bytes = (bytes + HAMMER_BUFMASK) & ~HAMMER_BUFMASK;
        return(_hammer_bnew(hmp, buf_offset, bytes, errorp, bufferp));
}

/************************************************************************
 *                              NODES                                   *
 ************************************************************************
 *
 * Manage B-Tree nodes.  B-Tree nodes represent the primary indexing
 * method used by the HAMMER filesystem.
 *
 * Unlike other HAMMER structures, a hammer_node can be PASSIVELY
 * associated with its buffer, and will only reference the buffer while
 * the node itself is referenced.
 *
 * A hammer_node can also be passively associated with other HAMMER
 * structures, such as inodes, while retaining 0 references.  These
 * associations can be cleared backwards using a pointer-to-pointer in
 * the hammer_node.
 *
 * This allows the HAMMER implementation to cache hammer_nodes long-term
 * and short-cut a great deal of the infrastructure's complexity.  In
 * most cases a cached node can be reacquired without having to dip into
 * either the buffer or cluster management code.
 *
 * The caller must pass a referenced cluster on call and will retain
 * ownership of the reference on return.  The node will acquire its own
 * additional references, if necessary.
 */
hammer_node_t
hammer_get_node(hammer_mount_t hmp, hammer_off_t node_offset,
                int isnew, int *errorp)
{
        hammer_node_t node;

        KKASSERT((node_offset & HAMMER_OFF_ZONE_MASK) == HAMMER_ZONE_BTREE);

        /*
         * Locate the structure, allocating one if necessary.
         */
again:
        node = RB_LOOKUP(hammer_nod_rb_tree, &hmp->rb_nods_root, node_offset);
        if (node == NULL) {
                ++hammer_count_nodes;
                node = kmalloc(sizeof(*node), M_HAMMER, M_WAITOK|M_ZERO|M_USE_RESERVE);
                node->node_offset = node_offset;
                node->hmp = hmp;
                TAILQ_INIT(&node->cursor_list);
                TAILQ_INIT(&node->cache_list);
                if (RB_INSERT(hammer_nod_rb_tree, &hmp->rb_nods_root, node)) {
                        --hammer_count_nodes;
                        kfree(node, M_HAMMER);
                        goto again;
                }
        }
        hammer_ref(&node->lock);
        if (node->ondisk)
                *errorp = 0;
        else
                *errorp = hammer_load_node(node, isnew);
        if (*errorp) {
                hammer_rel_node(node);
                node = NULL;
        }
        return(node);
}
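
/*
 * A sketch of the passive caching pattern described above, as a cursor
 * or inode might use it (the cache structure is assumed to be embedded
 * in some longer-lived object and zeroed on initialization):
 *
 *      struct hammer_node_cache cache;
 *      hammer_node_t node;
 *      int error;
 *
 *      node = hammer_get_node(hmp, node_offset, 0, &error);
 *      if (node) {
 *              hammer_cache_node(&cache, node);
 *              hammer_rel_node(node);  (the cache retains a passive link)
 *      }
 *      ...
 *      node = hammer_ref_node_safe(hmp, &cache, &error);
 *
 * The later hammer_ref_node_safe() call short-cuts the RB lookup
 * whenever the cached node is still resident.
 */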

/*
 * Reference an already-referenced node.
 */
void
hammer_ref_node(hammer_node_t node)
{
        KKASSERT(node->lock.refs > 0 && node->ondisk != NULL);
        hammer_ref(&node->lock);
}

/*
 * Load a node's on-disk data reference.
 */
static int
hammer_load_node(hammer_node_t node, int isnew)
{
        hammer_buffer_t buffer;
        hammer_off_t buf_offset;
        int error;

        error = 0;
        ++node->loading;
        hammer_lock_ex(&node->lock);
        if (node->ondisk == NULL) {
                /*
                 * This is a little confusing but the gist is that
                 * node->buffer determines whether the node is on
                 * the buffer's clist and node->ondisk determines
                 * whether the buffer is referenced.
                 *
                 * We could be racing a buffer release, in which case
                 * node->buffer may become NULL while we are blocked
                 * referencing the buffer.
                 */
                if ((buffer = node->buffer) != NULL) {
                        error = hammer_ref_buffer(buffer);
                        if (error == 0 && node->buffer == NULL) {
                                TAILQ_INSERT_TAIL(&buffer->clist,
                                                  node, entry);
                                node->buffer = buffer;
                        }
                } else {
                        buf_offset = node->node_offset & ~HAMMER_BUFMASK64;
                        buffer = hammer_get_buffer(node->hmp, buf_offset,
                                                   HAMMER_BUFSIZE, 0, &error);
                        if (buffer) {
                                KKASSERT(error == 0);
                                TAILQ_INSERT_TAIL(&buffer->clist,
                                                  node, entry);
                                node->buffer = buffer;
                        }
                }
                if (error)
                        goto failed;
                node->ondisk = (void *)((char *)buffer->ondisk +
                                        (node->node_offset & HAMMER_BUFMASK));
                if (isnew == 0 &&
                    (node->flags & HAMMER_NODE_CRCGOOD) == 0) {
                        if (hammer_crc_test_btree(node->ondisk) == 0)
                                Debugger("CRC FAILED: B-TREE NODE");
                        node->flags |= HAMMER_NODE_CRCGOOD;
                }
        }
failed:
        --node->loading;
        hammer_unlock(&node->lock);
        return (error);
}

/*
 * Safely reference a node, interlock against flushes via the IO subsystem.
 */
hammer_node_t
hammer_ref_node_safe(struct hammer_mount *hmp, hammer_node_cache_t cache,
                     int *errorp)
{
        hammer_node_t node;

        node = cache->node;
        if (node != NULL) {
                hammer_ref(&node->lock);
                if (node->ondisk)
                        *errorp = 0;
                else
                        *errorp = hammer_load_node(node, 0);
                if (*errorp) {
                        hammer_rel_node(node);
                        node = NULL;
                }
        } else {
                *errorp = ENOENT;
        }
        return(node);
}

/*
 * Release a hammer_node.  On the last release the node dereferences
 * its underlying buffer and may or may not be destroyed.
 */
void
hammer_rel_node(hammer_node_t node)
{
        hammer_buffer_t buffer;

        /*
         * If this isn't the last ref just decrement the ref count and
         * return.
         */
        if (node->lock.refs > 1) {
                hammer_unref(&node->lock);
                return;
        }

        /*
         * If there is no ondisk info or no buffer the node failed to load,
         * remove the last reference and destroy the node.
         */
        if (node->ondisk == NULL) {
                hammer_unref(&node->lock);
                hammer_flush_node(node);
                /* node is stale now */
                return;
        }

        /*
         * Do not disassociate the node from the buffer if it represents
         * a modified B-Tree node that still needs its crc to be generated.
         */
        if (node->flags & HAMMER_NODE_NEEDSCRC)
                return;

        /*
         * Do final cleanups and then either destroy the node or leave it
         * passively cached.  The buffer reference is removed regardless.
         */
        buffer = node->buffer;
        node->ondisk = NULL;

        if ((node->flags & HAMMER_NODE_FLUSH) == 0) {
                hammer_unref(&node->lock);
                hammer_rel_buffer(buffer, 0);
                return;
        }

        /*
         * Destroy the node.
         */
        hammer_unref(&node->lock);
        hammer_flush_node(node);
        /* node is stale */
        hammer_rel_buffer(buffer, 0);
}

/*
 * Free space on-media associated with a B-Tree node.
 */
void
hammer_delete_node(hammer_transaction_t trans, hammer_node_t node)
{
        KKASSERT((node->flags & HAMMER_NODE_DELETED) == 0);
        node->flags |= HAMMER_NODE_DELETED;
        hammer_blockmap_free(trans, node->node_offset, sizeof(*node->ondisk));
}

/*
 * Passively cache a referenced hammer_node.  The caller may release
 * the node on return.
 */
void
hammer_cache_node(hammer_node_cache_t cache, hammer_node_t node)
{
        /*
         * If the node doesn't exist, or is being deleted, don't cache it!
         *
         * The node can only ever be NULL in the I/O failure path.
         */
        if (node == NULL || (node->flags & HAMMER_NODE_DELETED))
                return;
        if (cache->node == node)
                return;
        while (cache->node)
                hammer_uncache_node(cache);
        if (node->flags & HAMMER_NODE_DELETED)
                return;
        cache->node = node;
        TAILQ_INSERT_TAIL(&node->cache_list, cache, entry);
}

void
hammer_uncache_node(hammer_node_cache_t cache)
{
        hammer_node_t node;

        if ((node = cache->node) != NULL) {
                TAILQ_REMOVE(&node->cache_list, cache, entry);
                cache->node = NULL;
                if (TAILQ_EMPTY(&node->cache_list))
                        hammer_flush_node(node);
        }
}

/*
 * Remove a node's cache references and destroy the node if it has no
 * other references or backing store.
 */
void
hammer_flush_node(hammer_node_t node)
{
        hammer_node_cache_t cache;
        hammer_buffer_t buffer;

        while ((cache = TAILQ_FIRST(&node->cache_list)) != NULL) {
                TAILQ_REMOVE(&node->cache_list, cache, entry);
                cache->node = NULL;
        }
        if (node->lock.refs == 0 && node->ondisk == NULL) {
                KKASSERT((node->flags & HAMMER_NODE_NEEDSCRC) == 0);
                RB_REMOVE(hammer_nod_rb_tree, &node->hmp->rb_nods_root, node);
                if ((buffer = node->buffer) != NULL) {
                        node->buffer = NULL;
                        TAILQ_REMOVE(&buffer->clist, node, entry);
                        /* buffer is unreferenced because ondisk is NULL */
                }
                --hammer_count_nodes;
                kfree(node, M_HAMMER);
        }
}

/*
 * Flush passively cached B-Tree nodes associated with this buffer.
 * This is only called when the buffer is about to be destroyed, so
 * none of the nodes should have any references.  The buffer is locked.
 *
 * We may be interlocked with the buffer.
 */
void
hammer_flush_buffer_nodes(hammer_buffer_t buffer)
{
        hammer_node_t node;

        while ((node = TAILQ_FIRST(&buffer->clist)) != NULL) {
                KKASSERT(node->ondisk == NULL);
                KKASSERT((node->flags & HAMMER_NODE_NEEDSCRC) == 0);

                if (node->lock.refs == 0) {
                        hammer_ref(&node->lock);
                        node->flags |= HAMMER_NODE_FLUSH;
                        hammer_rel_node(node);
                } else {
                        KKASSERT(node->loading != 0);
                        KKASSERT(node->buffer != NULL);
                        buffer = node->buffer;
                        node->buffer = NULL;
                        TAILQ_REMOVE(&buffer->clist, node, entry);
                        /* buffer is unreferenced because ondisk is NULL */
                }
        }
}


/************************************************************************
 *                              ALLOCATORS                              *
 ************************************************************************/

/*
 * Allocate a B-Tree node.
 */
hammer_node_t
hammer_alloc_btree(hammer_transaction_t trans, int *errorp)
{
        hammer_buffer_t buffer = NULL;
        hammer_node_t node = NULL;
        hammer_off_t node_offset;

        node_offset = hammer_blockmap_alloc(trans, HAMMER_ZONE_BTREE_INDEX,
                                            sizeof(struct hammer_node_ondisk),
                                            errorp);
        if (*errorp == 0) {
                node = hammer_get_node(trans->hmp, node_offset, 1, errorp);
                hammer_modify_node_noundo(trans, node);
                bzero(node->ondisk, sizeof(*node->ondisk));
                hammer_modify_node_done(node);
        }
        if (buffer)
                hammer_rel_buffer(buffer, 0);
        return(node);
}

/*
 * Allocate data.  If the address of a data buffer is supplied then
 * any prior non-NULL *data_bufferp will be released and *data_bufferp
 * will be set to the related buffer.  The caller must release it when
 * finally done.  The initial *data_bufferp should be set to NULL by
 * the caller.
 *
 * The caller is responsible for making hammer_modify*() calls on the
 * *data_bufferp.
 */
void *
hammer_alloc_data(hammer_transaction_t trans, int32_t data_len,
                  u_int16_t rec_type, hammer_off_t *data_offsetp,
                  struct hammer_buffer **data_bufferp, int *errorp)
{
        void *data;
        int zone;

        /*
         * Allocate data
         */
        if (data_len) {
                switch(rec_type) {
                case HAMMER_RECTYPE_INODE:
                case HAMMER_RECTYPE_DIRENTRY:
                case HAMMER_RECTYPE_EXT:
                case HAMMER_RECTYPE_FIX:
                case HAMMER_RECTYPE_PFS:
                        zone = HAMMER_ZONE_META_INDEX;
                        break;
                case HAMMER_RECTYPE_DATA:
                case HAMMER_RECTYPE_DB:
                        if (data_len <= HAMMER_BUFSIZE / 2) {
                                zone = HAMMER_ZONE_SMALL_DATA_INDEX;
                        } else {
                                data_len = (data_len + HAMMER_BUFMASK) &
                                           ~HAMMER_BUFMASK;
                                zone = HAMMER_ZONE_LARGE_DATA_INDEX;
                        }
                        break;
                default:
                        panic("hammer_alloc_data: rec_type %04x unknown",
                              rec_type);
                        zone = 0;       /* NOT REACHED */
                        break;
                }
                *data_offsetp = hammer_blockmap_alloc(trans, zone,
                                                      data_len, errorp);
        } else {
                *data_offsetp = 0;
        }
        if (*errorp == 0 && data_bufferp) {
                if (data_len) {
                        data = hammer_bread_ext(trans->hmp, *data_offsetp,
                                                data_len, errorp, data_bufferp);
                } else {
                        data = NULL;
                }
        } else {
                data = NULL;
        }
        return(data);
}
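
/*
 * A minimal sketch of the expected allocate-then-modify sequence (this
 * mirrors how the record code uses the allocator; the bcopy source and
 * data_len are placeholders):
 *
 *      struct hammer_buffer *data_buffer = NULL;
 *      hammer_off_t data_offset;
 *      void *data;
 *      int error;
 *
 *      data = hammer_alloc_data(trans, data_len, HAMMER_RECTYPE_DATA,
 *                               &data_offset, &data_buffer, &error);
 *      if (data) {
 *              hammer_modify_buffer(trans, data_buffer, NULL, 0);
 *              bcopy(src, data, data_len);
 *              hammer_modify_buffer_done(data_buffer);
 *      }
 *      if (data_buffer)
 *              hammer_rel_buffer(data_buffer, 0);
 *
 * Freshly allocated storage needs no UNDO records, hence the NULL/0
 * range passed to hammer_modify_buffer().
 */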

/*
 * Sync dirty buffers to the media and clean-up any loose ends.
 *
 * These functions do not start the flusher going; they simply
 * queue everything up to the flusher.
 */
static int hammer_sync_scan1(struct mount *mp, struct vnode *vp, void *data);
static int hammer_sync_scan2(struct mount *mp, struct vnode *vp, void *data);

int
hammer_queue_inodes_flusher(hammer_mount_t hmp, int waitfor)
{
        struct hammer_sync_info info;

        info.error = 0;
        info.waitfor = waitfor;
        if (waitfor == MNT_WAIT) {
                vmntvnodescan(hmp->mp, VMSC_GETVP|VMSC_ONEPASS,
                              hammer_sync_scan1, hammer_sync_scan2, &info);
        } else {
                vmntvnodescan(hmp->mp, VMSC_GETVP|VMSC_ONEPASS|VMSC_NOWAIT,
                              hammer_sync_scan1, hammer_sync_scan2, &info);
        }
        return(info.error);
}

/*
 * Filesystem sync.  If doing a synchronous sync make a second pass on
 * the vnodes in case any were already flushing during the first pass,
 * and activate the flusher twice (the second time brings the UNDO FIFO's
 * start position up to the end position after the first call).
 */
int
hammer_sync_hmp(hammer_mount_t hmp, int waitfor)
{
        struct hammer_sync_info info;

        info.error = 0;
        info.waitfor = MNT_NOWAIT;
        vmntvnodescan(hmp->mp, VMSC_GETVP|VMSC_NOWAIT,
                      hammer_sync_scan1, hammer_sync_scan2, &info);
        if (info.error == 0 && waitfor == MNT_WAIT) {
                info.waitfor = waitfor;
                vmntvnodescan(hmp->mp, VMSC_GETVP,
                              hammer_sync_scan1, hammer_sync_scan2, &info);
        }
        if (waitfor == MNT_WAIT) {
                hammer_flusher_sync(hmp);
                hammer_flusher_sync(hmp);
        } else {
                hammer_flusher_async(hmp, NULL);
        }
        return(info.error);
}
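
/*
 * For reference, the VFS sync entry point is expected to reduce to a
 * thin wrapper around hammer_sync_hmp(), roughly (a sketch only; the
 * real entry point lives in hammer_vfs_ops.c and may carry additional
 * checks):
 *
 *      static int
 *      hammer_vfs_sync(struct mount *mp, int waitfor)
 *      {
 *              hammer_mount_t hmp = (void *)mp->mnt_data;
 *              int error = 0;
 *
 *              if (hmp->ronly == 0)
 *                      error = hammer_sync_hmp(hmp, waitfor);
 *              return (error);
 *      }
 */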

static int
hammer_sync_scan1(struct mount *mp, struct vnode *vp, void *data)
{
        struct hammer_inode *ip;

        ip = VTOI(vp);
        if (vp->v_type == VNON || ip == NULL ||
            ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
             RB_EMPTY(&vp->v_rbdirty_tree))) {
                return(-1);
        }
        return(0);
}

static int
hammer_sync_scan2(struct mount *mp, struct vnode *vp, void *data)
{
        struct hammer_sync_info *info = data;
        struct hammer_inode *ip;
        int error;

        ip = VTOI(vp);
        if (vp->v_type == VNON || vp->v_type == VBAD ||
            ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
             RB_EMPTY(&vp->v_rbdirty_tree))) {
                return(0);
        }
        error = VOP_FSYNC(vp, MNT_NOWAIT);
        if (error)
                info->error = error;
        return(0);
}