1 /*
2  * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@backplane.com>
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in
15  *    the documentation and/or other materials provided with the
16  *    distribution.
17  * 3. Neither the name of The DragonFly Project nor the names of its
18  *    contributors may be used to endorse or promote products derived
19  *    from this software without specific, prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
25  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  *
34  * $DragonFly: src/sys/vfs/hammer/hammer_ondisk.c,v 1.76 2008/08/29 20:19:08 dillon Exp $
35  */
36 /*
37  * Manage HAMMER's on-disk structures.  These routines are primarily
38  * responsible for interfacing with the kernel's I/O subsystem and for
39  * managing in-memory structures.
40  */
41
42 #include <sys/nlookup.h>
43 #include <sys/buf2.h>
44
45 #include "hammer.h"
46
47 static void hammer_free_volume(hammer_volume_t volume);
48 static int hammer_load_volume(hammer_volume_t volume);
49 static int hammer_load_buffer(hammer_buffer_t buffer, int isnew);
50 static int hammer_load_node(hammer_transaction_t trans,
51                                 hammer_node_t node, int isnew);
52 static void _hammer_rel_node(hammer_node_t node, int locked);
53
54 static int
55 hammer_vol_rb_compare(hammer_volume_t vol1, hammer_volume_t vol2)
56 {
57         if (vol1->vol_no < vol2->vol_no)
58                 return(-1);
59         if (vol1->vol_no > vol2->vol_no)
60                 return(1);
61         return(0);
62 }
63
64 /*
65  * hammer_buffer structures are indexed via their zoneX_offset, not
66  * their zone2_offset.
67  */
68 static int
69 hammer_buf_rb_compare(hammer_buffer_t buf1, hammer_buffer_t buf2)
70 {
71         if (buf1->zoneX_offset < buf2->zoneX_offset)
72                 return(-1);
73         if (buf1->zoneX_offset > buf2->zoneX_offset)
74                 return(1);
75         return(0);
76 }
77
78 static int
79 hammer_nod_rb_compare(hammer_node_t node1, hammer_node_t node2)
80 {
81         if (node1->node_offset < node2->node_offset)
82                 return(-1);
83         if (node1->node_offset > node2->node_offset)
84                 return(1);
85         return(0);
86 }
87
88 RB_GENERATE2(hammer_vol_rb_tree, hammer_volume, rb_node,
89              hammer_vol_rb_compare, int32_t, vol_no);
90 RB_GENERATE2(hammer_buf_rb_tree, hammer_buffer, rb_node,
91              hammer_buf_rb_compare, hammer_off_t, zoneX_offset);
92 RB_GENERATE2(hammer_nod_rb_tree, hammer_node, rb_node,
93              hammer_nod_rb_compare, hammer_off_t, node_offset);
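
/*
 * Illustrative sketch (not part of the original source): RB_GENERATE2
 * emits typed lookup routines keyed on the extra field named above, so
 * callers can search these trees directly by key, e.g.:
 *
 *	vol = RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, vol_no);
 *	buf = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root,
 *			buf_offset & ~HAMMER_BUFMASK64);
 */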
94
95 /************************************************************************
96  *                              VOLUMES                                 *
97  ************************************************************************
98  *
99  * Load a HAMMER volume by name.  Returns 0 on success or a positive error
100  * code on failure.  Volumes must be loaded at mount time or via the
101  * hammer volume-add command; hammer_get_volume() will not load a new volume.
102  *
103  * The passed devvp is vref()'d but not locked.  This function consumes the
104  * ref (typically by associating it with the volume structure).
105  *
106  * Calls made to hammer_load_volume() are single-threaded.
107  */
108 int
109 hammer_install_volume(hammer_mount_t hmp, const char *volname,
110                       struct vnode *devvp, void *data)
111 {
112         struct mount *mp;
113         hammer_volume_t volume;
114         hammer_volume_ondisk_t ondisk;
115         hammer_volume_ondisk_t img;
116         struct nlookupdata nd;
117         struct buf *bp = NULL;
118         int error;
119         int ronly;
120         int setmp = 0;
121         int i;
122
123         mp = hmp->mp;
124         ronly = ((mp->mnt_flag & MNT_RDONLY) ? 1 : 0);
125
126         /*
127          * Allocate a volume structure
128          */
129         ++hammer_count_volumes;
130         volume = kmalloc(sizeof(*volume), hmp->m_misc, M_WAITOK|M_ZERO);
131         volume->vol_name = kstrdup(volname, hmp->m_misc);
132         volume->io.hmp = hmp;   /* bootstrap */
133         hammer_io_init(&volume->io, volume, HAMMER_IOTYPE_VOLUME);
134         volume->io.offset = 0LL;
135         volume->io.bytes = HAMMER_BUFSIZE;
136
137         /*
138          * Get the device vnode
139          */
140         if (devvp == NULL) {
141                 error = nlookup_init(&nd, volume->vol_name, UIO_SYSSPACE, NLC_FOLLOW);
142                 if (error == 0)
143                         error = nlookup(&nd);
144                 if (error == 0)
145                         error = cache_vref(&nd.nl_nch, nd.nl_cred, &volume->devvp);
146                 nlookup_done(&nd);
147         } else {
148                 error = 0;
149                 volume->devvp = devvp;
150         }
151
152         if (error == 0) {
153                 if (vn_isdisk(volume->devvp, &error)) {
154                         error = vfs_mountedon(volume->devvp);
155                 }
156         }
157         if (error == 0 && vcount(volume->devvp) > 0)
158                 error = EBUSY;
159         if (error == 0) {
160                 vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
161                 error = vinvalbuf(volume->devvp, V_SAVE, 0, 0);
162                 if (error == 0) {
163                         error = VOP_OPEN(volume->devvp,
164                                          (ronly ? FREAD : FREAD|FWRITE),
165                                          FSCRED, NULL);
166                 }
167                 vn_unlock(volume->devvp);
168         }
169         if (error) {
170                 hammer_free_volume(volume);
171                 return(error);
172         }
173         volume->devvp->v_rdev->si_mountpoint = mp;
174         setmp = 1;
175
176         /*
177          * Extract the volume number from the volume header and do various
178          * sanity checks.
179          */
180         error = bread(volume->devvp, 0LL, HAMMER_BUFSIZE, &bp);
181         if (error)
182                 goto late_failure;
183         ondisk = (void *)bp->b_data;
184
185         /*
186          * Initialize the volume header from the supplied data, if specified.
187          */
188         if (ronly == 0 && data) {
189                 img = (hammer_volume_ondisk_t)data;
190                 if (ondisk->vol_signature == HAMMER_FSBUF_VOLUME) {
191                         hkprintf("Formatting of valid HAMMER volume %s denied. "
192                                 "Erase with hammer strip or dd!\n", volname);
193                         error = EFTYPE;
194                         goto late_failure;
195                 }
196                 bcopy(img, ondisk, sizeof(*img));
197         }
198
199         if (ondisk->vol_signature != HAMMER_FSBUF_VOLUME) {
200                 hkprintf("volume %s has an invalid header\n", volume->vol_name);
201                 for (i = 0; i < (int)sizeof(ondisk->vol_signature); i++) {
202                         kprintf("%02x", ((char*)&ondisk->vol_signature)[i] & 0xFF);
203                         if (i != (int)sizeof(ondisk->vol_signature) - 1)
204                                 kprintf(" ");
205                 }
206                 kprintf("\n");
207                 error = EFTYPE;
208                 goto late_failure;
209         }
210         volume->vol_no = ondisk->vol_no;
211         volume->vol_flags = ondisk->vol_flags;
212         volume->maxbuf_off = HAMMER_ENCODE_RAW_BUFFER(volume->vol_no,
213                                     HAMMER_VOL_BUF_SIZE(ondisk));
214
215         if (RB_EMPTY(&hmp->rb_vols_root)) {
216                 hmp->fsid = ondisk->vol_fsid;
217         } else if (bcmp(&hmp->fsid, &ondisk->vol_fsid, sizeof(uuid_t))) {
218                 hkprintf("volume %s's fsid does not match other volumes\n",
219                         volume->vol_name);
220                 error = EFTYPE;
221                 goto late_failure;
222         }
223
224         /*
225          * Insert the volume structure into the red-black tree.
226          */
227         if (RB_INSERT(hammer_vol_rb_tree, &hmp->rb_vols_root, volume)) {
228                 hkprintf("volume %s has a duplicate vol_no %d\n",
229                         volume->vol_name, volume->vol_no);
230                 error = EEXIST;
231         }
232
233         if (error == 0)
234                 hammer_volume_number_add(hmp, volume);
235
236         /*
237          * Set the root volume.  HAMMER special-cases the rootvol structure.
238          * We do not hold a ref because this would prevent related I/O
239          * from being flushed.
240          */
241         if (error == 0 && ondisk->vol_rootvol == ondisk->vol_no) {
242                 hmp->rootvol = volume;
243                 hmp->nvolumes = ondisk->vol_count;
244                 if (bp) {
245                         brelse(bp);
246                         bp = NULL;
247                 }
248                 hmp->mp->mnt_stat.f_blocks += ondisk->vol0_stat_bigblocks *
249                                                 HAMMER_BUFFERS_PER_BIGBLOCK;
250                 hmp->mp->mnt_vstat.f_blocks += ondisk->vol0_stat_bigblocks *
251                                                 HAMMER_BUFFERS_PER_BIGBLOCK;
252         }
253 late_failure:
254         if (bp)
255                 brelse(bp);
256         if (error) {
257                 /*vinvalbuf(volume->devvp, V_SAVE, 0, 0);*/
258                 if (setmp)
259                         volume->devvp->v_rdev->si_mountpoint = NULL;
260                 vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
261                 VOP_CLOSE(volume->devvp, ronly ? FREAD : FREAD|FWRITE, NULL);
262                 vn_unlock(volume->devvp);
263                 hammer_free_volume(volume);
264         }
265         return (error);
266 }
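
/*
 * Illustrative sketch (assumed caller, not in this file): at mount time
 * the VFS glue installs each configured volume in turn and fails the
 * mount on the first error; "volnames" and "count" are hypothetical:
 *
 *	for (i = 0; i < count && error == 0; ++i)
 *		error = hammer_install_volume(hmp, volnames[i], NULL, NULL);
 *	if (error == 0)
 *		error = hammer_mountcheck_volumes(hmp);
 */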
267
268 /*
269  * This is called for each volume when updating the mount point from
270  * read-write to read-only or vice versa.
271  */
272 int
273 hammer_adjust_volume_mode(hammer_volume_t volume, void *data __unused)
274 {
275         if (volume->devvp) {
276                 vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
277                 if (volume->io.hmp->ronly) {
278                         /* do not call vinvalbuf */
279                         VOP_OPEN(volume->devvp, FREAD, FSCRED, NULL);
280                         VOP_CLOSE(volume->devvp, FREAD|FWRITE, NULL);
281                 } else {
282                         /* do not call vinvalbuf */
283                         VOP_OPEN(volume->devvp, FREAD|FWRITE, FSCRED, NULL);
284                         VOP_CLOSE(volume->devvp, FREAD, NULL);
285                 }
286                 vn_unlock(volume->devvp);
287         }
288         return(0);
289 }
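
/*
 * Sketch of the expected call pattern (an assumption based on the RB_SCAN
 * convention): the remount path applies the mode change to every volume:
 *
 *	RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL,
 *		hammer_adjust_volume_mode, NULL);
 */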
290
291 /*
292  * Unload and free a HAMMER volume.  Must return >= 0 to continue the
293  * scan; this function always returns 0.
294  */
295 int
296 hammer_unload_volume(hammer_volume_t volume, void *data)
297 {
298         hammer_mount_t hmp = volume->io.hmp;
299         struct buf *bp = NULL;
300         hammer_volume_ondisk_t img;
301         int ronly = ((hmp->mp->mnt_flag & MNT_RDONLY) ? 1 : 0);
302         int error;
303
304         /*
305          * Clear the on-media volume header using the supplied data, if specified.
306          */
307         if (ronly == 0 && data && volume->devvp) {
308                 img = (hammer_volume_ondisk_t)data;
309                 error = bread(volume->devvp, 0LL, HAMMER_BUFSIZE, &bp);
310                 if (error || bp->b_bcount < sizeof(*img)) {
311                         hmkprintf(hmp, "Failed to read volume header: %d\n", error);
312                         brelse(bp);
313                 } else {
314                         bcopy(img, bp->b_data, sizeof(*img));
315                         error = bwrite(bp);
316                         if (error)
317                                 hmkprintf(hmp, "Failed to clear volume header: %d\n",
318                                         error);
319                 }
320         }
321
322         /*
323          * Clean up the root volume pointer, which is held unlocked in hmp.
324          */
325         if (hmp->rootvol == volume)
326                 hmp->rootvol = NULL;
327
328         /*
329          * We must not flush a dirty buffer to disk on umount.  It should
330          * have already been dealt with by the flusher, or we may be in
331          * catastrophic failure.
332          */
333         hammer_io_clear_modify(&volume->io, 1);
334         volume->io.waitdep = 1;
335
336         /*
337          * Clean up the persistent ref that ioerror might have on the volume.
338          */
339         if (volume->io.ioerror)
340                 hammer_io_clear_error_noassert(&volume->io);
341
342         /*
343          * This should release the bp.  Releasing the volume with flush set
344          * implies the interlock is set.
345          */
346         hammer_ref_interlock_true(&volume->io.lock);
347         hammer_rel_volume(volume, 1);
348         KKASSERT(volume->io.bp == NULL);
349
350         /*
351          * There should be no references on the volume.
352          */
353         KKASSERT(hammer_norefs(&volume->io.lock));
354
355         volume->ondisk = NULL;
356         if (volume->devvp) {
357                 if (volume->devvp->v_rdev &&
358                     volume->devvp->v_rdev->si_mountpoint == hmp->mp) {
359                         volume->devvp->v_rdev->si_mountpoint = NULL;
360                 }
361                 if (ronly) {
362                         /*
363                          * Make sure we don't sync anything to disk if we
364                          * are in read-only mode (1) or critically-errored
365                          * (2).  Note that there may be dirty buffers in
366                          * normal read-only mode from crash recovery.
367                          */
368                         vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
369                         vinvalbuf(volume->devvp, 0, 0, 0);
370                         VOP_CLOSE(volume->devvp, FREAD, NULL);
371                         vn_unlock(volume->devvp);
372                 } else {
373                         /*
374                          * Normal termination, save any dirty buffers
375                          * (XXX there really shouldn't be any).
376                          */
377                         vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
378                         vinvalbuf(volume->devvp, V_SAVE, 0, 0);
379                         VOP_CLOSE(volume->devvp, FREAD|FWRITE, NULL);
380                         vn_unlock(volume->devvp);
381                 }
382         }
383
384         /*
385          * Destroy the structure
386          */
387         RB_REMOVE(hammer_vol_rb_tree, &hmp->rb_vols_root, volume);
388         hammer_volume_number_del(hmp, volume);
389         hammer_free_volume(volume);
390         return(0);
391 }
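
/*
 * Illustrative sketch (assumed caller): unmount walks all volumes the
 * same way; a non-NULL data argument supplies the header image used to
 * clear each volume's on-media header first:
 *
 *	RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL,
 *		hammer_unload_volume, NULL);
 */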
392
393 static
394 void
395 hammer_free_volume(hammer_volume_t volume)
396 {
397         hammer_mount_t hmp = volume->io.hmp;
398
399         if (volume->vol_name) {
400                 kfree(volume->vol_name, hmp->m_misc);
401                 volume->vol_name = NULL;
402         }
403         if (volume->devvp) {
404                 vrele(volume->devvp);
405                 volume->devvp = NULL;
406         }
407         --hammer_count_volumes;
408         kfree(volume, hmp->m_misc);
409 }
410
411 /*
412  * Get a HAMMER volume.  The volume must already exist.
413  */
414 hammer_volume_t
415 hammer_get_volume(hammer_mount_t hmp, int32_t vol_no, int *errorp)
416 {
417         hammer_volume_t volume;
418
419         /*
420          * Locate the volume structure
421          */
422         volume = RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, vol_no);
423         if (volume == NULL) {
424                 *errorp = ENOENT;
425                 return(NULL);
426         }
427
428         /*
429          * Reference the volume, load/check the data on the 0->1 transition.
430          * hammer_load_volume() will dispose of the interlock on return,
431          * and also clean up the ref count on error.
432          */
433         if (hammer_ref_interlock(&volume->io.lock)) {
434                 *errorp = hammer_load_volume(volume);
435                 if (*errorp)
436                         volume = NULL;
437         } else {
438                 KKASSERT(volume->ondisk);
439                 *errorp = 0;
440         }
441         return(volume);
442 }
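
/*
 * Illustrative sketch: the usual get/rel pairing.  volume->ondisk stays
 * valid while the reference is held; the error handling shown is an
 * assumption:
 *
 *	volume = hammer_get_volume(hmp, vol_no, &error);
 *	if (volume) {
 *		... use volume->ondisk ...
 *		hammer_rel_volume(volume, 0);
 *	}
 */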
443
444 int
445 hammer_ref_volume(hammer_volume_t volume)
446 {
447         int error;
448
449         /*
450          * Reference the volume and deal with the check condition used to
451          * load its ondisk info.
452          */
453         if (hammer_ref_interlock(&volume->io.lock)) {
454                 error = hammer_load_volume(volume);
455         } else {
456                 KKASSERT(volume->ondisk);
457                 error = 0;
458         }
459         return (error);
460 }
461
462 /*
463  * May be called without fs_token
464  */
465 hammer_volume_t
466 hammer_get_root_volume(hammer_mount_t hmp, int *errorp)
467 {
468         hammer_volume_t volume;
469
470         volume = hmp->rootvol;
471         KKASSERT(volume != NULL);
472
473         /*
474          * Reference the volume and deal with the check condition used to
475          * load its ondisk info.
476          */
477         if (hammer_ref_interlock(&volume->io.lock)) {
478                 lwkt_gettoken(&volume->io.hmp->fs_token);
479                 *errorp = hammer_load_volume(volume);
480                 lwkt_reltoken(&volume->io.hmp->fs_token);
481                 if (*errorp)
482                         volume = NULL;
483         } else {
484                 KKASSERT(volume->ondisk);
485                 *errorp = 0;
486         }
487         return (volume);
488 }
489
490 /*
491  * Load a volume's on-disk information.  The volume must be referenced and
492  * the interlock is held on call.  The interlock will be released on return.
493  * The reference will also be released on return if an error occurs.
494  */
495 static int
496 hammer_load_volume(hammer_volume_t volume)
497 {
498         int error;
499
500         if (volume->ondisk == NULL) {
501                 error = hammer_io_read(volume->devvp, &volume->io,
502                                        HAMMER_BUFSIZE);
503                 if (error == 0) {
504                         volume->ondisk = (void *)volume->io.bp->b_data;
505                         hammer_ref_interlock_done(&volume->io.lock);
506                 } else {
507                         hammer_rel_volume(volume, 1);
508                 }
509         } else {
510                 error = 0;
511         }
512         return(error);
513 }
514
515 /*
516  * Release a previously acquired reference on the volume.
517  *
518  * Volumes are not unloaded from memory during normal operation.
519  *
520  * May be called without fs_token
521  */
522 void
523 hammer_rel_volume(hammer_volume_t volume, int locked)
524 {
525         struct buf *bp;
526
527         if (hammer_rel_interlock(&volume->io.lock, locked)) {
528                 lwkt_gettoken(&volume->io.hmp->fs_token);
529                 volume->ondisk = NULL;
530                 bp = hammer_io_release(&volume->io, locked);
531                 lwkt_reltoken(&volume->io.hmp->fs_token);
532                 hammer_rel_interlock_done(&volume->io.lock, locked);
533                 if (bp)
534                         brelse(bp);
535         }
536 }
537
538 int
539 hammer_mountcheck_volumes(hammer_mount_t hmp)
540 {
541         hammer_volume_t vol;
542         int i;
543
544         HAMMER_VOLUME_NUMBER_FOREACH(hmp, i) {
545                 vol = RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, i);
546                 if (vol == NULL)
547                         return(EINVAL);
548         }
549         return(0);
550 }
551
552 int
553 hammer_get_installed_volumes(hammer_mount_t hmp)
554 {
555         int i, ret = 0;
556
557         HAMMER_VOLUME_NUMBER_FOREACH(hmp, i)
558                 ret++;
559         return(ret);
560 }
561
562 /************************************************************************
563  *                              BUFFERS                                 *
564  ************************************************************************
565  *
566  * Manage buffers.  Currently most blockmap-backed zones are direct-mapped
567  * to zone-2 buffer offsets, without a translation stage.  However, the
568  * hammer_buffer structure is indexed by its zoneX_offset, not its
569  * zone2_offset.
570  *
571  * The proper zone must be maintained throughout the code-base all the way
572  * through to the big-block allocator, or routines like hammer_del_buffers()
573  * will not be able to locate all potentially conflicting buffers.
574  */
575
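/*
 * Illustrative sketch (assumption for clarity): the zone lives in the
 * top bits of an offset.  Direct-mapped zones translate arithmetically,
 * blockmap-backed zones go through a lookup, as hammer_get_buffer()
 * does below:
 *
 *	zone = HAMMER_ZONE_DECODE(buf_offset);
 *	if (hammer_is_zone2_mapped_index(zone))
 *		zone2_offset = hammer_blockmap_lookup(hmp, buf_offset, &error);
 *	else
 *		zone2_offset = hammer_xlate_to_zone2(buf_offset);
 *	vol_no = HAMMER_VOL_DECODE(zone2_offset);
 */
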
576 /*
577  * Helper function returns whether a zone offset can be directly translated
578  * to a raw buffer index or not.  Really only the volume and undo zones
579  * can't be directly translated.  Volumes are special-cased, and undo
580  * zones shouldn't be accessed via aliases in read-only mode.
581  *
582  * This function is ONLY used to detect aliased zones during a read-only
583  * mount.
584  */
585 static __inline int
586 hammer_direct_zone(hammer_off_t buf_offset)
587 {
588         int zone = HAMMER_ZONE_DECODE(buf_offset);
589
590         return(hammer_is_direct_mapped_index(zone));
591 }
592
593 hammer_buffer_t
594 hammer_get_buffer(hammer_mount_t hmp, hammer_off_t buf_offset,
595                   int bytes, int isnew, int *errorp)
596 {
597         hammer_buffer_t buffer;
598         hammer_volume_t volume;
599         hammer_off_t    zone2_offset;
600         int vol_no;
601         int zone;
602
603         buf_offset &= ~HAMMER_BUFMASK64;
604 again:
605         /*
606          * Shortcut if the buffer is already cached
607          */
608         buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root, buf_offset);
609         if (buffer) {
610                 /*
611                  * Once refed the ondisk field will not be cleared by
612                  * any other action.  Shortcut the operation if the
613                  * ondisk structure is valid.
614                  */
615 found_aliased:
616                 if (hammer_ref_interlock(&buffer->io.lock) == 0) {
617                         hammer_io_advance(&buffer->io);
618                         KKASSERT(buffer->ondisk);
619                         *errorp = 0;
620                         return(buffer);
621                 }
622
623                 /*
624                  * 0->1 transition or deferred 0->1 transition (CHECK),
625                  * interlock now held.  Shortcut if ondisk is already
626                  * assigned.
627                  */
628                 atomic_add_int(&hammer_count_refedbufs, 1);
629                 if (buffer->ondisk) {
630                         hammer_io_advance(&buffer->io);
631                         hammer_ref_interlock_done(&buffer->io.lock);
632                         *errorp = 0;
633                         return(buffer);
634                 }
635
636                 /*
637                  * The buffer is no longer loose if it has a ref, and
638                  * cannot become loose once it gains a ref.  Loose
639                  * buffers will never be in a modified state.  This should
640                  * only occur on the 0->1 transition of refs.
641                  *
642                  * lose_root can be modified via a biodone() interrupt
643                  * so the io_token must be held.
644                  */
645                 if (buffer->io.mod_root == &hmp->lose_root) {
646                         lwkt_gettoken(&hmp->io_token);
647                         if (buffer->io.mod_root == &hmp->lose_root) {
648                                 RB_REMOVE(hammer_mod_rb_tree,
649                                           buffer->io.mod_root, &buffer->io);
650                                 buffer->io.mod_root = NULL;
651                                 KKASSERT(buffer->io.modified == 0);
652                         }
653                         lwkt_reltoken(&hmp->io_token);
654                 }
655                 goto found;
656         } else if (hmp->ronly && hammer_direct_zone(buf_offset)) {
657                 /*
658                  * If this is a read-only mount there could be an alias
659                  * in the raw-zone.  If there is we use that buffer instead.
660                  *
661                  * rw mounts will not have aliases.  Also note when going
662                  * from ro -> rw the recovered raw buffers are flushed and
663                  * reclaimed, so again there will not be any aliases once
664                  * the mount is rw.
665                  */
666                 buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root,
667                                    hammer_xlate_to_zone2(buf_offset));
668                 if (buffer) {
669                         if (hammer_debug_general & 0x0001) {
670                                 hkrateprintf(&hmp->kdiag,
671                                             "recovered aliased %016jx\n",
672                                             (intmax_t)buf_offset);
673                         }
674                         goto found_aliased;
675                 }
676         }
677
678         /*
679          * Handle blockmap offset translations
680          */
681         zone = HAMMER_ZONE_DECODE(buf_offset);
682         if (hammer_is_zone2_mapped_index(zone)) {
683                 zone2_offset = hammer_blockmap_lookup(hmp, buf_offset, errorp);
684         } else if (zone == HAMMER_ZONE_UNDO_INDEX) {
685                 zone2_offset = hammer_undo_lookup(hmp, buf_offset, errorp);
686         } else {
687                 /* Must be zone-2 (not 1 or 4 or 15) */
688                 KKASSERT(zone == HAMMER_ZONE_RAW_BUFFER_INDEX);
689                 zone2_offset = buf_offset;
690                 *errorp = 0;
691         }
692         if (*errorp)
693                 return(NULL);
694
695         /*
696          * NOTE: zone2_offset and maxbuf_off are both full zone-2 offset
697          * specifications.
698          */
699         KKASSERT(hammer_is_zone_raw_buffer(zone2_offset));
700         vol_no = HAMMER_VOL_DECODE(zone2_offset);
701         volume = hammer_get_volume(hmp, vol_no, errorp);
702         if (volume == NULL)
703                 return(NULL);
704
705         KKASSERT(zone2_offset < volume->maxbuf_off);
706
707         /*
708          * Allocate a new buffer structure.  We will check for races later.
709          */
710         ++hammer_count_buffers;
711         buffer = kmalloc(sizeof(*buffer), hmp->m_misc,
712                          M_WAITOK|M_ZERO|M_USE_RESERVE);
713         buffer->zone2_offset = zone2_offset;
714         buffer->zoneX_offset = buf_offset;
715
716         hammer_io_init(&buffer->io, volume, hammer_zone_to_iotype(zone));
717         buffer->io.offset = hammer_xlate_to_phys(volume->ondisk, zone2_offset);
718         buffer->io.bytes = bytes;
719         TAILQ_INIT(&buffer->node_list);
720         hammer_ref_interlock_true(&buffer->io.lock);
721
722         /*
723          * Insert the buffer into the RB tree and handle late collisions.
724          */
725         if (RB_INSERT(hammer_buf_rb_tree, &hmp->rb_bufs_root, buffer)) {
726                 hammer_rel_volume(volume, 0);
727                 buffer->io.volume = NULL;                       /* safety */
728                 if (hammer_rel_interlock(&buffer->io.lock, 1))  /* safety */
729                         hammer_rel_interlock_done(&buffer->io.lock, 1);
730                 --hammer_count_buffers;
731                 kfree(buffer, hmp->m_misc);
732                 goto again;
733         }
734         atomic_add_int(&hammer_count_refedbufs, 1);
735 found:
736
737         /*
738          * The buffer is referenced and interlocked.  Load the buffer
739          * if necessary.  hammer_load_buffer() deals with the interlock
740          * and, if an error is returned, also deals with the ref.
741          */
742         if (buffer->ondisk == NULL) {
743                 *errorp = hammer_load_buffer(buffer, isnew);
744                 if (*errorp)
745                         buffer = NULL;
746         } else {
747                 hammer_io_advance(&buffer->io);
748                 hammer_ref_interlock_done(&buffer->io.lock);
749                 *errorp = 0;
750         }
751         return(buffer);
752 }
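
/*
 * Illustrative sketch: callers pair hammer_get_buffer() with
 * hammer_rel_buffer() and may access buffer->ondisk while referenced
 * (the error handling shown is an assumption):
 *
 *	buffer = hammer_get_buffer(hmp, buf_offset, HAMMER_BUFSIZE,
 *				   0, &error);
 *	if (buffer) {
 *		... read or modify buffer->ondisk ...
 *		hammer_rel_buffer(buffer, 0);
 *	}
 */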
753
754 /*
755  * This is used by the direct-read code to deal with large-data buffers
756  * created by the reblocker and mirror-write code.  The direct-read code
757  * bypasses the HAMMER buffer subsystem and so any aliased dirty or write-
758  * running hammer buffers must be fully synced to disk before we can issue
759  * the direct-read.
760  *
761  * This code path is not considered critical as only the reblocker and
762  * mirror-write code will create large-data buffers via the HAMMER buffer
763  * subsystem.  They do that because they operate at the B-Tree level and
764  * do not access the vnode/inode structures.
765  */
766 void
767 hammer_sync_buffers(hammer_mount_t hmp, hammer_off_t base_offset, int bytes)
768 {
769         hammer_buffer_t buffer;
770         int error;
771
772         KKASSERT(hammer_is_zone_large_data(base_offset));
773
774         while (bytes > 0) {
775                 buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root,
776                                    base_offset);
777                 if (buffer && (buffer->io.modified || buffer->io.running)) {
778                         error = hammer_ref_buffer(buffer);
779                         if (error == 0) {
780                                 hammer_io_wait(&buffer->io);
781                                 if (buffer->io.modified) {
782                                         hammer_io_write_interlock(&buffer->io);
783                                         hammer_io_flush(&buffer->io, 0);
784                                         hammer_io_done_interlock(&buffer->io);
785                                         hammer_io_wait(&buffer->io);
786                                 }
787                                 hammer_rel_buffer(buffer, 0);
788                         }
789                 }
790                 base_offset += HAMMER_BUFSIZE;
791                 bytes -= HAMMER_BUFSIZE;
792         }
793 }
794
795 /*
796  * Destroy all buffers covering the specified zoneX offset range.  This
797  * is called when the related blockmap layer2 entry is freed or when
798  * a direct write bypasses our buffer/buffer-cache subsystem.
799  *
800  * The buffers may be referenced by the caller itself.  Setting reclaim
801  * will cause the buffer to be destroyed when its ref count reaches zero.
802  *
803  * Return 0 on success, EAGAIN if some buffers could not be destroyed due
804  * to additional references held by other threads, or some other (typically
805  * fatal) error.
806  */
807 int
808 hammer_del_buffers(hammer_mount_t hmp, hammer_off_t base_offset,
809                    hammer_off_t zone2_offset, int bytes,
810                    int report_conflicts)
811 {
812         hammer_buffer_t buffer;
813         hammer_volume_t volume;
814         int vol_no;
815         int error;
816         int ret_error;
817
818         vol_no = HAMMER_VOL_DECODE(zone2_offset);
819         volume = hammer_get_volume(hmp, vol_no, &ret_error);
820         KKASSERT(ret_error == 0);
821
822         while (bytes > 0) {
823                 buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root,
824                                    base_offset);
825                 if (buffer) {
826                         error = hammer_ref_buffer(buffer);
827                         if (hammer_debug_general & 0x20000) {
828                                 hkprintf("delbufr %016jx rerr=%d 1ref=%d\n",
829                                         (intmax_t)buffer->zoneX_offset,
830                                         error,
831                                         hammer_oneref(&buffer->io.lock));
832                         }
833                         if (error == 0 && !hammer_oneref(&buffer->io.lock)) {
834                                 error = EAGAIN;
835                                 hammer_rel_buffer(buffer, 0);
836                         }
837                         if (error == 0) {
838                                 KKASSERT(buffer->zone2_offset == zone2_offset);
839                                 hammer_io_clear_modify(&buffer->io, 1);
840                                 buffer->io.reclaim = 1;
841                                 buffer->io.waitdep = 1;
842                                 KKASSERT(buffer->io.volume == volume);
843                                 hammer_rel_buffer(buffer, 0);
844                         }
845                 } else {
846                         error = hammer_io_inval(volume, zone2_offset);
847                 }
848                 if (error) {
849                         ret_error = error;
850                         if (report_conflicts ||
851                             (hammer_debug_general & 0x8000)) {
852                                 krateprintf(&hmp->kdiag,
853                                         "hammer_del_buffers: unable to "
854                                         "invalidate %016jx buffer=%p "
855                                         "rep=%d lkrefs=%08x\n",
856                                         (intmax_t)base_offset,
857                                         buffer, report_conflicts,
858                                         (buffer ? buffer->io.lock.refs : -1));
859                         }
860                 }
861                 base_offset += HAMMER_BUFSIZE;
862                 zone2_offset += HAMMER_BUFSIZE;
863                 bytes -= HAMMER_BUFSIZE;
864         }
865         hammer_rel_volume(volume, 0);
866         return (ret_error);
867 }
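
/*
 * Illustrative sketch (assumed caller): when the blockmap frees a
 * big-block it invalidates every buffer covering it; EAGAIN signals a
 * transient conflicting reference.  HAMMER_BIGBLOCK_SIZE is assumed:
 *
 *	error = hammer_del_buffers(hmp, base_offset, zone2_offset,
 *				   HAMMER_BIGBLOCK_SIZE, 0);
 */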
868
869 /*
870  * Given a referenced and interlocked buffer load/validate the data.
871  *
872  * The buffer interlock will be released on return.  If an error is
873  * returned the buffer reference will also be released (and the buffer
874  * pointer will thus be stale).
875  */
876 static int
877 hammer_load_buffer(hammer_buffer_t buffer, int isnew)
878 {
879         hammer_volume_t volume;
880         int error;
881
882         /*
883          * Load the buffer's on-disk info
884          */
885         volume = buffer->io.volume;
886
887         if (hammer_debug_io & 0x0004) {
888                 hdkprintf("load_buffer %016jx %016jx isnew=%d od=%p\n",
889                         (intmax_t)buffer->zoneX_offset,
890                         (intmax_t)buffer->zone2_offset,
891                         isnew, buffer->ondisk);
892         }
893
894         if (buffer->ondisk == NULL) {
895                 /*
896                  * Issue the read or generate a new buffer.  When reading
897                  * the limit argument controls any read-ahead clustering
898                  * hammer_io_read() is allowed to do.
899                  *
900                  * We cannot read-ahead in the large-data zone and we cannot
901                  * cross a big-block boundary as the next big-block might
902                  * use a different buffer size.
903                  */
904                 if (isnew) {
905                         error = hammer_io_new(volume->devvp, &buffer->io);
906                 } else if (hammer_is_zone_large_data(buffer->zoneX_offset)) {
907                         error = hammer_io_read(volume->devvp, &buffer->io,
908                                                buffer->io.bytes);
909                 } else {
910                         hammer_off_t limit;
911
912                         limit = HAMMER_BIGBLOCK_DOALIGN(buffer->zone2_offset);
913                         limit -= buffer->zone2_offset;
914                         error = hammer_io_read(volume->devvp, &buffer->io,
915                                                limit);
916                 }
917                 if (error == 0)
918                         buffer->ondisk = (void *)buffer->io.bp->b_data;
919         } else if (isnew) {
920                 error = hammer_io_new(volume->devvp, &buffer->io);
921         } else {
922                 error = 0;
923         }
924         if (error == 0) {
925                 hammer_io_advance(&buffer->io);
926                 hammer_ref_interlock_done(&buffer->io.lock);
927         } else {
928                 hammer_rel_buffer(buffer, 1);
929         }
930         return (error);
931 }
932
933 /*
934  * NOTE: Called from RB_SCAN, must return >= 0 for scan to continue.
935  * This routine is only called during unmount or when a volume is
936  * removed.
937  *
938  * If data != NULL, it specifies a volume whose buffers should
939  * be unloaded.
940  */
941 int
942 hammer_unload_buffer(hammer_buffer_t buffer, void *data)
943 {
944         hammer_volume_t volume = (hammer_volume_t)data;
945
946         /*
947          * If volume != NULL we are only interested in unloading buffers
948          * associated with a particular volume.
949          */
950         if (volume != NULL && volume != buffer->io.volume)
951                 return 0;
952
953         /*
954          * Clean up the persistent ref ioerror might have on the buffer
955          * and acquire a ref.  Expect a 0->1 transition.
956          */
957         if (buffer->io.ioerror) {
958                 hammer_io_clear_error_noassert(&buffer->io);
959                 atomic_add_int(&hammer_count_refedbufs, -1);
960         }
961         hammer_ref_interlock_true(&buffer->io.lock);
962         atomic_add_int(&hammer_count_refedbufs, 1);
963
964         /*
965          * We must not flush a dirty buffer to disk on umount.  It should
966          * have already been dealt with by the flusher, or we may be in
967          * catastrophic failure.
968          *
969          * We must set waitdep to ensure that a running buffer is waited
970          * on and released prior to us trying to unload the volume.
971          */
972         hammer_io_clear_modify(&buffer->io, 1);
973         hammer_flush_buffer_nodes(buffer);
974         buffer->io.waitdep = 1;
975         hammer_rel_buffer(buffer, 1);
976         return(0);
977 }
978
979 /*
980  * Reference a buffer that is either already referenced or via a specially
981  * handled pointer (aka cursor->buffer).
982  */
983 int
984 hammer_ref_buffer(hammer_buffer_t buffer)
985 {
986         hammer_mount_t hmp;
987         int error;
988         int locked;
989
990         /*
991          * Acquire a ref, plus the buffer will be interlocked on the
992          * 0->1 transition.
993          */
994         locked = hammer_ref_interlock(&buffer->io.lock);
995         hmp = buffer->io.hmp;
996
997         /*
998          * At this point a biodone() will not touch the buffer other than
999          * incidental bits.  However, lose_root can be modified via
1000          * a biodone() interrupt.
1001          *
1002          * No longer loose.  lose_root requires the io_token.
1003          */
1004         if (buffer->io.mod_root == &hmp->lose_root) {
1005                 lwkt_gettoken(&hmp->io_token);
1006                 if (buffer->io.mod_root == &hmp->lose_root) {
1007                         RB_REMOVE(hammer_mod_rb_tree,
1008                                   buffer->io.mod_root, &buffer->io);
1009                         buffer->io.mod_root = NULL;
1010                 }
1011                 lwkt_reltoken(&hmp->io_token);
1012         }
1013
1014         if (locked) {
1015                 atomic_add_int(&hammer_count_refedbufs, 1);
1016                 error = hammer_load_buffer(buffer, 0);
1017                 /* NOTE: on error the buffer pointer is stale */
1018         } else {
1019                 error = 0;
1020         }
1021         return(error);
1022 }
1023
1024 /*
1025  * Release a reference on the buffer.  On the 1->0 transition the
1026  * underlying IO will be released but the data reference is left
1027  * cached.
1028  *
1029  * Only destroy the structure itself if the related buffer cache buffer
1030  * was disassociated from it.  This ties the management of the structure
1031  * to the buffer cache subsystem.  buffer->ondisk determines whether the
1032  * embedded io is referenced or not.
1033  */
1034 void
1035 hammer_rel_buffer(hammer_buffer_t buffer, int locked)
1036 {
1037         hammer_volume_t volume;
1038         hammer_mount_t hmp;
1039         struct buf *bp = NULL;
1040         int freeme = 0;
1041
1042         hmp = buffer->io.hmp;
1043
1044         if (hammer_rel_interlock(&buffer->io.lock, locked) == 0)
1045                 return;
1046
1047         /*
1048          * hammer_count_refedbufs accounting.  Decrement if we are in
1049          * the error path or if CHECK is clear.
1050          *
1051          * If we are not in the error path and CHECK is set the caller
1052          * probably just did a hammer_ref() and didn't account for it,
1053          * so we don't account for the loss here.
1054          */
1055         if (locked || (buffer->io.lock.refs & HAMMER_REFS_CHECK) == 0)
1056                 atomic_add_int(&hammer_count_refedbufs, -1);
1057
1058         /*
1059          * If the caller locked us, or the normal release transitioned
1060          * from 1->0 (and acquired the lock), attempt to release the
1061          * io.  If the caller locked us we tell hammer_io_release()
1062          * to flush (which would be the unload or failure path).
1063          */
1064         bp = hammer_io_release(&buffer->io, locked);
1065
1066         /*
1067          * If the buffer has no bp association and no refs we can destroy
1068          * it.
1069          *
1070          * NOTE: It is impossible for any associated B-Tree nodes to have
1071          * refs if the buffer has no additional refs.
1072          */
1073         if (buffer->io.bp == NULL && hammer_norefs(&buffer->io.lock)) {
1074                 RB_REMOVE(hammer_buf_rb_tree,
1075                           &buffer->io.hmp->rb_bufs_root,
1076                           buffer);
1077                 volume = buffer->io.volume;
1078                 buffer->io.volume = NULL; /* sanity */
1079                 hammer_rel_volume(volume, 0);
1080                 hammer_io_clear_modlist(&buffer->io);
1081                 hammer_flush_buffer_nodes(buffer);
1082                 KKASSERT(TAILQ_EMPTY(&buffer->node_list));
1083                 freeme = 1;
1084         }
1085
1086         /*
1087          * Cleanup
1088          */
1089         hammer_rel_interlock_done(&buffer->io.lock, locked);
1090         if (bp)
1091                 brelse(bp);
1092         if (freeme) {
1093                 --hammer_count_buffers;
1094                 kfree(buffer, hmp->m_misc);
1095         }
1096 }
1097
1098 /*
1099  * Access the filesystem buffer containing the specified hammer offset.
1100  * buf_offset is a conglomeration of the volume number and vol_buf_beg
1101  * relative buffer offset.  It must also have bit 55 set to be valid.
1102  * (see hammer_off_t in hammer_disk.h).
1103  *
1104  * Any prior buffer in *bufferp will be released and replaced by the
1105  * requested buffer.
1106  *
1107  * NOTE: The buffer is indexed via its zoneX_offset but we allow the
1108  * passed cached *bufferp to match against either zoneX or zone2.
1109  */
1110 static __inline
1111 void *
1112 _hammer_bread(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
1113              int isnew, int *errorp, hammer_buffer_t *bufferp)
1114 {
1115         hammer_buffer_t buffer;
1116         int32_t xoff = (int32_t)buf_offset & HAMMER_BUFMASK;
1117
1118         buf_offset &= ~HAMMER_BUFMASK64;
1119         KKASSERT(HAMMER_ZONE(buf_offset) != 0);
1120
1121         buffer = *bufferp;
1122         if (buffer == NULL || (buffer->zone2_offset != buf_offset &&
1123                                buffer->zoneX_offset != buf_offset)) {
1124                 if (buffer)
1125                         hammer_rel_buffer(buffer, 0);
1126                 buffer = hammer_get_buffer(hmp, buf_offset, bytes, isnew, errorp);
1127                 *bufferp = buffer;
1128         } else {
1129                 *errorp = 0;
1130         }
1131
1132         /*
1133          * Return a pointer to the buffer data.
1134          */
1135         if (buffer == NULL)
1136                 return(NULL);
1137         else
1138                 return((char *)buffer->ondisk + xoff);
1139 }
1140
1141 void *
1142 hammer_bread(hammer_mount_t hmp, hammer_off_t buf_offset,
1143              int *errorp, hammer_buffer_t *bufferp)
1144 {
1145         return(_hammer_bread(hmp, buf_offset, HAMMER_BUFSIZE, 0, errorp, bufferp));
1146 }
1147
1148 void *
1149 hammer_bread_ext(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
1150                  int *errorp, hammer_buffer_t *bufferp)
1151 {
1152         bytes = HAMMER_BUFSIZE_DOALIGN(bytes);
1153         return(_hammer_bread(hmp, buf_offset, bytes, 0, errorp, bufferp));
1154 }
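
/*
 * Illustrative sketch: the *bufferp cookie lets iterative readers reuse
 * one cached buffer across many calls.  Start with NULL and release
 * whatever remains when done:
 *
 *	hammer_buffer_t buffer = NULL;
 *	void *ondisk;
 *
 *	ondisk = hammer_bread(hmp, buf_offset, &error, &buffer);
 *	... more hammer_bread() calls reusing &buffer ...
 *	if (buffer)
 *		hammer_rel_buffer(buffer, 0);
 */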
1155
1156 /*
1157  * Access the filesystem buffer containing the specified hammer offset.
1158  * No disk read operation occurs.  The result buffer may contain garbage.
1159  *
1160  * Any prior buffer in *bufferp will be released and replaced by the
1161  * requested buffer.
1162  *
1163  * This function marks the buffer dirty but does not increment its
1164  * modify_refs count.
1165  */
1166 void *
1167 hammer_bnew(hammer_mount_t hmp, hammer_off_t buf_offset,
1168              int *errorp, hammer_buffer_t *bufferp)
1169 {
1170         return(_hammer_bread(hmp, buf_offset, HAMMER_BUFSIZE, 1, errorp, bufferp));
1171 }
1172
1173 void *
1174 hammer_bnew_ext(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
1175                 int *errorp, hammer_buffer_t *bufferp)
1176 {
1177         bytes = HAMMER_BUFSIZE_DOALIGN(bytes);
1178         return(_hammer_bread(hmp, buf_offset, bytes, 1, errorp, bufferp));
1179 }
1180
1181 /************************************************************************
1182  *                              NODES                                   *
1183  ************************************************************************
1184  *
1185  * Manage B-Tree nodes.  B-Tree nodes represent the primary indexing
1186  * method used by the HAMMER filesystem.
1187  *
1188  * Unlike other HAMMER structures, a hammer_node can be PASSIVELY
1189  * associated with its buffer, and will only reference the buffer while
1190  * the node itself is referenced.
1191  *
1192  * A hammer_node can also be passively associated with other HAMMER
1193  * structures, such as inodes, while retaining 0 references.  These
1194  * associations can be cleared backwards using a pointer-to-pointer in
1195  * the hammer_node.
1196  *
1197  * This allows the HAMMER implementation to cache hammer_nodes long-term
1198  * and short-cut a great deal of the infrastructure's complexity.  In
1199  * most cases a cached node can be reacquired without having to dip into
1200  * the B-Tree.
1201  */
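
/*
 * Illustrative sketch: B-Tree consumers acquire a node by offset and
 * drop it when done; node->ondisk is only valid while referenced
 * (the error handling shown is an assumption):
 *
 *	node = hammer_get_node(trans, node_offset, 0, &error);
 *	if (node) {
 *		... examine node->ondisk ...
 *		hammer_rel_node(node);
 *	}
 */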
1202 hammer_node_t
1203 hammer_get_node(hammer_transaction_t trans, hammer_off_t node_offset,
1204                 int isnew, int *errorp)
1205 {
1206         hammer_mount_t hmp = trans->hmp;
1207         hammer_node_t node;
1208         int doload;
1209
1210         KKASSERT(hammer_is_zone_btree(node_offset));
1211
1212         /*
1213          * Locate the structure, allocating one if necessary.
1214          */
1215 again:
1216         node = RB_LOOKUP(hammer_nod_rb_tree, &hmp->rb_nods_root, node_offset);
1217         if (node == NULL) {
1218                 ++hammer_count_nodes;
1219                 node = kmalloc(sizeof(*node), hmp->m_misc, M_WAITOK|M_ZERO|M_USE_RESERVE);
1220                 node->node_offset = node_offset;
1221                 node->hmp = hmp;
1222                 TAILQ_INIT(&node->cursor_list);
1223                 TAILQ_INIT(&node->cache_list);
1224                 if (RB_INSERT(hammer_nod_rb_tree, &hmp->rb_nods_root, node)) {
1225                         --hammer_count_nodes;
1226                         kfree(node, hmp->m_misc);
1227                         goto again;
1228                 }
1229                 doload = hammer_ref_interlock_true(&node->lock);
1230         } else {
1231                 doload = hammer_ref_interlock(&node->lock);
1232         }
1233         if (doload) {
1234                 *errorp = hammer_load_node(trans, node, isnew);
1235                 if (*errorp)
1236                         node = NULL;
1237         } else {
1238                 KKASSERT(node->ondisk);
1239                 *errorp = 0;
1240                 hammer_io_advance(&node->buffer->io);
1241         }
1242         return(node);
1243 }
1244
1245 /*
1246  * Reference an already-referenced node.  0->1 transitions should assert
1247  * so we do not have to deal with hammer_ref() setting CHECK.
1248  */
1249 void
1250 hammer_ref_node(hammer_node_t node)
1251 {
1252         KKASSERT(hammer_isactive(&node->lock) && node->ondisk != NULL);
1253         hammer_ref(&node->lock);
1254 }
1255
1256 /*
1257  * Load a node's on-disk data reference.  Called with the node referenced
1258  * and interlocked.
1259  *
1260  * On return the node interlock will be unlocked.  If a non-zero error code
1261  * is returned the node will also be dereferenced (and the caller's pointer
1262  * will be stale).
1263  */
1264 static int
1265 hammer_load_node(hammer_transaction_t trans, hammer_node_t node, int isnew)
1266 {
1267         hammer_buffer_t buffer;
1268         hammer_off_t buf_offset;
1269         int error;
1270
1271         error = 0;
1272         if (node->ondisk == NULL) {
1273                 /*
1274                  * This is a little confusing but the gist is that
1275                  * node->buffer determines whether the node is on
1276                  * the buffer's node_list and node->ondisk determines
1277                  * whether the buffer is referenced.
1278                  *
1279                  * We could be racing a buffer release, in which case
1280                  * node->buffer may become NULL while we are blocked
1281                  * referencing the buffer.
1282                  */
1283                 if ((buffer = node->buffer) != NULL) {
1284                         error = hammer_ref_buffer(buffer);
1285                         if (error == 0 && node->buffer == NULL) {
1286                                 TAILQ_INSERT_TAIL(&buffer->node_list, node, entry);
1287                                 node->buffer = buffer;
1288                         }
1289                 } else {
1290                         buf_offset = node->node_offset & ~HAMMER_BUFMASK64;
1291                         buffer = hammer_get_buffer(node->hmp, buf_offset,
1292                                                    HAMMER_BUFSIZE, 0, &error);
1293                         if (buffer) {
1294                                 KKASSERT(error == 0);
1295                                 TAILQ_INSERT_TAIL(&buffer->node_list, node, entry);
1296                                 node->buffer = buffer;
1297                         }
1298                 }
1299                 if (error)
1300                         goto failed;
1301                 node->ondisk = (void *)((char *)buffer->ondisk +
1302                                         (node->node_offset & HAMMER_BUFMASK));
1303
1304                 /*
1305                  * Check CRC.  NOTE: Neither flag is set and the CRC is not
1306                  * generated on new B-Tree nodes.
1307                  */
1308                 if (isnew == 0 &&
1309                     (node->flags & HAMMER_NODE_CRCANY) == 0) {
1310                         if (hammer_crc_test_btree(node->ondisk) == 0) {
1311                                 hdkprintf("CRC B-TREE NODE @ %016jx/%lu FAILED\n",
1312                                         (intmax_t)node->node_offset,
1313                                         sizeof(*node->ondisk));
1314                                 if (hammer_debug_critical)
1315                                         Debugger("CRC FAILED: B-TREE NODE");
1316                                 node->flags |= HAMMER_NODE_CRCBAD;
1317                         } else {
1318                                 node->flags |= HAMMER_NODE_CRCGOOD;
1319                         }
1320                 }
1321         }
1322         if (node->flags & HAMMER_NODE_CRCBAD) {
1323                 if (trans->flags & HAMMER_TRANSF_CRCDOM)
1324                         error = EDOM;
1325                 else
1326                         error = EIO;
1327         }
1328 failed:
1329         if (error) {
1330                 _hammer_rel_node(node, 1);
1331         } else {
1332                 hammer_ref_interlock_done(&node->lock);
1333         }
1334         return (error);
1335 }
1336
1337 /*
1338  * Safely reference a node, interlock against flushes via the IO subsystem.
1339  */
1340 hammer_node_t
1341 hammer_ref_node_safe(hammer_transaction_t trans, hammer_node_cache_t cache,
1342                      int *errorp)
1343 {
1344         hammer_node_t node;
1345         int doload;
1346
1347         node = cache->node;
1348         if (node != NULL) {
1349                 doload = hammer_ref_interlock(&node->lock);
1350                 if (doload) {
1351                         *errorp = hammer_load_node(trans, node, 0);
1352                         if (*errorp)
1353                                 node = NULL;
1354                 } else {
1355                         KKASSERT(node->ondisk);
1356                         if (node->flags & HAMMER_NODE_CRCBAD) {
1357                                 if (trans->flags & HAMMER_TRANSF_CRCDOM)
1358                                         *errorp = EDOM;
1359                                 else
1360                                         *errorp = EIO;
1361                                 _hammer_rel_node(node, 0);
1362                                 node = NULL;
1363                         } else {
1364                                 *errorp = 0;
1365                         }
1366                 }
1367         } else {
1368                 *errorp = ENOENT;
1369         }
1370         return(node);
1371 }
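
/*
 * Illustrative sketch: a passive cache is installed while a ref is held
 * and later converted back into a real reference; "mycache" is a
 * hypothetical hammer_node_cache embedded in some other structure:
 *
 *	hammer_cache_node(&mycache, node);
 *	...
 *	node = hammer_ref_node_safe(trans, &mycache, &error);
 */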
1372
1373 /*
1374  * Release a hammer_node.  On the last release the node dereferences
1375  * its underlying buffer and may or may not be destroyed.
1376  *
1377  * If locked is non-zero the passed node has been interlocked by the
1378  * caller and we are in the failure/unload path, otherwise it has not and
1379  * we are doing a normal release.
1380  *
1381  * This function will dispose of the interlock and the reference.
1382  * On return the node pointer is stale.
1383  */
1384 void
1385 _hammer_rel_node(hammer_node_t node, int locked)
1386 {
1387         hammer_buffer_t buffer;
1388
1389         /*
1390          * Deref the node.  If this isn't the 1->0 transition we're basically
1391          * done.  If locked is non-zero, hammer_rel_interlock() simply
1392          * drops the ref on the already-interlocked node and returns 1.
1393          * Otherwise it drops the ref and, on the 1->0 transition, acquires
1394          * the interlock and returns 1; on any other transition it returns 0.
1395          */
1396         if (hammer_rel_interlock(&node->lock, locked) == 0)
1397                 return;
1398
1399         /*
1400          * Either locked was non-zero and we are interlocked, or the
1401          * hammer_rel_interlock() call returned non-zero and we are
1402          * interlocked.
1403          *
1404          * The ref-count must still be decremented if locked != 0 so
1405          * the cleanup required still varies a bit.
1406          *
1407          * hammer_flush_node() when called with 1 or 2 will dispose of
1408          * the lock and possible ref-count.
1409          */
1410         if (node->ondisk == NULL) {
1411                 hammer_flush_node(node, locked + 1);
1412                 /* node is stale now */
1413                 return;
1414         }
1415
1416         /*
1417          * Do not disassociate the node from the buffer if it represents
1418          * a modified B-Tree node that still needs its crc to be generated.
1419          */
1420         if (node->flags & HAMMER_NODE_NEEDSCRC) {
1421                 hammer_rel_interlock_done(&node->lock, locked);
1422                 return;
1423         }
1424
1425         /*
1426          * Do final cleanups and then either destroy the node or leave it
1427          * passively cached.  The buffer reference is removed regardless.
1428          */
1429         buffer = node->buffer;
1430         node->ondisk = NULL;
1431
1432         if ((node->flags & HAMMER_NODE_FLUSH) == 0) {
1433                 /*
1434                  * Normal release.
1435                  */
1436                 hammer_rel_interlock_done(&node->lock, locked);
1437         } else {
1438                 /*
1439                  * Destroy the node.
1440                  */
1441                 hammer_flush_node(node, locked + 1);
1442                 /* node is stale */
1443
1444         }
1445         hammer_rel_buffer(buffer, 0);
1446 }
1447
1448 void
1449 hammer_rel_node(hammer_node_t node)
1450 {
1451         _hammer_rel_node(node, 0);
1452 }
1453
1454 /*
1455  * Free the on-media space associated with a B-Tree node.
1456  */
1457 void
1458 hammer_delete_node(hammer_transaction_t trans, hammer_node_t node)
1459 {
1460         KKASSERT((node->flags & HAMMER_NODE_DELETED) == 0);
1461         node->flags |= HAMMER_NODE_DELETED;
1462         hammer_blockmap_free(trans, node->node_offset, sizeof(*node->ondisk));
1463 }
1464
1465 /*
1466  * Passively cache a referenced hammer_node.  The caller may release
1467  * the node on return.
1468  */
1469 void
1470 hammer_cache_node(hammer_node_cache_t cache, hammer_node_t node)
1471 {
1472         /*
1473          * If the node doesn't exist, or is being deleted, don't cache it!
1474          *
1475          * The node can only ever be NULL in the I/O failure path.
1476          */
1477         if (node == NULL || (node->flags & HAMMER_NODE_DELETED))
1478                 return;
1479         if (cache->node == node)
1480                 return;
1481         while (cache->node)
1482                 hammer_uncache_node(cache);
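        /* re-check: the node may have been flagged DELETED while uncaching */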
1483         if (node->flags & HAMMER_NODE_DELETED)
1484                 return;
1485         cache->node = node;
1486         TAILQ_INSERT_TAIL(&node->cache_list, cache, entry);
1487 }
1488
1489 void
1490 hammer_uncache_node(hammer_node_cache_t cache)
1491 {
1492         hammer_node_t node;
1493
1494         if ((node = cache->node) != NULL) {
1495                 TAILQ_REMOVE(&node->cache_list, cache, entry);
1496                 cache->node = NULL;
1497                 if (TAILQ_EMPTY(&node->cache_list))
1498                         hammer_flush_node(node, 0);
1499         }
1500 }
1501
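/*
 * Illustrative sketch (not part of this file): lifecycle of a passive
 * node cache.  A hammer_node_cache is embedded in a longer-lived
 * structure; struct example_parent and the helper names below are
 * hypothetical.
 */
#if 0
struct example_parent {
        struct hammer_node_cache cache;         /* assumed zero-initialized */
};

static void
example_remember_node(struct example_parent *parent, hammer_node_t node)
{
        /*
         * Associate the referenced node with the cache.  The cache is
         * passive - it does not hold a reference - so the caller may
         * release its own reference immediately afterwards.
         */
        hammer_cache_node(&parent->cache, node);
        hammer_rel_node(node);
}

static void
example_forget_node(struct example_parent *parent)
{
        /* Drop the association, possibly flushing an unreferenced node */
        hammer_uncache_node(&parent->cache);
}
#endif
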
1502 /*
1503  * Remove a node's cache references and destroy the node if it has
1504  * neither remaining references nor backing store.
1505  *
1506  * locked == 0  Normal unlocked operation
1507  * locked == 1  Call hammer_rel_interlock_done(..., 0);
1508  * locked == 2  Call hammer_rel_interlock_done(..., 1);
1509  *
1510  * XXX for now this isn't even close to being MPSAFE so the refs check
1511  *     is sufficient.
1512  */
1513 void
1514 hammer_flush_node(hammer_node_t node, int locked)
1515 {
1516         hammer_node_cache_t cache;
1517         hammer_buffer_t buffer;
1518         hammer_mount_t hmp = node->hmp;
1519         int dofree;
1520
1521         while ((cache = TAILQ_FIRST(&node->cache_list)) != NULL) {
1522                 TAILQ_REMOVE(&node->cache_list, cache, entry);
1523                 cache->node = NULL;
1524         }
1525
1526         /*
1527          * NOTE: refs is predisposed if another thread is blocking and
1528          *       will be larger than 0 in that case.  We aren't MPSAFE
1529          *       here.
1530          */
1531         if (node->ondisk == NULL && hammer_norefs(&node->lock)) {
1532                 KKASSERT((node->flags & HAMMER_NODE_NEEDSCRC) == 0);
1533                 RB_REMOVE(hammer_nod_rb_tree, &node->hmp->rb_nods_root, node);
1534                 if ((buffer = node->buffer) != NULL) {
1535                         node->buffer = NULL;
1536                         TAILQ_REMOVE(&buffer->node_list, node, entry);
1537                         /* buffer is unreferenced because ondisk is NULL */
1538                 }
1539                 dofree = 1;
1540         } else {
1541                 dofree = 0;
1542         }
1543
1544         /*
1545          * Deal with the interlock if locked == 1 or locked == 2.
1546          */
1547         if (locked)
1548                 hammer_rel_interlock_done(&node->lock, locked - 1);
1549
1550         /*
1551          * Destroy if requested
1552          */
1553         if (dofree) {
1554                 --hammer_count_nodes;
1555                 kfree(node, hmp->m_misc);
1556         }
1557 }
1558
1559 /*
1560  * Flush passively cached B-Tree nodes associated with this buffer.
1561  * This is only called when the buffer is about to be destroyed, so
1562  * none of the nodes should have any references.  The buffer is locked.
1563  *
1564  * We may be interlocked with the buffer.
1565  */
1566 void
1567 hammer_flush_buffer_nodes(hammer_buffer_t buffer)
1568 {
1569         hammer_node_t node;
1570
1571         while ((node = TAILQ_FIRST(&buffer->node_list)) != NULL) {
1572                 KKASSERT(node->ondisk == NULL);
1573                 KKASSERT((node->flags & HAMMER_NODE_NEEDSCRC) == 0);
1574
1575                 if (hammer_try_interlock_norefs(&node->lock)) {
1576                         hammer_ref(&node->lock);
1577                         node->flags |= HAMMER_NODE_FLUSH;
1578                         _hammer_rel_node(node, 1);
1579                 } else {
1580                         KKASSERT(node->buffer != NULL);
1581                         buffer = node->buffer;
1582                         node->buffer = NULL;
1583                         TAILQ_REMOVE(&buffer->node_list, node, entry);
1584                         /* buffer is unreferenced because ondisk is NULL */
1585                 }
1586         }
1587 }
1588
1589
1590 /************************************************************************
1591  *                              ALLOCATORS                              *
1592  ************************************************************************/
1593
1594 /*
1595  * Allocate a B-Tree node.
1596  */
1597 hammer_node_t
1598 hammer_alloc_btree(hammer_transaction_t trans, hammer_off_t hint, int *errorp)
1599 {
1600         hammer_buffer_t buffer = NULL;
1601         hammer_node_t node = NULL;
1602         hammer_off_t node_offset;
1603
1604         node_offset = hammer_blockmap_alloc(trans, HAMMER_ZONE_BTREE_INDEX,
1605                                             sizeof(struct hammer_node_ondisk),
1606                                             hint, errorp);
1607         if (*errorp == 0) {
1608                 node = hammer_get_node(trans, node_offset, 1, errorp);
1609                 hammer_modify_node_noundo(trans, node);
1610                 bzero(node->ondisk, sizeof(*node->ondisk));
1611                 hammer_modify_node_done(node);
1612         }
1613         if (buffer)
1614                 hammer_rel_buffer(buffer, 0);
1615         return(node);
1616 }
1617
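/*
 * Illustrative sketch (not part of this file): allocating a fresh B-Tree
 * node and backing the allocation out again on a later failure.  The
 * helper name and failure condition are hypothetical; the calls are the
 * ones defined in this file.
 */
#if 0
static int
example_new_btree_node(hammer_transaction_t trans, hammer_off_t hint)
{
        hammer_node_t node;
        int error;

        node = hammer_alloc_btree(trans, hint, &error);
        if (error)
                return(error);
        /* ... fill in node->ondisk under hammer_modify_node*() ... */
        if (0 /* some later failure */) {
                hammer_delete_node(trans, node);  /* free the media space */
        }
        hammer_rel_node(node);
        return(error);
}
#endif
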
1618 /*
1619  * Allocate data.  If the address of a data buffer is supplied then
1620  * any prior non-NULL *data_bufferp will be released and *data_bufferp
1621  * will be set to the related buffer.  The caller must release it when
1622  * finally done.  The initial *data_bufferp should be set to NULL by
1623  * the caller.
1624  *
1625  * The caller is responsible for making hammer_modify*() calls on the
1626  * *data_bufferp.
1627  */
1628 void *
1629 hammer_alloc_data(hammer_transaction_t trans, int32_t data_len,
1630                   uint16_t rec_type, hammer_off_t *data_offsetp,
1631                   hammer_buffer_t *data_bufferp,
1632                   hammer_off_t hint, int *errorp)
1633 {
1634         void *data;
1635         int zone;
1636
1637         /*
1638          * Allocate data directly from blockmap.
1639          */
1640         if (data_len) {
1641                 switch(rec_type) {
1642                 case HAMMER_RECTYPE_INODE:
1643                 case HAMMER_RECTYPE_DIRENTRY:
1644                 case HAMMER_RECTYPE_EXT:
1645                 case HAMMER_RECTYPE_FIX:
1646                 case HAMMER_RECTYPE_PFS:
1647                 case HAMMER_RECTYPE_SNAPSHOT:
1648                 case HAMMER_RECTYPE_CONFIG:
1649                         zone = HAMMER_ZONE_META_INDEX;
1650                         break;
1651                 case HAMMER_RECTYPE_DATA:
1652                 case HAMMER_RECTYPE_DB:
1653                         /*
1654                          * Only mirror-write comes here.
1655                          * Regular allocation path uses blockmap reservation.
1656                          */
1657                         zone = hammer_data_zone_index(data_len);
1658                         if (zone == HAMMER_ZONE_LARGE_DATA_INDEX) {
1659                                 /* round up */
1660                                 data_len = HAMMER_BUFSIZE_DOALIGN(data_len);
1661                         }
1662                         break;
1663                 default:
1664                         hpanic("rec_type %04x unknown", rec_type);
1665                         zone = HAMMER_ZONE_UNAVAIL_INDEX; /* NOT REACHED */
1666                         break;
1667                 }
1668                 *data_offsetp = hammer_blockmap_alloc(trans, zone, data_len,
1669                                                       hint, errorp);
1670         } else {
1671                 *data_offsetp = 0;
1672         }
1673
1674         data = NULL;
1675         if (*errorp == 0 && data_bufferp && data_len)
1676                 data = hammer_bread_ext(trans->hmp, *data_offsetp, data_len,
1677                                         errorp, data_bufferp);
1678         return(data);
1679 }
1680
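/*
 * Illustrative sketch (not part of this file): the *data_bufferp protocol
 * described above.  The buffer pointer must start NULL, is swapped out
 * transparently by successive allocations, and is released exactly once
 * by the caller.  Record type and sizes are arbitrary for the example.
 */
#if 0
static int
example_alloc_records(hammer_transaction_t trans)
{
        hammer_buffer_t data_buffer = NULL;     /* must start NULL */
        hammer_off_t data_offset;
        void *data;
        int error = 0;
        int i;

        for (i = 0; i < 4; ++i) {
                data = hammer_alloc_data(trans, 128, HAMMER_RECTYPE_FIX,
                                         &data_offset, &data_buffer,
                                         0, &error);
                if (error)
                        break;
                /* the caller performs the hammer_modify*() bracketing */
                hammer_modify_buffer(trans, data_buffer, data, 128);
                bzero(data, 128);
                hammer_modify_buffer_done(data_buffer);
        }
        if (data_buffer)
                hammer_rel_buffer(data_buffer, 0);
        return(error);
}
#endif
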
1681 /*
1682  * Sync dirty buffers to the media and clean up any loose ends.
1683  *
1684  * These functions do not start the flusher going; they simply
1685  * queue everything up to the flusher.
1686  */
1687 static int hammer_sync_scan2(struct mount *mp, struct vnode *vp, void *data);
1688
1689 struct hammer_sync_info {
1690         int error;
1691 };
1692
1693 int
1694 hammer_queue_inodes_flusher(hammer_mount_t hmp, int waitfor)
1695 {
1696         struct hammer_sync_info info;
1697
1698         info.error = 0;
1699         if (waitfor == MNT_WAIT) {
1700                 vsyncscan(hmp->mp, VMSC_GETVP | VMSC_ONEPASS,
1701                           hammer_sync_scan2, &info);
1702         } else {
1703                 vsyncscan(hmp->mp, VMSC_GETVP | VMSC_ONEPASS | VMSC_NOWAIT,
1704                           hammer_sync_scan2, &info);
1705         }
1706         return(info.error);
1707 }
1708
1709 /*
1710  * Filesystem sync.  If doing a synchronous sync make a second pass on
1711  * the vnodes in case any were already flushing during the first pass,
1712  * and activate the flusher twice (the second time brings the UNDO FIFO's
1713  * start position up to the end position after the first call).
1714  *
1715  * If doing a lazy sync make just one pass on the vnode list, ignoring
1716  * any new vnodes added to the list while the sync is in progress.
1717  */
1718 int
1719 hammer_sync_hmp(hammer_mount_t hmp, int waitfor)
1720 {
1721         struct hammer_sync_info info;
1722         int flags;
1723
1724         flags = VMSC_GETVP;
1725         if (waitfor & MNT_LAZY)
1726                 flags |= VMSC_ONEPASS;
1727
1728         info.error = 0;
1729         vsyncscan(hmp->mp, flags | VMSC_NOWAIT, hammer_sync_scan2, &info);
1730
1731         if (info.error == 0 && (waitfor & MNT_WAIT)) {
1732                 vsyncscan(hmp->mp, flags, hammer_sync_scan2, &info);
1733         }
1734         if (waitfor == MNT_WAIT) {
1735                 hammer_flusher_sync(hmp);
1736                 hammer_flusher_sync(hmp);
1737         } else {
1738                 hammer_flusher_async(hmp, NULL);
1739                 hammer_flusher_async(hmp, NULL);
1740         }
1741         return(info.error);
1742 }
1743
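/*
 * Illustrative sketch (not part of this file): a VFS-level sync entry
 * point driving hammer_sync_hmp().  The mount-glue cast is hypothetical
 * here; the MNT_WAIT vs MNT_LAZY behavior follows the comment above.
 */
#if 0
static int
example_vfs_sync(struct mount *mp, int waitfor)
{
        hammer_mount_t hmp = (void *)mp->mnt_data;

        /*
         * MNT_WAIT: extra vnode pass plus two synchronous flusher cycles
         *           (the second advances the UNDO FIFO start position).
         * MNT_LAZY: one NOWAIT vnode pass, flusher kicked asynchronously.
         */
        return(hammer_sync_hmp(hmp, waitfor));
}
#endif
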
1744 static int
1745 hammer_sync_scan2(struct mount *mp, struct vnode *vp, void *data)
1746 {
1747         struct hammer_sync_info *info = data;
1748         hammer_inode_t ip;
1749         int error;
1750
1751         ip = VTOI(vp);
1752         if (ip == NULL)
1753                 return(0);
1754         if (vp->v_type == VNON || vp->v_type == VBAD) {
1755                 vclrisdirty(vp);
1756                 return(0);
1757         }
1758         if ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
1759             RB_EMPTY(&vp->v_rbdirty_tree)) {
1760                 vclrisdirty(vp);
1761                 return(0);
1762         }
1763         error = VOP_FSYNC(vp, MNT_NOWAIT, 0);
1764         if (error)
1765                 info->error = error;
1766         return(0);
1767 }