/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_ondisk.c,v 1.76 2008/08/29 20:19:08 dillon Exp $
 */
/*
 * Manage HAMMER's on-disk structures.  These routines are primarily
 * responsible for interfacing with the kernel's I/O subsystem and for
 * managing in-memory structures.
 */

#include <sys/nlookup.h>
#include <sys/buf2.h>

#include "hammer.h"

static void hammer_free_volume(hammer_volume_t volume);
static int hammer_load_volume(hammer_volume_t volume);
static int hammer_load_buffer(hammer_buffer_t buffer, int isnew);
static int hammer_load_node(hammer_transaction_t trans,
                                hammer_node_t node, int isnew);
static void _hammer_rel_node(hammer_node_t node, int locked);

static int
hammer_vol_rb_compare(hammer_volume_t vol1, hammer_volume_t vol2)
{
        if (vol1->vol_no < vol2->vol_no)
                return(-1);
        if (vol1->vol_no > vol2->vol_no)
                return(1);
        return(0);
}

/*
 * hammer_buffer structures are indexed via their zoneX_offset, not
 * their zone2_offset.
 */
static int
hammer_buf_rb_compare(hammer_buffer_t buf1, hammer_buffer_t buf2)
{
        if (buf1->zoneX_offset < buf2->zoneX_offset)
                return(-1);
        if (buf1->zoneX_offset > buf2->zoneX_offset)
                return(1);
        return(0);
}

static int
hammer_nod_rb_compare(hammer_node_t node1, hammer_node_t node2)
{
        if (node1->node_offset < node2->node_offset)
                return(-1);
        if (node1->node_offset > node2->node_offset)
                return(1);
        return(0);
}

RB_GENERATE2(hammer_vol_rb_tree, hammer_volume, rb_node,
             hammer_vol_rb_compare, int32_t, vol_no);
RB_GENERATE2(hammer_buf_rb_tree, hammer_buffer, rb_node,
             hammer_buf_rb_compare, hammer_off_t, zoneX_offset);
RB_GENERATE2(hammer_nod_rb_tree, hammer_node, rb_node,
             hammer_nod_rb_compare, hammer_off_t, node_offset);

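/*
 * The RB_GENERATE2() forms above additionally emit keyed lookup helpers,
 * so structures can be located directly by key via RB_LOOKUP().
 * Illustrative sketch only (the same pattern recurs throughout this
 * file):
 *
 *        volume = RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, vol_no);
 *        buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root, buf_offset);
 */
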
/************************************************************************
 *                              VOLUMES                                 *
 ************************************************************************
 *
 * Load a HAMMER volume by name.  Returns 0 on success or a positive error
 * code on failure.  Volumes must be loaded at mount time or via the
 * hammer volume-add command; hammer_get_volume() will not load a new
 * volume.
 *
 * The passed devvp is vref()'d but not locked.  This function consumes the
 * ref (typically by associating it with the volume structure).
 *
 * Calls made to hammer_load_volume() are single-threaded.
 */
int
hammer_install_volume(hammer_mount_t hmp, const char *volname,
                      struct vnode *devvp, void *data)
{
        struct mount *mp;
        hammer_volume_t volume;
        hammer_volume_ondisk_t ondisk;
        hammer_volume_ondisk_t img;
        struct nlookupdata nd;
        struct buf *bp = NULL;
        int error;
        int ronly;
        int setmp = 0;
        int i;

        mp = hmp->mp;
        ronly = ((mp->mnt_flag & MNT_RDONLY) ? 1 : 0);

        /*
         * Allocate a volume structure
         */
        ++hammer_count_volumes;
        volume = kmalloc(sizeof(*volume), hmp->m_misc, M_WAITOK|M_ZERO);
        volume->vol_name = kstrdup(volname, hmp->m_misc);
        volume->io.hmp = hmp;   /* bootstrap */
        hammer_io_init(&volume->io, volume, HAMMER_IOTYPE_VOLUME);
        volume->io.offset = 0LL;
        volume->io.bytes = HAMMER_BUFSIZE;

        /*
         * Get the device vnode
         */
        if (devvp == NULL) {
                error = nlookup_init(&nd, volume->vol_name, UIO_SYSSPACE, NLC_FOLLOW);
                if (error == 0)
                        error = nlookup(&nd);
                if (error == 0)
                        error = cache_vref(&nd.nl_nch, nd.nl_cred, &volume->devvp);
                nlookup_done(&nd);
        } else {
                error = 0;
                volume->devvp = devvp;
        }

        if (error == 0) {
                if (vn_isdisk(volume->devvp, &error)) {
                        error = vfs_mountedon(volume->devvp);
                }
        }
        if (error == 0 && vcount(volume->devvp) > 0)
                error = EBUSY;
        if (error == 0) {
                vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
                error = vinvalbuf(volume->devvp, V_SAVE, 0, 0);
                if (error == 0) {
                        error = VOP_OPEN(volume->devvp,
                                         (ronly ? FREAD : FREAD|FWRITE),
                                         FSCRED, NULL);
                }
                vn_unlock(volume->devvp);
        }
        if (error) {
                hammer_free_volume(volume);
                return(error);
        }
        volume->devvp->v_rdev->si_mountpoint = mp;
        setmp = 1;

        /*
         * Extract the volume number from the volume header and do various
         * sanity checks.
         */
        error = bread(volume->devvp, 0LL, HAMMER_BUFSIZE, &bp);
        if (error)
                goto late_failure;
        ondisk = (void *)bp->b_data;

        /*
         * Initialize the volume header with data if the data is specified.
         */
        if (ronly == 0 && data) {
                img = (hammer_volume_ondisk_t)data;
                if (ondisk->vol_signature == HAMMER_FSBUF_VOLUME) {
                        hkprintf("Formatting of valid HAMMER volume %s denied. "
                                "Erase with hammer strip or dd!\n", volname);
                        error = EFTYPE;
                        goto late_failure;
                }
                bcopy(img, ondisk, sizeof(*img));
        }

        if (ondisk->vol_signature != HAMMER_FSBUF_VOLUME) {
                hkprintf("volume %s has an invalid header\n", volume->vol_name);
                for (i = 0; i < (int)sizeof(ondisk->vol_signature); i++) {
                        kprintf("%02x", ((char*)&ondisk->vol_signature)[i] & 0xFF);
                        if (i != (int)sizeof(ondisk->vol_signature) - 1)
                                kprintf(" ");
                }
                kprintf("\n");
                error = EFTYPE;
                goto late_failure;
        }
        volume->vol_no = ondisk->vol_no;
        volume->vol_flags = ondisk->vol_flags;
        volume->maxbuf_off = HAMMER_ENCODE_RAW_BUFFER(volume->vol_no,
                                    HAMMER_VOL_BUF_SIZE(ondisk));

        if (RB_EMPTY(&hmp->rb_vols_root)) {
                hmp->fsid = ondisk->vol_fsid;
        } else if (kuuid_compare(&hmp->fsid, &ondisk->vol_fsid)) {
                hkprintf("volume %s's fsid does not match other volumes\n",
                        volume->vol_name);
                error = EFTYPE;
                goto late_failure;
        }

        /*
         * Insert the volume structure into the red-black tree.
         */
        if (RB_INSERT(hammer_vol_rb_tree, &hmp->rb_vols_root, volume)) {
                hkprintf("volume %s has a duplicate vol_no %d\n",
                        volume->vol_name, volume->vol_no);
                error = EEXIST;
        }

        if (error == 0)
                hammer_volume_number_add(hmp, volume);

        /*
         * Set the root volume.  HAMMER special-cases the rootvol structure.
         * We do not hold a ref because this would prevent related I/O
         * from being flushed.
         */
        if (error == 0 && ondisk->vol_rootvol == ondisk->vol_no) {
                if (ondisk->vol_rootvol != HAMMER_ROOT_VOLNO) {
                        hkprintf("volume %s has invalid root vol_no %d\n",
                                volume->vol_name, ondisk->vol_rootvol);
                        error = EINVAL;
                        goto late_failure;
                }
                hmp->rootvol = volume;
                hmp->nvolumes = ondisk->vol_count;
                if (bp) {
                        brelse(bp);
                        bp = NULL;
                }
                hmp->mp->mnt_stat.f_blocks += ondisk->vol0_stat_bigblocks *
                                                HAMMER_BUFFERS_PER_BIGBLOCK;
                hmp->mp->mnt_vstat.f_blocks += ondisk->vol0_stat_bigblocks *
                                                HAMMER_BUFFERS_PER_BIGBLOCK;
        }
late_failure:
        if (bp)
                brelse(bp);
        if (error) {
                /*vinvalbuf(volume->devvp, V_SAVE, 0, 0);*/
                if (setmp)
                        volume->devvp->v_rdev->si_mountpoint = NULL;
                vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
                VOP_CLOSE(volume->devvp, ronly ? FREAD : FREAD|FWRITE, NULL);
                vn_unlock(volume->devvp);
                hammer_free_volume(volume);
        }
        return (error);
}

/*
 * This is called for each volume when updating the mount point from
 * read-write to read-only or vice-versa.
 */
int
hammer_adjust_volume_mode(hammer_volume_t volume, void *data __unused)
{
        if (volume->devvp) {
                vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
                if (volume->io.hmp->ronly) {
                        /* do not call vinvalbuf */
                        VOP_OPEN(volume->devvp, FREAD, FSCRED, NULL);
                        VOP_CLOSE(volume->devvp, FREAD|FWRITE, NULL);
                } else {
                        /* do not call vinvalbuf */
                        VOP_OPEN(volume->devvp, FREAD|FWRITE, FSCRED, NULL);
                        VOP_CLOSE(volume->devvp, FREAD, NULL);
                }
                vn_unlock(volume->devvp);
        }
        return(0);
}

/*
 * Unload and free a HAMMER volume.  Must return >= 0 to continue the scan,
 * so -1 is returned on failure.
 */
int
hammer_unload_volume(hammer_volume_t volume, void *data)
{
        hammer_mount_t hmp = volume->io.hmp;
        struct buf *bp = NULL;
        hammer_volume_ondisk_t img;
        int ronly = ((hmp->mp->mnt_flag & MNT_RDONLY) ? 1 : 0);
        int error;

        /*
         * Clear the volume header with data if the data is specified.
         */
        if (ronly == 0 && data && volume->devvp) {
                img = (hammer_volume_ondisk_t)data;
                error = bread(volume->devvp, 0LL, HAMMER_BUFSIZE, &bp);
                if (error || bp->b_bcount < sizeof(*img)) {
                        hmkprintf(hmp, "Failed to read volume header: %d\n", error);
                        brelse(bp);
                } else {
                        bcopy(img, bp->b_data, sizeof(*img));
                        error = bwrite(bp);
                        if (error)
                                hmkprintf(hmp, "Failed to clear volume header: %d\n",
                                        error);
                }
        }

        /*
         * Clean up the root volume pointer, which is held unlocked in hmp.
         */
        if (hmp->rootvol == volume)
                hmp->rootvol = NULL;

        /*
         * We must not flush a dirty buffer to disk on umount.  It should
         * have already been dealt with by the flusher, or we may be in
         * catastrophic failure.
         */
        hammer_io_clear_modify(&volume->io, 1);
        volume->io.waitdep = 1;

        /*
         * Clean up the persistent ref that ioerror might have on the volume.
         */
        if (volume->io.ioerror)
                hammer_io_clear_error_noassert(&volume->io);

        /*
         * This should release the bp.  Releasing the volume with flush set
         * implies the interlock is set.
         */
        hammer_ref_interlock_true(&volume->io.lock);
        hammer_rel_volume(volume, 1);
        KKASSERT(volume->io.bp == NULL);

        /*
         * There should be no references on the volume.
         */
        KKASSERT(hammer_norefs(&volume->io.lock));

        volume->ondisk = NULL;
        if (volume->devvp) {
                if (volume->devvp->v_rdev &&
                    volume->devvp->v_rdev->si_mountpoint == hmp->mp) {
                        volume->devvp->v_rdev->si_mountpoint = NULL;
                }
                if (ronly) {
                        /*
                         * Make sure we don't sync anything to disk if we
                         * are in read-only mode (1) or critically-errored
                         * (2).  Note that there may be dirty buffers in
                         * normal read-only mode from crash recovery.
                         */
                        vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
                        vinvalbuf(volume->devvp, 0, 0, 0);
                        VOP_CLOSE(volume->devvp, FREAD, NULL);
                        vn_unlock(volume->devvp);
                } else {
                        /*
                         * Normal termination, save any dirty buffers
                         * (XXX there really shouldn't be any).
                         */
                        vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
                        vinvalbuf(volume->devvp, V_SAVE, 0, 0);
                        VOP_CLOSE(volume->devvp, FREAD|FWRITE, NULL);
                        vn_unlock(volume->devvp);
                }
        }

        /*
         * Destroy the structure
         */
        RB_REMOVE(hammer_vol_rb_tree, &hmp->rb_vols_root, volume);
        hammer_volume_number_del(hmp, volume);
        hammer_free_volume(volume);
        return(0);
}

static
void
hammer_free_volume(hammer_volume_t volume)
{
        hammer_mount_t hmp = volume->io.hmp;

        if (volume->vol_name) {
                kfree(volume->vol_name, hmp->m_misc);
                volume->vol_name = NULL;
        }
        if (volume->devvp) {
                vrele(volume->devvp);
                volume->devvp = NULL;
        }
        --hammer_count_volumes;
        kfree(volume, hmp->m_misc);
}

/*
 * Get a HAMMER volume.  The volume must already exist.
 */
hammer_volume_t
hammer_get_volume(hammer_mount_t hmp, int32_t vol_no, int *errorp)
{
        hammer_volume_t volume;

        /*
         * Locate the volume structure
         */
        volume = RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, vol_no);
        if (volume == NULL) {
                *errorp = ENOENT;
                return(NULL);
        }

        /*
         * Reference the volume, load/check the data on the 0->1 transition.
         * hammer_load_volume() will dispose of the interlock on return,
         * and also clean up the ref count on error.
         */
        if (hammer_ref_interlock(&volume->io.lock)) {
                *errorp = hammer_load_volume(volume);
                if (*errorp)
                        volume = NULL;
        } else {
                KKASSERT(volume->ondisk);
                *errorp = 0;
        }
        return(volume);
}

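/*
 * Typical caller pattern for hammer_get_volume() (illustrative sketch
 * only, not compiled in): the returned volume is referenced and must be
 * released with hammer_rel_volume().
 *
 *        hammer_volume_t volume;
 *        int error;
 *
 *        volume = hammer_get_volume(hmp, vol_no, &error);
 *        if (volume) {
 *                ... access volume->ondisk ...
 *                hammer_rel_volume(volume, 0);
 *        }
 */
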
int
hammer_ref_volume(hammer_volume_t volume)
{
        int error;

        /*
         * Reference the volume and deal with the check condition used to
         * load its ondisk info.
         */
        if (hammer_ref_interlock(&volume->io.lock)) {
                error = hammer_load_volume(volume);
        } else {
                KKASSERT(volume->ondisk);
                error = 0;
        }
        return (error);
}

/*
 * May be called without fs_token
 */
hammer_volume_t
hammer_get_root_volume(hammer_mount_t hmp, int *errorp)
{
        hammer_volume_t volume;

        volume = hmp->rootvol;
        KKASSERT(volume != NULL);

        /*
         * Reference the volume and deal with the check condition used to
         * load its ondisk info.
         */
        if (hammer_ref_interlock(&volume->io.lock)) {
                lwkt_gettoken(&volume->io.hmp->fs_token);
                *errorp = hammer_load_volume(volume);
                lwkt_reltoken(&volume->io.hmp->fs_token);
                if (*errorp)
                        volume = NULL;
        } else {
                KKASSERT(volume->ondisk);
                *errorp = 0;
        }
        return (volume);
}

/*
 * Load a volume's on-disk information.  The volume must be referenced and
 * the interlock is held on call.  The interlock will be released on return.
 * The reference will also be released on return if an error occurs.
 */
static int
hammer_load_volume(hammer_volume_t volume)
{
        int error;

        if (volume->ondisk == NULL) {
                error = hammer_io_read(volume->devvp, &volume->io,
                                       HAMMER_BUFSIZE);
                if (error == 0) {
                        volume->ondisk = (void *)volume->io.bp->b_data;
                        hammer_ref_interlock_done(&volume->io.lock);
                } else {
                        hammer_rel_volume(volume, 1);
                }
        } else {
                error = 0;
        }
        return(error);
}

/*
 * Release a previously acquired reference on the volume.
 *
 * Volumes are not unloaded from memory during normal operation.
 *
 * May be called without fs_token
 */
void
hammer_rel_volume(hammer_volume_t volume, int locked)
{
        struct buf *bp;

        if (hammer_rel_interlock(&volume->io.lock, locked)) {
                lwkt_gettoken(&volume->io.hmp->fs_token);
                volume->ondisk = NULL;
                bp = hammer_io_release(&volume->io, locked);
                lwkt_reltoken(&volume->io.hmp->fs_token);
                hammer_rel_interlock_done(&volume->io.lock, locked);
                if (bp)
                        brelse(bp);
        }
}

int
hammer_mountcheck_volumes(hammer_mount_t hmp)
{
        hammer_volume_t vol;
        int i;

        HAMMER_VOLUME_NUMBER_FOREACH(hmp, i) {
                vol = RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, i);
                if (vol == NULL)
                        return(EINVAL);
        }
        return(0);
}

int
hammer_get_installed_volumes(hammer_mount_t hmp)
{
        int i, ret = 0;

        HAMMER_VOLUME_NUMBER_FOREACH(hmp, i)
                ret++;
        return(ret);
}

/************************************************************************
 *                              BUFFERS                                 *
 ************************************************************************
 *
 * Manage buffers.  Currently most blockmap-backed zones are direct-mapped
 * to zone-2 buffer offsets, without a translation stage.  However, the
 * hammer_buffer structure is indexed by its zoneX_offset, not its
 * zone2_offset.
 *
 * The proper zone must be maintained throughout the code-base all the way
 * through to the big-block allocator, or routines like hammer_del_buffers()
 * will not be able to locate all potentially conflicting buffers.
 */

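/*
 * Illustrative note: a blockmap-backed (zone-X) offset is translated to
 * its zone-2 raw-buffer equivalent before I/O is issued, e.g. (sketch):
 *
 *        zone2_offset = hammer_blockmap_lookup(hmp, buf_offset, &error);
 *
 * while the hammer_buffer itself remains indexed by the original
 * zoneX_offset (see hammer_get_buffer() below).
 */
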
/*
 * Helper function returning whether a zone offset can be directly
 * translated to a raw buffer index or not.  Really only the volume and
 * undo zones can't be directly translated.  Volumes are special-cased
 * and undo zones shouldn't be accessed via aliases in read-only mode.
 *
 * This function is ONLY used to detect aliased zones during a read-only
 * mount.
 */
static __inline int
hammer_direct_zone(hammer_off_t buf_offset)
{
        return(hammer_is_zone_direct_xlated(buf_offset));
}

hammer_buffer_t
hammer_get_buffer(hammer_mount_t hmp, hammer_off_t buf_offset,
                  int bytes, int isnew, int *errorp)
{
        hammer_buffer_t buffer;
        hammer_volume_t volume;
        hammer_off_t    zone2_offset;
        int vol_no;
        int zone;

        buf_offset &= ~HAMMER_BUFMASK64;
again:
        /*
         * Shortcut if the buffer is already cached
         */
        buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root, buf_offset);
        if (buffer) {
                /*
                 * Once refed the ondisk field will not be cleared by
                 * any other action.  Shortcut the operation if the
                 * ondisk structure is valid.
                 */
found_aliased:
                if (hammer_ref_interlock(&buffer->io.lock) == 0) {
                        hammer_io_advance(&buffer->io);
                        KKASSERT(buffer->ondisk);
                        *errorp = 0;
                        return(buffer);
                }

                /*
                 * 0->1 transition or deferred 0->1 transition (CHECK),
                 * interlock now held.  Shortcut if ondisk is already
                 * assigned.
                 */
                atomic_add_int(&hammer_count_refedbufs, 1);
                if (buffer->ondisk) {
                        hammer_io_advance(&buffer->io);
                        hammer_ref_interlock_done(&buffer->io.lock);
                        *errorp = 0;
                        return(buffer);
                }

                /*
                 * The buffer is no longer loose if it has a ref, and
                 * cannot become loose once it gains a ref.  Loose
                 * buffers will never be in a modified state.  This should
                 * only occur on the 0->1 transition of refs.
                 *
                 * lose_root can be modified via a biodone() interrupt
                 * so the io_token must be held.
                 */
                if (buffer->io.mod_root == &hmp->lose_root) {
                        lwkt_gettoken(&hmp->io_token);
                        if (buffer->io.mod_root == &hmp->lose_root) {
                                RB_REMOVE(hammer_mod_rb_tree,
                                          buffer->io.mod_root, &buffer->io);
                                buffer->io.mod_root = NULL;
                                KKASSERT(buffer->io.modified == 0);
                        }
                        lwkt_reltoken(&hmp->io_token);
                }
                goto found;
        } else if (hmp->ronly && hammer_direct_zone(buf_offset)) {
                /*
                 * If this is a read-only mount there could be an alias
                 * in the raw-zone.  If there is we use that buffer instead.
                 *
                 * rw mounts will not have aliases.  Also note when going
                 * from ro -> rw the recovered raw buffers are flushed and
                 * reclaimed, so again there will not be any aliases once
                 * the mount is rw.
                 */
                buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root,
                                   hammer_xlate_to_zone2(buf_offset));
                if (buffer) {
                        if (hammer_debug_general & 0x0001) {
                                hkrateprintf(&hmp->kdiag,
                                            "recovered aliased %016jx\n",
                                            (intmax_t)buf_offset);
                        }
                        goto found_aliased;
                }
        }

        /*
         * Handle blockmap offset translations
         */
        zone = HAMMER_ZONE_DECODE(buf_offset);
        if (hammer_is_index_record(zone)) {
                zone2_offset = hammer_blockmap_lookup(hmp, buf_offset, errorp);
        } else if (zone == HAMMER_ZONE_UNDO_INDEX) {
                zone2_offset = hammer_undo_lookup(hmp, buf_offset, errorp);
        } else {
                /* Must be zone-2 (not 1 or 4 or 15) */
                KKASSERT(zone == HAMMER_ZONE_RAW_BUFFER_INDEX);
                zone2_offset = buf_offset;
                *errorp = 0;
        }
        if (*errorp)
                return(NULL);

        /*
         * NOTE: zone2_offset and maxbuf_off are both full zone-2 offset
         * specifications.
         */
        KKASSERT(hammer_is_zone_raw_buffer(zone2_offset));
        vol_no = HAMMER_VOL_DECODE(zone2_offset);
        volume = hammer_get_volume(hmp, vol_no, errorp);
        if (volume == NULL)
                return(NULL);

        KKASSERT(zone2_offset < volume->maxbuf_off);

        /*
         * Allocate a new buffer structure.  We will check for races later.
         */
        ++hammer_count_buffers;
        buffer = kmalloc(sizeof(*buffer), hmp->m_misc,
                         M_WAITOK|M_ZERO|M_USE_RESERVE);
        buffer->zone2_offset = zone2_offset;
        buffer->zoneX_offset = buf_offset;

        hammer_io_init(&buffer->io, volume, hammer_zone_to_iotype(zone));
        buffer->io.offset = hammer_xlate_to_phys(volume->ondisk, zone2_offset);
        buffer->io.bytes = bytes;
        TAILQ_INIT(&buffer->node_list);
        hammer_ref_interlock_true(&buffer->io.lock);

        /*
         * Insert the buffer into the RB tree and handle late collisions.
         */
        if (RB_INSERT(hammer_buf_rb_tree, &hmp->rb_bufs_root, buffer)) {
                hammer_rel_volume(volume, 0);
                buffer->io.volume = NULL;                       /* safety */
                if (hammer_rel_interlock(&buffer->io.lock, 1))  /* safety */
                        hammer_rel_interlock_done(&buffer->io.lock, 1);
                --hammer_count_buffers;
                kfree(buffer, hmp->m_misc);
                goto again;
        }
        atomic_add_int(&hammer_count_refedbufs, 1);
found:

        /*
         * The buffer is referenced and interlocked.  Load the buffer
         * if necessary.  hammer_load_buffer() deals with the interlock
         * and, if an error is returned, also deals with the ref.
         */
        if (buffer->ondisk == NULL) {
                *errorp = hammer_load_buffer(buffer, isnew);
                if (*errorp)
                        buffer = NULL;
        } else {
                hammer_io_advance(&buffer->io);
                hammer_ref_interlock_done(&buffer->io.lock);
                *errorp = 0;
        }
        return(buffer);
}

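/*
 * Typical caller pattern for hammer_get_buffer()/hammer_rel_buffer()
 * (illustrative sketch only; most callers use the hammer_bread*()
 * wrappers further below instead):
 *
 *        hammer_buffer_t buffer;
 *        int error;
 *
 *        buffer = hammer_get_buffer(hmp, buf_offset, HAMMER_BUFSIZE,
 *                                   0, &error);
 *        if (buffer) {
 *                ... access buffer->ondisk ...
 *                hammer_rel_buffer(buffer, 0);
 *        }
 */
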
/*
 * This is used by the direct-read code to deal with large-data buffers
 * created by the reblocker and mirror-write code.  The direct-read code
 * bypasses the HAMMER buffer subsystem and so any aliased dirty or write-
 * running hammer buffers must be fully synced to disk before we can issue
 * the direct-read.
 *
 * This code path is not considered critical as only the reblocker and
 * mirror-write code will create large-data buffers via the HAMMER buffer
 * subsystem.  They do that because they operate at the B-Tree level and
 * do not access the vnode/inode structures.
 */
void
hammer_sync_buffers(hammer_mount_t hmp, hammer_off_t base_offset, int bytes)
{
        hammer_buffer_t buffer;
        int error;

        KKASSERT(hammer_is_zone_large_data(base_offset));

        while (bytes > 0) {
                buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root,
                                   base_offset);
                if (buffer && (buffer->io.modified || buffer->io.running)) {
                        error = hammer_ref_buffer(buffer);
                        if (error == 0) {
                                hammer_io_wait(&buffer->io);
                                if (buffer->io.modified) {
                                        hammer_io_write_interlock(&buffer->io);
                                        hammer_io_flush(&buffer->io, 0);
                                        hammer_io_done_interlock(&buffer->io);
                                        hammer_io_wait(&buffer->io);
                                }
                                hammer_rel_buffer(buffer, 0);
                        }
                }
                base_offset += HAMMER_BUFSIZE;
                bytes -= HAMMER_BUFSIZE;
        }
}

/*
 * Destroy all buffers covering the specified zoneX offset range.  This
 * is called when the related blockmap layer2 entry is freed or when
 * a direct write bypasses our buffer/buffer-cache subsystem.
 *
 * The buffers may be referenced by the caller itself.  Setting reclaim
 * will cause the buffer to be destroyed when its ref count reaches zero.
 *
 * Returns 0 on success, EAGAIN if some buffers could not be destroyed due
 * to additional references held by other threads, or some other (typically
 * fatal) error.
 */
int
hammer_del_buffers(hammer_mount_t hmp, hammer_off_t base_offset,
                   hammer_off_t zone2_offset, int bytes,
                   int report_conflicts)
{
        hammer_buffer_t buffer;
        hammer_volume_t volume;
        int vol_no;
        int error;
        int ret_error;

        vol_no = HAMMER_VOL_DECODE(zone2_offset);
        volume = hammer_get_volume(hmp, vol_no, &ret_error);
        KKASSERT(ret_error == 0);

        while (bytes > 0) {
                buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root,
                                   base_offset);
                if (buffer) {
                        error = hammer_ref_buffer(buffer);
                        if (hammer_debug_general & 0x20000) {
                                hkprintf("delbufr %016jx rerr=%d 1ref=%d\n",
                                        (intmax_t)buffer->zoneX_offset,
                                        error,
                                        hammer_oneref(&buffer->io.lock));
                        }
                        if (error == 0 && !hammer_oneref(&buffer->io.lock)) {
                                error = EAGAIN;
                                hammer_rel_buffer(buffer, 0);
                        }
                        if (error == 0) {
                                KKASSERT(buffer->zone2_offset == zone2_offset);
                                hammer_io_clear_modify(&buffer->io, 1);
                                buffer->io.reclaim = 1;
                                buffer->io.waitdep = 1;
                                KKASSERT(buffer->io.volume == volume);
                                hammer_rel_buffer(buffer, 0);
                        }
                } else {
                        error = hammer_io_inval(volume, zone2_offset);
                }
                if (error) {
                        ret_error = error;
                        if (report_conflicts ||
                            (hammer_debug_general & 0x8000)) {
                                krateprintf(&hmp->kdiag,
                                        "hammer_del_buffers: unable to "
                                        "invalidate %016jx buffer=%p "
                                        "rep=%d lkrefs=%08x\n",
                                        (intmax_t)base_offset,
                                        buffer, report_conflicts,
                                        (buffer ? buffer->io.lock.refs : -1));
                        }
                }
                base_offset += HAMMER_BUFSIZE;
                zone2_offset += HAMMER_BUFSIZE;
                bytes -= HAMMER_BUFSIZE;
        }
        hammer_rel_volume(volume, 0);
        return (ret_error);
}

/*
 * Given a referenced and interlocked buffer load/validate the data.
 *
 * The buffer interlock will be released on return.  If an error is
 * returned the buffer reference will also be released (and the buffer
 * pointer will thus be stale).
 */
static int
hammer_load_buffer(hammer_buffer_t buffer, int isnew)
{
        hammer_volume_t volume;
        int error;

        /*
         * Load the buffer's on-disk info
         */
        volume = buffer->io.volume;

        if (hammer_debug_io & 0x0004) {
                hdkprintf("load_buffer %016jx %016jx isnew=%d od=%p\n",
                        (intmax_t)buffer->zoneX_offset,
                        (intmax_t)buffer->zone2_offset,
                        isnew, buffer->ondisk);
        }

        if (buffer->ondisk == NULL) {
                /*
                 * Issue the read or generate a new buffer.  When reading
                 * the limit argument controls any read-ahead clustering
                 * hammer_io_read() is allowed to do.
                 *
                 * We cannot read-ahead in the large-data zone and we cannot
                 * cross a big-block boundary as the next big-block might
                 * use a different buffer size.
                 */
                if (isnew) {
                        error = hammer_io_new(volume->devvp, &buffer->io);
                } else if (hammer_is_zone_large_data(buffer->zoneX_offset)) {
                        error = hammer_io_read(volume->devvp, &buffer->io,
                                               buffer->io.bytes);
                } else {
                        hammer_off_t limit;

                        limit = HAMMER_BIGBLOCK_DOALIGN(buffer->zone2_offset);
                        limit -= buffer->zone2_offset;
                        error = hammer_io_read(volume->devvp, &buffer->io,
                                               limit);
                }
                if (error == 0)
                        buffer->ondisk = (void *)buffer->io.bp->b_data;
        } else if (isnew) {
                error = hammer_io_new(volume->devvp, &buffer->io);
        } else {
                error = 0;
        }
        if (error == 0) {
                hammer_io_advance(&buffer->io);
                hammer_ref_interlock_done(&buffer->io.lock);
        } else {
                hammer_rel_buffer(buffer, 1);
        }
        return (error);
}

/*
 * NOTE: Called from RB_SCAN, must return >= 0 for scan to continue.
 * This routine is only called during unmount or when a volume is
 * removed.
 *
 * If data != NULL, it specifies a volume whose buffers should
 * be unloaded.
 */
int
hammer_unload_buffer(hammer_buffer_t buffer, void *data)
{
        hammer_volume_t volume = (hammer_volume_t)data;

        /*
         * If volume != NULL we are only interested in unloading buffers
         * associated with a particular volume.
         */
        if (volume != NULL && volume != buffer->io.volume)
                return 0;

        /*
         * Clean up the persistent ref that ioerror might have on the buffer
         * and acquire a ref.  Expect a 0->1 transition.
         */
        if (buffer->io.ioerror) {
                hammer_io_clear_error_noassert(&buffer->io);
                atomic_add_int(&hammer_count_refedbufs, -1);
        }
        hammer_ref_interlock_true(&buffer->io.lock);
        atomic_add_int(&hammer_count_refedbufs, 1);

        /*
         * We must not flush a dirty buffer to disk on umount.  It should
         * have already been dealt with by the flusher, or we may be in
         * catastrophic failure.
         *
         * We must set waitdep to ensure that a running buffer is waited
         * on and released prior to us trying to unload the volume.
         */
        hammer_io_clear_modify(&buffer->io, 1);
        hammer_flush_buffer_nodes(buffer);
        buffer->io.waitdep = 1;
        hammer_rel_buffer(buffer, 1);
        return(0);
}

/*
 * Reference a buffer that is either already referenced or reached via
 * a specially handled pointer (aka cursor->buffer).
 */
int
hammer_ref_buffer(hammer_buffer_t buffer)
{
        hammer_mount_t hmp;
        int error;
        int locked;

        /*
         * Acquire a ref, plus the buffer will be interlocked on the
         * 0->1 transition.
         */
        locked = hammer_ref_interlock(&buffer->io.lock);
        hmp = buffer->io.hmp;

        /*
         * At this point a biodone() will not touch the buffer other than
         * incidental bits.  However, lose_root can be modified via
         * a biodone() interrupt.
         *
         * No longer loose.  lose_root requires the io_token.
         */
        if (buffer->io.mod_root == &hmp->lose_root) {
                lwkt_gettoken(&hmp->io_token);
                if (buffer->io.mod_root == &hmp->lose_root) {
                        RB_REMOVE(hammer_mod_rb_tree,
                                  buffer->io.mod_root, &buffer->io);
                        buffer->io.mod_root = NULL;
                }
                lwkt_reltoken(&hmp->io_token);
        }

        if (locked) {
                atomic_add_int(&hammer_count_refedbufs, 1);
                error = hammer_load_buffer(buffer, 0);
                /* NOTE: on error the buffer pointer is stale */
        } else {
                error = 0;
        }
        return(error);
}

/*
 * Release a reference on the buffer.  On the 1->0 transition the
 * underlying IO will be released but the data reference is left
 * cached.
 *
 * Only destroy the structure itself if the related buffer cache buffer
 * was disassociated from it.  This ties the management of the structure
 * to the buffer cache subsystem.  buffer->ondisk determines whether the
 * embedded io is referenced or not.
 */
void
hammer_rel_buffer(hammer_buffer_t buffer, int locked)
{
        hammer_volume_t volume;
        hammer_mount_t hmp;
        struct buf *bp = NULL;
        int freeme = 0;

        hmp = buffer->io.hmp;

        if (hammer_rel_interlock(&buffer->io.lock, locked) == 0)
                return;

        /*
         * hammer_count_refedbufs accounting.  Decrement if we are in
         * the error path or if CHECK is clear.
         *
         * If we are not in the error path and CHECK is set the caller
         * probably just did a hammer_ref() and didn't account for it,
         * so we don't account for the loss here.
         */
        if (locked || (buffer->io.lock.refs & HAMMER_REFS_CHECK) == 0)
                atomic_add_int(&hammer_count_refedbufs, -1);

        /*
         * If the caller locked us, or the normal release transitioned
         * from 1->0 (and acquired the lock), attempt to release the
         * io.  If the caller locked us we tell hammer_io_release()
         * to flush (which would be the unload or failure path).
         */
        bp = hammer_io_release(&buffer->io, locked);

        /*
         * If the buffer has no bp association and no refs we can destroy
         * it.
         *
         * NOTE: It is impossible for any associated B-Tree nodes to have
         * refs if the buffer has no additional refs.
         */
        if (buffer->io.bp == NULL && hammer_norefs(&buffer->io.lock)) {
                RB_REMOVE(hammer_buf_rb_tree,
                          &buffer->io.hmp->rb_bufs_root,
                          buffer);
                volume = buffer->io.volume;
                buffer->io.volume = NULL; /* sanity */
                hammer_rel_volume(volume, 0);
                hammer_io_clear_modlist(&buffer->io);
                hammer_flush_buffer_nodes(buffer);
                KKASSERT(TAILQ_EMPTY(&buffer->node_list));
                freeme = 1;
        }

        /*
         * Cleanup
         */
        hammer_rel_interlock_done(&buffer->io.lock, locked);
        if (bp)
                brelse(bp);
        if (freeme) {
                --hammer_count_buffers;
                kfree(buffer, hmp->m_misc);
        }
}

/*
 * Access the filesystem buffer containing the specified hammer offset.
 * buf_offset is a conglomeration of the volume number and vol_buf_beg
 * relative buffer offset.  It must also have bit 55 set to be valid.
 * (see hammer_off_t in hammer_disk.h).
 *
 * Any prior buffer in *bufferp will be released and replaced by the
 * requested buffer.
 *
 * NOTE: The buffer is indexed via its zoneX_offset but we allow the
 * passed cached *bufferp to match against either zoneX or zone2.
 */
static __inline
void *
_hammer_bread(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
             int isnew, int *errorp, hammer_buffer_t *bufferp)
{
        hammer_buffer_t buffer;
        int32_t xoff = (int32_t)buf_offset & HAMMER_BUFMASK;

        buf_offset &= ~HAMMER_BUFMASK64;
        KKASSERT(HAMMER_ZONE(buf_offset) != 0);

        buffer = *bufferp;
        if (buffer == NULL || (buffer->zone2_offset != buf_offset &&
                               buffer->zoneX_offset != buf_offset)) {
                if (buffer)
                        hammer_rel_buffer(buffer, 0);
                buffer = hammer_get_buffer(hmp, buf_offset, bytes, isnew, errorp);
                *bufferp = buffer;
        } else {
                *errorp = 0;
        }

        /*
         * Return a pointer to the buffer data.
         */
        if (buffer == NULL)
                return(NULL);
        else
                return((char *)buffer->ondisk + xoff);
}

void *
hammer_bread(hammer_mount_t hmp, hammer_off_t buf_offset,
             int *errorp, hammer_buffer_t *bufferp)
{
        return(_hammer_bread(hmp, buf_offset, HAMMER_BUFSIZE, 0, errorp, bufferp));
}

void *
hammer_bread_ext(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
                 int *errorp, hammer_buffer_t *bufferp)
{
        bytes = HAMMER_BUFSIZE_DOALIGN(bytes);
        return(_hammer_bread(hmp, buf_offset, bytes, 0, errorp, bufferp));
}

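/*
 * Typical access pattern for the hammer_bread*() wrappers (illustrative
 * sketch only): callers keep a cached buffer pointer across consecutive
 * reads and release it when finished.
 *
 *        hammer_buffer_t buffer = NULL;
 *        void *ondisk;
 *        int error;
 *
 *        ondisk = hammer_bread(hmp, buf_offset, &error, &buffer);
 *        if (error == 0) {
 *                ... read-only access via ondisk ...
 *        }
 *        if (buffer)
 *                hammer_rel_buffer(buffer, 0);
 */
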
/*
 * Access the filesystem buffer containing the specified hammer offset.
 * No disk read operation occurs.  The result buffer may contain garbage.
 *
 * Any prior buffer in *bufferp will be released and replaced by the
 * requested buffer.
 *
 * This function marks the buffer dirty but does not increment its
 * modify_refs count.
 */
void *
hammer_bnew(hammer_mount_t hmp, hammer_off_t buf_offset,
             int *errorp, hammer_buffer_t *bufferp)
{
        return(_hammer_bread(hmp, buf_offset, HAMMER_BUFSIZE, 1, errorp, bufferp));
}

void *
hammer_bnew_ext(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
                int *errorp, hammer_buffer_t *bufferp)
{
        bytes = HAMMER_BUFSIZE_DOALIGN(bytes);
        return(_hammer_bread(hmp, buf_offset, bytes, 1, errorp, bufferp));
}

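/*
 * Illustrative use of hammer_bnew() (sketch only): the caller intends to
 * overwrite the entire buffer, so no disk read is issued and the caller
 * must fully initialize the returned memory.
 *
 *        ondisk = hammer_bnew(hmp, buf_offset, &error, &buffer);
 *        if (error == 0)
 *                bzero(ondisk, HAMMER_BUFSIZE);
 */
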
/************************************************************************
 *                              NODES                                   *
 ************************************************************************
 *
 * Manage B-Tree nodes.  B-Tree nodes represent the primary indexing
 * method used by the HAMMER filesystem.
 *
 * Unlike other HAMMER structures, a hammer_node can be PASSIVELY
 * associated with its buffer, and will only reference the buffer while
 * the node itself is referenced.
 *
 * A hammer_node can also be passively associated with other HAMMER
 * structures, such as inodes, while retaining 0 references.  These
 * associations can be cleared backwards using a pointer-to-pointer in
 * the hammer_node.
 *
 * This allows the HAMMER implementation to cache hammer_nodes long-term
 * and short-cut a great deal of the infrastructure's complexity.  In
 * most cases a cached node can be reacquired without having to dip into
 * the B-Tree.
 */
hammer_node_t
hammer_get_node(hammer_transaction_t trans, hammer_off_t node_offset,
                int isnew, int *errorp)
{
        hammer_mount_t hmp = trans->hmp;
        hammer_node_t node;
        int doload;

        KKASSERT(hammer_is_zone_btree(node_offset));

        /*
         * Locate the structure, allocating one if necessary.
         */
again:
        node = RB_LOOKUP(hammer_nod_rb_tree, &hmp->rb_nods_root, node_offset);
        if (node == NULL) {
                ++hammer_count_nodes;
                node = kmalloc(sizeof(*node), hmp->m_misc, M_WAITOK|M_ZERO|M_USE_RESERVE);
                node->node_offset = node_offset;
                node->hmp = hmp;
                TAILQ_INIT(&node->cursor_list);
                TAILQ_INIT(&node->cache_list);
                if (RB_INSERT(hammer_nod_rb_tree, &hmp->rb_nods_root, node)) {
                        --hammer_count_nodes;
                        kfree(node, hmp->m_misc);
                        goto again;
                }
                doload = hammer_ref_interlock_true(&node->lock);
        } else {
                doload = hammer_ref_interlock(&node->lock);
        }
        if (doload) {
                *errorp = hammer_load_node(trans, node, isnew);
                if (*errorp)
                        node = NULL;
        } else {
                KKASSERT(node->ondisk);
                *errorp = 0;
                hammer_io_advance(&node->buffer->io);
        }
        return(node);
}

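/*
 * Typical caller pattern for hammer_get_node()/hammer_rel_node()
 * (illustrative sketch only, not compiled in):
 *
 *        hammer_node_t node;
 *        int error;
 *
 *        node = hammer_get_node(trans, node_offset, 0, &error);
 *        if (node) {
 *                ... examine node->ondisk ...
 *                hammer_rel_node(node);
 *        }
 */
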
/*
 * Reference an already-referenced node.  0->1 transitions should assert
 * so we do not have to deal with hammer_ref() setting CHECK.
 */
void
hammer_ref_node(hammer_node_t node)
{
        KKASSERT(hammer_isactive(&node->lock) && node->ondisk != NULL);
        hammer_ref(&node->lock);
}

/*
 * Load a node's on-disk data reference.  Called with the node referenced
 * and interlocked.
 *
 * On return the node interlock will be unlocked.  If a non-zero error code
 * is returned the node will also be dereferenced (and the caller's pointer
 * will be stale).
 */
static int
hammer_load_node(hammer_transaction_t trans, hammer_node_t node, int isnew)
{
        hammer_buffer_t buffer;
        hammer_off_t buf_offset;
        hammer_mount_t hmp = trans->hmp;
        int error;

        error = 0;
        if (node->ondisk == NULL) {
                /*
                 * This is a little confusing but the gist is that
                 * node->buffer determines whether the node is on
                 * the buffer's node_list and node->ondisk determines
                 * whether the buffer is referenced.
                 *
                 * We could be racing a buffer release, in which case
                 * node->buffer may become NULL while we are blocked
                 * referencing the buffer.
                 */
1288                 if ((buffer = node->buffer) != NULL) {
1289                         error = hammer_ref_buffer(buffer);
1290                         if (error == 0 && node->buffer == NULL) {
1291                                 TAILQ_INSERT_TAIL(&buffer->node_list, node, entry);
1292                                 node->buffer = buffer;
1293                         }
1294                 } else {
1295                         buf_offset = node->node_offset & ~HAMMER_BUFMASK64;
1296                         buffer = hammer_get_buffer(node->hmp, buf_offset,
1297                                                    HAMMER_BUFSIZE, 0, &error);
1298                         if (buffer) {
1299                                 KKASSERT(error == 0);
1300                                 TAILQ_INSERT_TAIL(&buffer->node_list, node, entry);
1301                                 node->buffer = buffer;
1302                         }
1303                 }
1304                 if (error)
1305                         goto failed;
1306                 node->ondisk = (void *)((char *)buffer->ondisk +
1307                                         (node->node_offset & HAMMER_BUFMASK));
1308
1309                 /*
1310                  * Check CRC.  NOTE: Neither flag is set and the CRC is not
1311                  * generated on new B-Tree nodes.
1312                  */
1313                 if (isnew == 0 &&
1314                     (node->flags & HAMMER_NODE_CRCANY) == 0) {
1315                         if (hammer_crc_test_btree(hmp->version, node->ondisk) == 0) {
1316                                 hdkprintf("CRC B-TREE NODE @ %016jx/%lu FAILED\n",
1317                                         (intmax_t)node->node_offset,
1318                                         sizeof(*node->ondisk));
1319                                 if (hammer_debug_critical)
1320                                         Debugger("CRC FAILED: B-TREE NODE");
1321                                 node->flags |= HAMMER_NODE_CRCBAD;
1322                         } else {
1323                                 node->flags |= HAMMER_NODE_CRCGOOD;
1324                         }
1325                 }
1326         }
1327         if (node->flags & HAMMER_NODE_CRCBAD) {
1328                 if (trans->flags & HAMMER_TRANSF_CRCDOM)
1329                         error = EDOM;
1330                 else
1331                         error = EIO;
1332         }
1333 failed:
1334         if (error) {
1335                 _hammer_rel_node(node, 1);
1336         } else {
1337                 hammer_ref_interlock_done(&node->lock);
1338         }
1339         return (error);
1340 }
1341
1342 /*
1343  * Safely reference a node, interlock against flushes via the IO subsystem.
1344  */
1345 hammer_node_t
1346 hammer_ref_node_safe(hammer_transaction_t trans, hammer_node_cache_t cache,
1347                      int *errorp)
1348 {
1349         hammer_node_t node;
1350         int doload;
1351
1352         node = cache->node;
1353         if (node != NULL) {
1354                 doload = hammer_ref_interlock(&node->lock);
1355                 if (doload) {
1356                         *errorp = hammer_load_node(trans, node, 0);
1357                         if (*errorp)
1358                                 node = NULL;
1359                 } else {
1360                         KKASSERT(node->ondisk);
1361                         if (node->flags & HAMMER_NODE_CRCBAD) {
1362                                 if (trans->flags & HAMMER_TRANSF_CRCDOM)
1363                                         *errorp = EDOM;
1364                                 else
1365                                         *errorp = EIO;
1366                                 _hammer_rel_node(node, 0);
1367                                 node = NULL;
1368                         } else {
1369                                 *errorp = 0;
1370                         }
1371                 }
1372         } else {
1373                 *errorp = ENOENT;
1374         }
1375         return(node);
1376 }
1377
1378 /*
1379  * Release a hammer_node.  On the last release the node dereferences
1380  * its underlying buffer and may or may not be destroyed.
1381  *
1382  * If locked is non-zero the passed node has been interlocked by the
1383  * caller and we are in the failure/unload path, otherwise it has not and
1384  * we are doing a normal release.
1385  *
1386  * This function will dispose of the interlock and the reference.
1387  * On return the node pointer is stale.
1388  */
1389 void
1390 _hammer_rel_node(hammer_node_t node, int locked)
1391 {
1392         hammer_buffer_t buffer;
1393
1394         /*
1395          * Deref the node.  If this isn't the 1->0 transition we're basically
1396          * done.  If locked is non-zero the caller already holds the
1397          * interlock, so hammer_rel_interlock() just derefs and returns 1.
1398          * Otherwise it derefs and either acquires the interlock and
1399          * returns 1 on the 1->0 transition, or returns 0 without it.
1400          */
1401         if (hammer_rel_interlock(&node->lock, locked) == 0)
1402                 return;
1403
1404         /*
1405          * Either way we are now interlocked: locked was non-zero and the
1406          * caller held the interlock, or the hammer_rel_interlock() call
1407          * returned non-zero and acquired it for us.
1408          *
1409          * The ref-count must still be decremented if locked != 0, so the
1410          * cleanup required still varies a bit.
1411          *
1412          * hammer_flush_node(), when called with 1 or 2, will dispose of
1413          * the interlock and the possible ref-count.
1414          */
1415         if (node->ondisk == NULL) {
1416                 hammer_flush_node(node, locked + 1);
1417                 /* node is stale now */
1418                 return;
1419         }
1420
1421         /*
1422          * Do not disassociate the node from the buffer if it represents
1423          * a modified B-Tree node that still needs its CRC to be generated.
1424          */
1425         if (node->flags & HAMMER_NODE_NEEDSCRC) {
1426                 hammer_rel_interlock_done(&node->lock, locked);
1427                 return;
1428         }
1429
1430         /*
1431          * Do final cleanups and then either destroy the node or leave it
1432          * passively cached.  The buffer reference is removed regardless.
1433          */
1434         buffer = node->buffer;
1435         node->ondisk = NULL;
1436
1437         if ((node->flags & HAMMER_NODE_FLUSH) == 0) {
1438                 /*
1439                  * Normal release.
1440                  */
1441                 hammer_rel_interlock_done(&node->lock, locked);
1442         } else {
1443                 /*
1444                  * Destroy the node.
1445                  */
1446                 hammer_flush_node(node, locked + 1);
1447                 /* node is stale */
1448
1449         }
1450         hammer_rel_buffer(buffer, 0);
1451 }
1452
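/*
 * Normal, non-interlocked release of a node reference.
 */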
1453 void
1454 hammer_rel_node(hammer_node_t node)
1455 {
1456         _hammer_rel_node(node, 0);
1457 }
1458
1459 /*
1460  * Free space on-media associated with a B-Tree node.
1461  */
1462 void
1463 hammer_delete_node(hammer_transaction_t trans, hammer_node_t node)
1464 {
1465         KKASSERT((node->flags & HAMMER_NODE_DELETED) == 0);
1466         node->flags |= HAMMER_NODE_DELETED;
1467         hammer_blockmap_free(trans, node->node_offset, sizeof(*node->ondisk));
1468 }
1469
1470 /*
1471  * Passively cache a referenced hammer_node.  The caller may release
1472  * the node on return.
1473  */
1474 void
1475 hammer_cache_node(hammer_node_cache_t cache, hammer_node_t node)
1476 {
1477         /*
1478          * If the node doesn't exist, or is being deleted, don't cache it!
1479          *
1480          * The node can only ever be NULL in the I/O failure path.
1481          */
1482         if (node == NULL || (node->flags & HAMMER_NODE_DELETED))
1483                 return;
1484         if (cache->node == node)
1485                 return;
1486         while (cache->node)
1487                 hammer_uncache_node(cache);
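        /*
         * Re-test HAMMER_NODE_DELETED: the uncache loop above may block
         * (an assumption; this code is not MPSAFE), so the node could
         * have been deleted in the interim.
         */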
1488         if (node->flags & HAMMER_NODE_DELETED)
1489                 return;
1490         cache->node = node;
1491         TAILQ_INSERT_TAIL(&node->cache_list, cache, entry);
1492 }
1493
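/*
 * Drop a passive cache reference.  If this was the last cache entry
 * and the node has no other references or backing store,
 * hammer_flush_node() destroys it.
 */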
1494 void
1495 hammer_uncache_node(hammer_node_cache_t cache)
1496 {
1497         hammer_node_t node;
1498
1499         if ((node = cache->node) != NULL) {
1500                 TAILQ_REMOVE(&node->cache_list, cache, entry);
1501                 cache->node = NULL;
1502                 if (TAILQ_EMPTY(&node->cache_list))
1503                         hammer_flush_node(node, 0);
1504         }
1505 }
1506
1507 /*
1508  * Remove a node's cache references and destroy the node if it has no
1509  * other references or backing store.
1510  *
1511  * locked == 0  Normal unlocked operation
1512  * locked == 1  Call hammer_rel_interlock_done(..., 0);
1513  * locked == 2  Call hammer_rel_interlock_done(..., 1);
1514  *
1515  * XXX for now this isn't even close to being MPSAFE, so the refs check
1516  *     is sufficient.
1517  */
1518 void
1519 hammer_flush_node(hammer_node_t node, int locked)
1520 {
1521         hammer_node_cache_t cache;
1522         hammer_buffer_t buffer;
1523         hammer_mount_t hmp = node->hmp;
1524         int dofree;
1525
1526         while ((cache = TAILQ_FIRST(&node->cache_list)) != NULL) {
1527                 TAILQ_REMOVE(&node->cache_list, cache, entry);
1528                 cache->node = NULL;
1529         }
1530
1531         /*
1532          * NOTE: refs is predisposed if another thread is blocking and
1533          *       will be larger than 0 in that case.  We aren't MPSAFE
1534          *       here.
1535          */
1536         if (node->ondisk == NULL && hammer_norefs(&node->lock)) {
1537                 KKASSERT((node->flags & HAMMER_NODE_NEEDSCRC) == 0);
1538                 RB_REMOVE(hammer_nod_rb_tree, &node->hmp->rb_nods_root, node);
1539                 if ((buffer = node->buffer) != NULL) {
1540                         node->buffer = NULL;
1541                         TAILQ_REMOVE(&buffer->node_list, node, entry);
1542                         /* buffer is unreferenced because ondisk is NULL */
1543                 }
1544                 dofree = 1;
1545         } else {
1546                 dofree = 0;
1547         }
1548
1549         /*
1550          * Deal with the interlock if locked == 1 or locked == 2.
1551          */
1552         if (locked)
1553                 hammer_rel_interlock_done(&node->lock, locked - 1);
1554
1555         /*
1556          * Destroy if requested
1557          */
1558         if (dofree) {
1559                 --hammer_count_nodes;
1560                 kfree(node, hmp->m_misc);
1561         }
1562 }
1563
1564 /*
1565  * Flush passively cached B-Tree nodes associated with this buffer.
1566  * This is only called when the buffer is about to be destroyed, so
1567  * none of the nodes should have any references.  The buffer is locked.
1568  *
1569  * We may be interlocked with the buffer.
1570  */
1571 void
1572 hammer_flush_buffer_nodes(hammer_buffer_t buffer)
1573 {
1574         hammer_node_t node;
1575
1576         while ((node = TAILQ_FIRST(&buffer->node_list)) != NULL) {
1577                 KKASSERT(node->ondisk == NULL);
1578                 KKASSERT((node->flags & HAMMER_NODE_NEEDSCRC) == 0);
1579
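                /*
                 * If the node can be interlocked with no refs present,
                 * take a ref and run the normal destruction path with
                 * HAMMER_NODE_FLUSH set.  Otherwise just disassociate
                 * the node from the buffer; it is destroyed when its
                 * last reference goes away.
                 */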
1580                 if (hammer_try_interlock_norefs(&node->lock)) {
1581                         hammer_ref(&node->lock);
1582                         node->flags |= HAMMER_NODE_FLUSH;
1583                         _hammer_rel_node(node, 1);
1584                 } else {
1585                         KKASSERT(node->buffer != NULL);
1586                         buffer = node->buffer;
1587                         node->buffer = NULL;
1588                         TAILQ_REMOVE(&buffer->node_list, node, entry);
1589                         /* buffer is unreferenced because ondisk is NULL */
1590                 }
1591         }
1592 }
1593
1594
1595 /************************************************************************
1596  *                              ALLOCATORS                              *
1597  ************************************************************************/
1598
1599 /*
1600  * Allocate a B-Tree node.
1601  */
1602 hammer_node_t
1603 hammer_alloc_btree(hammer_transaction_t trans, hammer_off_t hint, int *errorp)
1604 {
1605         hammer_buffer_t buffer = NULL;
1606         hammer_node_t node = NULL;
1607         hammer_off_t node_offset;
1608
1609         node_offset = hammer_blockmap_alloc(trans, HAMMER_ZONE_BTREE_INDEX,
1610                                             sizeof(struct hammer_node_ondisk),
1611                                             hint, errorp);
1612         if (*errorp == 0) {
1613                 node = hammer_get_node(trans, node_offset, 1, errorp);
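                /*
                 * Zero the new node's ondisk structure.  A freshly
                 * allocated node has no prior contents to roll back,
                 * so the no-undo modify path is used.
                 */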
1614                 hammer_modify_node_noundo(trans, node);
1615                 bzero(node->ondisk, sizeof(*node->ondisk));
1616                 hammer_modify_node_done(node);
1617         }
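        /*
         * NOTE: buffer is never assigned in this function, so the
         * conditional release below is currently a no-op.
         */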
1618         if (buffer)
1619                 hammer_rel_buffer(buffer, 0);
1620         return(node);
1621 }
1622
1623 /*
1624  * Allocate data.  If the address of a data buffer is supplied then
1625  * any prior non-NULL *data_bufferp will be released and *data_bufferp
1626  * will be set to the related buffer.  The caller must release it when
1627  * finally done.  The initial *data_bufferp should be set to NULL by
1628  * the caller.
1629  *
1630  * The caller is responsible for making hammer_modify*() calls on the
1631  * *data_bufferp.
1632  */
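/*
 * Typical caller pattern (a sketch; names are illustrative and error
 * handling is omitted):
 *
 *	hammer_buffer_t data_buffer = NULL;
 *	hammer_off_t data_offset;
 *	int error;
 *	void *data;
 *
 *	data = hammer_alloc_data(trans, data_len, HAMMER_RECTYPE_DATA,
 *				 &data_offset, &data_buffer, hint, &error);
 *	...
 *	if (data_buffer)
 *		hammer_rel_buffer(data_buffer, 0);
 */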
1633 void *
1634 hammer_alloc_data(hammer_transaction_t trans, int32_t data_len,
1635                   uint16_t rec_type, hammer_off_t *data_offsetp,
1636                   hammer_buffer_t *data_bufferp,
1637                   hammer_off_t hint, int *errorp)
1638 {
1639         void *data;
1640         int zone;
1641
1642         /*
1643          * Allocate data directly from blockmap.
1644          */
1645         if (data_len) {
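                /*
                 * Select the blockmap zone by record type: meta-data
                 * record types are allocated from the META zone, while
                 * user data and database records go to a data zone
                 * sized by hammer_data_zone_index().
                 */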
1646                 switch(rec_type) {
1647                 case HAMMER_RECTYPE_INODE:
1648                 case HAMMER_RECTYPE_DIRENTRY:
1649                 case HAMMER_RECTYPE_EXT:
1650                 case HAMMER_RECTYPE_FIX:
1651                 case HAMMER_RECTYPE_PFS:
1652                 case HAMMER_RECTYPE_SNAPSHOT:
1653                 case HAMMER_RECTYPE_CONFIG:
1654                         zone = HAMMER_ZONE_META_INDEX;
1655                         break;
1656                 case HAMMER_RECTYPE_DATA:
1657                 case HAMMER_RECTYPE_DB:
1658                         /*
1659                          * Only mirror-write comes here.
1660                          * Regular allocation path uses blockmap reservation.
1661                          */
1662                         zone = hammer_data_zone_index(data_len);
1663                         if (zone == HAMMER_ZONE_LARGE_DATA_INDEX) {
1664                                 /* round up */
1665                                 data_len = HAMMER_BUFSIZE_DOALIGN(data_len);
1666                         }
1667                         break;
1668                 default:
1669                         hpanic("rec_type %04x unknown", rec_type);
1670                         zone = HAMMER_ZONE_UNAVAIL_INDEX; /* NOT REACHED */
1671                         break;
1672                 }
1673                 *data_offsetp = hammer_blockmap_alloc(trans, zone, data_len,
1674                                                       hint, errorp);
1675         } else {
1676                 *data_offsetp = 0;
1677         }
1678
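        /*
         * If the caller supplied a buffer pointer, read the underlying
         * buffer and hand back a pointer to the allocated space.
         */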
1679         data = NULL;
1680         if (*errorp == 0 && data_bufferp && data_len)
1681                 data = hammer_bread_ext(trans->hmp, *data_offsetp, data_len,
1682                                         errorp, data_bufferp);
1683         return(data);
1684 }
1685
1686 /*
1687  * Sync dirty buffers to the media and clean up any loose ends.
1688  *
1689  * These functions do not start the flusher; they simply queue
1690  * everything up to it.
1691  */
1692 static int hammer_sync_scan2(struct mount *mp, struct vnode *vp, void *data);
1693
1694 struct hammer_sync_info {
1695         int error;
1696 };
1697
1698 int
1699 hammer_queue_inodes_flusher(hammer_mount_t hmp, int waitfor)
1700 {
1701         struct hammer_sync_info info;
1702
1703         info.error = 0;
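        /*
         * MNT_WAIT requests a blocking scan; anything else scans with
         * VMSC_NOWAIT.  Both cases make a single pass (VMSC_ONEPASS).
         */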
1704         if (waitfor == MNT_WAIT) {
1705                 vsyncscan(hmp->mp, VMSC_GETVP | VMSC_ONEPASS,
1706                           hammer_sync_scan2, &info);
1707         } else {
1708                 vsyncscan(hmp->mp, VMSC_GETVP | VMSC_ONEPASS | VMSC_NOWAIT,
1709                           hammer_sync_scan2, &info);
1710         }
1711         return(info.error);
1712 }
1713
1714 /*
1715  * Filesystem sync.  If doing a synchronous sync, make a second pass on
1716  * the vnodes in case any were already flushing during the first pass,
1717  * and activate the flusher twice (the second time brings the UNDO FIFO's
1718  * start position up to the end position after the first call).
1719  *
1720  * If doing a lazy sync, make just one pass over the vnode list, ignoring
1721  * any new vnodes added to the list while the sync is in progress.
1722  */
1723 int
1724 hammer_sync_hmp(hammer_mount_t hmp, int waitfor)
1725 {
1726         struct hammer_sync_info info;
1727         int flags;
1728
1729         flags = VMSC_GETVP;
1730         if (waitfor & MNT_LAZY)
1731                 flags |= VMSC_ONEPASS;
1732
1733         info.error = 0;
1734         vsyncscan(hmp->mp, flags | VMSC_NOWAIT, hammer_sync_scan2, &info);
1735
1736         if (info.error == 0 && (waitfor & MNT_WAIT)) {
1737                 vsyncscan(hmp->mp, flags, hammer_sync_scan2, &info);
1738         }
1739         if (waitfor == MNT_WAIT) {
1740                 hammer_flusher_sync(hmp);
1741                 hammer_flusher_sync(hmp);
1742         } else {
1743                 hammer_flusher_async(hmp, NULL);
1744                 hammer_flusher_async(hmp, NULL);
1745         }
1746         return(info.error);
1747 }
1748
1749 static int
1750 hammer_sync_scan2(struct mount *mp, struct vnode *vp, void *data)
1751 {
1752         struct hammer_sync_info *info = data;
1753         hammer_inode_t ip;
1754         int error;
1755
1756         ip = VTOI(vp);
1757         if (ip == NULL)
1758                 return(0);
1759         if (vp->v_type == VNON || vp->v_type == VBAD) {
1760                 vclrisdirty(vp);
1761                 return(0);
1762         }
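        /*
         * A clean inode with no dirty buffer-cache buffers has nothing
         * to sync; just clear the vnode's dirty state.
         */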
1763         if ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
1764             RB_EMPTY(&vp->v_rbdirty_tree)) {
1765                 vclrisdirty(vp);
1766                 return(0);
1767         }
1768         error = VOP_FSYNC(vp, MNT_NOWAIT, 0);
1769         if (error)
1770                 info->error = error;
1771         return(0);
1772 }