HAMMER VFS: Add missing hammer_rel_volume() call in deadlock/retry case.
sys/vfs/hammer/hammer_ondisk.c
/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_ondisk.c,v 1.76 2008/08/29 20:19:08 dillon Exp $
 */
/*
 * Manage HAMMER's on-disk structures.  These routines are primarily
 * responsible for interfacing with the kernel's I/O subsystem and for
 * managing in-memory structures.
 */

#include "hammer.h"
#include <sys/fcntl.h>
#include <sys/nlookup.h>
#include <sys/buf.h>
#include <sys/buf2.h>

static void hammer_free_volume(hammer_volume_t volume);
static int hammer_load_volume(hammer_volume_t volume);
static int hammer_load_buffer(hammer_buffer_t buffer, int isnew);
static int hammer_load_node(hammer_node_t node, int isnew);

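/*
 * RB-tree support: volumes are indexed by volume number.
 */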
static int
hammer_vol_rb_compare(hammer_volume_t vol1, hammer_volume_t vol2)
{
        if (vol1->vol_no < vol2->vol_no)
                return(-1);
        if (vol1->vol_no > vol2->vol_no)
                return(1);
        return(0);
}

/*
 * hammer_buffer structures are indexed via their zoneX_offset, not
 * their zone2_offset.
 */
static int
hammer_buf_rb_compare(hammer_buffer_t buf1, hammer_buffer_t buf2)
{
        if (buf1->zoneX_offset < buf2->zoneX_offset)
                return(-1);
        if (buf1->zoneX_offset > buf2->zoneX_offset)
                return(1);
        return(0);
}

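/*
 * hammer_node structures are indexed by their B-Tree node offset.
 */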
static int
hammer_nod_rb_compare(hammer_node_t node1, hammer_node_t node2)
{
        if (node1->node_offset < node2->node_offset)
                return(-1);
        if (node1->node_offset > node2->node_offset)
                return(1);
        return(0);
}

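/*
 * RB_GENERATE2() emits the tree support functions, including the
 * RB_LOOKUP()-by-key variants used throughout this file (lookup by
 * vol_no, zoneX_offset and node_offset respectively).
 */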
RB_GENERATE2(hammer_vol_rb_tree, hammer_volume, rb_node,
             hammer_vol_rb_compare, int32_t, vol_no);
RB_GENERATE2(hammer_buf_rb_tree, hammer_buffer, rb_node,
             hammer_buf_rb_compare, hammer_off_t, zoneX_offset);
RB_GENERATE2(hammer_nod_rb_tree, hammer_node, rb_node,
             hammer_nod_rb_compare, hammer_off_t, node_offset);

/************************************************************************
 *                              VOLUMES                                 *
 ************************************************************************
 *
 * Load a HAMMER volume by name.  Returns 0 on success or a positive error
 * code on failure.  Volumes must be loaded at mount time; get_volume()
 * will not load a new volume.
 *
 * Calls made to hammer_load_volume() are single-threaded.
 */
int
hammer_install_volume(struct hammer_mount *hmp, const char *volname,
                      struct vnode *devvp)
{
        struct mount *mp;
        hammer_volume_t volume;
        struct hammer_volume_ondisk *ondisk;
        struct nlookupdata nd;
        struct buf *bp = NULL;
        int error;
        int ronly;
        int setmp = 0;

        mp = hmp->mp;
        ronly = ((mp->mnt_flag & MNT_RDONLY) ? 1 : 0);

        /*
         * Allocate a volume structure
         */
        ++hammer_count_volumes;
        volume = kmalloc(sizeof(*volume), hmp->m_misc, M_WAITOK|M_ZERO);
        volume->vol_name = kstrdup(volname, hmp->m_misc);
        volume->io.hmp = hmp;   /* bootstrap */
        hammer_io_init(&volume->io, volume, HAMMER_STRUCTURE_VOLUME);
        volume->io.offset = 0LL;
        volume->io.bytes = HAMMER_BUFSIZE;

        /*
         * Get the device vnode
         */
        if (devvp == NULL) {
                error = nlookup_init(&nd, volume->vol_name, UIO_SYSSPACE, NLC_FOLLOW);
                if (error == 0)
                        error = nlookup(&nd);
                if (error == 0)
                        error = cache_vref(&nd.nl_nch, nd.nl_cred, &volume->devvp);
                nlookup_done(&nd);
        } else {
                error = 0;
                volume->devvp = devvp;
        }

        if (error == 0) {
                if (vn_isdisk(volume->devvp, &error)) {
                        error = vfs_mountedon(volume->devvp);
                }
        }
        if (error == 0 &&
            count_udev(volume->devvp->v_umajor, volume->devvp->v_uminor) > 0) {
                error = EBUSY;
        }
        if (error == 0) {
                vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
                error = vinvalbuf(volume->devvp, V_SAVE, 0, 0);
                if (error == 0) {
                        error = VOP_OPEN(volume->devvp,
                                         (ronly ? FREAD : FREAD|FWRITE),
                                         FSCRED, NULL);
                }
                vn_unlock(volume->devvp);
        }
        if (error) {
                hammer_free_volume(volume);
                return(error);
        }
        volume->devvp->v_rdev->si_mountpoint = mp;
        setmp = 1;

        /*
         * Extract the volume number from the volume header and do various
         * sanity checks.
         */
        error = bread(volume->devvp, 0LL, HAMMER_BUFSIZE, &bp);
        if (error)
                goto late_failure;
        ondisk = (void *)bp->b_data;
        if (ondisk->vol_signature != HAMMER_FSBUF_VOLUME) {
                kprintf("hammer_mount: volume %s has an invalid header\n",
                        volume->vol_name);
                error = EFTYPE;
                goto late_failure;
        }
        volume->vol_no = ondisk->vol_no;
        volume->buffer_base = ondisk->vol_buf_beg;
        volume->vol_flags = ondisk->vol_flags;
        volume->nblocks = ondisk->vol_nblocks;
        volume->maxbuf_off = HAMMER_ENCODE_RAW_BUFFER(volume->vol_no,
                                    ondisk->vol_buf_end - ondisk->vol_buf_beg);
        volume->maxraw_off = ondisk->vol_buf_end;

        if (RB_EMPTY(&hmp->rb_vols_root)) {
                hmp->fsid = ondisk->vol_fsid;
        } else if (bcmp(&hmp->fsid, &ondisk->vol_fsid, sizeof(uuid_t))) {
                kprintf("hammer_mount: volume %s's fsid does not match "
                        "other volumes\n", volume->vol_name);
                error = EFTYPE;
                goto late_failure;
        }

        /*
         * Insert the volume structure into the red-black tree.
         */
        if (RB_INSERT(hammer_vol_rb_tree, &hmp->rb_vols_root, volume)) {
                kprintf("hammer_mount: volume %s has a duplicate vol_no %d\n",
                        volume->vol_name, volume->vol_no);
                error = EEXIST;
        }

        /*
         * Set the root volume.  HAMMER special-cases the root volume's
         * structure.  We do not hold a ref because this would prevent
         * related I/O from being flushed.
         */
        if (error == 0 && ondisk->vol_rootvol == ondisk->vol_no) {
                hmp->rootvol = volume;
                hmp->nvolumes = ondisk->vol_count;
                if (bp) {
                        brelse(bp);
                        bp = NULL;
                }
                hmp->mp->mnt_stat.f_blocks += ondisk->vol0_stat_bigblocks *
                        (HAMMER_LARGEBLOCK_SIZE / HAMMER_BUFSIZE);
                hmp->mp->mnt_vstat.f_blocks += ondisk->vol0_stat_bigblocks *
                        (HAMMER_LARGEBLOCK_SIZE / HAMMER_BUFSIZE);
        }
late_failure:
        if (bp)
                brelse(bp);
        if (error) {
                /*vinvalbuf(volume->devvp, V_SAVE, 0, 0);*/
                if (setmp)
                        volume->devvp->v_rdev->si_mountpoint = NULL;
                VOP_CLOSE(volume->devvp, ronly ? FREAD : FREAD|FWRITE);
                hammer_free_volume(volume);
        }
        return (error);
}

/*
 * This is called for each volume when updating the mount point from
 * read-write to read-only or vice-versa.
 */
int
hammer_adjust_volume_mode(hammer_volume_t volume, void *data __unused)
{
        if (volume->devvp) {
                vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
                if (volume->io.hmp->ronly) {
                        /* do not call vinvalbuf */
                        VOP_OPEN(volume->devvp, FREAD, FSCRED, NULL);
                        VOP_CLOSE(volume->devvp, FREAD|FWRITE);
                } else {
                        /* do not call vinvalbuf */
                        VOP_OPEN(volume->devvp, FREAD|FWRITE, FSCRED, NULL);
                        VOP_CLOSE(volume->devvp, FREAD);
                }
                vn_unlock(volume->devvp);
        }
        return(0);
}

/*
 * Unload and free a HAMMER volume.  Must return >= 0 to continue the
 * scan; -1 is returned only on failure, aborting the scan.
 */
int
hammer_unload_volume(hammer_volume_t volume, void *data __unused)
{
        hammer_mount_t hmp = volume->io.hmp;
        int ronly = ((hmp->mp->mnt_flag & MNT_RDONLY) ? 1 : 0);
        struct buf *bp;

        /*
         * Clean up the root volume pointer, which is held unlocked in hmp.
         */
        if (hmp->rootvol == volume)
                hmp->rootvol = NULL;

        /*
         * We must not flush a dirty buffer to disk on umount.  It should
         * have already been dealt with by the flusher, or we may be in
         * catastrophic failure.
         */
        hammer_io_clear_modify(&volume->io, 1);
        volume->io.waitdep = 1;
        bp = hammer_io_release(&volume->io, 1);

        /*
         * Clean up the persistent ref ioerror might have on the volume
         */
        if (volume->io.ioerror) {
                volume->io.ioerror = 0;
                hammer_unref(&volume->io.lock);
        }

        /*
         * There should be no references on the volume, no clusters, and
         * no super-clusters.
         */
        KKASSERT(volume->io.lock.refs == 0);
        if (bp)
                brelse(bp);

        volume->ondisk = NULL;
        if (volume->devvp) {
                if (volume->devvp->v_rdev &&
                    volume->devvp->v_rdev->si_mountpoint == hmp->mp
                ) {
                        volume->devvp->v_rdev->si_mountpoint = NULL;
                }
                if (ronly) {
                        /*
                         * Make sure we don't sync anything to disk if we
                         * are in read-only mode (1) or critically-errored
                         * (2).  Note that there may be dirty buffers in
                         * normal read-only mode from crash recovery.
                         */
                        vinvalbuf(volume->devvp, 0, 0, 0);
                        VOP_CLOSE(volume->devvp, FREAD);
                } else {
                        /*
                         * Normal termination, save any dirty buffers
                         * (XXX there really shouldn't be any).
                         */
                        vinvalbuf(volume->devvp, V_SAVE, 0, 0);
                        VOP_CLOSE(volume->devvp, FREAD|FWRITE);
                }
        }

        /*
         * Destroy the structure
         */
        RB_REMOVE(hammer_vol_rb_tree, &hmp->rb_vols_root, volume);
        hammer_free_volume(volume);
        return(0);
}

static
void
hammer_free_volume(hammer_volume_t volume)
{
        hammer_mount_t hmp = volume->io.hmp;

        if (volume->vol_name) {
                kfree(volume->vol_name, hmp->m_misc);
                volume->vol_name = NULL;
        }
        if (volume->devvp) {
                vrele(volume->devvp);
                volume->devvp = NULL;
        }
        --hammer_count_volumes;
        kfree(volume, hmp->m_misc);
}

/*
 * Get a HAMMER volume.  The volume must already exist.
 */
hammer_volume_t
hammer_get_volume(struct hammer_mount *hmp, int32_t vol_no, int *errorp)
{
        struct hammer_volume *volume;

        /*
         * Locate the volume structure
         */
        volume = RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, vol_no);
        if (volume == NULL) {
                *errorp = ENOENT;
                return(NULL);
        }
        hammer_ref(&volume->io.lock);

        /*
         * Deal with on-disk info
         */
        if (volume->ondisk == NULL || volume->io.loading) {
                *errorp = hammer_load_volume(volume);
                if (*errorp) {
                        hammer_rel_volume(volume, 1);
                        volume = NULL;
                }
        } else {
                *errorp = 0;
        }
        return(volume);
}

int
hammer_ref_volume(hammer_volume_t volume)
{
        int error;

        hammer_ref(&volume->io.lock);

        /*
         * Deal with on-disk info
         */
        if (volume->ondisk == NULL || volume->io.loading) {
                error = hammer_load_volume(volume);
                if (error)
                        hammer_rel_volume(volume, 1);
        } else {
                error = 0;
        }
        return (error);
}

hammer_volume_t
hammer_get_root_volume(struct hammer_mount *hmp, int *errorp)
{
        hammer_volume_t volume;

        volume = hmp->rootvol;
        KKASSERT(volume != NULL);
        hammer_ref(&volume->io.lock);

        /*
         * Deal with on-disk info
         */
        if (volume->ondisk == NULL || volume->io.loading) {
                *errorp = hammer_load_volume(volume);
                if (*errorp) {
                        hammer_rel_volume(volume, 1);
                        volume = NULL;
                }
        } else {
                *errorp = 0;
        }
        return (volume);
}

/*
 * Load a volume's on-disk information.  The volume must be referenced and
 * not locked.  We temporarily acquire an exclusive lock to interlock
 * against releases or multiple get's.
 */
static int
hammer_load_volume(hammer_volume_t volume)
{
        int error;

        ++volume->io.loading;
        hammer_lock_ex(&volume->io.lock);

        if (volume->ondisk == NULL) {
                error = hammer_io_read(volume->devvp, &volume->io,
                                       volume->maxraw_off);
                if (error == 0)
                        volume->ondisk = (void *)volume->io.bp->b_data;
        } else {
                error = 0;
        }
        --volume->io.loading;
        hammer_unlock(&volume->io.lock);
        return(error);
}

/*
 * Release a volume.  Call hammer_io_release on the last reference.  We have
 * to acquire an exclusive lock to interlock against volume->ondisk tests
 * in hammer_load_volume(), and hammer_io_release() also expects an exclusive
 * lock to be held.
 *
 * Volumes are not unloaded from memory during normal operation.
 */
void
hammer_rel_volume(hammer_volume_t volume, int flush)
{
        struct buf *bp = NULL;

        crit_enter();
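        /*
         * Disassociate ondisk/io only on the last reference.  Re-check
         * refs after acquiring the exclusive lock; another thread may
         * have gained a reference while we blocked.
         */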
        if (volume->io.lock.refs == 1) {
                ++volume->io.loading;
                hammer_lock_ex(&volume->io.lock);
                if (volume->io.lock.refs == 1) {
                        volume->ondisk = NULL;
                        bp = hammer_io_release(&volume->io, flush);
                }
                --volume->io.loading;
                hammer_unlock(&volume->io.lock);
        }
        hammer_unref(&volume->io.lock);
        if (bp)
                brelse(bp);
        crit_exit();
}

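/*
 * Verify that all volumes (vol_no 0 through nvolumes-1) were installed
 * at mount time.  Returns 0 on success or EINVAL if a volume is missing.
 */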
int
hammer_mountcheck_volumes(struct hammer_mount *hmp)
{
        hammer_volume_t vol;
        int i;

        for (i = 0; i < hmp->nvolumes; ++i) {
                vol = RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, i);
                if (vol == NULL)
                        return(EINVAL);
        }
        return(0);
}

/************************************************************************
 *                              BUFFERS                                 *
 ************************************************************************
 *
 * Manage buffers.  Currently all blockmap-backed zones are direct-mapped
 * to zone-2 buffer offsets, without a translation stage.  However, the
 * hammer_buffer structure is indexed by its zoneX_offset, not its
 * zone2_offset.
 *
 * The proper zone must be maintained throughout the code-base all the way
 * through to the big-block allocator, or routines like hammer_del_buffers()
 * will not be able to locate all potentially conflicting buffers.
 */
hammer_buffer_t
hammer_get_buffer(hammer_mount_t hmp, hammer_off_t buf_offset,
                  int bytes, int isnew, int *errorp)
{
        hammer_buffer_t buffer;
        hammer_volume_t volume;
        hammer_off_t    zone2_offset;
        hammer_io_type_t iotype;
        int vol_no;
        int zone;

        buf_offset &= ~HAMMER_BUFMASK64;
again:
        /*
         * Shortcut if the buffer is already cached
         */
        buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root, buf_offset);
        if (buffer) {
                if (buffer->io.lock.refs == 0)
                        ++hammer_count_refedbufs;
                hammer_ref(&buffer->io.lock);

                /*
                 * Once refed the ondisk field will not be cleared by
                 * any other action.
                 */
                if (buffer->ondisk && buffer->io.loading == 0) {
                        *errorp = 0;
                        return(buffer);
                }

                /*
                 * The buffer is no longer loose if it has a ref, and
                 * cannot become loose once it gains a ref.  Loose
                 * buffers will never be in a modified state.  This should
                 * only occur on the 0->1 transition of refs.
                 *
                 * lose_list can be modified via a biodone() interrupt.
                 */
                if (buffer->io.mod_list == &hmp->lose_list) {
                        crit_enter();   /* biodone race against list */
                        TAILQ_REMOVE(buffer->io.mod_list, &buffer->io,
                                     mod_entry);
                        crit_exit();
                        buffer->io.mod_list = NULL;
                        KKASSERT(buffer->io.modified == 0);
                }
                goto found;
        }

        /*
         * What is the buffer class?
         */
        zone = HAMMER_ZONE_DECODE(buf_offset);

        switch(zone) {
        case HAMMER_ZONE_LARGE_DATA_INDEX:
        case HAMMER_ZONE_SMALL_DATA_INDEX:
                iotype = HAMMER_STRUCTURE_DATA_BUFFER;
                break;
        case HAMMER_ZONE_UNDO_INDEX:
                iotype = HAMMER_STRUCTURE_UNDO_BUFFER;
                break;
        case HAMMER_ZONE_META_INDEX:
        default:
                /*
                 * NOTE: inode data and directory entries are placed in this
                 * zone.  inode atime/mtime is updated in-place and thus
                 * buffers containing inodes must be synchronized as
                 * meta-buffers, same as buffers containing B-Tree info.
                 */
                iotype = HAMMER_STRUCTURE_META_BUFFER;
                break;
        }

        /*
         * Handle blockmap offset translations
         */
        if (zone >= HAMMER_ZONE_BTREE_INDEX) {
                zone2_offset = hammer_blockmap_lookup(hmp, buf_offset, errorp);
        } else if (zone == HAMMER_ZONE_UNDO_INDEX) {
                zone2_offset = hammer_undo_lookup(hmp, buf_offset, errorp);
        } else {
                KKASSERT(zone == HAMMER_ZONE_RAW_BUFFER_INDEX);
                zone2_offset = buf_offset;
                *errorp = 0;
        }
        if (*errorp)
                return(NULL);

        /*
         * NOTE: zone2_offset and maxbuf_off are both full zone-2 offset
         * specifications.
         */
        KKASSERT((zone2_offset & HAMMER_OFF_ZONE_MASK) ==
                 HAMMER_ZONE_RAW_BUFFER);
        vol_no = HAMMER_VOL_DECODE(zone2_offset);
        volume = hammer_get_volume(hmp, vol_no, errorp);
        if (volume == NULL)
                return(NULL);

        KKASSERT(zone2_offset < volume->maxbuf_off);

        /*
         * Allocate a new buffer structure.  We will check for races later.
         */
        ++hammer_count_buffers;
        buffer = kmalloc(sizeof(*buffer), hmp->m_misc,
                         M_WAITOK|M_ZERO|M_USE_RESERVE);
        buffer->zone2_offset = zone2_offset;
        buffer->zoneX_offset = buf_offset;

        hammer_io_init(&buffer->io, volume, iotype);
        buffer->io.offset = volume->ondisk->vol_buf_beg +
                            (zone2_offset & HAMMER_OFF_SHORT_MASK);
        buffer->io.bytes = bytes;
        TAILQ_INIT(&buffer->clist);
        hammer_ref(&buffer->io.lock);

        /*
         * Insert the buffer into the RB tree and handle late collisions.
         */
        if (RB_INSERT(hammer_buf_rb_tree, &hmp->rb_bufs_root, buffer)) {
                hammer_unref(&buffer->io.lock); /* safety */
                --hammer_count_buffers;
                hammer_rel_volume(volume, 0);
                buffer->io.volume = NULL;       /* safety */
                kfree(buffer, hmp->m_misc);
                goto again;
        }
        ++hammer_count_refedbufs;
found:

        /*
         * Deal with on-disk info and loading races.
         */
        if (buffer->ondisk == NULL || buffer->io.loading) {
                *errorp = hammer_load_buffer(buffer, isnew);
                if (*errorp) {
                        hammer_rel_buffer(buffer, 1);
                        buffer = NULL;
                }
        } else {
                *errorp = 0;
        }
        return(buffer);
}

/*
 * This is used by the direct-read code to deal with large-data buffers
 * created by the reblocker and mirror-write code.  The direct-read code
 * bypasses the HAMMER buffer subsystem and so any aliased dirty or write-
 * running hammer buffers must be fully synced to disk before we can issue
 * the direct-read.
 *
 * This code path is not considered critical as only the reblocker and
 * mirror-write code will create large-data buffers via the HAMMER buffer
 * subsystem.  They do that because they operate at the B-Tree level and
 * do not access the vnode/inode structures.
 */
void
hammer_sync_buffers(hammer_mount_t hmp, hammer_off_t base_offset, int bytes)
{
        hammer_buffer_t buffer;
        int error;

        KKASSERT((base_offset & HAMMER_OFF_ZONE_MASK) ==
                 HAMMER_ZONE_LARGE_DATA);

        while (bytes > 0) {
                buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root,
                                   base_offset);
                if (buffer && (buffer->io.modified || buffer->io.running)) {
                        error = hammer_ref_buffer(buffer);
                        if (error == 0) {
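                                /*
                                 * Wait for any I/O in progress, then flush
                                 * the buffer if it is still dirty and wait
                                 * for that write to complete as well.
                                 */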
                                hammer_io_wait(&buffer->io);
                                if (buffer->io.modified) {
                                        hammer_io_write_interlock(&buffer->io);
                                        hammer_io_flush(&buffer->io);
                                        hammer_io_done_interlock(&buffer->io);
                                        hammer_io_wait(&buffer->io);
                                }
                                hammer_rel_buffer(buffer, 0);
                        }
                }
                base_offset += HAMMER_BUFSIZE;
                bytes -= HAMMER_BUFSIZE;
        }
}

/*
 * Destroy all buffers covering the specified zoneX offset range.  This
 * is called when the related blockmap layer2 entry is freed or when
 * a direct write bypasses our buffer/buffer-cache subsystem.
 *
 * The buffers may be referenced by the caller itself.  Setting reclaim
 * will cause the buffer to be destroyed when its ref count reaches zero.
 *
 * Return 0 on success, EAGAIN if some buffers could not be destroyed due
 * to additional references held by other threads, or some other (typically
 * fatal) error.
 */
int
hammer_del_buffers(hammer_mount_t hmp, hammer_off_t base_offset,
                   hammer_off_t zone2_offset, int bytes,
                   int report_conflicts)
{
        hammer_buffer_t buffer;
        hammer_volume_t volume;
        int vol_no;
        int error;
        int ret_error;

        vol_no = HAMMER_VOL_DECODE(zone2_offset);
        volume = hammer_get_volume(hmp, vol_no, &ret_error);
        KKASSERT(ret_error == 0);

        while (bytes > 0) {
                buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root,
                                   base_offset);
                if (buffer) {
                        error = hammer_ref_buffer(buffer);
                        if (error == 0 && buffer->io.lock.refs != 1) {
                                error = EAGAIN;
                                hammer_rel_buffer(buffer, 0);
                        }
                        if (error == 0) {
                                KKASSERT(buffer->zone2_offset == zone2_offset);
                                hammer_io_clear_modify(&buffer->io, 1);
                                buffer->io.reclaim = 1;
                                buffer->io.waitdep = 1;
                                KKASSERT(buffer->io.volume == volume);
                                hammer_rel_buffer(buffer, 0);
                        }
                } else {
                        error = hammer_io_inval(volume, zone2_offset);
                }
                if (error) {
                        ret_error = error;
                        if (report_conflicts ||
                            (hammer_debug_general & 0x8000)) {
                                kprintf("hammer_del_buffers: unable to "
                                        "invalidate %016llx buffer=%p "
                                        "rep=%d\n", base_offset, buffer,
                                        report_conflicts);
                        }
                }
                base_offset += HAMMER_BUFSIZE;
                zone2_offset += HAMMER_BUFSIZE;
                bytes -= HAMMER_BUFSIZE;
        }
        hammer_rel_volume(volume, 0);
        return (ret_error);
}
static int
hammer_load_buffer(hammer_buffer_t buffer, int isnew)
{
        hammer_volume_t volume;
        int error;

        /*
         * Load the buffer's on-disk info
         */
        volume = buffer->io.volume;
        ++buffer->io.loading;
        hammer_lock_ex(&buffer->io.lock);

        if (hammer_debug_io & 0x0001) {
                kprintf("load_buffer %016llx %016llx isnew=%d od=%p\n",
                        buffer->zoneX_offset, buffer->zone2_offset, isnew,
                        buffer->ondisk);
        }

        if (buffer->ondisk == NULL) {
                if (isnew) {
                        error = hammer_io_new(volume->devvp, &buffer->io);
                } else {
                        error = hammer_io_read(volume->devvp, &buffer->io,
                                               volume->maxraw_off);
                }
                if (error == 0)
                        buffer->ondisk = (void *)buffer->io.bp->b_data;
        } else if (isnew) {
                error = hammer_io_new(volume->devvp, &buffer->io);
        } else {
                error = 0;
        }
        --buffer->io.loading;
        hammer_unlock(&buffer->io.lock);
        return (error);
}

/*
 * NOTE: Called from RB_SCAN, must return >= 0 for scan to continue.
 * This routine is only called during unmount.
 */
int
hammer_unload_buffer(hammer_buffer_t buffer, void *data __unused)
{
        /*
         * Clean up the persistent ref ioerror might have on the buffer
         * and acquire a ref (steal ioerror's if we can).
         */
        if (buffer->io.ioerror) {
                buffer->io.ioerror = 0;
        } else {
                if (buffer->io.lock.refs == 0)
                        ++hammer_count_refedbufs;
                hammer_ref(&buffer->io.lock);
        }

        /*
         * We must not flush a dirty buffer to disk on umount.  It should
         * have already been dealt with by the flusher, or we may be in
         * catastrophic failure.
         */
        hammer_io_clear_modify(&buffer->io, 1);
        hammer_flush_buffer_nodes(buffer);
        KKASSERT(buffer->io.lock.refs == 1);
        hammer_rel_buffer(buffer, 2);
        return(0);
}

/*
 * Reference a buffer that is either already referenced or via a specially
 * handled pointer (aka cursor->buffer).
 */
int
hammer_ref_buffer(hammer_buffer_t buffer)
{
        int error;

        if (buffer->io.lock.refs == 0)
                ++hammer_count_refedbufs;
        hammer_ref(&buffer->io.lock);

        /*
         * At this point a biodone() will not touch the buffer other than
         * incidental bits.  However, lose_list can be modified via
         * a biodone() interrupt.
         *
         * No longer loose
         */
        if (buffer->io.mod_list == &buffer->io.hmp->lose_list) {
                crit_enter();
                TAILQ_REMOVE(buffer->io.mod_list, &buffer->io, mod_entry);
                buffer->io.mod_list = NULL;
                crit_exit();
        }

        if (buffer->ondisk == NULL || buffer->io.loading) {
                error = hammer_load_buffer(buffer, 0);
                if (error) {
                        hammer_rel_buffer(buffer, 1);
                        /*
                         * NOTE: buffer pointer can become stale after
                         * the above release.
                         */
                }
        } else {
                error = 0;
        }
        return(error);
}

/*
 * Release a buffer.  We have to deal with several places where
 * another thread can ref the buffer.
 *
 * Only destroy the structure itself if the related buffer cache buffer
 * was disassociated from it.  This ties the management of the structure
 * to the buffer cache subsystem.  buffer->ondisk determines whether the
 * embedded io is referenced or not.
 */
void
hammer_rel_buffer(hammer_buffer_t buffer, int flush)
{
        hammer_volume_t volume;
        hammer_mount_t hmp;
        struct buf *bp = NULL;
        int freeme = 0;

        hmp = buffer->io.hmp;

        crit_enter();
        if (buffer->io.lock.refs == 1) {
                ++buffer->io.loading;   /* force interlock check */
                hammer_lock_ex(&buffer->io.lock);
                if (buffer->io.lock.refs == 1) {
                        bp = hammer_io_release(&buffer->io, flush);

                        if (buffer->io.lock.refs == 1)
                                --hammer_count_refedbufs;

                        if (buffer->io.bp == NULL &&
                            buffer->io.lock.refs == 1) {
                                /*
                                 * Final cleanup
                                 *
                                 * NOTE: It is impossible for any associated
                                 * B-Tree nodes to have refs if the buffer
                                 * has no additional refs.
                                 */
                                RB_REMOVE(hammer_buf_rb_tree,
                                          &buffer->io.hmp->rb_bufs_root,
                                          buffer);
                                volume = buffer->io.volume;
                                buffer->io.volume = NULL; /* sanity */
                                hammer_rel_volume(volume, 0);
                                hammer_io_clear_modlist(&buffer->io);
                                hammer_flush_buffer_nodes(buffer);
                                KKASSERT(TAILQ_EMPTY(&buffer->clist));
                                freeme = 1;
                        }
                }
                --buffer->io.loading;
                hammer_unlock(&buffer->io.lock);
        }
        hammer_unref(&buffer->io.lock);
        crit_exit();
        if (bp)
                brelse(bp);
        if (freeme) {
                --hammer_count_buffers;
                kfree(buffer, hmp->m_misc);
        }
}

/*
 * Access the filesystem buffer containing the specified hammer offset.
 * buf_offset is a conglomeration of the volume number and vol_buf_beg
 * relative buffer offset.  It must also have bit 55 set to be valid.
 * (see hammer_off_t in hammer_disk.h).
 *
 * Any prior buffer in *bufferp will be released and replaced by the
 * requested buffer.
 *
 * NOTE: The buffer is indexed via its zoneX_offset but we allow the
 * passed cached *bufferp to match against either zoneX or zone2.
 */
static __inline
void *
_hammer_bread(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
             int *errorp, struct hammer_buffer **bufferp)
{
        hammer_buffer_t buffer;
        int32_t xoff = (int32_t)buf_offset & HAMMER_BUFMASK;

        buf_offset &= ~HAMMER_BUFMASK64;
        KKASSERT((buf_offset & HAMMER_OFF_ZONE_MASK) != 0);

        buffer = *bufferp;
        if (buffer == NULL || (buffer->zone2_offset != buf_offset &&
                               buffer->zoneX_offset != buf_offset)) {
                if (buffer)
                        hammer_rel_buffer(buffer, 0);
                buffer = hammer_get_buffer(hmp, buf_offset, bytes, 0, errorp);
                *bufferp = buffer;
        } else {
                *errorp = 0;
        }

        /*
         * Return a pointer to the buffer data.
         */
        if (buffer == NULL)
                return(NULL);
        else
                return((char *)buffer->ondisk + xoff);
}

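/*
 * Convenience wrappers: hammer_bread() maps a full HAMMER_BUFSIZE
 * buffer, hammer_bread_ext() rounds the caller's size up to the
 * buffer boundary first.
 */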
void *
hammer_bread(hammer_mount_t hmp, hammer_off_t buf_offset,
             int *errorp, struct hammer_buffer **bufferp)
{
        return(_hammer_bread(hmp, buf_offset, HAMMER_BUFSIZE, errorp, bufferp));
}

void *
hammer_bread_ext(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
                 int *errorp, struct hammer_buffer **bufferp)
{
        bytes = (bytes + HAMMER_BUFMASK) & ~HAMMER_BUFMASK;
        return(_hammer_bread(hmp, buf_offset, bytes, errorp, bufferp));
}

/*
 * Access the filesystem buffer containing the specified hammer offset.
 * No disk read operation occurs.  The result buffer may contain garbage.
 *
 * Any prior buffer in *bufferp will be released and replaced by the
 * requested buffer.
 *
 * This function marks the buffer dirty but does not increment its
 * modify_refs count.
 */
static __inline
void *
_hammer_bnew(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
             int *errorp, struct hammer_buffer **bufferp)
{
        hammer_buffer_t buffer;
        int32_t xoff = (int32_t)buf_offset & HAMMER_BUFMASK;

        buf_offset &= ~HAMMER_BUFMASK64;

        buffer = *bufferp;
        if (buffer == NULL || (buffer->zone2_offset != buf_offset &&
                               buffer->zoneX_offset != buf_offset)) {
                if (buffer)
                        hammer_rel_buffer(buffer, 0);
                buffer = hammer_get_buffer(hmp, buf_offset, bytes, 1, errorp);
                *bufferp = buffer;
        } else {
                *errorp = 0;
        }

        /*
         * Return a pointer to the buffer data.
         */
        if (buffer == NULL)
                return(NULL);
        else
                return((char *)buffer->ondisk + xoff);
}

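/*
 * As above, but via _hammer_bnew(): the underlying buffer is
 * instantiated without reading from the media.
 */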
void *
hammer_bnew(hammer_mount_t hmp, hammer_off_t buf_offset,
             int *errorp, struct hammer_buffer **bufferp)
{
        return(_hammer_bnew(hmp, buf_offset, HAMMER_BUFSIZE, errorp, bufferp));
}

void *
hammer_bnew_ext(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
                int *errorp, struct hammer_buffer **bufferp)
{
        bytes = (bytes + HAMMER_BUFMASK) & ~HAMMER_BUFMASK;
        return(_hammer_bnew(hmp, buf_offset, bytes, errorp, bufferp));
}

/************************************************************************
 *                              NODES                                   *
 ************************************************************************
 *
 * Manage B-Tree nodes.  B-Tree nodes represent the primary indexing
 * method used by the HAMMER filesystem.
 *
 * Unlike other HAMMER structures, a hammer_node can be PASSIVELY
 * associated with its buffer, and will only reference the buffer while
 * the node itself is referenced.
 *
 * A hammer_node can also be passively associated with other HAMMER
 * structures, such as inodes, while retaining 0 references.  These
 * associations can be cleared backwards using a pointer-to-pointer in
 * the hammer_node.
 *
 * This allows the HAMMER implementation to cache hammer_nodes long-term
 * and short-cut a great deal of the infrastructure's complexity.  In
 * most cases a cached node can be reacquired without having to dip into
 * either the buffer or cluster management code.
 *
 * The caller must pass a referenced cluster on call and will retain
 * ownership of the reference on return.  The node will acquire its own
 * additional references, if necessary.
 */
hammer_node_t
hammer_get_node(hammer_transaction_t trans, hammer_off_t node_offset,
                int isnew, int *errorp)
{
        hammer_mount_t hmp = trans->hmp;
        hammer_node_t node;

        KKASSERT((node_offset & HAMMER_OFF_ZONE_MASK) == HAMMER_ZONE_BTREE);

        /*
         * Locate the structure, allocating one if necessary.
         */
again:
        node = RB_LOOKUP(hammer_nod_rb_tree, &hmp->rb_nods_root, node_offset);
        if (node == NULL) {
                ++hammer_count_nodes;
                node = kmalloc(sizeof(*node), hmp->m_misc,
                               M_WAITOK|M_ZERO|M_USE_RESERVE);
                node->node_offset = node_offset;
                node->hmp = hmp;
                TAILQ_INIT(&node->cursor_list);
                TAILQ_INIT(&node->cache_list);
                if (RB_INSERT(hammer_nod_rb_tree, &hmp->rb_nods_root, node)) {
                        --hammer_count_nodes;
                        kfree(node, hmp->m_misc);
                        goto again;
                }
        }
        hammer_ref(&node->lock);
        if (node->ondisk) {
                *errorp = 0;
        } else {
                *errorp = hammer_load_node(node, isnew);
                trans->flags |= HAMMER_TRANSF_DIDIO;
        }
        if (*errorp) {
                hammer_rel_node(node);
                node = NULL;
        }
        return(node);
}

/*
 * Reference an already-referenced node.
 */
void
hammer_ref_node(hammer_node_t node)
{
        KKASSERT(node->lock.refs > 0 && node->ondisk != NULL);
        hammer_ref(&node->lock);
}

/*
 * Load a node's on-disk data reference.
 */
static int
hammer_load_node(hammer_node_t node, int isnew)
{
        hammer_buffer_t buffer;
        hammer_off_t buf_offset;
        int error;

        error = 0;
        ++node->loading;
        hammer_lock_ex(&node->lock);
        if (node->ondisk == NULL) {
                /*
                 * This is a little confusing but the gist is that
                 * node->buffer determines whether the node is on
                 * the buffer's clist and node->ondisk determines
                 * whether the buffer is referenced.
                 *
                 * We could be racing a buffer release, in which case
                 * node->buffer may become NULL while we are blocked
                 * referencing the buffer.
                 */
                if ((buffer = node->buffer) != NULL) {
                        error = hammer_ref_buffer(buffer);
                        if (error == 0 && node->buffer == NULL) {
                                TAILQ_INSERT_TAIL(&buffer->clist,
                                                  node, entry);
                                node->buffer = buffer;
                        }
                } else {
                        buf_offset = node->node_offset & ~HAMMER_BUFMASK64;
                        buffer = hammer_get_buffer(node->hmp, buf_offset,
                                                   HAMMER_BUFSIZE, 0, &error);
                        if (buffer) {
                                KKASSERT(error == 0);
                                TAILQ_INSERT_TAIL(&buffer->clist,
                                                  node, entry);
                                node->buffer = buffer;
                        }
                }
                if (error)
                        goto failed;
                node->ondisk = (void *)((char *)buffer->ondisk +
                                        (node->node_offset & HAMMER_BUFMASK));
                if (isnew == 0 &&
                    (node->flags & HAMMER_NODE_CRCGOOD) == 0) {
                        if (hammer_crc_test_btree(node->ondisk) == 0)
                                Debugger("CRC FAILED: B-TREE NODE");
                        node->flags |= HAMMER_NODE_CRCGOOD;
                }
        }
failed:
        --node->loading;
        hammer_unlock(&node->lock);
        return (error);
}

/*
 * Safely reference a node, interlock against flushes via the IO subsystem.
 */
hammer_node_t
hammer_ref_node_safe(struct hammer_mount *hmp, hammer_node_cache_t cache,
                     int *errorp)
{
        hammer_node_t node;

        node = cache->node;
        if (node != NULL) {
                hammer_ref(&node->lock);
                if (node->ondisk)
                        *errorp = 0;
                else
                        *errorp = hammer_load_node(node, 0);
                if (*errorp) {
                        hammer_rel_node(node);
                        node = NULL;
                }
        } else {
                *errorp = ENOENT;
        }
        return(node);
}

/*
 * Release a hammer_node.  On the last release the node dereferences
 * its underlying buffer and may or may not be destroyed.
 */
void
hammer_rel_node(hammer_node_t node)
{
        hammer_buffer_t buffer;

        /*
         * If this isn't the last ref just decrement the ref count and
         * return.
         */
        if (node->lock.refs > 1) {
                hammer_unref(&node->lock);
                return;
        }

        /*
         * If there is no ondisk info or no buffer the node failed to load,
         * remove the last reference and destroy the node.
         */
        if (node->ondisk == NULL) {
                hammer_unref(&node->lock);
                hammer_flush_node(node);
                /* node is stale now */
                return;
        }

        /*
         * Do not disassociate the node from the buffer if it represents
         * a modified B-Tree node that still needs its crc to be generated.
         */
        if (node->flags & HAMMER_NODE_NEEDSCRC)
                return;

        /*
         * Do final cleanups and then either destroy the node or leave it
         * passively cached.  The buffer reference is removed regardless.
         */
        buffer = node->buffer;
        node->ondisk = NULL;

        if ((node->flags & HAMMER_NODE_FLUSH) == 0) {
                hammer_unref(&node->lock);
                hammer_rel_buffer(buffer, 0);
                return;
        }

        /*
         * Destroy the node.
         */
        hammer_unref(&node->lock);
        hammer_flush_node(node);
        /* node is stale */
        hammer_rel_buffer(buffer, 0);
}

/*
 * Free space on-media associated with a B-Tree node.
 */
void
hammer_delete_node(hammer_transaction_t trans, hammer_node_t node)
{
        KKASSERT((node->flags & HAMMER_NODE_DELETED) == 0);
        node->flags |= HAMMER_NODE_DELETED;
        hammer_blockmap_free(trans, node->node_offset, sizeof(*node->ondisk));
}

/*
 * Passively cache a referenced hammer_node.  The caller may release
 * the node on return.
 */
void
hammer_cache_node(hammer_node_cache_t cache, hammer_node_t node)
{
        /*
         * If the node doesn't exist, or is being deleted, don't cache it!
         *
         * The node can only ever be NULL in the I/O failure path.
         */
        if (node == NULL || (node->flags & HAMMER_NODE_DELETED))
                return;
        if (cache->node == node)
                return;
        while (cache->node)
                hammer_uncache_node(cache);
        if (node->flags & HAMMER_NODE_DELETED)
                return;
        cache->node = node;
        TAILQ_INSERT_TAIL(&node->cache_list, cache, entry);
}

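/*
 * Drop a passive cache association.  If this was the node's last cache
 * entry, hammer_flush_node() gets a chance to destroy the node should
 * it be otherwise unreferenced and have no backing store.
 */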
void
hammer_uncache_node(hammer_node_cache_t cache)
{
        hammer_node_t node;

        if ((node = cache->node) != NULL) {
                TAILQ_REMOVE(&node->cache_list, cache, entry);
                cache->node = NULL;
                if (TAILQ_EMPTY(&node->cache_list))
                        hammer_flush_node(node);
        }
}

/*
 * Remove a node's cache references and destroy the node if it has no
 * other references or backing store.
 */
void
hammer_flush_node(hammer_node_t node)
{
        hammer_node_cache_t cache;
        hammer_buffer_t buffer;
        hammer_mount_t hmp = node->hmp;

        while ((cache = TAILQ_FIRST(&node->cache_list)) != NULL) {
                TAILQ_REMOVE(&node->cache_list, cache, entry);
                cache->node = NULL;
        }
        if (node->lock.refs == 0 && node->ondisk == NULL) {
                KKASSERT((node->flags & HAMMER_NODE_NEEDSCRC) == 0);
                RB_REMOVE(hammer_nod_rb_tree, &node->hmp->rb_nods_root, node);
                if ((buffer = node->buffer) != NULL) {
                        node->buffer = NULL;
                        TAILQ_REMOVE(&buffer->clist, node, entry);
                        /* buffer is unreferenced because ondisk is NULL */
                }
                --hammer_count_nodes;
                kfree(node, hmp->m_misc);
        }
}

/*
 * Flush passively cached B-Tree nodes associated with this buffer.
 * This is only called when the buffer is about to be destroyed, so
 * none of the nodes should have any references.  The buffer is locked.
 *
 * We may be interlocked with the buffer.
 */
void
hammer_flush_buffer_nodes(hammer_buffer_t buffer)
{
        hammer_node_t node;

        while ((node = TAILQ_FIRST(&buffer->clist)) != NULL) {
                KKASSERT(node->ondisk == NULL);
                KKASSERT((node->flags & HAMMER_NODE_NEEDSCRC) == 0);

                if (node->lock.refs == 0) {
                        hammer_ref(&node->lock);
                        node->flags |= HAMMER_NODE_FLUSH;
                        hammer_rel_node(node);
                } else {
                        KKASSERT(node->loading != 0);
                        KKASSERT(node->buffer != NULL);
                        buffer = node->buffer;
                        node->buffer = NULL;
                        TAILQ_REMOVE(&buffer->clist, node, entry);
                        /* buffer is unreferenced because ondisk is NULL */
                }
        }
}

/************************************************************************
 *                              ALLOCATORS                              *
 ************************************************************************/

/*
 * Allocate a B-Tree node.
 */
hammer_node_t
hammer_alloc_btree(hammer_transaction_t trans, int *errorp)
{
        hammer_buffer_t buffer = NULL;
        hammer_node_t node = NULL;
        hammer_off_t node_offset;

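        /*
         * Reserve media space for the node, then instantiate and zero
         * it.  No UNDO is generated, the space being newly allocated.
         */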
        node_offset = hammer_blockmap_alloc(trans, HAMMER_ZONE_BTREE_INDEX,
                                            sizeof(struct hammer_node_ondisk),
                                            errorp);
        if (*errorp == 0) {
                node = hammer_get_node(trans, node_offset, 1, errorp);
                hammer_modify_node_noundo(trans, node);
                bzero(node->ondisk, sizeof(*node->ondisk));
                hammer_modify_node_done(node);
        }
        if (buffer)
                hammer_rel_buffer(buffer, 0);
        return(node);
}

/*
 * Allocate data.  If the address of a data buffer is supplied then
 * any prior non-NULL *data_bufferp will be released and *data_bufferp
 * will be set to the related buffer.  The caller must release it when
 * finally done.  The initial *data_bufferp should be set to NULL by
 * the caller.
 *
 * The caller is responsible for making hammer_modify*() calls on the
 * *data_bufferp.
 */
void *
hammer_alloc_data(hammer_transaction_t trans, int32_t data_len,
                  u_int16_t rec_type, hammer_off_t *data_offsetp,
                  struct hammer_buffer **data_bufferp, int *errorp)
{
        void *data;
        int zone;

        /*
         * Allocate data
         */
        if (data_len) {
                switch(rec_type) {
                case HAMMER_RECTYPE_INODE:
                case HAMMER_RECTYPE_DIRENTRY:
                case HAMMER_RECTYPE_EXT:
                case HAMMER_RECTYPE_FIX:
                case HAMMER_RECTYPE_PFS:
                        zone = HAMMER_ZONE_META_INDEX;
                        break;
                case HAMMER_RECTYPE_DATA:
                case HAMMER_RECTYPE_DB:
                        if (data_len <= HAMMER_BUFSIZE / 2) {
                                zone = HAMMER_ZONE_SMALL_DATA_INDEX;
                        } else {
                                data_len = (data_len + HAMMER_BUFMASK) &
                                           ~HAMMER_BUFMASK;
                                zone = HAMMER_ZONE_LARGE_DATA_INDEX;
                        }
                        break;
                default:
                        panic("hammer_alloc_data: rec_type %04x unknown",
                              rec_type);
                        zone = 0;       /* NOT REACHED */
                        break;
                }
                *data_offsetp = hammer_blockmap_alloc(trans, zone,
                                                      data_len, errorp);
        } else {
                *data_offsetp = 0;
        }
        if (*errorp == 0 && data_bufferp) {
                if (data_len) {
                        data = hammer_bread_ext(trans->hmp, *data_offsetp,
                                                data_len, errorp, data_bufferp);
                } else {
                        data = NULL;
                }
        } else {
                data = NULL;
        }
        return(data);
}

/*
 * Sync dirty buffers to the media and clean-up any loose ends.
 *
 * These functions do not start the flusher going, they simply
 * queue everything up to the flusher.
 */
static int hammer_sync_scan1(struct mount *mp, struct vnode *vp, void *data);
static int hammer_sync_scan2(struct mount *mp, struct vnode *vp, void *data);

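/*
 * Queue dirty inodes to the flusher in a single pass over the mount's
 * vnode list.  Without MNT_WAIT the scan does not block on vnode locks.
 */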
int
hammer_queue_inodes_flusher(hammer_mount_t hmp, int waitfor)
{
        struct hammer_sync_info info;

        info.error = 0;
        info.waitfor = waitfor;
        if (waitfor == MNT_WAIT) {
                vmntvnodescan(hmp->mp, VMSC_GETVP|VMSC_ONEPASS,
                              hammer_sync_scan1, hammer_sync_scan2, &info);
        } else {
                vmntvnodescan(hmp->mp, VMSC_GETVP|VMSC_ONEPASS|VMSC_NOWAIT,
                              hammer_sync_scan1, hammer_sync_scan2, &info);
        }
        return(info.error);
}

/*
 * Filesystem sync.  If doing a synchronous sync make a second pass on
 * the vnodes in case any were already flushing during the first pass,
 * and activate the flusher twice (the second time brings the UNDO FIFO's
 * start position up to the end position after the first call).
 */
int
hammer_sync_hmp(hammer_mount_t hmp, int waitfor)
{
        struct hammer_sync_info info;

        info.error = 0;
        info.waitfor = MNT_NOWAIT;
        vmntvnodescan(hmp->mp, VMSC_GETVP|VMSC_NOWAIT,
                      hammer_sync_scan1, hammer_sync_scan2, &info);
        if (info.error == 0 && waitfor == MNT_WAIT) {
                info.waitfor = waitfor;
                vmntvnodescan(hmp->mp, VMSC_GETVP,
                              hammer_sync_scan1, hammer_sync_scan2, &info);
        }
        if (waitfor == MNT_WAIT) {
                hammer_flusher_sync(hmp);
                hammer_flusher_sync(hmp);
        } else {
                hammer_flusher_async(hmp, NULL);
                hammer_flusher_async(hmp, NULL);
        }
        return(info.error);
}

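/*
 * Fast-path check run without the vnode lock: returning -1 tells
 * vmntvnodescan() to skip vnodes which have no dirty state.
 */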
static int
hammer_sync_scan1(struct mount *mp, struct vnode *vp, void *data)
{
        struct hammer_inode *ip;

        ip = VTOI(vp);
        if (vp->v_type == VNON || ip == NULL ||
            ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
             RB_EMPTY(&vp->v_rbdirty_tree))) {
                return(-1);
        }
        return(0);
}

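/*
 * Slow-path run with the vnode held: re-check the dirty state and
 * fsync the vnode, recording any error for the caller.
 */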
static int
hammer_sync_scan2(struct mount *mp, struct vnode *vp, void *data)
{
        struct hammer_sync_info *info = data;
        struct hammer_inode *ip;
        int error;

        ip = VTOI(vp);
        if (vp->v_type == VNON || vp->v_type == VBAD ||
            ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
             RB_EMPTY(&vp->v_rbdirty_tree))) {
                return(0);
        }
        error = VOP_FSYNC(vp, MNT_NOWAIT);
        if (error)
                info->error = error;
        return(0);
}
