HAMMER VFS - Fix umount panic related to volume flush
sys/vfs/hammer/hammer_ondisk.c
/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_ondisk.c,v 1.76 2008/08/29 20:19:08 dillon Exp $
 */
/*
 * Manage HAMMER's on-disk structures.  These routines are primarily
 * responsible for interfacing with the kernel's I/O subsystem and for
 * managing in-memory structures.
 */

#include "hammer.h"
#include <sys/fcntl.h>
#include <sys/nlookup.h>
#include <sys/buf.h>
#include <sys/buf2.h>

static void hammer_free_volume(hammer_volume_t volume);
static int hammer_load_volume(hammer_volume_t volume);
static int hammer_load_buffer(hammer_buffer_t buffer, int isnew);
static int hammer_load_node(hammer_transaction_t trans,
				hammer_node_t node, int isnew);

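/*
 * hammer_volume structures are indexed by volume number.
 */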
static int
hammer_vol_rb_compare(hammer_volume_t vol1, hammer_volume_t vol2)
{
        if (vol1->vol_no < vol2->vol_no)
                return(-1);
        if (vol1->vol_no > vol2->vol_no)
                return(1);
        return(0);
}

/*
 * hammer_buffer structures are indexed via their zoneX_offset, not
 * their zone2_offset.
 */
static int
hammer_buf_rb_compare(hammer_buffer_t buf1, hammer_buffer_t buf2)
{
        if (buf1->zoneX_offset < buf2->zoneX_offset)
                return(-1);
        if (buf1->zoneX_offset > buf2->zoneX_offset)
                return(1);
        return(0);
}

static int
hammer_nod_rb_compare(hammer_node_t node1, hammer_node_t node2)
{
        if (node1->node_offset < node2->node_offset)
                return(-1);
        if (node1->node_offset > node2->node_offset)
                return(1);
        return(0);
}

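/*
 * Instantiate the red-black tree support routines.  RB_GENERATE2
 * additionally generates keyed RB_LOOKUP support on vol_no,
 * zoneX_offset, and node_offset respectively.
 */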
RB_GENERATE2(hammer_vol_rb_tree, hammer_volume, rb_node,
             hammer_vol_rb_compare, int32_t, vol_no);
RB_GENERATE2(hammer_buf_rb_tree, hammer_buffer, rb_node,
             hammer_buf_rb_compare, hammer_off_t, zoneX_offset);
RB_GENERATE2(hammer_nod_rb_tree, hammer_node, rb_node,
             hammer_nod_rb_compare, hammer_off_t, node_offset);

/************************************************************************
 *                              VOLUMES                                 *
 ************************************************************************
 *
 * Load a HAMMER volume by name.  Returns 0 on success or a positive error
 * code on failure.  Volumes must be loaded at mount time; get_volume() will
 * not load a new volume.
 *
 * Calls made to hammer_load_volume() are single-threaded.
 */
int
hammer_install_volume(struct hammer_mount *hmp, const char *volname,
                      struct vnode *devvp)
{
        struct mount *mp;
        hammer_volume_t volume;
        struct hammer_volume_ondisk *ondisk;
        struct nlookupdata nd;
        struct buf *bp = NULL;
        int error;
        int ronly;
        int setmp = 0;

        mp = hmp->mp;
        ronly = ((mp->mnt_flag & MNT_RDONLY) ? 1 : 0);

        /*
         * Allocate a volume structure
         */
        ++hammer_count_volumes;
        volume = kmalloc(sizeof(*volume), hmp->m_misc, M_WAITOK|M_ZERO);
        volume->vol_name = kstrdup(volname, hmp->m_misc);
        volume->io.hmp = hmp;   /* bootstrap */
        hammer_io_init(&volume->io, volume, HAMMER_STRUCTURE_VOLUME);
        volume->io.offset = 0LL;
        volume->io.bytes = HAMMER_BUFSIZE;

        /*
         * Get the device vnode
         */
        if (devvp == NULL) {
                error = nlookup_init(&nd, volume->vol_name, UIO_SYSSPACE, NLC_FOLLOW);
                if (error == 0)
                        error = nlookup(&nd);
                if (error == 0)
                        error = cache_vref(&nd.nl_nch, nd.nl_cred, &volume->devvp);
                nlookup_done(&nd);
        } else {
                error = 0;
                volume->devvp = devvp;
        }

        if (error == 0) {
                if (vn_isdisk(volume->devvp, &error)) {
                        error = vfs_mountedon(volume->devvp);
                }
        }
        if (error == 0 && vcount(volume->devvp) > 0)
                error = EBUSY;
        if (error == 0) {
                vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
                error = vinvalbuf(volume->devvp, V_SAVE, 0, 0);
                if (error == 0) {
                        error = VOP_OPEN(volume->devvp,
                                         (ronly ? FREAD : FREAD|FWRITE),
                                         FSCRED, NULL);
                }
                vn_unlock(volume->devvp);
        }
        if (error) {
                hammer_free_volume(volume);
                return(error);
        }
        volume->devvp->v_rdev->si_mountpoint = mp;
        setmp = 1;

        /*
         * Extract the volume number from the volume header and do various
         * sanity checks.
         */
        error = bread(volume->devvp, 0LL, HAMMER_BUFSIZE, &bp);
        if (error)
                goto late_failure;
        ondisk = (void *)bp->b_data;
        if (ondisk->vol_signature != HAMMER_FSBUF_VOLUME) {
                kprintf("hammer_mount: volume %s has an invalid header\n",
                        volume->vol_name);
                error = EFTYPE;
                goto late_failure;
        }
        volume->vol_no = ondisk->vol_no;
        volume->buffer_base = ondisk->vol_buf_beg;
        volume->vol_flags = ondisk->vol_flags;
        volume->nblocks = ondisk->vol_nblocks;
        volume->maxbuf_off = HAMMER_ENCODE_RAW_BUFFER(volume->vol_no,
                                    ondisk->vol_buf_end - ondisk->vol_buf_beg);
        volume->maxraw_off = ondisk->vol_buf_end;

        if (RB_EMPTY(&hmp->rb_vols_root)) {
                hmp->fsid = ondisk->vol_fsid;
        } else if (bcmp(&hmp->fsid, &ondisk->vol_fsid, sizeof(uuid_t))) {
                kprintf("hammer_mount: volume %s's fsid does not match "
                        "other volumes\n", volume->vol_name);
                error = EFTYPE;
                goto late_failure;
        }

        /*
         * Insert the volume structure into the red-black tree.
         */
        if (RB_INSERT(hammer_vol_rb_tree, &hmp->rb_vols_root, volume)) {
                kprintf("hammer_mount: volume %s has a duplicate vol_no %d\n",
                        volume->vol_name, volume->vol_no);
                error = EEXIST;
        }

        /*
         * Set the root volume.  HAMMER special-cases the rootvol structure.
         * We do not hold a ref because this would prevent related I/O
         * from being flushed.
         */
        if (error == 0 && ondisk->vol_rootvol == ondisk->vol_no) {
                hmp->rootvol = volume;
                hmp->nvolumes = ondisk->vol_count;
                if (bp) {
                        brelse(bp);
                        bp = NULL;
                }
                hmp->mp->mnt_stat.f_blocks += ondisk->vol0_stat_bigblocks *
                        (HAMMER_LARGEBLOCK_SIZE / HAMMER_BUFSIZE);
                hmp->mp->mnt_vstat.f_blocks += ondisk->vol0_stat_bigblocks *
                        (HAMMER_LARGEBLOCK_SIZE / HAMMER_BUFSIZE);
        }
late_failure:
        if (bp)
                brelse(bp);
        if (error) {
                /*vinvalbuf(volume->devvp, V_SAVE, 0, 0);*/
                if (setmp)
                        volume->devvp->v_rdev->si_mountpoint = NULL;
                VOP_CLOSE(volume->devvp, ronly ? FREAD : FREAD|FWRITE);
                hammer_free_volume(volume);
        }
        return (error);
}

/*
 * This is called for each volume when updating the mount point from
 * read-write to read-only or vice-versa.
 */
int
hammer_adjust_volume_mode(hammer_volume_t volume, void *data __unused)
{
        if (volume->devvp) {
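                /*
                 * Swap the device's open mode by opening it under the
                 * new flags before closing the old ones, so the device
                 * stays open throughout the transition.
                 */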
                vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
                if (volume->io.hmp->ronly) {
                        /* do not call vinvalbuf */
                        VOP_OPEN(volume->devvp, FREAD, FSCRED, NULL);
                        VOP_CLOSE(volume->devvp, FREAD|FWRITE);
                } else {
                        /* do not call vinvalbuf */
                        VOP_OPEN(volume->devvp, FREAD|FWRITE, FSCRED, NULL);
                        VOP_CLOSE(volume->devvp, FREAD);
                }
                vn_unlock(volume->devvp);
        }
        return(0);
}

/*
 * Unload and free a HAMMER volume.  Must return >= 0 to continue the
 * scan, so any errors are ignored.
 */
int
hammer_unload_volume(hammer_volume_t volume, void *data __unused)
{
        hammer_mount_t hmp = volume->io.hmp;
        int ronly = ((hmp->mp->mnt_flag & MNT_RDONLY) ? 1 : 0);

        /*
         * Clean up the root volume pointer, which is held unlocked in hmp.
         */
        if (hmp->rootvol == volume)
                hmp->rootvol = NULL;

        /*
         * We must not flush a dirty buffer to disk on umount.  It should
         * have already been dealt with by the flusher, or we may be in
         * catastrophic failure.
         */
        hammer_io_clear_modify(&volume->io, 1);
        volume->io.waitdep = 1;

        /*
         * Clean up the persistent ref ioerror might have on the volume
         */
        if (volume->io.ioerror) {
                volume->io.ioerror = 0;
                hammer_unref(&volume->io.lock);
        }

        /*
         * This should release the bp.
         */
        KKASSERT(volume->io.lock.refs == 0);
        hammer_ref(&volume->io.lock);
        hammer_rel_volume(volume, 1);
        KKASSERT(volume->io.bp == NULL);

        /*
         * There should be no references on the volume, no clusters, and
         * no super-clusters.
         */
        KKASSERT(volume->io.lock.refs == 0);

        volume->ondisk = NULL;
        if (volume->devvp) {
                if (volume->devvp->v_rdev &&
                    volume->devvp->v_rdev->si_mountpoint == hmp->mp
                ) {
                        volume->devvp->v_rdev->si_mountpoint = NULL;
                }
                if (ronly) {
                        /*
                         * Make sure we don't sync anything to disk if we
                         * are in read-only mode (1) or critically-errored
                         * (2).  Note that there may be dirty buffers in
                         * normal read-only mode from crash recovery.
                         */
                        vinvalbuf(volume->devvp, 0, 0, 0);
                        VOP_CLOSE(volume->devvp, FREAD);
                } else {
                        /*
                         * Normal termination, save any dirty buffers
                         * (XXX there really shouldn't be any).
                         */
                        vinvalbuf(volume->devvp, V_SAVE, 0, 0);
                        VOP_CLOSE(volume->devvp, FREAD|FWRITE);
                }
        }

        /*
         * Destroy the structure
         */
        RB_REMOVE(hammer_vol_rb_tree, &hmp->rb_vols_root, volume);
        hammer_free_volume(volume);
        return(0);
}

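/*
 * Release the resources held by a volume structure: the volume name,
 * the device vnode reference, and the structure itself.
 */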
static
void
hammer_free_volume(hammer_volume_t volume)
{
        hammer_mount_t hmp = volume->io.hmp;

        if (volume->vol_name) {
                kfree(volume->vol_name, hmp->m_misc);
                volume->vol_name = NULL;
        }
        if (volume->devvp) {
                vrele(volume->devvp);
                volume->devvp = NULL;
        }
        --hammer_count_volumes;
        kfree(volume, hmp->m_misc);
}

/*
 * Get a HAMMER volume.  The volume must already exist.
 */
hammer_volume_t
hammer_get_volume(struct hammer_mount *hmp, int32_t vol_no, int *errorp)
{
        struct hammer_volume *volume;

        /*
         * Locate the volume structure
         */
        volume = RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, vol_no);
        if (volume == NULL) {
                *errorp = ENOENT;
                return(NULL);
        }
        hammer_ref(&volume->io.lock);

        /*
         * Deal with on-disk info
         */
        if (volume->ondisk == NULL || volume->io.loading) {
                *errorp = hammer_load_volume(volume);
                if (*errorp) {
                        hammer_rel_volume(volume, 1);
                        volume = NULL;
                }
        } else {
                *errorp = 0;
        }
        return(volume);
}

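/*
 * Reference an existing volume, loading its on-disk info if necessary.
 * Returns 0 on success or a positive error code.
 */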
int
hammer_ref_volume(hammer_volume_t volume)
{
        int error;

        hammer_ref(&volume->io.lock);

        /*
         * Deal with on-disk info
         */
        if (volume->ondisk == NULL || volume->io.loading) {
                error = hammer_load_volume(volume);
                if (error)
                        hammer_rel_volume(volume, 1);
        } else {
                error = 0;
        }
        return (error);
}

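/*
 * Get the root volume.  The root volume pointer is cached in the mount
 * structure and must exist; only its on-disk info may need loading.
 */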
hammer_volume_t
hammer_get_root_volume(struct hammer_mount *hmp, int *errorp)
{
        hammer_volume_t volume;

        volume = hmp->rootvol;
        KKASSERT(volume != NULL);
        hammer_ref(&volume->io.lock);

        /*
         * Deal with on-disk info
         */
        if (volume->ondisk == NULL || volume->io.loading) {
                *errorp = hammer_load_volume(volume);
                if (*errorp) {
                        hammer_rel_volume(volume, 1);
                        volume = NULL;
                }
        } else {
                *errorp = 0;
        }
        return (volume);
}

/*
 * Load a volume's on-disk information.  The volume must be referenced and
 * not locked.  We temporarily acquire an exclusive lock to interlock
 * against releases or multiple gets.
 */
static int
hammer_load_volume(hammer_volume_t volume)
{
        int error;

        ++volume->io.loading;
        hammer_lock_ex(&volume->io.lock);

        if (volume->ondisk == NULL) {
                error = hammer_io_read(volume->devvp, &volume->io,
                                       volume->maxraw_off);
                if (error == 0)
                        volume->ondisk = (void *)volume->io.bp->b_data;
        } else {
                error = 0;
        }
        --volume->io.loading;
        hammer_unlock(&volume->io.lock);
        return(error);
}

/*
 * Release a volume.  Call hammer_io_release on the last reference.  We have
 * to acquire an exclusive lock to interlock against volume->ondisk tests
 * in hammer_load_volume(), and hammer_io_release() also expects an exclusive
 * lock to be held.
 *
 * Volumes are not unloaded from memory during normal operation.
 */
void
hammer_rel_volume(hammer_volume_t volume, int flush)
{
        struct buf *bp = NULL;

        crit_enter();
        if (volume->io.lock.refs == 1) {
                ++volume->io.loading;
                hammer_lock_ex(&volume->io.lock);
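                /*
                 * Recheck refs now that the exclusive lock is held;
                 * another thread may have gained a reference while we
                 * blocked acquiring it.
                 */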
                if (volume->io.lock.refs == 1) {
                        volume->ondisk = NULL;
                        bp = hammer_io_release(&volume->io, flush);
                }
                --volume->io.loading;
                hammer_unlock(&volume->io.lock);
        }
        hammer_unref(&volume->io.lock);
        if (bp)
                brelse(bp);
        crit_exit();
}

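/*
 * Verify that all volumes numbered 0..nvolumes-1 were installed at
 * mount time.  Returns 0 on success or EINVAL if one is missing.
 */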
int
hammer_mountcheck_volumes(struct hammer_mount *hmp)
{
        hammer_volume_t vol;
        int i;

        for (i = 0; i < hmp->nvolumes; ++i) {
                vol = RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, i);
                if (vol == NULL)
                        return(EINVAL);
        }
        return(0);
}

/************************************************************************
 *                              BUFFERS                                 *
 ************************************************************************
 *
 * Manage buffers.  Currently most blockmap-backed zones are direct-mapped
 * to zone-2 buffer offsets, without a translation stage.  However, the
 * hammer_buffer structure is indexed by its zoneX_offset, not its
 * zone2_offset.
 *
 * The proper zone must be maintained throughout the code-base all the way
 * through to the big-block allocator, or routines like hammer_del_buffers()
 * will not be able to locate all potentially conflicting buffers.
 */
hammer_buffer_t
hammer_get_buffer(hammer_mount_t hmp, hammer_off_t buf_offset,
                  int bytes, int isnew, int *errorp)
{
        hammer_buffer_t buffer;
        hammer_volume_t volume;
        hammer_off_t    zone2_offset;
        hammer_io_type_t iotype;
        int vol_no;
        int zone;

        buf_offset &= ~HAMMER_BUFMASK64;
again:
        /*
         * Shortcut if the buffer is already cached
         */
        buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root, buf_offset);
        if (buffer) {
                if (buffer->io.lock.refs == 0)
                        ++hammer_count_refedbufs;
                hammer_ref(&buffer->io.lock);

                /*
                 * Once refed the ondisk field will not be cleared by
                 * any other action.
                 */
                if (buffer->ondisk && buffer->io.loading == 0) {
                        *errorp = 0;
                        hammer_io_advance(&buffer->io);
                        return(buffer);
                }

                /*
                 * The buffer is no longer loose if it has a ref, and
                 * cannot become loose once it gains a ref.  Loose
                 * buffers will never be in a modified state.  This should
                 * only occur on the 0->1 transition of refs.
                 *
                 * lose_list can be modified via a biodone() interrupt.
                 */
                if (buffer->io.mod_list == &hmp->lose_list) {
                        crit_enter();   /* biodone race against list */
                        TAILQ_REMOVE(buffer->io.mod_list, &buffer->io,
                                     mod_entry);
                        crit_exit();
                        buffer->io.mod_list = NULL;
                        KKASSERT(buffer->io.modified == 0);
                }
                goto found;
        }

        /*
         * What is the buffer class?
         */
        zone = HAMMER_ZONE_DECODE(buf_offset);

        switch(zone) {
        case HAMMER_ZONE_LARGE_DATA_INDEX:
        case HAMMER_ZONE_SMALL_DATA_INDEX:
                iotype = HAMMER_STRUCTURE_DATA_BUFFER;
                break;
        case HAMMER_ZONE_UNDO_INDEX:
                iotype = HAMMER_STRUCTURE_UNDO_BUFFER;
                break;
        case HAMMER_ZONE_META_INDEX:
        default:
                /*
                 * NOTE: inode data and directory entries are placed in this
                 * zone.  inode atime/mtime is updated in-place and thus
                 * buffers containing inodes must be synchronized as
                 * meta-buffers, same as buffers containing B-Tree info.
                 */
                iotype = HAMMER_STRUCTURE_META_BUFFER;
                break;
        }

        /*
         * Handle blockmap offset translations
         */
        if (zone >= HAMMER_ZONE_BTREE_INDEX) {
                zone2_offset = hammer_blockmap_lookup(hmp, buf_offset, errorp);
        } else if (zone == HAMMER_ZONE_UNDO_INDEX) {
                zone2_offset = hammer_undo_lookup(hmp, buf_offset, errorp);
        } else {
                KKASSERT(zone == HAMMER_ZONE_RAW_BUFFER_INDEX);
                zone2_offset = buf_offset;
                *errorp = 0;
        }
        if (*errorp)
                return(NULL);

        /*
         * NOTE: zone2_offset and maxbuf_off are both full zone-2 offset
         * specifications.
         */
        KKASSERT((zone2_offset & HAMMER_OFF_ZONE_MASK) ==
                 HAMMER_ZONE_RAW_BUFFER);
        vol_no = HAMMER_VOL_DECODE(zone2_offset);
        volume = hammer_get_volume(hmp, vol_no, errorp);
        if (volume == NULL)
                return(NULL);

        KKASSERT(zone2_offset < volume->maxbuf_off);

        /*
         * Allocate a new buffer structure.  We will check for races later.
         */
        ++hammer_count_buffers;
        buffer = kmalloc(sizeof(*buffer), hmp->m_misc,
                         M_WAITOK|M_ZERO|M_USE_RESERVE);
        buffer->zone2_offset = zone2_offset;
        buffer->zoneX_offset = buf_offset;

        hammer_io_init(&buffer->io, volume, iotype);
        buffer->io.offset = volume->ondisk->vol_buf_beg +
                            (zone2_offset & HAMMER_OFF_SHORT_MASK);
        buffer->io.bytes = bytes;
        TAILQ_INIT(&buffer->clist);
        hammer_ref(&buffer->io.lock);

        /*
         * Insert the buffer into the RB tree and handle late collisions.
         */
        if (RB_INSERT(hammer_buf_rb_tree, &hmp->rb_bufs_root, buffer)) {
                hammer_rel_volume(volume, 0);
                buffer->io.volume = NULL;       /* safety */
                hammer_unref(&buffer->io.lock); /* safety */
                --hammer_count_buffers;
                kfree(buffer, hmp->m_misc);
                goto again;
        }
        ++hammer_count_refedbufs;
found:

        /*
         * Deal with on-disk info and loading races.
         */
        if (buffer->ondisk == NULL || buffer->io.loading) {
                *errorp = hammer_load_buffer(buffer, isnew);
                if (*errorp) {
                        hammer_rel_buffer(buffer, 1);
                        buffer = NULL;
                } else {
                        hammer_io_advance(&buffer->io);
                }
        } else {
                *errorp = 0;
                hammer_io_advance(&buffer->io);
        }
        return(buffer);
}

/*
 * This is used by the direct-read code to deal with large-data buffers
 * created by the reblocker and mirror-write code.  The direct-read code
 * bypasses the HAMMER buffer subsystem and so any aliased dirty or write-
 * running hammer buffers must be fully synced to disk before we can issue
 * the direct-read.
 *
 * This code path is not considered critical as only the reblocker and
 * mirror-write code will create large-data buffers via the HAMMER buffer
 * subsystem.  They do that because they operate at the B-Tree level and
 * do not access the vnode/inode structures.
 */
void
hammer_sync_buffers(hammer_mount_t hmp, hammer_off_t base_offset, int bytes)
{
        hammer_buffer_t buffer;
        int error;

        KKASSERT((base_offset & HAMMER_OFF_ZONE_MASK) ==
                 HAMMER_ZONE_LARGE_DATA);

        while (bytes > 0) {
                buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root,
                                   base_offset);
                if (buffer && (buffer->io.modified || buffer->io.running)) {
                        error = hammer_ref_buffer(buffer);
                        if (error == 0) {
                                hammer_io_wait(&buffer->io);
                                if (buffer->io.modified) {
                                        hammer_io_write_interlock(&buffer->io);
                                        hammer_io_flush(&buffer->io, 0);
                                        hammer_io_done_interlock(&buffer->io);
                                        hammer_io_wait(&buffer->io);
                                }
                                hammer_rel_buffer(buffer, 0);
                        }
                }
                base_offset += HAMMER_BUFSIZE;
                bytes -= HAMMER_BUFSIZE;
        }
}

/*
 * Destroy all buffers covering the specified zoneX offset range.  This
 * is called when the related blockmap layer2 entry is freed or when
 * a direct write bypasses our buffer/buffer-cache subsystem.
 *
 * The buffers may be referenced by the caller itself.  Setting reclaim
 * will cause the buffer to be destroyed when its ref count reaches zero.
 *
 * Return 0 on success, EAGAIN if some buffers could not be destroyed due
 * to additional references held by other threads, or some other (typically
 * fatal) error.
 */
int
hammer_del_buffers(hammer_mount_t hmp, hammer_off_t base_offset,
                   hammer_off_t zone2_offset, int bytes,
                   int report_conflicts)
{
        hammer_buffer_t buffer;
        hammer_volume_t volume;
        int vol_no;
        int error;
        int ret_error;

        vol_no = HAMMER_VOL_DECODE(zone2_offset);
        volume = hammer_get_volume(hmp, vol_no, &ret_error);
        KKASSERT(ret_error == 0);

        while (bytes > 0) {
                buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root,
                                   base_offset);
                if (buffer) {
                        error = hammer_ref_buffer(buffer);
                        if (error == 0 && buffer->io.lock.refs != 1) {
                                error = EAGAIN;
                                hammer_rel_buffer(buffer, 0);
                        }
                        if (error == 0) {
                                KKASSERT(buffer->zone2_offset == zone2_offset);
                                hammer_io_clear_modify(&buffer->io, 1);
                                buffer->io.reclaim = 1;
                                buffer->io.waitdep = 1;
                                KKASSERT(buffer->io.volume == volume);
                                hammer_rel_buffer(buffer, 0);
                        }
                } else {
                        error = hammer_io_inval(volume, zone2_offset);
                }
                if (error) {
                        ret_error = error;
                        if (report_conflicts ||
                            (hammer_debug_general & 0x8000)) {
                                kprintf("hammer_del_buffers: unable to "
                                        "invalidate %016llx buffer=%p rep=%d\n",
                                        (long long)base_offset,
                                        buffer, report_conflicts);
                        }
                }
                base_offset += HAMMER_BUFSIZE;
                zone2_offset += HAMMER_BUFSIZE;
                bytes -= HAMMER_BUFSIZE;
        }
        hammer_rel_volume(volume, 0);
        return (ret_error);
}

static int
hammer_load_buffer(hammer_buffer_t buffer, int isnew)
{
        hammer_volume_t volume;
        int error;

        /*
         * Load the buffer's on-disk info
         */
        volume = buffer->io.volume;
        ++buffer->io.loading;
        hammer_lock_ex(&buffer->io.lock);

        if (hammer_debug_io & 0x0001) {
                kprintf("load_buffer %016llx %016llx isnew=%d od=%p\n",
                        (long long)buffer->zoneX_offset,
                        (long long)buffer->zone2_offset,
                        isnew, buffer->ondisk);
        }

        if (buffer->ondisk == NULL) {
                if (isnew) {
                        error = hammer_io_new(volume->devvp, &buffer->io);
                } else {
                        error = hammer_io_read(volume->devvp, &buffer->io,
                                               volume->maxraw_off);
                }
                if (error == 0)
                        buffer->ondisk = (void *)buffer->io.bp->b_data;
        } else if (isnew) {
                error = hammer_io_new(volume->devvp, &buffer->io);
        } else {
                error = 0;
        }
        --buffer->io.loading;
        hammer_unlock(&buffer->io.lock);
        return (error);
}

/*
 * NOTE: Called from RB_SCAN, must return >= 0 for scan to continue.
 * This routine is only called during unmount or when a volume is
 * removed.
 *
 * If data != NULL, it specifies a volume whose buffers should
 * be unloaded.
 */
int
hammer_unload_buffer(hammer_buffer_t buffer, void *data)
{
        struct hammer_volume *volume = (struct hammer_volume *) data;

        if (volume != NULL && volume != buffer->io.volume) {
                /*
                 * We are only interested in unloading buffers of volume,
                 * so skip it
                 */
                return 0;
        }

        /*
         * Clean up the persistent ref ioerror might have on the buffer
         * and acquire a ref (steal ioerror's if we can).
         */
        if (buffer->io.ioerror) {
                buffer->io.ioerror = 0;
        } else {
                if (buffer->io.lock.refs == 0)
                        ++hammer_count_refedbufs;
                hammer_ref(&buffer->io.lock);
        }

        /*
         * We must not flush a dirty buffer to disk on umount.  It should
         * have already been dealt with by the flusher, or we may be in
         * catastrophic failure.
         *
         * We must set waitdep to ensure that a running buffer is waited
         * on and released prior to us trying to unload the volume.
         */
        hammer_io_clear_modify(&buffer->io, 1);
        hammer_flush_buffer_nodes(buffer);
        KKASSERT(buffer->io.lock.refs == 1);
        buffer->io.waitdep = 1;
        hammer_rel_buffer(buffer, 2);
        return(0);
}

/*
 * Reference a buffer that is either already referenced or via a specially
 * handled pointer (aka cursor->buffer).
 */
int
hammer_ref_buffer(hammer_buffer_t buffer)
{
        int error;

        if (buffer->io.lock.refs == 0)
                ++hammer_count_refedbufs;
        hammer_ref(&buffer->io.lock);

        /*
         * At this point a biodone() will not touch the buffer other than
         * incidental bits.  However, lose_list can be modified via
         * a biodone() interrupt.
         *
         * No longer loose
         */
        if (buffer->io.mod_list == &buffer->io.hmp->lose_list) {
                crit_enter();
                TAILQ_REMOVE(buffer->io.mod_list, &buffer->io, mod_entry);
                buffer->io.mod_list = NULL;
                crit_exit();
        }

        if (buffer->ondisk == NULL || buffer->io.loading) {
                error = hammer_load_buffer(buffer, 0);
                if (error) {
                        hammer_rel_buffer(buffer, 1);
                        /*
                         * NOTE: buffer pointer can become stale after
                         * the above release.
                         */
                }
        } else {
                error = 0;
        }
        return(error);
}

/*
 * Release a buffer.  We have to deal with several places where
 * another thread can ref the buffer.
 *
 * Only destroy the structure itself if the related buffer cache buffer
 * was disassociated from it.  This ties the management of the structure
 * to the buffer cache subsystem.  buffer->ondisk determines whether the
 * embedded io is referenced or not.
 */
void
hammer_rel_buffer(hammer_buffer_t buffer, int flush)
{
        hammer_volume_t volume;
        hammer_mount_t hmp;
        struct buf *bp = NULL;
        int freeme = 0;

        hmp = buffer->io.hmp;

        crit_enter();
        if (buffer->io.lock.refs == 1) {
                ++buffer->io.loading;   /* force interlock check */
                hammer_lock_ex(&buffer->io.lock);
                if (buffer->io.lock.refs == 1) {
                        bp = hammer_io_release(&buffer->io, flush);

                        if (buffer->io.lock.refs == 1)
                                --hammer_count_refedbufs;

                        if (buffer->io.bp == NULL &&
                            buffer->io.lock.refs == 1) {
                                /*
                                 * Final cleanup
                                 *
                                 * NOTE: It is impossible for any associated
                                 * B-Tree nodes to have refs if the buffer
                                 * has no additional refs.
                                 */
                                RB_REMOVE(hammer_buf_rb_tree,
                                          &buffer->io.hmp->rb_bufs_root,
                                          buffer);
                                volume = buffer->io.volume;
                                buffer->io.volume = NULL; /* sanity */
                                hammer_rel_volume(volume, 0);
                                hammer_io_clear_modlist(&buffer->io);
                                hammer_flush_buffer_nodes(buffer);
                                KKASSERT(TAILQ_EMPTY(&buffer->clist));
                                freeme = 1;
                        }
                }
                --buffer->io.loading;
                hammer_unlock(&buffer->io.lock);
        }
        hammer_unref(&buffer->io.lock);
        crit_exit();
        if (bp)
                brelse(bp);
        if (freeme) {
                --hammer_count_buffers;
                kfree(buffer, hmp->m_misc);
        }
}

/*
 * Access the filesystem buffer containing the specified hammer offset.
 * buf_offset is a conglomeration of the volume number and vol_buf_beg
 * relative buffer offset.  It must also have bit 55 set to be valid.
 * (see hammer_off_t in hammer_disk.h).
 *
 * Any prior buffer in *bufferp will be released and replaced by the
 * requested buffer.
 *
 * NOTE: The buffer is indexed via its zoneX_offset but we allow the
 * passed cached *bufferp to match against either zoneX or zone2.
 */
static __inline
void *
_hammer_bread(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
             int *errorp, struct hammer_buffer **bufferp)
{
        hammer_buffer_t buffer;
        int32_t xoff = (int32_t)buf_offset & HAMMER_BUFMASK;

        buf_offset &= ~HAMMER_BUFMASK64;
        KKASSERT((buf_offset & HAMMER_OFF_ZONE_MASK) != 0);

        buffer = *bufferp;
        if (buffer == NULL || (buffer->zone2_offset != buf_offset &&
                               buffer->zoneX_offset != buf_offset)) {
                if (buffer)
                        hammer_rel_buffer(buffer, 0);
                buffer = hammer_get_buffer(hmp, buf_offset, bytes, 0, errorp);
                *bufferp = buffer;
        } else {
                *errorp = 0;
        }

        /*
         * Return a pointer to the buffer data.
         */
        if (buffer == NULL)
                return(NULL);
        else
                return((char *)buffer->ondisk + xoff);
}

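/*
 * Convenience wrappers for _hammer_bread().  hammer_bread() accesses a
 * single HAMMER_BUFSIZE buffer; hammer_bread_ext() rounds the byte count
 * up to a multiple of HAMMER_BUFSIZE first.
 */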
void *
hammer_bread(hammer_mount_t hmp, hammer_off_t buf_offset,
             int *errorp, struct hammer_buffer **bufferp)
{
        return(_hammer_bread(hmp, buf_offset, HAMMER_BUFSIZE, errorp, bufferp));
}

void *
hammer_bread_ext(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
                 int *errorp, struct hammer_buffer **bufferp)
{
        bytes = (bytes + HAMMER_BUFMASK) & ~HAMMER_BUFMASK;
        return(_hammer_bread(hmp, buf_offset, bytes, errorp, bufferp));
}

/*
 * Access the filesystem buffer containing the specified hammer offset.
 * No disk read operation occurs.  The result buffer may contain garbage.
 *
 * Any prior buffer in *bufferp will be released and replaced by the
 * requested buffer.
 *
 * This function marks the buffer dirty but does not increment its
 * modify_refs count.
 */
static __inline
void *
_hammer_bnew(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
             int *errorp, struct hammer_buffer **bufferp)
{
        hammer_buffer_t buffer;
        int32_t xoff = (int32_t)buf_offset & HAMMER_BUFMASK;

        buf_offset &= ~HAMMER_BUFMASK64;

        buffer = *bufferp;
        if (buffer == NULL || (buffer->zone2_offset != buf_offset &&
                               buffer->zoneX_offset != buf_offset)) {
                if (buffer)
                        hammer_rel_buffer(buffer, 0);
                buffer = hammer_get_buffer(hmp, buf_offset, bytes, 1, errorp);
                *bufferp = buffer;
        } else {
                *errorp = 0;
        }

        /*
         * Return a pointer to the buffer data.
         */
        if (buffer == NULL)
                return(NULL);
        else
                return((char *)buffer->ondisk + xoff);
}

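/*
 * Convenience wrappers for _hammer_bnew(), analogous to the
 * hammer_bread() wrappers above.
 */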
void *
hammer_bnew(hammer_mount_t hmp, hammer_off_t buf_offset,
             int *errorp, struct hammer_buffer **bufferp)
{
        return(_hammer_bnew(hmp, buf_offset, HAMMER_BUFSIZE, errorp, bufferp));
}

void *
hammer_bnew_ext(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
                int *errorp, struct hammer_buffer **bufferp)
{
        bytes = (bytes + HAMMER_BUFMASK) & ~HAMMER_BUFMASK;
        return(_hammer_bnew(hmp, buf_offset, bytes, errorp, bufferp));
}

/************************************************************************
 *                              NODES                                   *
 ************************************************************************
 *
 * Manage B-Tree nodes.  B-Tree nodes represent the primary indexing
 * method used by the HAMMER filesystem.
 *
 * Unlike other HAMMER structures, a hammer_node can be PASSIVELY
 * associated with its buffer, and will only reference the buffer while
 * the node itself is referenced.
 *
 * A hammer_node can also be passively associated with other HAMMER
 * structures, such as inodes, while retaining 0 references.  These
 * associations can be cleared backwards using a pointer-to-pointer in
 * the hammer_node.
 *
 * This allows the HAMMER implementation to cache hammer_nodes long-term
 * and short-cut a great deal of the infrastructure's complexity.  In
 * most cases a cached node can be reacquired without having to dip into
 * either the buffer or cluster management code.
 *
 * The caller must pass a referenced cluster on call and will retain
 * ownership of the reference on return.  The node will acquire its own
 * additional references, if necessary.
 */
hammer_node_t
hammer_get_node(hammer_transaction_t trans, hammer_off_t node_offset,
                int isnew, int *errorp)
{
        hammer_mount_t hmp = trans->hmp;
        hammer_node_t node;

        KKASSERT((node_offset & HAMMER_OFF_ZONE_MASK) == HAMMER_ZONE_BTREE);

        /*
         * Locate the structure, allocating one if necessary.
         */
again:
        node = RB_LOOKUP(hammer_nod_rb_tree, &hmp->rb_nods_root, node_offset);
        if (node == NULL) {
                ++hammer_count_nodes;
                node = kmalloc(sizeof(*node), hmp->m_misc, M_WAITOK|M_ZERO|M_USE_RESERVE);
                node->node_offset = node_offset;
                node->hmp = hmp;
                TAILQ_INIT(&node->cursor_list);
                TAILQ_INIT(&node->cache_list);
                if (RB_INSERT(hammer_nod_rb_tree, &hmp->rb_nods_root, node)) {
                        --hammer_count_nodes;
                        kfree(node, hmp->m_misc);
                        goto again;
                }
        }
        hammer_ref(&node->lock);
        if (node->ondisk) {
                *errorp = 0;
                hammer_io_advance(&node->buffer->io);
        } else {
                *errorp = hammer_load_node(trans, node, isnew);
                trans->flags |= HAMMER_TRANSF_DIDIO;
        }
        if (*errorp) {
                hammer_rel_node(node);
                node = NULL;
        }
        return(node);
}

/*
 * Reference an already-referenced node.
 */
void
hammer_ref_node(hammer_node_t node)
{
        KKASSERT(node->lock.refs > 0 && node->ondisk != NULL);
        hammer_ref(&node->lock);
}

/*
 * Load a node's on-disk data reference.
 */
static int
hammer_load_node(hammer_transaction_t trans, hammer_node_t node, int isnew)
{
        hammer_buffer_t buffer;
        hammer_off_t buf_offset;
        int error;

        error = 0;
        ++node->loading;
        hammer_lock_ex(&node->lock);
        if (node->ondisk == NULL) {
                /*
                 * This is a little confusing but the gist is that
                 * node->buffer determines whether the node is on
                 * the buffer's clist and node->ondisk determines
                 * whether the buffer is referenced.
                 *
                 * We could be racing a buffer release, in which case
                 * node->buffer may become NULL while we are blocked
                 * referencing the buffer.
                 */
                if ((buffer = node->buffer) != NULL) {
                        error = hammer_ref_buffer(buffer);
                        if (error == 0 && node->buffer == NULL) {
                                TAILQ_INSERT_TAIL(&buffer->clist,
                                                  node, entry);
                                node->buffer = buffer;
                        }
                } else {
                        buf_offset = node->node_offset & ~HAMMER_BUFMASK64;
                        buffer = hammer_get_buffer(node->hmp, buf_offset,
                                                   HAMMER_BUFSIZE, 0, &error);
                        if (buffer) {
                                KKASSERT(error == 0);
                                TAILQ_INSERT_TAIL(&buffer->clist,
                                                  node, entry);
                                node->buffer = buffer;
                        }
                }
                if (error)
                        goto failed;
                node->ondisk = (void *)((char *)buffer->ondisk +
                                        (node->node_offset & HAMMER_BUFMASK));

                /*
                 * Check CRC.  NOTE: Neither flag is set and the CRC is not
                 * generated on new B-Tree nodes.
                 */
                if (isnew == 0 &&
                    (node->flags & HAMMER_NODE_CRCANY) == 0) {
                        if (hammer_crc_test_btree(node->ondisk) == 0) {
                                if (hammer_debug_critical)
                                        Debugger("CRC FAILED: B-TREE NODE");
                                node->flags |= HAMMER_NODE_CRCBAD;
                        } else {
                                node->flags |= HAMMER_NODE_CRCGOOD;
                        }
                }
        }
        if (node->flags & HAMMER_NODE_CRCBAD) {
                if (trans->flags & HAMMER_TRANSF_CRCDOM)
                        error = EDOM;
                else
                        error = EIO;
        }
failed:
        --node->loading;
        hammer_unlock(&node->lock);
        return (error);
}

/*
 * Safely reference a node, interlock against flushes via the IO subsystem.
 */
hammer_node_t
hammer_ref_node_safe(hammer_transaction_t trans, hammer_node_cache_t cache,
                     int *errorp)
{
        hammer_node_t node;

        node = cache->node;
        if (node != NULL) {
                hammer_ref(&node->lock);
                if (node->ondisk) {
                        if (node->flags & HAMMER_NODE_CRCBAD) {
                                if (trans->flags & HAMMER_TRANSF_CRCDOM)
                                        *errorp = EDOM;
                                else
                                        *errorp = EIO;
                        } else {
                                *errorp = 0;
                        }
                } else {
                        *errorp = hammer_load_node(trans, node, 0);
                }
                if (*errorp) {
                        hammer_rel_node(node);
                        node = NULL;
                }
        } else {
                *errorp = ENOENT;
        }
        return(node);
}

/*
 * Release a hammer_node.  On the last release the node dereferences
 * its underlying buffer and may or may not be destroyed.
 */
void
hammer_rel_node(hammer_node_t node)
{
        hammer_buffer_t buffer;

        /*
         * If this isn't the last ref just decrement the ref count and
         * return.
         */
        if (node->lock.refs > 1) {
                hammer_unref(&node->lock);
                return;
        }

        /*
         * If there is no ondisk info or no buffer the node failed to load,
         * remove the last reference and destroy the node.
         */
        if (node->ondisk == NULL) {
                hammer_unref(&node->lock);
                hammer_flush_node(node);
                /* node is stale now */
                return;
        }

        /*
         * Do not disassociate the node from the buffer if it represents
         * a modified B-Tree node that still needs its crc to be generated.
         */
        if (node->flags & HAMMER_NODE_NEEDSCRC)
                return;

        /*
         * Do final cleanups and then either destroy the node or leave it
         * passively cached.  The buffer reference is removed regardless.
         */
        buffer = node->buffer;
        node->ondisk = NULL;

        if ((node->flags & HAMMER_NODE_FLUSH) == 0) {
                hammer_unref(&node->lock);
                hammer_rel_buffer(buffer, 0);
                return;
        }

        /*
         * Destroy the node.
         */
        hammer_unref(&node->lock);
        hammer_flush_node(node);
        /* node is stale */
        hammer_rel_buffer(buffer, 0);
}

/*
 * Free space on-media associated with a B-Tree node.
 */
void
hammer_delete_node(hammer_transaction_t trans, hammer_node_t node)
{
        KKASSERT((node->flags & HAMMER_NODE_DELETED) == 0);
        node->flags |= HAMMER_NODE_DELETED;
        hammer_blockmap_free(trans, node->node_offset, sizeof(*node->ondisk));
}

/*
 * Passively cache a referenced hammer_node.  The caller may release
 * the node on return.
 */
void
hammer_cache_node(hammer_node_cache_t cache, hammer_node_t node)
{
        /*
         * If the node doesn't exist, or is being deleted, don't cache it!
         *
         * The node can only ever be NULL in the I/O failure path.
         */
        if (node == NULL || (node->flags & HAMMER_NODE_DELETED))
                return;
        if (cache->node == node)
                return;
        while (cache->node)
                hammer_uncache_node(cache);
        if (node->flags & HAMMER_NODE_DELETED)
                return;
        cache->node = node;
        TAILQ_INSERT_TAIL(&node->cache_list, cache, entry);
}

void
hammer_uncache_node(hammer_node_cache_t cache)
{
        hammer_node_t node;

        if ((node = cache->node) != NULL) {
                TAILQ_REMOVE(&node->cache_list, cache, entry);
                cache->node = NULL;
                if (TAILQ_EMPTY(&node->cache_list))
                        hammer_flush_node(node);
        }
}

/*
 * Remove a node's cache references and destroy the node if it has no
 * other references or backing store.
 */
void
hammer_flush_node(hammer_node_t node)
{
        hammer_node_cache_t cache;
        hammer_buffer_t buffer;
        hammer_mount_t hmp = node->hmp;

        while ((cache = TAILQ_FIRST(&node->cache_list)) != NULL) {
                TAILQ_REMOVE(&node->cache_list, cache, entry);
                cache->node = NULL;
        }
        if (node->lock.refs == 0 && node->ondisk == NULL) {
                KKASSERT((node->flags & HAMMER_NODE_NEEDSCRC) == 0);
                RB_REMOVE(hammer_nod_rb_tree, &node->hmp->rb_nods_root, node);
                if ((buffer = node->buffer) != NULL) {
                        node->buffer = NULL;
                        TAILQ_REMOVE(&buffer->clist, node, entry);
                        /* buffer is unreferenced because ondisk is NULL */
                }
                --hammer_count_nodes;
                kfree(node, hmp->m_misc);
        }
}

/*
 * Flush passively cached B-Tree nodes associated with this buffer.
 * This is only called when the buffer is about to be destroyed, so
 * none of the nodes should have any references.  The buffer is locked.
 *
 * We may be interlocked with the buffer.
 */
void
hammer_flush_buffer_nodes(hammer_buffer_t buffer)
{
        hammer_node_t node;

        while ((node = TAILQ_FIRST(&buffer->clist)) != NULL) {
                KKASSERT(node->ondisk == NULL);
                KKASSERT((node->flags & HAMMER_NODE_NEEDSCRC) == 0);

                if (node->lock.refs == 0) {
                        hammer_ref(&node->lock);
                        node->flags |= HAMMER_NODE_FLUSH;
                        hammer_rel_node(node);
                } else {
                        KKASSERT(node->loading != 0);
                        KKASSERT(node->buffer != NULL);
                        buffer = node->buffer;
                        node->buffer = NULL;
                        TAILQ_REMOVE(&buffer->clist, node, entry);
                        /* buffer is unreferenced because ondisk is NULL */
                }
        }
}


/************************************************************************
 *                              ALLOCATORS                              *
 ************************************************************************/

/*
 * Allocate a B-Tree node.
 */
hammer_node_t
hammer_alloc_btree(hammer_transaction_t trans, hammer_off_t hint, int *errorp)
{
        hammer_buffer_t buffer = NULL;
        hammer_node_t node = NULL;
        hammer_off_t node_offset;

        node_offset = hammer_blockmap_alloc(trans, HAMMER_ZONE_BTREE_INDEX,
                                            sizeof(struct hammer_node_ondisk),
                                            hint, errorp);
        if (*errorp == 0) {
                node = hammer_get_node(trans, node_offset, 1, errorp);
                hammer_modify_node_noundo(trans, node);
                bzero(node->ondisk, sizeof(*node->ondisk));
                hammer_modify_node_done(node);
        }
        if (buffer)
                hammer_rel_buffer(buffer, 0);
        return(node);
}

/*
 * Allocate data.  If the address of a data buffer is supplied then
 * any prior non-NULL *data_bufferp will be released and *data_bufferp
 * will be set to the related buffer.  The caller must release it when
 * finally done.  The initial *data_bufferp should be set to NULL by
 * the caller.
 *
 * The caller is responsible for making hammer_modify*() calls on the
 * *data_bufferp.
 */
void *
hammer_alloc_data(hammer_transaction_t trans, int32_t data_len,
                  u_int16_t rec_type, hammer_off_t *data_offsetp,
                  struct hammer_buffer **data_bufferp,
                  hammer_off_t hint, int *errorp)
{
        void *data;
        int zone;

        /*
         * Allocate data
         */
        if (data_len) {
                switch(rec_type) {
                case HAMMER_RECTYPE_INODE:
                case HAMMER_RECTYPE_DIRENTRY:
                case HAMMER_RECTYPE_EXT:
                case HAMMER_RECTYPE_FIX:
                case HAMMER_RECTYPE_PFS:
                case HAMMER_RECTYPE_SNAPSHOT:
                case HAMMER_RECTYPE_CONFIG:
                        zone = HAMMER_ZONE_META_INDEX;
                        break;
                case HAMMER_RECTYPE_DATA:
                case HAMMER_RECTYPE_DB:
                        if (data_len <= HAMMER_BUFSIZE / 2) {
                                zone = HAMMER_ZONE_SMALL_DATA_INDEX;
                        } else {
                                data_len = (data_len + HAMMER_BUFMASK) &
                                           ~HAMMER_BUFMASK;
                                zone = HAMMER_ZONE_LARGE_DATA_INDEX;
                        }
                        break;
                default:
                        panic("hammer_alloc_data: rec_type %04x unknown",
                              rec_type);
                        zone = 0;       /* NOT REACHED */
                        break;
                }
                *data_offsetp = hammer_blockmap_alloc(trans, zone, data_len,
                                                      hint, errorp);
        } else {
                *data_offsetp = 0;
        }
        if (*errorp == 0 && data_bufferp) {
                if (data_len) {
                        data = hammer_bread_ext(trans->hmp, *data_offsetp,
                                                data_len, errorp, data_bufferp);
                } else {
                        data = NULL;
                }
        } else {
                data = NULL;
        }
        return(data);
}

/*
 * Sync dirty buffers to the media and clean up any loose ends.
 *
 * These functions do not start the flusher going, they simply
 * queue everything up to the flusher.
 */
static int hammer_sync_scan1(struct mount *mp, struct vnode *vp, void *data);
static int hammer_sync_scan2(struct mount *mp, struct vnode *vp, void *data);

int
hammer_queue_inodes_flusher(hammer_mount_t hmp, int waitfor)
{
        struct hammer_sync_info info;

        info.error = 0;
        info.waitfor = waitfor;
        if (waitfor == MNT_WAIT) {
                vmntvnodescan(hmp->mp, VMSC_GETVP|VMSC_ONEPASS,
                              hammer_sync_scan1, hammer_sync_scan2, &info);
        } else {
                vmntvnodescan(hmp->mp, VMSC_GETVP|VMSC_ONEPASS|VMSC_NOWAIT,
                              hammer_sync_scan1, hammer_sync_scan2, &info);
        }
        return(info.error);
}

/*
 * Filesystem sync.  If doing a synchronous sync make a second pass on
 * the vnodes in case any were already flushing during the first pass,
 * and activate the flusher twice (the second time brings the UNDO FIFO's
 * start position up to the end position after the first call).
 */
int
hammer_sync_hmp(hammer_mount_t hmp, int waitfor)
{
        struct hammer_sync_info info;

        info.error = 0;
        info.waitfor = MNT_NOWAIT;
        vmntvnodescan(hmp->mp, VMSC_GETVP|VMSC_NOWAIT,
                      hammer_sync_scan1, hammer_sync_scan2, &info);
        if (info.error == 0 && waitfor == MNT_WAIT) {
                info.waitfor = waitfor;
                vmntvnodescan(hmp->mp, VMSC_GETVP,
                              hammer_sync_scan1, hammer_sync_scan2, &info);
        }
        if (waitfor == MNT_WAIT) {
                hammer_flusher_sync(hmp);
                hammer_flusher_sync(hmp);
        } else {
                hammer_flusher_async(hmp, NULL);
                hammer_flusher_async(hmp, NULL);
        }
        return(info.error);
}

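/*
 * Fast pre-check for the vnode scan: return -1 to skip vnodes with no
 * modified inode state and no dirty buffer cache buffers, 0 to have
 * vmntvnodescan() proceed to hammer_sync_scan2().
 */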
static int
hammer_sync_scan1(struct mount *mp, struct vnode *vp, void *data)
{
        struct hammer_inode *ip;

        ip = VTOI(vp);
        if (vp->v_type == VNON || ip == NULL ||
            ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
             RB_EMPTY(&vp->v_rbdirty_tree))) {
                return(-1);
        }
        return(0);
}

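/*
 * Second-pass scan worker: fsync any vnode that still has dirty state,
 * recording the first error encountered in the shared hammer_sync_info.
 */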
static int
hammer_sync_scan2(struct mount *mp, struct vnode *vp, void *data)
{
        struct hammer_sync_info *info = data;
        struct hammer_inode *ip;
        int error;

        ip = VTOI(vp);
        if (vp->v_type == VNON || vp->v_type == VBAD ||
            ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
             RB_EMPTY(&vp->v_rbdirty_tree))) {
                return(0);
        }
        error = VOP_FSYNC(vp, MNT_NOWAIT, 0);
        if (error)
                info->error = error;
        return(0);
}