/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_ondisk.c,v 1.76 2008/08/29 20:19:08 dillon Exp $
 */
/*
 * Manage HAMMER's on-disk structures.  These routines are primarily
 * responsible for interfacing with the kernel's I/O subsystem and for
 * managing in-memory structures.
 */

#include "hammer.h"
#include <sys/fcntl.h>
#include <sys/nlookup.h>
#include <sys/buf.h>
#include <sys/buf2.h>

static void hammer_free_volume(hammer_volume_t volume);
static int hammer_load_volume(hammer_volume_t volume);
static int hammer_load_buffer(hammer_buffer_t buffer, int isnew);
static int hammer_load_node(hammer_transaction_t trans,
                                hammer_node_t node, int isnew);

static int
hammer_vol_rb_compare(hammer_volume_t vol1, hammer_volume_t vol2)
{
        if (vol1->vol_no < vol2->vol_no)
                return(-1);
        if (vol1->vol_no > vol2->vol_no)
                return(1);
        return(0);
}

/*
 * hammer_buffer structures are indexed via their zoneX_offset, not
 * their zone2_offset.
 */
static int
hammer_buf_rb_compare(hammer_buffer_t buf1, hammer_buffer_t buf2)
{
        if (buf1->zoneX_offset < buf2->zoneX_offset)
                return(-1);
        if (buf1->zoneX_offset > buf2->zoneX_offset)
                return(1);
        return(0);
}

static int
hammer_nod_rb_compare(hammer_node_t node1, hammer_node_t node2)
{
        if (node1->node_offset < node2->node_offset)
                return(-1);
        if (node1->node_offset > node2->node_offset)
                return(1);
        return(0);
}

RB_GENERATE2(hammer_vol_rb_tree, hammer_volume, rb_node,
             hammer_vol_rb_compare, int32_t, vol_no);
RB_GENERATE2(hammer_buf_rb_tree, hammer_buffer, rb_node,
             hammer_buf_rb_compare, hammer_off_t, zoneX_offset);
RB_GENERATE2(hammer_nod_rb_tree, hammer_node, rb_node,
             hammer_nod_rb_compare, hammer_off_t, node_offset);

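/*
 * A minimal usage sketch (not part of the original source, not compiled):
 * RB_GENERATE2() emits lookup functions keyed directly on the secondary
 * field (vol_no, zoneX_offset or node_offset), so callers can use
 * RB_LOOKUP() without constructing a dummy element.  The function name
 * below is illustrative only.
 */
#if 0
static hammer_volume_t
example_lookup_volume(hammer_mount_t hmp, int32_t vol_no)
{
        /* keyed lookup generated by RB_GENERATE2() above */
        return(RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, vol_no));
}
#endif
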
/************************************************************************
 *                              VOLUMES                                 *
 ************************************************************************
 *
 * Load a HAMMER volume by name.  Returns 0 on success or a positive error
 * code on failure.  Volumes must be loaded at mount time, get_volume() will
 * not load a new volume.
 *
 * Calls made to hammer_load_volume() are single-threaded.
 */
int
hammer_install_volume(struct hammer_mount *hmp, const char *volname,
                      struct vnode *devvp)
{
        struct mount *mp;
        hammer_volume_t volume;
        struct hammer_volume_ondisk *ondisk;
        struct nlookupdata nd;
        struct buf *bp = NULL;
        int error;
        int ronly;
        int setmp = 0;

        mp = hmp->mp;
        ronly = ((mp->mnt_flag & MNT_RDONLY) ? 1 : 0);

        /*
         * Allocate a volume structure
         */
        ++hammer_count_volumes;
        volume = kmalloc(sizeof(*volume), hmp->m_misc, M_WAITOK|M_ZERO);
        volume->vol_name = kstrdup(volname, hmp->m_misc);
        volume->io.hmp = hmp;   /* bootstrap */
        hammer_io_init(&volume->io, volume, HAMMER_STRUCTURE_VOLUME);
        volume->io.offset = 0LL;
        volume->io.bytes = HAMMER_BUFSIZE;

        /*
         * Get the device vnode
         */
        if (devvp == NULL) {
                error = nlookup_init(&nd, volume->vol_name, UIO_SYSSPACE, NLC_FOLLOW);
                if (error == 0)
                        error = nlookup(&nd);
                if (error == 0)
                        error = cache_vref(&nd.nl_nch, nd.nl_cred, &volume->devvp);
                nlookup_done(&nd);
        } else {
                error = 0;
                volume->devvp = devvp;
        }

        if (error == 0) {
                if (vn_isdisk(volume->devvp, &error)) {
                        error = vfs_mountedon(volume->devvp);
                }
        }
        if (error == 0 &&
            count_udev(volume->devvp->v_umajor, volume->devvp->v_uminor) > 0) {
                error = EBUSY;
        }
        if (error == 0) {
                vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
                error = vinvalbuf(volume->devvp, V_SAVE, 0, 0);
                if (error == 0) {
                        error = VOP_OPEN(volume->devvp,
                                         (ronly ? FREAD : FREAD|FWRITE),
                                         FSCRED, NULL);
                }
                vn_unlock(volume->devvp);
        }
        if (error) {
                hammer_free_volume(volume);
                return(error);
        }
        volume->devvp->v_rdev->si_mountpoint = mp;
        setmp = 1;

        /*
         * Extract the volume number from the volume header and do various
         * sanity checks.
         */
        error = bread(volume->devvp, 0LL, HAMMER_BUFSIZE, &bp);
        if (error)
                goto late_failure;
        ondisk = (void *)bp->b_data;
        if (ondisk->vol_signature != HAMMER_FSBUF_VOLUME) {
                kprintf("hammer_mount: volume %s has an invalid header\n",
                        volume->vol_name);
                error = EFTYPE;
                goto late_failure;
        }
        volume->vol_no = ondisk->vol_no;
        volume->buffer_base = ondisk->vol_buf_beg;
        volume->vol_flags = ondisk->vol_flags;
        volume->nblocks = ondisk->vol_nblocks;
        volume->maxbuf_off = HAMMER_ENCODE_RAW_BUFFER(volume->vol_no,
                                    ondisk->vol_buf_end - ondisk->vol_buf_beg);
        volume->maxraw_off = ondisk->vol_buf_end;

        if (RB_EMPTY(&hmp->rb_vols_root)) {
                hmp->fsid = ondisk->vol_fsid;
        } else if (bcmp(&hmp->fsid, &ondisk->vol_fsid, sizeof(uuid_t))) {
                kprintf("hammer_mount: volume %s's fsid does not match "
                        "other volumes\n", volume->vol_name);
                error = EFTYPE;
                goto late_failure;
        }

        /*
         * Insert the volume structure into the red-black tree.
         */
        if (RB_INSERT(hammer_vol_rb_tree, &hmp->rb_vols_root, volume)) {
                kprintf("hammer_mount: volume %s has a duplicate vol_no %d\n",
                        volume->vol_name, volume->vol_no);
                error = EEXIST;
        }

        /*
         * Set the root volume.  HAMMER special-cases the root volume's
         * structure.  We do not hold a ref because this would prevent
         * related I/O from being flushed.
         */
        if (error == 0 && ondisk->vol_rootvol == ondisk->vol_no) {
                hmp->rootvol = volume;
                hmp->nvolumes = ondisk->vol_count;
                if (bp) {
                        brelse(bp);
                        bp = NULL;
                }
                hmp->mp->mnt_stat.f_blocks += ondisk->vol0_stat_bigblocks *
                        (HAMMER_LARGEBLOCK_SIZE / HAMMER_BUFSIZE);
                hmp->mp->mnt_vstat.f_blocks += ondisk->vol0_stat_bigblocks *
                        (HAMMER_LARGEBLOCK_SIZE / HAMMER_BUFSIZE);
        }
late_failure:
        if (bp)
                brelse(bp);
        if (error) {
                /*vinvalbuf(volume->devvp, V_SAVE, 0, 0);*/
                if (setmp)
                        volume->devvp->v_rdev->si_mountpoint = NULL;
                VOP_CLOSE(volume->devvp, ronly ? FREAD : FREAD|FWRITE);
                hammer_free_volume(volume);
        }
        return (error);
}
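
/*
 * A hedged sketch of the expected caller pattern (illustrative, not
 * compiled): the mount code is assumed to call hammer_install_volume()
 * once per volume path before any hammer_get_volume() calls are made.
 * The function and parameter names below are hypothetical.
 */
#if 0
static int
example_install_all(struct hammer_mount *hmp, char **paths, int npaths)
{
        int i;
        int error = 0;

        for (i = 0; i < npaths && error == 0; ++i)
                error = hammer_install_volume(hmp, paths[i], NULL);
        return(error);
}
#endif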

/*
 * This is called for each volume when updating the mount point from
 * read-write to read-only or vice-versa.
 */
int
hammer_adjust_volume_mode(hammer_volume_t volume, void *data __unused)
{
        if (volume->devvp) {
                vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
                if (volume->io.hmp->ronly) {
                        /* do not call vinvalbuf */
                        VOP_OPEN(volume->devvp, FREAD, FSCRED, NULL);
                        VOP_CLOSE(volume->devvp, FREAD|FWRITE);
                } else {
                        /* do not call vinvalbuf */
                        VOP_OPEN(volume->devvp, FREAD|FWRITE, FSCRED, NULL);
                        VOP_CLOSE(volume->devvp, FREAD);
                }
                vn_unlock(volume->devvp);
        }
        return(0);
}

/*
 * Unload and free a HAMMER volume.  Must return >= 0 to continue the
 * scan, so errors are ignored rather than returned (the function
 * always returns 0).
 */
int
hammer_unload_volume(hammer_volume_t volume, void *data __unused)
{
        hammer_mount_t hmp = volume->io.hmp;
        int ronly = ((hmp->mp->mnt_flag & MNT_RDONLY) ? 1 : 0);
        struct buf *bp;

        /*
         * Clean up the root volume pointer, which is held unlocked in hmp.
         */
        if (hmp->rootvol == volume)
                hmp->rootvol = NULL;

        /*
         * We must not flush a dirty buffer to disk on umount.  It should
         * have already been dealt with by the flusher, or we may be in
         * catastrophic failure.
         */
        hammer_io_clear_modify(&volume->io, 1);
        volume->io.waitdep = 1;
        bp = hammer_io_release(&volume->io, 1);

        /*
         * Clean up the persistent ref ioerror might have on the volume
         */
        if (volume->io.ioerror) {
                volume->io.ioerror = 0;
                hammer_unref(&volume->io.lock);
        }

        /*
         * There should be no references on the volume, no clusters, and
         * no super-clusters.
         */
        KKASSERT(volume->io.lock.refs == 0);
        if (bp)
                brelse(bp);

        volume->ondisk = NULL;
        if (volume->devvp) {
                if (volume->devvp->v_rdev &&
                    volume->devvp->v_rdev->si_mountpoint == hmp->mp
                ) {
                        volume->devvp->v_rdev->si_mountpoint = NULL;
                }
                if (ronly) {
                        /*
                         * Make sure we don't sync anything to disk if we
                         * are in read-only mode (1) or critically-errored
                         * (2).  Note that there may be dirty buffers in
                         * normal read-only mode from crash recovery.
                         */
                        vinvalbuf(volume->devvp, 0, 0, 0);
                        VOP_CLOSE(volume->devvp, FREAD);
                } else {
                        /*
                         * Normal termination, save any dirty buffers
                         * (XXX there really shouldn't be any).
                         */
                        vinvalbuf(volume->devvp, V_SAVE, 0, 0);
                        VOP_CLOSE(volume->devvp, FREAD|FWRITE);
                }
        }

        /*
         * Destroy the structure
         */
        RB_REMOVE(hammer_vol_rb_tree, &hmp->rb_vols_root, volume);
        hammer_free_volume(volume);
        return(0);
}

static
void
hammer_free_volume(hammer_volume_t volume)
{
        hammer_mount_t hmp = volume->io.hmp;

        if (volume->vol_name) {
                kfree(volume->vol_name, hmp->m_misc);
                volume->vol_name = NULL;
        }
        if (volume->devvp) {
                vrele(volume->devvp);
                volume->devvp = NULL;
        }
        --hammer_count_volumes;
        kfree(volume, hmp->m_misc);
}

/*
 * Get a HAMMER volume.  The volume must already exist.
 */
hammer_volume_t
hammer_get_volume(struct hammer_mount *hmp, int32_t vol_no, int *errorp)
{
        struct hammer_volume *volume;

        /*
         * Locate the volume structure
         */
        volume = RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, vol_no);
        if (volume == NULL) {
                *errorp = ENOENT;
                return(NULL);
        }
        hammer_ref(&volume->io.lock);

        /*
         * Deal with on-disk info
         */
        if (volume->ondisk == NULL || volume->io.loading) {
                *errorp = hammer_load_volume(volume);
                if (*errorp) {
                        hammer_rel_volume(volume, 1);
                        volume = NULL;
                }
        } else {
                *errorp = 0;
        }
        return(volume);
}
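
/*
 * Sketch of the reference discipline for volumes (illustrative only,
 * not compiled): every successful hammer_get_volume() must be balanced
 * by hammer_rel_volume().  The flush argument shown is 0 (no flush),
 * matching the common case in this file.  The function name is
 * hypothetical.
 */
#if 0
static void
example_touch_volume(struct hammer_mount *hmp, int32_t vol_no)
{
        hammer_volume_t volume;
        int error;

        volume = hammer_get_volume(hmp, vol_no, &error);
        if (volume) {
                /* volume->ondisk is valid while the ref is held */
                hammer_rel_volume(volume, 0);
        }
}
#endif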

int
hammer_ref_volume(hammer_volume_t volume)
{
        int error;

        hammer_ref(&volume->io.lock);

        /*
         * Deal with on-disk info
         */
        if (volume->ondisk == NULL || volume->io.loading) {
                error = hammer_load_volume(volume);
                if (error)
                        hammer_rel_volume(volume, 1);
        } else {
                error = 0;
        }
        return (error);
}

hammer_volume_t
hammer_get_root_volume(struct hammer_mount *hmp, int *errorp)
{
        hammer_volume_t volume;

        volume = hmp->rootvol;
        KKASSERT(volume != NULL);
        hammer_ref(&volume->io.lock);

        /*
         * Deal with on-disk info
         */
        if (volume->ondisk == NULL || volume->io.loading) {
                *errorp = hammer_load_volume(volume);
                if (*errorp) {
                        hammer_rel_volume(volume, 1);
                        volume = NULL;
                }
        } else {
                *errorp = 0;
        }
        return (volume);
}

/*
 * Load a volume's on-disk information.  The volume must be referenced and
 * not locked.  We temporarily acquire an exclusive lock to interlock
 * against releases or multiple get's.
 */
static int
hammer_load_volume(hammer_volume_t volume)
{
        int error;

        ++volume->io.loading;
        hammer_lock_ex(&volume->io.lock);

        if (volume->ondisk == NULL) {
                error = hammer_io_read(volume->devvp, &volume->io,
                                       volume->maxraw_off);
                if (error == 0)
                        volume->ondisk = (void *)volume->io.bp->b_data;
        } else {
                error = 0;
        }
        --volume->io.loading;
        hammer_unlock(&volume->io.lock);
        return(error);
}

/*
 * Release a volume.  Call hammer_io_release on the last reference.  We have
 * to acquire an exclusive lock to interlock against volume->ondisk tests
 * in hammer_load_volume(), and hammer_io_release() also expects an exclusive
 * lock to be held.
 *
 * Volumes are not unloaded from memory during normal operation.
 */
void
hammer_rel_volume(hammer_volume_t volume, int flush)
{
        struct buf *bp = NULL;

        crit_enter();
        if (volume->io.lock.refs == 1) {
                ++volume->io.loading;
                hammer_lock_ex(&volume->io.lock);
                if (volume->io.lock.refs == 1) {
                        volume->ondisk = NULL;
                        bp = hammer_io_release(&volume->io, flush);
                }
                --volume->io.loading;
                hammer_unlock(&volume->io.lock);
        }
        hammer_unref(&volume->io.lock);
        if (bp)
                brelse(bp);
        crit_exit();
}

int
hammer_mountcheck_volumes(struct hammer_mount *hmp)
{
        hammer_volume_t vol;
        int i;

        for (i = 0; i < hmp->nvolumes; ++i) {
                vol = RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, i);
                if (vol == NULL)
                        return(EINVAL);
        }
        return(0);
}

/************************************************************************
 *                              BUFFERS                                 *
 ************************************************************************
 *
 * Manage buffers.  Currently all blockmap-backed zones are direct-mapped
 * to zone-2 buffer offsets, without a translation stage.  However, the
 * hammer_buffer structure is indexed by its zoneX_offset, not its
 * zone2_offset.
 *
 * The proper zone must be maintained throughout the code-base all the way
 * through to the big-block allocator, or routines like hammer_del_buffers()
 * will not be able to locate all potentially conflicting buffers.
 */
hammer_buffer_t
hammer_get_buffer(hammer_mount_t hmp, hammer_off_t buf_offset,
                  int bytes, int isnew, int *errorp)
{
        hammer_buffer_t buffer;
        hammer_volume_t volume;
        hammer_off_t    zone2_offset;
        hammer_io_type_t iotype;
        int vol_no;
        int zone;

        buf_offset &= ~HAMMER_BUFMASK64;
again:
        /*
         * Shortcut if the buffer is already cached
         */
        buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root, buf_offset);
        if (buffer) {
                if (buffer->io.lock.refs == 0)
                        ++hammer_count_refedbufs;
                hammer_ref(&buffer->io.lock);

                /*
                 * Once refed the ondisk field will not be cleared by
                 * any other action.
                 */
                if (buffer->ondisk && buffer->io.loading == 0) {
                        *errorp = 0;
                        return(buffer);
                }

                /*
                 * The buffer is no longer loose if it has a ref, and
                 * cannot become loose once it gains a ref.  Loose
                 * buffers will never be in a modified state.  This should
                 * only occur on the 0->1 transition of refs.
                 *
                 * lose_list can be modified via a biodone() interrupt.
                 */
                if (buffer->io.mod_list == &hmp->lose_list) {
                        crit_enter();   /* biodone race against list */
                        TAILQ_REMOVE(buffer->io.mod_list, &buffer->io,
                                     mod_entry);
                        crit_exit();
                        buffer->io.mod_list = NULL;
                        KKASSERT(buffer->io.modified == 0);
                }
                goto found;
        }

        /*
         * What is the buffer class?
         */
        zone = HAMMER_ZONE_DECODE(buf_offset);

        switch(zone) {
        case HAMMER_ZONE_LARGE_DATA_INDEX:
        case HAMMER_ZONE_SMALL_DATA_INDEX:
                iotype = HAMMER_STRUCTURE_DATA_BUFFER;
                break;
        case HAMMER_ZONE_UNDO_INDEX:
                iotype = HAMMER_STRUCTURE_UNDO_BUFFER;
                break;
        case HAMMER_ZONE_META_INDEX:
        default:
                /*
                 * NOTE: inode data and directory entries are placed in this
                 * zone.  inode atime/mtime is updated in-place and thus
                 * buffers containing inodes must be synchronized as
                 * meta-buffers, same as buffers containing B-Tree info.
                 */
                iotype = HAMMER_STRUCTURE_META_BUFFER;
                break;
        }

        /*
         * Handle blockmap offset translations
         */
        if (zone >= HAMMER_ZONE_BTREE_INDEX) {
                zone2_offset = hammer_blockmap_lookup(hmp, buf_offset, errorp);
        } else if (zone == HAMMER_ZONE_UNDO_INDEX) {
                zone2_offset = hammer_undo_lookup(hmp, buf_offset, errorp);
        } else {
                KKASSERT(zone == HAMMER_ZONE_RAW_BUFFER_INDEX);
                zone2_offset = buf_offset;
                *errorp = 0;
        }
        if (*errorp)
                return(NULL);

        /*
         * NOTE: zone2_offset and maxbuf_off are both full zone-2 offset
         * specifications.
         */
        KKASSERT((zone2_offset & HAMMER_OFF_ZONE_MASK) ==
                 HAMMER_ZONE_RAW_BUFFER);
        vol_no = HAMMER_VOL_DECODE(zone2_offset);
        volume = hammer_get_volume(hmp, vol_no, errorp);
        if (volume == NULL)
                return(NULL);

        KKASSERT(zone2_offset < volume->maxbuf_off);

        /*
         * Allocate a new buffer structure.  We will check for races later.
         */
        ++hammer_count_buffers;
        buffer = kmalloc(sizeof(*buffer), hmp->m_misc,
                         M_WAITOK|M_ZERO|M_USE_RESERVE);
        buffer->zone2_offset = zone2_offset;
        buffer->zoneX_offset = buf_offset;

        hammer_io_init(&buffer->io, volume, iotype);
        buffer->io.offset = volume->ondisk->vol_buf_beg +
                            (zone2_offset & HAMMER_OFF_SHORT_MASK);
        buffer->io.bytes = bytes;
        TAILQ_INIT(&buffer->clist);
        hammer_ref(&buffer->io.lock);

        /*
         * Insert the buffer into the RB tree and handle late collisions.
         */
        if (RB_INSERT(hammer_buf_rb_tree, &hmp->rb_bufs_root, buffer)) {
                hammer_unref(&buffer->io.lock); /* safety */
                --hammer_count_buffers;
                hammer_rel_volume(volume, 0);
                buffer->io.volume = NULL;       /* safety */
                kfree(buffer, hmp->m_misc);
                goto again;
        }
        ++hammer_count_refedbufs;
found:

        /*
         * Deal with on-disk info and loading races.
         */
        if (buffer->ondisk == NULL || buffer->io.loading) {
                *errorp = hammer_load_buffer(buffer, isnew);
                if (*errorp) {
                        hammer_rel_buffer(buffer, 1);
                        buffer = NULL;
                }
        } else {
                *errorp = 0;
        }
        return(buffer);
}
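
/*
 * Sketch of typical buffer access (illustrative only, not compiled):
 * hammer_get_buffer() returns a referenced buffer whose ->ondisk field
 * remains valid until the matching hammer_rel_buffer().  buf_offset
 * must carry a zone encoding, as asserted above.  The function name is
 * hypothetical.
 */
#if 0
static void
example_touch_buffer(hammer_mount_t hmp, hammer_off_t buf_offset)
{
        hammer_buffer_t buffer;
        int error;

        buffer = hammer_get_buffer(hmp, buf_offset, HAMMER_BUFSIZE,
                                   0, &error);
        if (buffer) {
                /* inspect buffer->ondisk here */
                hammer_rel_buffer(buffer, 0);
        }
}
#endif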

/*
 * This is used by the direct-read code to deal with large-data buffers
 * created by the reblocker and mirror-write code.  The direct-read code
 * bypasses the HAMMER buffer subsystem and so any aliased dirty or write-
 * running hammer buffers must be fully synced to disk before we can issue
 * the direct-read.
 *
 * This code path is not considered critical as only the reblocker and
 * mirror-write code will create large-data buffers via the HAMMER buffer
 * subsystem.  They do that because they operate at the B-Tree level and
 * do not access the vnode/inode structures.
 */
void
hammer_sync_buffers(hammer_mount_t hmp, hammer_off_t base_offset, int bytes)
{
        hammer_buffer_t buffer;
        int error;

        KKASSERT((base_offset & HAMMER_OFF_ZONE_MASK) ==
                 HAMMER_ZONE_LARGE_DATA);

        while (bytes > 0) {
                buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root,
                                   base_offset);
                if (buffer && (buffer->io.modified || buffer->io.running)) {
                        error = hammer_ref_buffer(buffer);
                        if (error == 0) {
                                hammer_io_wait(&buffer->io);
                                if (buffer->io.modified) {
                                        hammer_io_write_interlock(&buffer->io);
                                        hammer_io_flush(&buffer->io);
                                        hammer_io_done_interlock(&buffer->io);
                                        hammer_io_wait(&buffer->io);
                                }
                                hammer_rel_buffer(buffer, 0);
                        }
                }
                base_offset += HAMMER_BUFSIZE;
                bytes -= HAMMER_BUFSIZE;
        }
}

/*
 * Destroy all buffers covering the specified zoneX offset range.  This
 * is called when the related blockmap layer2 entry is freed or when
 * a direct write bypasses our buffer/buffer-cache subsystem.
 *
 * The buffers may be referenced by the caller itself.  Setting reclaim
 * will cause the buffer to be destroyed when its ref count reaches zero.
 *
 * Return 0 on success, EAGAIN if some buffers could not be destroyed due
 * to additional references held by other threads, or some other (typically
 * fatal) error.
 */
int
hammer_del_buffers(hammer_mount_t hmp, hammer_off_t base_offset,
                   hammer_off_t zone2_offset, int bytes,
                   int report_conflicts)
{
        hammer_buffer_t buffer;
        hammer_volume_t volume;
        int vol_no;
        int error;
        int ret_error;

        vol_no = HAMMER_VOL_DECODE(zone2_offset);
        volume = hammer_get_volume(hmp, vol_no, &ret_error);
        KKASSERT(ret_error == 0);

        while (bytes > 0) {
                buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root,
                                   base_offset);
                if (buffer) {
                        error = hammer_ref_buffer(buffer);
                        if (error == 0 && buffer->io.lock.refs != 1) {
                                error = EAGAIN;
                                hammer_rel_buffer(buffer, 0);
                        }
                        if (error == 0) {
                                KKASSERT(buffer->zone2_offset == zone2_offset);
                                hammer_io_clear_modify(&buffer->io, 1);
                                buffer->io.reclaim = 1;
                                buffer->io.waitdep = 1;
                                KKASSERT(buffer->io.volume == volume);
                                hammer_rel_buffer(buffer, 0);
                        }
                } else {
                        error = hammer_io_inval(volume, zone2_offset);
                }
                if (error) {
                        ret_error = error;
                        if (report_conflicts || (hammer_debug_general & 0x8000))
                                kprintf("hammer_del_buffers: unable to invalidate %016llx buffer=%p rep=%d\n", base_offset, buffer, report_conflicts);
                }
                base_offset += HAMMER_BUFSIZE;
                zone2_offset += HAMMER_BUFSIZE;
                bytes -= HAMMER_BUFSIZE;
        }
        hammer_rel_volume(volume, 0);
        return (ret_error);
}

static int
hammer_load_buffer(hammer_buffer_t buffer, int isnew)
{
        hammer_volume_t volume;
        int error;

        /*
         * Load the buffer's on-disk info
         */
        volume = buffer->io.volume;
        ++buffer->io.loading;
        hammer_lock_ex(&buffer->io.lock);

        if (hammer_debug_io & 0x0001) {
                kprintf("load_buffer %016llx %016llx isnew=%d od=%p\n",
                        buffer->zoneX_offset, buffer->zone2_offset, isnew,
                        buffer->ondisk);
        }

        if (buffer->ondisk == NULL) {
                if (isnew) {
                        error = hammer_io_new(volume->devvp, &buffer->io);
                } else {
                        error = hammer_io_read(volume->devvp, &buffer->io,
                                               volume->maxraw_off);
                }
                if (error == 0)
                        buffer->ondisk = (void *)buffer->io.bp->b_data;
        } else if (isnew) {
                error = hammer_io_new(volume->devvp, &buffer->io);
        } else {
                error = 0;
        }
        --buffer->io.loading;
        hammer_unlock(&buffer->io.lock);
        return (error);
}

/*
 * NOTE: Called from RB_SCAN, must return >= 0 for scan to continue.
 * This routine is only called during unmount.
 */
int
hammer_unload_buffer(hammer_buffer_t buffer, void *data __unused)
{
        /*
         * Clean up the persistent ref ioerror might have on the buffer
         * and acquire a ref (steal ioerror's if we can).
         */
        if (buffer->io.ioerror) {
                buffer->io.ioerror = 0;
        } else {
                if (buffer->io.lock.refs == 0)
                        ++hammer_count_refedbufs;
                hammer_ref(&buffer->io.lock);
        }

        /*
         * We must not flush a dirty buffer to disk on umount.  It should
         * have already been dealt with by the flusher, or we may be in
         * catastrophic failure.
         */
        hammer_io_clear_modify(&buffer->io, 1);
        hammer_flush_buffer_nodes(buffer);
        KKASSERT(buffer->io.lock.refs == 1);
        hammer_rel_buffer(buffer, 2);
        return(0);
}

/*
 * Reference a buffer that is either already referenced or via a specially
 * handled pointer (aka cursor->buffer).
 */
int
hammer_ref_buffer(hammer_buffer_t buffer)
{
        int error;

        if (buffer->io.lock.refs == 0)
                ++hammer_count_refedbufs;
        hammer_ref(&buffer->io.lock);

        /*
         * At this point a biodone() will not touch the buffer other than
         * incidental bits.  However, lose_list can be modified via
         * a biodone() interrupt.
         *
         * No longer loose
         */
        if (buffer->io.mod_list == &buffer->io.hmp->lose_list) {
                crit_enter();
                TAILQ_REMOVE(buffer->io.mod_list, &buffer->io, mod_entry);
                buffer->io.mod_list = NULL;
                crit_exit();
        }

        if (buffer->ondisk == NULL || buffer->io.loading) {
                error = hammer_load_buffer(buffer, 0);
                if (error) {
                        hammer_rel_buffer(buffer, 1);
                        /*
                         * NOTE: buffer pointer can become stale after
                         * the above release.
                         */
                }
        } else {
                error = 0;
        }
        return(error);
}

/*
 * Release a buffer.  We have to deal with several places where
 * another thread can ref the buffer.
 *
 * Only destroy the structure itself if the related buffer cache buffer
 * was disassociated from it.  This ties the management of the structure
 * to the buffer cache subsystem.  buffer->ondisk determines whether the
 * embedded io is referenced or not.
 */
void
hammer_rel_buffer(hammer_buffer_t buffer, int flush)
{
        hammer_volume_t volume;
        hammer_mount_t hmp;
        struct buf *bp = NULL;
        int freeme = 0;

        hmp = buffer->io.hmp;

        crit_enter();
        if (buffer->io.lock.refs == 1) {
                ++buffer->io.loading;   /* force interlock check */
                hammer_lock_ex(&buffer->io.lock);
                if (buffer->io.lock.refs == 1) {
                        bp = hammer_io_release(&buffer->io, flush);

                        if (buffer->io.lock.refs == 1)
                                --hammer_count_refedbufs;

                        if (buffer->io.bp == NULL &&
                            buffer->io.lock.refs == 1) {
                                /*
                                 * Final cleanup
                                 *
                                 * NOTE: It is impossible for any associated
                                 * B-Tree nodes to have refs if the buffer
                                 * has no additional refs.
                                 */
                                RB_REMOVE(hammer_buf_rb_tree,
                                          &buffer->io.hmp->rb_bufs_root,
                                          buffer);
                                volume = buffer->io.volume;
                                buffer->io.volume = NULL; /* sanity */
                                hammer_rel_volume(volume, 0);
                                hammer_io_clear_modlist(&buffer->io);
                                hammer_flush_buffer_nodes(buffer);
                                KKASSERT(TAILQ_EMPTY(&buffer->clist));
                                freeme = 1;
                        }
                }
                --buffer->io.loading;
                hammer_unlock(&buffer->io.lock);
        }
        hammer_unref(&buffer->io.lock);
        crit_exit();
        if (bp)
                brelse(bp);
        if (freeme) {
                --hammer_count_buffers;
                kfree(buffer, hmp->m_misc);
        }
}

/*
 * Access the filesystem buffer containing the specified hammer offset.
 * buf_offset is a conglomeration of the volume number and vol_buf_beg
 * relative buffer offset.  It must also have bit 55 set to be valid.
 * (see hammer_off_t in hammer_disk.h).
 *
 * Any prior buffer in *bufferp will be released and replaced by the
 * requested buffer.
 *
 * NOTE: The buffer is indexed via its zoneX_offset but we allow the
 * passed cached *bufferp to match against either zoneX or zone2.
 */
static __inline
void *
_hammer_bread(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
             int *errorp, struct hammer_buffer **bufferp)
{
        hammer_buffer_t buffer;
        int32_t xoff = (int32_t)buf_offset & HAMMER_BUFMASK;

        buf_offset &= ~HAMMER_BUFMASK64;
        KKASSERT((buf_offset & HAMMER_OFF_ZONE_MASK) != 0);

        buffer = *bufferp;
        if (buffer == NULL || (buffer->zone2_offset != buf_offset &&
                               buffer->zoneX_offset != buf_offset)) {
                if (buffer)
                        hammer_rel_buffer(buffer, 0);
                buffer = hammer_get_buffer(hmp, buf_offset, bytes, 0, errorp);
                *bufferp = buffer;
        } else {
                *errorp = 0;
        }

        /*
         * Return a pointer to the buffer data.
         */
        if (buffer == NULL)
                return(NULL);
        else
                return((char *)buffer->ondisk + xoff);
}

void *
hammer_bread(hammer_mount_t hmp, hammer_off_t buf_offset,
             int *errorp, struct hammer_buffer **bufferp)
{
        return(_hammer_bread(hmp, buf_offset, HAMMER_BUFSIZE, errorp, bufferp));
}

void *
hammer_bread_ext(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
                 int *errorp, struct hammer_buffer **bufferp)
{
        bytes = (bytes + HAMMER_BUFMASK) & ~HAMMER_BUFMASK;
        return(_hammer_bread(hmp, buf_offset, bytes, errorp, bufferp));
}
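
/*
 * Sketch of the *bufferp caching convention used by hammer_bread()
 * (illustrative only, not compiled): the caller seeds a NULL buffer
 * pointer, reuses it across adjacent reads (a mismatched buffer is
 * released automatically), and releases the final buffer itself.  The
 * function name is hypothetical.
 */
#if 0
static void
example_read_two(hammer_mount_t hmp, hammer_off_t off1, hammer_off_t off2)
{
        struct hammer_buffer *buffer = NULL;
        void *data;
        int error;

        data = hammer_bread(hmp, off1, &error, &buffer);
        /* ... use data ... */
        data = hammer_bread(hmp, off2, &error, &buffer);
        /* ... use data ... */
        if (buffer)
                hammer_rel_buffer(buffer, 0);
}
#endif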

/*
 * Access the filesystem buffer containing the specified hammer offset.
 * No disk read operation occurs.  The result buffer may contain garbage.
 *
 * Any prior buffer in *bufferp will be released and replaced by the
 * requested buffer.
 *
 * This function marks the buffer dirty but does not increment its
 * modify_refs count.
 */
static __inline
void *
_hammer_bnew(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
             int *errorp, struct hammer_buffer **bufferp)
{
        hammer_buffer_t buffer;
        int32_t xoff = (int32_t)buf_offset & HAMMER_BUFMASK;

        buf_offset &= ~HAMMER_BUFMASK64;

        buffer = *bufferp;
        if (buffer == NULL || (buffer->zone2_offset != buf_offset &&
                               buffer->zoneX_offset != buf_offset)) {
                if (buffer)
                        hammer_rel_buffer(buffer, 0);
                buffer = hammer_get_buffer(hmp, buf_offset, bytes, 1, errorp);
                *bufferp = buffer;
        } else {
                *errorp = 0;
        }

        /*
         * Return a pointer to the buffer data.
         */
        if (buffer == NULL)
                return(NULL);
        else
                return((char *)buffer->ondisk + xoff);
}

void *
hammer_bnew(hammer_mount_t hmp, hammer_off_t buf_offset,
             int *errorp, struct hammer_buffer **bufferp)
{
        return(_hammer_bnew(hmp, buf_offset, HAMMER_BUFSIZE, errorp, bufferp));
}

void *
hammer_bnew_ext(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
                int *errorp, struct hammer_buffer **bufferp)
{
        bytes = (bytes + HAMMER_BUFMASK) & ~HAMMER_BUFMASK;
        return(_hammer_bnew(hmp, buf_offset, bytes, errorp, bufferp));
}

/************************************************************************
 *                              NODES                                   *
 ************************************************************************
 *
 * Manage B-Tree nodes.  B-Tree nodes represent the primary indexing
 * method used by the HAMMER filesystem.
 *
 * Unlike other HAMMER structures, a hammer_node can be PASSIVELY
 * associated with its buffer, and will only reference the buffer while
 * the node itself is referenced.
 *
 * A hammer_node can also be passively associated with other HAMMER
 * structures, such as inodes, while retaining 0 references.  These
 * associations can be cleared backwards using a pointer-to-pointer in
 * the hammer_node.
 *
 * This allows the HAMMER implementation to cache hammer_nodes long-term
 * and short-cut a great deal of the infrastructure's complexity.  In
 * most cases a cached node can be reacquired without having to dip into
 * either the buffer or cluster management code.
 *
 * The caller must pass a referenced cluster on call and will retain
 * ownership of the reference on return.  The node will acquire its own
 * additional references, if necessary.
 */
hammer_node_t
hammer_get_node(hammer_transaction_t trans, hammer_off_t node_offset,
                int isnew, int *errorp)
{
        hammer_mount_t hmp = trans->hmp;
        hammer_node_t node;

        KKASSERT((node_offset & HAMMER_OFF_ZONE_MASK) == HAMMER_ZONE_BTREE);

        /*
         * Locate the structure, allocating one if necessary.
         */
again:
        node = RB_LOOKUP(hammer_nod_rb_tree, &hmp->rb_nods_root, node_offset);
        if (node == NULL) {
                ++hammer_count_nodes;
                node = kmalloc(sizeof(*node), hmp->m_misc, M_WAITOK|M_ZERO|M_USE_RESERVE);
                node->node_offset = node_offset;
                node->hmp = hmp;
                TAILQ_INIT(&node->cursor_list);
                TAILQ_INIT(&node->cache_list);
                if (RB_INSERT(hammer_nod_rb_tree, &hmp->rb_nods_root, node)) {
                        --hammer_count_nodes;
                        kfree(node, hmp->m_misc);
                        goto again;
                }
        }
        hammer_ref(&node->lock);
        if (node->ondisk) {
                *errorp = 0;
        } else {
                *errorp = hammer_load_node(trans, node, isnew);
                trans->flags |= HAMMER_TRANSF_DIDIO;
        }
        if (*errorp) {
                hammer_rel_node(node);
                node = NULL;
        }
        return(node);
}
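
/*
 * Sketch of node access (illustrative only, not compiled):
 * hammer_get_node() returns a referenced node whose ->ondisk B-Tree
 * data stays mapped until hammer_rel_node().  The node_offset must be
 * HAMMER_ZONE_BTREE encoded, as asserted above.  The function name is
 * hypothetical.
 */
#if 0
static void
example_touch_node(hammer_transaction_t trans, hammer_off_t node_offset)
{
        hammer_node_t node;
        int error;

        node = hammer_get_node(trans, node_offset, 0, &error);
        if (node) {
                /* inspect node->ondisk here */
                hammer_rel_node(node);
        }
}
#endif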

/*
 * Reference an already-referenced node.
 */
void
hammer_ref_node(hammer_node_t node)
{
        KKASSERT(node->lock.refs > 0 && node->ondisk != NULL);
        hammer_ref(&node->lock);
}

/*
 * Load a node's on-disk data reference.
 */
static int
hammer_load_node(hammer_transaction_t trans, hammer_node_t node, int isnew)
{
        hammer_buffer_t buffer;
        hammer_off_t buf_offset;
        int error;

        error = 0;
        ++node->loading;
        hammer_lock_ex(&node->lock);
        if (node->ondisk == NULL) {
                /*
                 * This is a little confusing but the gist is that
                 * node->buffer determines whether the node is on
                 * the buffer's clist and node->ondisk determines
                 * whether the buffer is referenced.
                 *
                 * We could be racing a buffer release, in which case
                 * node->buffer may become NULL while we are blocked
                 * referencing the buffer.
                 */
                if ((buffer = node->buffer) != NULL) {
                        error = hammer_ref_buffer(buffer);
                        if (error == 0 && node->buffer == NULL) {
                                TAILQ_INSERT_TAIL(&buffer->clist,
                                                  node, entry);
                                node->buffer = buffer;
                        }
                } else {
                        buf_offset = node->node_offset & ~HAMMER_BUFMASK64;
                        buffer = hammer_get_buffer(node->hmp, buf_offset,
                                                   HAMMER_BUFSIZE, 0, &error);
                        if (buffer) {
                                KKASSERT(error == 0);
                                TAILQ_INSERT_TAIL(&buffer->clist,
                                                  node, entry);
                                node->buffer = buffer;
                        }
                }
                if (error)
                        goto failed;
                node->ondisk = (void *)((char *)buffer->ondisk +
                                        (node->node_offset & HAMMER_BUFMASK));

                /*
                 * Check CRC.  NOTE: Neither flag is set and the CRC is not
                 * generated on new B-Tree nodes.
                 */
                if (isnew == 0 &&
                    (node->flags & HAMMER_NODE_CRCANY) == 0) {
                        if (hammer_crc_test_btree(node->ondisk) == 0) {
                                if (hammer_debug_debug & 0x0002)
                                        Debugger("CRC FAILED: B-TREE NODE");
                                node->flags |= HAMMER_NODE_CRCBAD;
                        } else {
                                node->flags |= HAMMER_NODE_CRCGOOD;
                        }
                }
        }
        if (node->flags & HAMMER_NODE_CRCBAD) {
                if (trans->flags & HAMMER_TRANSF_CRCDOM)
                        error = EDOM;
                else
                        error = EIO;
        }
failed:
        --node->loading;
        hammer_unlock(&node->lock);
        return (error);
}

/*
 * Safely reference a node, interlock against flushes via the IO subsystem.
 */
hammer_node_t
hammer_ref_node_safe(hammer_transaction_t trans, hammer_node_cache_t cache,
                     int *errorp)
{
        hammer_node_t node;

        node = cache->node;
        if (node != NULL) {
                hammer_ref(&node->lock);
                if (node->ondisk) {
                        if (node->flags & HAMMER_NODE_CRCBAD) {
                                if (trans->flags & HAMMER_TRANSF_CRCDOM)
                                        *errorp = EDOM;
                                else
                                        *errorp = EIO;
                        } else {
                                *errorp = 0;
                        }
                } else {
                        *errorp = hammer_load_node(trans, node, 0);
                }
                if (*errorp) {
                        hammer_rel_node(node);
                        node = NULL;
                }
        } else {
                *errorp = ENOENT;
        }
        return(node);
}

/*
 * Release a hammer_node.  On the last release the node dereferences
 * its underlying buffer and may or may not be destroyed.
 */
void
hammer_rel_node(hammer_node_t node)
{
        hammer_buffer_t buffer;

        /*
         * If this isn't the last ref just decrement the ref count and
         * return.
         */
        if (node->lock.refs > 1) {
                hammer_unref(&node->lock);
                return;
        }

        /*
         * If there is no ondisk info or no buffer the node failed to load,
         * remove the last reference and destroy the node.
         */
        if (node->ondisk == NULL) {
                hammer_unref(&node->lock);
                hammer_flush_node(node);
                /* node is stale now */
                return;
        }

        /*
         * Do not disassociate the node from the buffer if it represents
         * a modified B-Tree node that still needs its crc to be generated.
         */
        if (node->flags & HAMMER_NODE_NEEDSCRC)
                return;

        /*
         * Do final cleanups and then either destroy the node or leave it
         * passively cached.  The buffer reference is removed regardless.
         */
        buffer = node->buffer;
        node->ondisk = NULL;

        if ((node->flags & HAMMER_NODE_FLUSH) == 0) {
                hammer_unref(&node->lock);
                hammer_rel_buffer(buffer, 0);
                return;
        }

        /*
         * Destroy the node.
         */
        hammer_unref(&node->lock);
        hammer_flush_node(node);
        /* node is stale */
        hammer_rel_buffer(buffer, 0);
}

/*
 * Free space on-media associated with a B-Tree node.
 */
void
hammer_delete_node(hammer_transaction_t trans, hammer_node_t node)
{
        KKASSERT((node->flags & HAMMER_NODE_DELETED) == 0);
        node->flags |= HAMMER_NODE_DELETED;
        hammer_blockmap_free(trans, node->node_offset, sizeof(*node->ondisk));
}

/*
 * Passively cache a referenced hammer_node.  The caller may release
 * the node on return.
 */
void
hammer_cache_node(hammer_node_cache_t cache, hammer_node_t node)
{
        /*
         * If the node doesn't exist, or is being deleted, don't cache it!
         *
         * The node can only ever be NULL in the I/O failure path.
         */
        if (node == NULL || (node->flags & HAMMER_NODE_DELETED))
                return;
        if (cache->node == node)
                return;
        while (cache->node)
                hammer_uncache_node(cache);
        if (node->flags & HAMMER_NODE_DELETED)
                return;
        cache->node = node;
        TAILQ_INSERT_TAIL(&node->cache_list, cache, entry);
}

void
hammer_uncache_node(hammer_node_cache_t cache)
{
        hammer_node_t node;

        if ((node = cache->node) != NULL) {
                TAILQ_REMOVE(&node->cache_list, cache, entry);
                cache->node = NULL;
                if (TAILQ_EMPTY(&node->cache_list))
                        hammer_flush_node(node);
        }
}
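
/*
 * Sketch of the passive cache pattern (illustrative only, not
 * compiled): a long-lived structure embeds a hammer_node_cache and
 * points it at a node it expects to revisit; the association holds no
 * reference and is torn down either here or by hammer_flush_node().
 * The function name is hypothetical.
 */
#if 0
static void
example_cache_and_drop(hammer_node_cache_t cache, hammer_node_t node)
{
        hammer_cache_node(cache, node); /* remember without a ref */
        hammer_rel_node(node);          /* caller may now release */
        /* ... later ... */
        hammer_uncache_node(cache);     /* forget the association */
}
#endif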

/*
 * Remove a node's cache references and destroy the node if it has no
 * other references or backing store.
 */
void
hammer_flush_node(hammer_node_t node)
{
        hammer_node_cache_t cache;
        hammer_buffer_t buffer;
        hammer_mount_t hmp = node->hmp;

        while ((cache = TAILQ_FIRST(&node->cache_list)) != NULL) {
                TAILQ_REMOVE(&node->cache_list, cache, entry);
                cache->node = NULL;
        }
        if (node->lock.refs == 0 && node->ondisk == NULL) {
                KKASSERT((node->flags & HAMMER_NODE_NEEDSCRC) == 0);
                RB_REMOVE(hammer_nod_rb_tree, &node->hmp->rb_nods_root, node);
                if ((buffer = node->buffer) != NULL) {
                        node->buffer = NULL;
                        TAILQ_REMOVE(&buffer->clist, node, entry);
                        /* buffer is unreferenced because ondisk is NULL */
                }
                --hammer_count_nodes;
                kfree(node, hmp->m_misc);
        }
}

/*
 * Flush passively cached B-Tree nodes associated with this buffer.
 * This is only called when the buffer is about to be destroyed, so
 * none of the nodes should have any references.  The buffer is locked.
 *
 * We may be interlocked with the buffer.
 */
void
hammer_flush_buffer_nodes(hammer_buffer_t buffer)
{
        hammer_node_t node;

        while ((node = TAILQ_FIRST(&buffer->clist)) != NULL) {
                KKASSERT(node->ondisk == NULL);
                KKASSERT((node->flags & HAMMER_NODE_NEEDSCRC) == 0);

                if (node->lock.refs == 0) {
                        hammer_ref(&node->lock);
                        node->flags |= HAMMER_NODE_FLUSH;
                        hammer_rel_node(node);
                } else {
                        KKASSERT(node->loading != 0);
                        KKASSERT(node->buffer != NULL);
                        buffer = node->buffer;
                        node->buffer = NULL;
                        TAILQ_REMOVE(&buffer->clist, node, entry);
                        /* buffer is unreferenced because ondisk is NULL */
                }
        }
}


/************************************************************************
 *                              ALLOCATORS                              *
 ************************************************************************/

/*
 * Allocate a B-Tree node.
 */
hammer_node_t
hammer_alloc_btree(hammer_transaction_t trans, int *errorp)
{
        hammer_buffer_t buffer = NULL;
        hammer_node_t node = NULL;
        hammer_off_t node_offset;

        node_offset = hammer_blockmap_alloc(trans, HAMMER_ZONE_BTREE_INDEX,
                                            sizeof(struct hammer_node_ondisk),
                                            errorp);
        if (*errorp == 0) {
                node = hammer_get_node(trans, node_offset, 1, errorp);
                hammer_modify_node_noundo(trans, node);
                bzero(node->ondisk, sizeof(*node->ondisk));
                hammer_modify_node_done(node);
        }
        if (buffer)
                hammer_rel_buffer(buffer, 0);
        return(node);
}

/*
 * Allocate data.  If the address of a data buffer is supplied then
 * any prior non-NULL *data_bufferp will be released and *data_bufferp
 * will be set to the related buffer.  The caller must release it when
 * finally done.  The initial *data_bufferp should be set to NULL by
 * the caller.
 *
 * The caller is responsible for making hammer_modify*() calls on the
 * *data_bufferp.
 */
void *
hammer_alloc_data(hammer_transaction_t trans, int32_t data_len,
                  u_int16_t rec_type, hammer_off_t *data_offsetp,
                  struct hammer_buffer **data_bufferp, int *errorp)
{
        void *data;
        int zone;

        /*
         * Allocate data
         */
        if (data_len) {
                switch(rec_type) {
                case HAMMER_RECTYPE_INODE:
                case HAMMER_RECTYPE_DIRENTRY:
                case HAMMER_RECTYPE_EXT:
                case HAMMER_RECTYPE_FIX:
                case HAMMER_RECTYPE_PFS:
                        zone = HAMMER_ZONE_META_INDEX;
                        break;
                case HAMMER_RECTYPE_DATA:
                case HAMMER_RECTYPE_DB:
                        if (data_len <= HAMMER_BUFSIZE / 2) {
                                zone = HAMMER_ZONE_SMALL_DATA_INDEX;
                        } else {
                                data_len = (data_len + HAMMER_BUFMASK) &
                                           ~HAMMER_BUFMASK;
                                zone = HAMMER_ZONE_LARGE_DATA_INDEX;
                        }
                        break;
                default:
                        panic("hammer_alloc_data: rec_type %04x unknown",
                              rec_type);
                        zone = 0;       /* NOT REACHED */
                        break;
                }
                *data_offsetp = hammer_blockmap_alloc(trans, zone,
                                                      data_len, errorp);
        } else {
                *data_offsetp = 0;
        }
        if (*errorp == 0 && data_bufferp) {
                if (data_len) {
                        data = hammer_bread_ext(trans->hmp, *data_offsetp,
                                                data_len, errorp, data_bufferp);
                } else {
                        data = NULL;
                }
        } else {
                data = NULL;
        }
        return(data);
}
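
/*
 * Sketch of the caller-side modify protocol noted above (illustrative
 * only, not compiled): data returned by hammer_alloc_data() is assumed
 * to be bracketed by hammer_modify_buffer()/hammer_modify_buffer_done()
 * before being written, following the convention used elsewhere in
 * HAMMER.  The exact modify arguments and the function name below are
 * assumptions.
 */
#if 0
static void
example_write_record_data(hammer_transaction_t trans, void *src, int len)
{
        struct hammer_buffer *data_buffer = NULL;
        hammer_off_t data_offset;
        void *data;
        int error;

        data = hammer_alloc_data(trans, len, HAMMER_RECTYPE_DATA,
                                 &data_offset, &data_buffer, &error);
        if (data) {
                /* NULL/0 is assumed to mean "modify the whole buffer" */
                hammer_modify_buffer(trans, data_buffer, NULL, 0);
                bcopy(src, data, len);
                hammer_modify_buffer_done(data_buffer);
        }
        if (data_buffer)
                hammer_rel_buffer(data_buffer, 0);
}
#endif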

/*
 * Sync dirty buffers to the media and clean-up any loose ends.
 *
 * These functions do not start the flusher going, they simply
 * queue everything up to the flusher.
 */
static int hammer_sync_scan1(struct mount *mp, struct vnode *vp, void *data);
static int hammer_sync_scan2(struct mount *mp, struct vnode *vp, void *data);

int
hammer_queue_inodes_flusher(hammer_mount_t hmp, int waitfor)
{
        struct hammer_sync_info info;

        info.error = 0;
        info.waitfor = waitfor;
        if (waitfor == MNT_WAIT) {
                vmntvnodescan(hmp->mp, VMSC_GETVP|VMSC_ONEPASS,
                              hammer_sync_scan1, hammer_sync_scan2, &info);
        } else {
                vmntvnodescan(hmp->mp, VMSC_GETVP|VMSC_ONEPASS|VMSC_NOWAIT,
                              hammer_sync_scan1, hammer_sync_scan2, &info);
        }
        return(info.error);
}

/*
 * Filesystem sync.  If doing a synchronous sync make a second pass on
 * the vnodes in case any were already flushing during the first pass,
 * and activate the flusher twice (the second time brings the UNDO FIFO's
 * start position up to the end position after the first call).
 */
int
hammer_sync_hmp(hammer_mount_t hmp, int waitfor)
{
        struct hammer_sync_info info;

        info.error = 0;
        info.waitfor = MNT_NOWAIT;
        vmntvnodescan(hmp->mp, VMSC_GETVP|VMSC_NOWAIT,
                      hammer_sync_scan1, hammer_sync_scan2, &info);
        if (info.error == 0 && waitfor == MNT_WAIT) {
                info.waitfor = waitfor;
                vmntvnodescan(hmp->mp, VMSC_GETVP,
                              hammer_sync_scan1, hammer_sync_scan2, &info);
        }
        if (waitfor == MNT_WAIT) {
                hammer_flusher_sync(hmp);
                hammer_flusher_sync(hmp);
        } else {
                hammer_flusher_async(hmp, NULL);
                hammer_flusher_async(hmp, NULL);
        }
        return(info.error);
}

static int
hammer_sync_scan1(struct mount *mp, struct vnode *vp, void *data)
{
        struct hammer_inode *ip;

        ip = VTOI(vp);
        if (vp->v_type == VNON || ip == NULL ||
            ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
             RB_EMPTY(&vp->v_rbdirty_tree))) {
                return(-1);
        }
        return(0);
}

static int
hammer_sync_scan2(struct mount *mp, struct vnode *vp, void *data)
{
        struct hammer_sync_info *info = data;
        struct hammer_inode *ip;
        int error;

        ip = VTOI(vp);
        if (vp->v_type == VNON || vp->v_type == VBAD ||
            ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
             RB_EMPTY(&vp->v_rbdirty_tree))) {
                return(0);
        }
        error = VOP_FSYNC(vp, MNT_NOWAIT);
        if (error)
                info->error = error;
        return(0);
}