/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_ondisk.c,v 1.76 2008/08/29 20:19:08 dillon Exp $
 */
/*
 * Manage HAMMER's on-disk structures.  These routines are primarily
 * responsible for interfacing with the kernel's I/O subsystem and for
 * managing in-memory structures.
 */

#include "hammer.h"
#include <sys/fcntl.h>
#include <sys/nlookup.h>
#include <sys/buf.h>
#include <sys/buf2.h>

static void hammer_free_volume(hammer_volume_t volume);
static int hammer_load_volume(hammer_volume_t volume);
static int hammer_load_buffer(hammer_buffer_t buffer, int isnew);
static int hammer_load_node(hammer_transaction_t trans,
                                hammer_node_t node, int isnew);

static int
hammer_vol_rb_compare(hammer_volume_t vol1, hammer_volume_t vol2)
{
        if (vol1->vol_no < vol2->vol_no)
                return(-1);
        if (vol1->vol_no > vol2->vol_no)
                return(1);
        return(0);
}

/*
 * hammer_buffer structures are indexed via their zoneX_offset, not
 * their zone2_offset.
 */
static int
hammer_buf_rb_compare(hammer_buffer_t buf1, hammer_buffer_t buf2)
{
        if (buf1->zoneX_offset < buf2->zoneX_offset)
                return(-1);
        if (buf1->zoneX_offset > buf2->zoneX_offset)
                return(1);
        return(0);
}

static int
hammer_nod_rb_compare(hammer_node_t node1, hammer_node_t node2)
{
        if (node1->node_offset < node2->node_offset)
                return(-1);
        if (node1->node_offset > node2->node_offset)
                return(1);
        return(0);
}

RB_GENERATE2(hammer_vol_rb_tree, hammer_volume, rb_node,
             hammer_vol_rb_compare, int32_t, vol_no);
RB_GENERATE2(hammer_buf_rb_tree, hammer_buffer, rb_node,
             hammer_buf_rb_compare, hammer_off_t, zoneX_offset);
RB_GENERATE2(hammer_nod_rb_tree, hammer_node, rb_node,
             hammer_nod_rb_compare, hammer_off_t, node_offset);

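/*
 * Illustrative sketch (not compiled): RB_GENERATE2 emits keyed lookup
 * functions, so each tree above can be searched directly by its key
 * field (vol_no, zoneX_offset or node_offset) via RB_LOOKUP.  The
 * function name below is hypothetical.
 */
#if 0
static hammer_volume_t
example_lookup_volume(hammer_mount_t hmp, int32_t vol_no)
{
        /* keyed lookup generated from hammer_vol_rb_compare/vol_no */
        return (RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, vol_no));
}
#endif
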
/************************************************************************
 *                              VOLUMES                                 *
 ************************************************************************
 *
 * Load a HAMMER volume by name.  Returns 0 on success or a positive error
 * code on failure.  Volumes must be loaded at mount time; get_volume()
 * will not load a new volume.
 *
 * Calls made to hammer_load_volume() are single-threaded.
 */
int
hammer_install_volume(struct hammer_mount *hmp, const char *volname,
                      struct vnode *devvp)
{
        struct mount *mp;
        hammer_volume_t volume;
        struct hammer_volume_ondisk *ondisk;
        struct nlookupdata nd;
        struct buf *bp = NULL;
        int error;
        int ronly;
        int setmp = 0;

        mp = hmp->mp;
        ronly = ((mp->mnt_flag & MNT_RDONLY) ? 1 : 0);

        /*
         * Allocate a volume structure
         */
        ++hammer_count_volumes;
        volume = kmalloc(sizeof(*volume), hmp->m_misc, M_WAITOK|M_ZERO);
        volume->vol_name = kstrdup(volname, hmp->m_misc);
        volume->io.hmp = hmp;   /* bootstrap */
        hammer_io_init(&volume->io, volume, HAMMER_STRUCTURE_VOLUME);
        volume->io.offset = 0LL;
        volume->io.bytes = HAMMER_BUFSIZE;

        /*
         * Get the device vnode
         */
        if (devvp == NULL) {
                error = nlookup_init(&nd, volume->vol_name, UIO_SYSSPACE, NLC_FOLLOW);
                if (error == 0)
                        error = nlookup(&nd);
                if (error == 0)
                        error = cache_vref(&nd.nl_nch, nd.nl_cred, &volume->devvp);
                nlookup_done(&nd);
        } else {
                error = 0;
                volume->devvp = devvp;
        }

        if (error == 0) {
                if (vn_isdisk(volume->devvp, &error)) {
                        error = vfs_mountedon(volume->devvp);
                }
        }
        if (error == 0 && vcount(volume->devvp) > 0)
                error = EBUSY;
        if (error == 0) {
                vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
                error = vinvalbuf(volume->devvp, V_SAVE, 0, 0);
                if (error == 0) {
                        error = VOP_OPEN(volume->devvp,
                                         (ronly ? FREAD : FREAD|FWRITE),
                                         FSCRED, NULL);
                }
                vn_unlock(volume->devvp);
        }
        if (error) {
                hammer_free_volume(volume);
                return(error);
        }
        volume->devvp->v_rdev->si_mountpoint = mp;
        setmp = 1;

        /*
         * Extract the volume number from the volume header and do various
         * sanity checks.
         */
        error = bread(volume->devvp, 0LL, HAMMER_BUFSIZE, &bp);
        if (error)
                goto late_failure;
        ondisk = (void *)bp->b_data;
        if (ondisk->vol_signature != HAMMER_FSBUF_VOLUME) {
                kprintf("hammer_mount: volume %s has an invalid header\n",
                        volume->vol_name);
                error = EFTYPE;
                goto late_failure;
        }
        volume->vol_no = ondisk->vol_no;
        volume->buffer_base = ondisk->vol_buf_beg;
        volume->vol_flags = ondisk->vol_flags;
        volume->nblocks = ondisk->vol_nblocks;
        volume->maxbuf_off = HAMMER_ENCODE_RAW_BUFFER(volume->vol_no,
                                    ondisk->vol_buf_end - ondisk->vol_buf_beg);
        volume->maxraw_off = ondisk->vol_buf_end;

        if (RB_EMPTY(&hmp->rb_vols_root)) {
                hmp->fsid = ondisk->vol_fsid;
        } else if (bcmp(&hmp->fsid, &ondisk->vol_fsid, sizeof(uuid_t))) {
                kprintf("hammer_mount: volume %s's fsid does not match "
                        "other volumes\n", volume->vol_name);
                error = EFTYPE;
                goto late_failure;
        }

        /*
         * Insert the volume structure into the red-black tree.
         */
        if (RB_INSERT(hammer_vol_rb_tree, &hmp->rb_vols_root, volume)) {
                kprintf("hammer_mount: volume %s has a duplicate vol_no %d\n",
                        volume->vol_name, volume->vol_no);
                error = EEXIST;
        }

        /*
         * Set the root volume.  HAMMER special-cases the rootvol structure.
         * We do not hold a ref because this would prevent related I/O
         * from being flushed.
         */
        if (error == 0 && ondisk->vol_rootvol == ondisk->vol_no) {
                hmp->rootvol = volume;
                hmp->nvolumes = ondisk->vol_count;
                if (bp) {
                        brelse(bp);
                        bp = NULL;
                }
                hmp->mp->mnt_stat.f_blocks += ondisk->vol0_stat_bigblocks *
                        (HAMMER_LARGEBLOCK_SIZE / HAMMER_BUFSIZE);
                hmp->mp->mnt_vstat.f_blocks += ondisk->vol0_stat_bigblocks *
                        (HAMMER_LARGEBLOCK_SIZE / HAMMER_BUFSIZE);
        }
late_failure:
        if (bp)
                brelse(bp);
        if (error) {
                /*vinvalbuf(volume->devvp, V_SAVE, 0, 0);*/
                if (setmp)
                        volume->devvp->v_rdev->si_mountpoint = NULL;
                VOP_CLOSE(volume->devvp, ronly ? FREAD : FREAD|FWRITE);
                hammer_free_volume(volume);
        }
        return (error);
}

/*
 * This is called for each volume when updating the mount point from
 * read-write to read-only or vice-versa.
 */
int
hammer_adjust_volume_mode(hammer_volume_t volume, void *data __unused)
{
        if (volume->devvp) {
                vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
                if (volume->io.hmp->ronly) {
                        /* do not call vinvalbuf */
                        VOP_OPEN(volume->devvp, FREAD, FSCRED, NULL);
                        VOP_CLOSE(volume->devvp, FREAD|FWRITE);
                } else {
                        /* do not call vinvalbuf */
                        VOP_OPEN(volume->devvp, FREAD|FWRITE, FSCRED, NULL);
                        VOP_CLOSE(volume->devvp, FREAD);
                }
                vn_unlock(volume->devvp);
        }
        return(0);
}

/*
 * Unload and free a HAMMER volume.  Must return >= 0 to continue the
 * scan, so this routine cannot return -1 on failure.
 */
int
hammer_unload_volume(hammer_volume_t volume, void *data __unused)
{
        hammer_mount_t hmp = volume->io.hmp;
        int ronly = ((hmp->mp->mnt_flag & MNT_RDONLY) ? 1 : 0);
        struct buf *bp;

        /*
         * Clean up the root volume pointer, which is held unlocked in hmp.
         */
        if (hmp->rootvol == volume)
                hmp->rootvol = NULL;

        /*
         * We must not flush a dirty buffer to disk on umount.  It should
         * have already been dealt with by the flusher, or we may be in
         * catastrophic failure.
         */
        hammer_io_clear_modify(&volume->io, 1);
        volume->io.waitdep = 1;
        bp = hammer_io_release(&volume->io, 1);

        /*
         * Clean up the persistent ref ioerror might have on the volume
         */
        if (volume->io.ioerror) {
                volume->io.ioerror = 0;
                hammer_unref(&volume->io.lock);
        }

        /*
         * There should be no references on the volume, no clusters, and
         * no super-clusters.
         */
        KKASSERT(volume->io.lock.refs == 0);
        if (bp)
                brelse(bp);

        volume->ondisk = NULL;
        if (volume->devvp) {
                if (volume->devvp->v_rdev &&
                    volume->devvp->v_rdev->si_mountpoint == hmp->mp
                ) {
                        volume->devvp->v_rdev->si_mountpoint = NULL;
                }
                if (ronly) {
                        /*
                         * Make sure we don't sync anything to disk if we
                         * are in read-only mode (1) or critically-errored
                         * (2).  Note that there may be dirty buffers in
                         * normal read-only mode from crash recovery.
                         */
                        vinvalbuf(volume->devvp, 0, 0, 0);
                        VOP_CLOSE(volume->devvp, FREAD);
                } else {
                        /*
                         * Normal termination, save any dirty buffers
                         * (XXX there really shouldn't be any).
                         */
                        vinvalbuf(volume->devvp, V_SAVE, 0, 0);
                        VOP_CLOSE(volume->devvp, FREAD|FWRITE);
                }
        }

        /*
         * Destroy the structure
         */
        RB_REMOVE(hammer_vol_rb_tree, &hmp->rb_vols_root, volume);
        hammer_free_volume(volume);
        return(0);
}

static
void
hammer_free_volume(hammer_volume_t volume)
{
        hammer_mount_t hmp = volume->io.hmp;

        if (volume->vol_name) {
                kfree(volume->vol_name, hmp->m_misc);
                volume->vol_name = NULL;
        }
        if (volume->devvp) {
                vrele(volume->devvp);
                volume->devvp = NULL;
        }
        --hammer_count_volumes;
        kfree(volume, hmp->m_misc);
}

/*
 * Get a HAMMER volume.  The volume must already exist.
 */
hammer_volume_t
hammer_get_volume(struct hammer_mount *hmp, int32_t vol_no, int *errorp)
{
        struct hammer_volume *volume;

        /*
         * Locate the volume structure
         */
        volume = RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, vol_no);
        if (volume == NULL) {
                *errorp = ENOENT;
                return(NULL);
        }
        hammer_ref(&volume->io.lock);

        /*
         * Deal with on-disk info
         */
        if (volume->ondisk == NULL || volume->io.loading) {
                *errorp = hammer_load_volume(volume);
                if (*errorp) {
                        hammer_rel_volume(volume, 1);
                        volume = NULL;
                }
        } else {
                *errorp = 0;
        }
        return(volume);
}

int
hammer_ref_volume(hammer_volume_t volume)
{
        int error;

        hammer_ref(&volume->io.lock);

        /*
         * Deal with on-disk info
         */
        if (volume->ondisk == NULL || volume->io.loading) {
                error = hammer_load_volume(volume);
                if (error)
                        hammer_rel_volume(volume, 1);
        } else {
                error = 0;
        }
        return (error);
}

hammer_volume_t
hammer_get_root_volume(struct hammer_mount *hmp, int *errorp)
{
        hammer_volume_t volume;

        volume = hmp->rootvol;
        KKASSERT(volume != NULL);
        hammer_ref(&volume->io.lock);

        /*
         * Deal with on-disk info
         */
        if (volume->ondisk == NULL || volume->io.loading) {
                *errorp = hammer_load_volume(volume);
                if (*errorp) {
                        hammer_rel_volume(volume, 1);
                        volume = NULL;
                }
        } else {
                *errorp = 0;
        }
        return (volume);
}

/*
 * Load a volume's on-disk information.  The volume must be referenced and
 * not locked.  We temporarily acquire an exclusive lock to interlock
 * against releases or multiple gets.
 */
static int
hammer_load_volume(hammer_volume_t volume)
{
        int error;

        ++volume->io.loading;
        hammer_lock_ex(&volume->io.lock);

        if (volume->ondisk == NULL) {
                error = hammer_io_read(volume->devvp, &volume->io,
                                       volume->maxraw_off);
                if (error == 0)
                        volume->ondisk = (void *)volume->io.bp->b_data;
        } else {
                error = 0;
        }
        --volume->io.loading;
        hammer_unlock(&volume->io.lock);
        return(error);
}

/*
 * Release a volume.  Call hammer_io_release on the last reference.  We have
 * to acquire an exclusive lock to interlock against volume->ondisk tests
 * in hammer_load_volume(), and hammer_io_release() also expects an exclusive
 * lock to be held.
 *
 * Volumes are not unloaded from memory during normal operation.
 */
void
hammer_rel_volume(hammer_volume_t volume, int flush)
{
        struct buf *bp = NULL;

        crit_enter();
        if (volume->io.lock.refs == 1) {
                ++volume->io.loading;
                hammer_lock_ex(&volume->io.lock);
                if (volume->io.lock.refs == 1) {
                        volume->ondisk = NULL;
                        bp = hammer_io_release(&volume->io, flush);
                }
                --volume->io.loading;
                hammer_unlock(&volume->io.lock);
        }
        hammer_unref(&volume->io.lock);
        if (bp)
                brelse(bp);
        crit_exit();
}

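/*
 * Illustrative sketch (not compiled): the usual get/release pairing for
 * a volume.  hammer_get_volume() returns NULL and sets *errorp on
 * failure; every successful get must be balanced by hammer_rel_volume().
 * The function name below is hypothetical.
 */
#if 0
static void
example_volume_access(hammer_mount_t hmp, int32_t vol_no)
{
        hammer_volume_t volume;
        int error;

        volume = hammer_get_volume(hmp, vol_no, &error);
        if (volume) {
                /* ... volume->ondisk is valid here ... */
                hammer_rel_volume(volume, 0);
        }
}
#endif
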
int
hammer_mountcheck_volumes(struct hammer_mount *hmp)
{
        hammer_volume_t vol;
        int i;

        for (i = 0; i < hmp->nvolumes; ++i) {
                vol = RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, i);
                if (vol == NULL)
                        return(EINVAL);
        }
        return(0);
}

/************************************************************************
 *                              BUFFERS                                 *
 ************************************************************************
 *
 * Manage buffers.  Currently all blockmap-backed zones are direct-mapped
 * to zone-2 buffer offsets, without a translation stage.  However, the
 * hammer_buffer structure is indexed by its zoneX_offset, not its
 * zone2_offset.
 *
 * The proper zone must be maintained throughout the code-base all the way
 * through to the big-block allocator, or routines like hammer_del_buffers()
 * will not be able to locate all potentially conflicting buffers.
 */
hammer_buffer_t
hammer_get_buffer(hammer_mount_t hmp, hammer_off_t buf_offset,
                  int bytes, int isnew, int *errorp)
{
        hammer_buffer_t buffer;
        hammer_volume_t volume;
        hammer_off_t    zone2_offset;
        hammer_io_type_t iotype;
        int vol_no;
        int zone;

        buf_offset &= ~HAMMER_BUFMASK64;
again:
        /*
         * Shortcut if the buffer is already cached
         */
        buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root, buf_offset);
        if (buffer) {
                if (buffer->io.lock.refs == 0)
                        ++hammer_count_refedbufs;
                hammer_ref(&buffer->io.lock);

                /*
                 * Once refed the ondisk field will not be cleared by
                 * any other action.
                 */
                if (buffer->ondisk && buffer->io.loading == 0) {
                        *errorp = 0;
                        return(buffer);
                }

                /*
                 * The buffer is no longer loose if it has a ref, and
                 * cannot become loose once it gains a ref.  Loose
                 * buffers will never be in a modified state.  This should
                 * only occur on the 0->1 transition of refs.
                 *
                 * lose_list can be modified via a biodone() interrupt.
                 */
                if (buffer->io.mod_list == &hmp->lose_list) {
                        crit_enter();   /* biodone race against list */
                        TAILQ_REMOVE(buffer->io.mod_list, &buffer->io,
                                     mod_entry);
                        crit_exit();
                        buffer->io.mod_list = NULL;
                        KKASSERT(buffer->io.modified == 0);
                }
                goto found;
        }

        /*
         * What is the buffer class?
         */
        zone = HAMMER_ZONE_DECODE(buf_offset);

        switch(zone) {
        case HAMMER_ZONE_LARGE_DATA_INDEX:
        case HAMMER_ZONE_SMALL_DATA_INDEX:
                iotype = HAMMER_STRUCTURE_DATA_BUFFER;
                break;
        case HAMMER_ZONE_UNDO_INDEX:
                iotype = HAMMER_STRUCTURE_UNDO_BUFFER;
                break;
        case HAMMER_ZONE_META_INDEX:
        default:
                /*
                 * NOTE: inode data and directory entries are placed in this
                 * zone.  inode atime/mtime is updated in-place and thus
                 * buffers containing inodes must be synchronized as
                 * meta-buffers, same as buffers containing B-Tree info.
                 */
                iotype = HAMMER_STRUCTURE_META_BUFFER;
                break;
        }

        /*
         * Handle blockmap offset translations
         */
        if (zone >= HAMMER_ZONE_BTREE_INDEX) {
                zone2_offset = hammer_blockmap_lookup(hmp, buf_offset, errorp);
        } else if (zone == HAMMER_ZONE_UNDO_INDEX) {
                zone2_offset = hammer_undo_lookup(hmp, buf_offset, errorp);
        } else {
                KKASSERT(zone == HAMMER_ZONE_RAW_BUFFER_INDEX);
                zone2_offset = buf_offset;
                *errorp = 0;
        }
        if (*errorp)
                return(NULL);

        /*
         * NOTE: zone2_offset and maxbuf_off are both full zone-2 offset
         * specifications.
         */
        KKASSERT((zone2_offset & HAMMER_OFF_ZONE_MASK) ==
                 HAMMER_ZONE_RAW_BUFFER);
        vol_no = HAMMER_VOL_DECODE(zone2_offset);
        volume = hammer_get_volume(hmp, vol_no, errorp);
        if (volume == NULL)
                return(NULL);

        KKASSERT(zone2_offset < volume->maxbuf_off);

        /*
         * Allocate a new buffer structure.  We will check for races later.
         */
        ++hammer_count_buffers;
        buffer = kmalloc(sizeof(*buffer), hmp->m_misc,
                         M_WAITOK|M_ZERO|M_USE_RESERVE);
        buffer->zone2_offset = zone2_offset;
        buffer->zoneX_offset = buf_offset;

        hammer_io_init(&buffer->io, volume, iotype);
        buffer->io.offset = volume->ondisk->vol_buf_beg +
                            (zone2_offset & HAMMER_OFF_SHORT_MASK);
        buffer->io.bytes = bytes;
        TAILQ_INIT(&buffer->clist);
        hammer_ref(&buffer->io.lock);

        /*
         * Insert the buffer into the RB tree and handle late collisions.
         */
        if (RB_INSERT(hammer_buf_rb_tree, &hmp->rb_bufs_root, buffer)) {
                hammer_unref(&buffer->io.lock); /* safety */
                --hammer_count_buffers;
                hammer_rel_volume(volume, 0);
                buffer->io.volume = NULL;       /* safety */
                kfree(buffer, hmp->m_misc);
                goto again;
        }
        ++hammer_count_refedbufs;
found:

        /*
         * Deal with on-disk info and loading races.
         */
        if (buffer->ondisk == NULL || buffer->io.loading) {
                *errorp = hammer_load_buffer(buffer, isnew);
                if (*errorp) {
                        hammer_rel_buffer(buffer, 1);
                        buffer = NULL;
                }
        } else {
                *errorp = 0;
        }
        return(buffer);
}

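/*
 * Illustrative sketch (not compiled): zoneX buffer offsets carry their
 * zone in the high bits, so the buffer class can be derived directly
 * from the offset as hammer_get_buffer() does above.  The function name
 * below is hypothetical.
 */
#if 0
static int
example_is_data_zone(hammer_off_t buf_offset)
{
        int zone = HAMMER_ZONE_DECODE(buf_offset);

        return (zone == HAMMER_ZONE_LARGE_DATA_INDEX ||
                zone == HAMMER_ZONE_SMALL_DATA_INDEX);
}
#endif
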
/*
 * This is used by the direct-read code to deal with large-data buffers
 * created by the reblocker and mirror-write code.  The direct-read code
 * bypasses the HAMMER buffer subsystem and so any aliased dirty or write-
 * running hammer buffers must be fully synced to disk before we can issue
 * the direct-read.
 *
 * This code path is not considered critical as only the reblocker and
 * mirror-write code will create large-data buffers via the HAMMER buffer
 * subsystem.  They do that because they operate at the B-Tree level and
 * do not access the vnode/inode structures.
 */
void
hammer_sync_buffers(hammer_mount_t hmp, hammer_off_t base_offset, int bytes)
{
        hammer_buffer_t buffer;
        int error;

        KKASSERT((base_offset & HAMMER_OFF_ZONE_MASK) ==
                 HAMMER_ZONE_LARGE_DATA);

        while (bytes > 0) {
                buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root,
                                   base_offset);
                if (buffer && (buffer->io.modified || buffer->io.running)) {
                        error = hammer_ref_buffer(buffer);
                        if (error == 0) {
                                hammer_io_wait(&buffer->io);
                                if (buffer->io.modified) {
                                        hammer_io_write_interlock(&buffer->io);
                                        hammer_io_flush(&buffer->io, 0);
                                        hammer_io_done_interlock(&buffer->io);
                                        hammer_io_wait(&buffer->io);
                                }
                                hammer_rel_buffer(buffer, 0);
                        }
                }
                base_offset += HAMMER_BUFSIZE;
                bytes -= HAMMER_BUFSIZE;
        }
}

/*
 * Destroy all buffers covering the specified zoneX offset range.  This
 * is called when the related blockmap layer2 entry is freed or when
 * a direct write bypasses our buffer/buffer-cache subsystem.
 *
 * The buffers may be referenced by the caller itself.  Setting reclaim
 * will cause the buffer to be destroyed when its ref count reaches zero.
 *
 * Return 0 on success, EAGAIN if some buffers could not be destroyed due
 * to additional references held by other threads, or some other (typically
 * fatal) error.
 */
int
hammer_del_buffers(hammer_mount_t hmp, hammer_off_t base_offset,
                   hammer_off_t zone2_offset, int bytes,
                   int report_conflicts)
{
        hammer_buffer_t buffer;
        hammer_volume_t volume;
        int vol_no;
        int error;
        int ret_error;

        vol_no = HAMMER_VOL_DECODE(zone2_offset);
        volume = hammer_get_volume(hmp, vol_no, &ret_error);
        KKASSERT(ret_error == 0);

        while (bytes > 0) {
                buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root,
                                   base_offset);
                if (buffer) {
                        error = hammer_ref_buffer(buffer);
                        if (error == 0 && buffer->io.lock.refs != 1) {
                                error = EAGAIN;
                                hammer_rel_buffer(buffer, 0);
                        }
                        if (error == 0) {
                                KKASSERT(buffer->zone2_offset == zone2_offset);
                                hammer_io_clear_modify(&buffer->io, 1);
                                buffer->io.reclaim = 1;
                                buffer->io.waitdep = 1;
                                KKASSERT(buffer->io.volume == volume);
                                hammer_rel_buffer(buffer, 0);
                        }
                } else {
                        error = hammer_io_inval(volume, zone2_offset);
                }
                if (error) {
                        ret_error = error;
                        if (report_conflicts ||
                            (hammer_debug_general & 0x8000)) {
                                kprintf("hammer_del_buffers: unable to "
                                        "invalidate %016llx buffer=%p rep=%d\n",
                                        (long long)base_offset,
                                        buffer, report_conflicts);
                        }
                }
                base_offset += HAMMER_BUFSIZE;
                zone2_offset += HAMMER_BUFSIZE;
                bytes -= HAMMER_BUFSIZE;
        }
        hammer_rel_volume(volume, 0);
        return (ret_error);
}

static int
hammer_load_buffer(hammer_buffer_t buffer, int isnew)
{
        hammer_volume_t volume;
        int error;

        /*
         * Load the buffer's on-disk info
         */
        volume = buffer->io.volume;
        ++buffer->io.loading;
        hammer_lock_ex(&buffer->io.lock);

        if (hammer_debug_io & 0x0001) {
                kprintf("load_buffer %016llx %016llx isnew=%d od=%p\n",
                        (long long)buffer->zoneX_offset,
                        (long long)buffer->zone2_offset,
                        isnew, buffer->ondisk);
        }

        if (buffer->ondisk == NULL) {
                if (isnew) {
                        error = hammer_io_new(volume->devvp, &buffer->io);
                } else {
                        error = hammer_io_read(volume->devvp, &buffer->io,
                                               volume->maxraw_off);
                }
                if (error == 0)
                        buffer->ondisk = (void *)buffer->io.bp->b_data;
        } else if (isnew) {
                error = hammer_io_new(volume->devvp, &buffer->io);
        } else {
                error = 0;
        }
        --buffer->io.loading;
        hammer_unlock(&buffer->io.lock);
        return (error);
}

/*
 * NOTE: Called from RB_SCAN, must return >= 0 for scan to continue.
 * This routine is only called during unmount.
 */
int
hammer_unload_buffer(hammer_buffer_t buffer, void *data __unused)
{
        /*
         * Clean up the persistent ref ioerror might have on the buffer
         * and acquire a ref (steal ioerror's if we can).
         */
        if (buffer->io.ioerror) {
                buffer->io.ioerror = 0;
        } else {
                if (buffer->io.lock.refs == 0)
                        ++hammer_count_refedbufs;
                hammer_ref(&buffer->io.lock);
        }

        /*
         * We must not flush a dirty buffer to disk on umount.  It should
         * have already been dealt with by the flusher, or we may be in
         * catastrophic failure.
         *
         * We must set waitdep to ensure that a running buffer is waited
         * on and released prior to us trying to unload the volume.
         */
        hammer_io_clear_modify(&buffer->io, 1);
        hammer_flush_buffer_nodes(buffer);
        KKASSERT(buffer->io.lock.refs == 1);
        buffer->io.waitdep = 1;
        hammer_rel_buffer(buffer, 2);
        return(0);
}

/*
 * Reference a buffer that is either already referenced or via a specially
 * handled pointer (aka cursor->buffer).
 */
int
hammer_ref_buffer(hammer_buffer_t buffer)
{
        int error;

        if (buffer->io.lock.refs == 0)
                ++hammer_count_refedbufs;
        hammer_ref(&buffer->io.lock);

        /*
         * At this point a biodone() will not touch the buffer other than
         * incidental bits.  However, lose_list can be modified via
         * a biodone() interrupt.
         *
         * No longer loose
         */
        if (buffer->io.mod_list == &buffer->io.hmp->lose_list) {
                crit_enter();
                TAILQ_REMOVE(buffer->io.mod_list, &buffer->io, mod_entry);
                buffer->io.mod_list = NULL;
                crit_exit();
        }

        if (buffer->ondisk == NULL || buffer->io.loading) {
                error = hammer_load_buffer(buffer, 0);
                if (error) {
                        hammer_rel_buffer(buffer, 1);
                        /*
                         * NOTE: buffer pointer can become stale after
                         * the above release.
                         */
                }
        } else {
                error = 0;
        }
        return(error);
}

/*
 * Release a buffer.  We have to deal with several places where
 * another thread can ref the buffer.
 *
 * Only destroy the structure itself if the related buffer cache buffer
 * was disassociated from it.  This ties the management of the structure
 * to the buffer cache subsystem.  buffer->ondisk determines whether the
 * embedded io is referenced or not.
 */
void
hammer_rel_buffer(hammer_buffer_t buffer, int flush)
{
        hammer_volume_t volume;
        hammer_mount_t hmp;
        struct buf *bp = NULL;
        int freeme = 0;

        hmp = buffer->io.hmp;

        crit_enter();
        if (buffer->io.lock.refs == 1) {
                ++buffer->io.loading;   /* force interlock check */
                hammer_lock_ex(&buffer->io.lock);
                if (buffer->io.lock.refs == 1) {
                        bp = hammer_io_release(&buffer->io, flush);

                        if (buffer->io.lock.refs == 1)
                                --hammer_count_refedbufs;

                        if (buffer->io.bp == NULL &&
                            buffer->io.lock.refs == 1) {
                                /*
                                 * Final cleanup
                                 *
                                 * NOTE: It is impossible for any associated
                                 * B-Tree nodes to have refs if the buffer
                                 * has no additional refs.
                                 */
                                RB_REMOVE(hammer_buf_rb_tree,
                                          &buffer->io.hmp->rb_bufs_root,
                                          buffer);
                                volume = buffer->io.volume;
                                buffer->io.volume = NULL; /* sanity */
                                hammer_rel_volume(volume, 0);
                                hammer_io_clear_modlist(&buffer->io);
                                hammer_flush_buffer_nodes(buffer);
                                KKASSERT(TAILQ_EMPTY(&buffer->clist));
                                freeme = 1;
                        }
                }
                --buffer->io.loading;
                hammer_unlock(&buffer->io.lock);
        }
        hammer_unref(&buffer->io.lock);
        crit_exit();
        if (bp)
                brelse(bp);
        if (freeme) {
                --hammer_count_buffers;
                kfree(buffer, hmp->m_misc);
        }
}

/*
 * Access the filesystem buffer containing the specified hammer offset.
 * buf_offset is a conglomeration of the volume number and vol_buf_beg
 * relative buffer offset.  It must also have bit 55 set to be valid.
 * (see hammer_off_t in hammer_disk.h).
 *
 * Any prior buffer in *bufferp will be released and replaced by the
 * requested buffer.
 *
 * NOTE: The buffer is indexed via its zoneX_offset but we allow the
 * passed cached *bufferp to match against either zoneX or zone2.
 */
static __inline
void *
_hammer_bread(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
             int *errorp, struct hammer_buffer **bufferp)
{
        hammer_buffer_t buffer;
        int32_t xoff = (int32_t)buf_offset & HAMMER_BUFMASK;

        buf_offset &= ~HAMMER_BUFMASK64;
        KKASSERT((buf_offset & HAMMER_OFF_ZONE_MASK) != 0);

        buffer = *bufferp;
        if (buffer == NULL || (buffer->zone2_offset != buf_offset &&
                               buffer->zoneX_offset != buf_offset)) {
                if (buffer)
                        hammer_rel_buffer(buffer, 0);
                buffer = hammer_get_buffer(hmp, buf_offset, bytes, 0, errorp);
                *bufferp = buffer;
        } else {
                *errorp = 0;
        }

        /*
         * Return a pointer to the buffer data.
         */
        if (buffer == NULL)
                return(NULL);
        else
                return((char *)buffer->ondisk + xoff);
}

void *
hammer_bread(hammer_mount_t hmp, hammer_off_t buf_offset,
             int *errorp, struct hammer_buffer **bufferp)
{
        return(_hammer_bread(hmp, buf_offset, HAMMER_BUFSIZE, errorp, bufferp));
}

void *
hammer_bread_ext(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
                 int *errorp, struct hammer_buffer **bufferp)
{
        bytes = (bytes + HAMMER_BUFMASK) & ~HAMMER_BUFMASK;
        return(_hammer_bread(hmp, buf_offset, bytes, errorp, bufferp));
}

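/*
 * Illustrative sketch (not compiled): the cached *bufferp pattern used
 * by hammer_bread().  The pointer must start NULL, is reused across
 * calls that hit the same underlying buffer, and must be released by
 * the caller when done.  The function name and loop are hypothetical.
 */
#if 0
static void
example_bread_scan(hammer_mount_t hmp, hammer_off_t base_offset, int count)
{
        hammer_buffer_t buffer = NULL;  /* must be initialized to NULL */
        void *data;
        int error;
        int i;

        for (i = 0; i < count; ++i) {
                data = hammer_bread(hmp,
                                    base_offset +
                                    (hammer_off_t)i * HAMMER_BUFSIZE,
                                    &error, &buffer);
                if (data == NULL)
                        break;
                /* ... inspect the buffer contents ... */
        }
        if (buffer)
                hammer_rel_buffer(buffer, 0);   /* release cached buffer */
}
#endif
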
/*
 * Access the filesystem buffer containing the specified hammer offset.
 * No disk read operation occurs.  The result buffer may contain garbage.
 *
 * Any prior buffer in *bufferp will be released and replaced by the
 * requested buffer.
 *
 * This function marks the buffer dirty but does not increment its
 * modify_refs count.
 */
static __inline
void *
_hammer_bnew(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
             int *errorp, struct hammer_buffer **bufferp)
{
        hammer_buffer_t buffer;
        int32_t xoff = (int32_t)buf_offset & HAMMER_BUFMASK;

        buf_offset &= ~HAMMER_BUFMASK64;

        buffer = *bufferp;
        if (buffer == NULL || (buffer->zone2_offset != buf_offset &&
                               buffer->zoneX_offset != buf_offset)) {
                if (buffer)
                        hammer_rel_buffer(buffer, 0);
                buffer = hammer_get_buffer(hmp, buf_offset, bytes, 1, errorp);
                *bufferp = buffer;
        } else {
                *errorp = 0;
        }

        /*
         * Return a pointer to the buffer data.
         */
        if (buffer == NULL)
                return(NULL);
        else
                return((char *)buffer->ondisk + xoff);
}

void *
hammer_bnew(hammer_mount_t hmp, hammer_off_t buf_offset,
             int *errorp, struct hammer_buffer **bufferp)
{
        return(_hammer_bnew(hmp, buf_offset, HAMMER_BUFSIZE, errorp, bufferp));
}

void *
hammer_bnew_ext(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
                int *errorp, struct hammer_buffer **bufferp)
{
        bytes = (bytes + HAMMER_BUFMASK) & ~HAMMER_BUFMASK;
        return(_hammer_bnew(hmp, buf_offset, bytes, errorp, bufferp));
}

/************************************************************************
 *                              NODES                                   *
 ************************************************************************
 *
 * Manage B-Tree nodes.  B-Tree nodes represent the primary indexing
 * method used by the HAMMER filesystem.
 *
 * Unlike other HAMMER structures, a hammer_node can be PASSIVELY
 * associated with its buffer, and will only reference the buffer while
 * the node itself is referenced.
 *
 * A hammer_node can also be passively associated with other HAMMER
 * structures, such as inodes, while retaining 0 references.  These
 * associations can be cleared backwards using a pointer-to-pointer in
 * the hammer_node.
 *
 * This allows the HAMMER implementation to cache hammer_nodes long-term
 * and short-cut a great deal of the infrastructure's complexity.  In
 * most cases a cached node can be reacquired without having to dip into
 * either the buffer or cluster management code.
 *
 * The caller must pass a referenced cluster on call and will retain
 * ownership of the reference on return.  The node will acquire its own
 * additional references, if necessary.
 */
hammer_node_t
hammer_get_node(hammer_transaction_t trans, hammer_off_t node_offset,
                int isnew, int *errorp)
{
        hammer_mount_t hmp = trans->hmp;
        hammer_node_t node;

        KKASSERT((node_offset & HAMMER_OFF_ZONE_MASK) == HAMMER_ZONE_BTREE);

        /*
         * Locate the structure, allocating one if necessary.
         */
again:
        node = RB_LOOKUP(hammer_nod_rb_tree, &hmp->rb_nods_root, node_offset);
        if (node == NULL) {
                ++hammer_count_nodes;
                node = kmalloc(sizeof(*node), hmp->m_misc, M_WAITOK|M_ZERO|M_USE_RESERVE);
                node->node_offset = node_offset;
                node->hmp = hmp;
                TAILQ_INIT(&node->cursor_list);
                TAILQ_INIT(&node->cache_list);
                if (RB_INSERT(hammer_nod_rb_tree, &hmp->rb_nods_root, node)) {
                        --hammer_count_nodes;
                        kfree(node, hmp->m_misc);
                        goto again;
                }
        }
        hammer_ref(&node->lock);
        if (node->ondisk) {
                *errorp = 0;
        } else {
                *errorp = hammer_load_node(trans, node, isnew);
                trans->flags |= HAMMER_TRANSF_DIDIO;
        }
        if (*errorp) {
                hammer_rel_node(node);
                node = NULL;
        }
        return(node);
}

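/*
 * Illustrative sketch (not compiled): acquiring and releasing a B-Tree
 * node by offset.  node_offset must be a zone-8 (B-Tree) offset or the
 * KKASSERT in hammer_get_node() fires.  The function name below is
 * hypothetical.
 */
#if 0
static void
example_node_access(hammer_transaction_t trans, hammer_off_t node_offset)
{
        hammer_node_t node;
        int error;

        node = hammer_get_node(trans, node_offset, 0, &error);
        if (node) {
                /* ... examine node->ondisk ... */
                hammer_rel_node(node);
        }
}
#endif
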
/*
 * Reference an already-referenced node.
 */
void
hammer_ref_node(hammer_node_t node)
{
        KKASSERT(node->lock.refs > 0 && node->ondisk != NULL);
        hammer_ref(&node->lock);
}

/*
 * Load a node's on-disk data reference.
 */
static int
hammer_load_node(hammer_transaction_t trans, hammer_node_t node, int isnew)
{
        hammer_buffer_t buffer;
        hammer_off_t buf_offset;
        int error;

        error = 0;
        ++node->loading;
        hammer_lock_ex(&node->lock);
        if (node->ondisk == NULL) {
                /*
                 * This is a little confusing but the gist is that
                 * node->buffer determines whether the node is on
                 * the buffer's clist and node->ondisk determines
                 * whether the buffer is referenced.
                 *
                 * We could be racing a buffer release, in which case
                 * node->buffer may become NULL while we are blocked
                 * referencing the buffer.
                 */
                if ((buffer = node->buffer) != NULL) {
                        error = hammer_ref_buffer(buffer);
                        if (error == 0 && node->buffer == NULL) {
                                TAILQ_INSERT_TAIL(&buffer->clist,
                                                  node, entry);
                                node->buffer = buffer;
                        }
                } else {
                        buf_offset = node->node_offset & ~HAMMER_BUFMASK64;
                        buffer = hammer_get_buffer(node->hmp, buf_offset,
                                                   HAMMER_BUFSIZE, 0, &error);
                        if (buffer) {
                                KKASSERT(error == 0);
                                TAILQ_INSERT_TAIL(&buffer->clist,
                                                  node, entry);
                                node->buffer = buffer;
                        }
                }
                if (error)
                        goto failed;
                node->ondisk = (void *)((char *)buffer->ondisk +
                                        (node->node_offset & HAMMER_BUFMASK));

                /*
                 * Check CRC.  NOTE: Neither flag is set and the CRC is not
                 * generated on new B-Tree nodes.
                 */
                if (isnew == 0 &&
                    (node->flags & HAMMER_NODE_CRCANY) == 0) {
                        if (hammer_crc_test_btree(node->ondisk) == 0) {
                                if (hammer_debug_debug & 0x0002)
                                        Debugger("CRC FAILED: B-TREE NODE");
                                node->flags |= HAMMER_NODE_CRCBAD;
                        } else {
                                node->flags |= HAMMER_NODE_CRCGOOD;
                        }
                }
        }
        if (node->flags & HAMMER_NODE_CRCBAD) {
                if (trans->flags & HAMMER_TRANSF_CRCDOM)
                        error = EDOM;
                else
                        error = EIO;
        }
failed:
        --node->loading;
        hammer_unlock(&node->lock);
        return (error);
}

/*
 * Safely reference a node, interlock against flushes via the IO subsystem.
 */
hammer_node_t
hammer_ref_node_safe(hammer_transaction_t trans, hammer_node_cache_t cache,
                     int *errorp)
{
        hammer_node_t node;

        node = cache->node;
        if (node != NULL) {
                hammer_ref(&node->lock);
                if (node->ondisk) {
                        if (node->flags & HAMMER_NODE_CRCBAD) {
                                if (trans->flags & HAMMER_TRANSF_CRCDOM)
                                        *errorp = EDOM;
                                else
                                        *errorp = EIO;
                        } else {
                                *errorp = 0;
                        }
                } else {
                        *errorp = hammer_load_node(trans, node, 0);
                }
                if (*errorp) {
                        hammer_rel_node(node);
                        node = NULL;
                }
        } else {
                *errorp = ENOENT;
        }
        return(node);
}

/*
 * Release a hammer_node.  On the last release the node dereferences
 * its underlying buffer and may or may not be destroyed.
 */
void
hammer_rel_node(hammer_node_t node)
{
        hammer_buffer_t buffer;

        /*
         * If this isn't the last ref just decrement the ref count and
         * return.
         */
        if (node->lock.refs > 1) {
                hammer_unref(&node->lock);
                return;
        }

        /*
         * If there is no ondisk info or no buffer the node failed to load,
         * remove the last reference and destroy the node.
         */
        if (node->ondisk == NULL) {
                hammer_unref(&node->lock);
                hammer_flush_node(node);
                /* node is stale now */
                return;
        }

        /*
         * Do not disassociate the node from the buffer if it represents
         * a modified B-Tree node that still needs its crc to be generated.
         */
        if (node->flags & HAMMER_NODE_NEEDSCRC)
                return;

        /*
         * Do final cleanups and then either destroy the node or leave it
         * passively cached.  The buffer reference is removed regardless.
         */
        buffer = node->buffer;
        node->ondisk = NULL;

        if ((node->flags & HAMMER_NODE_FLUSH) == 0) {
                hammer_unref(&node->lock);
                hammer_rel_buffer(buffer, 0);
                return;
        }

        /*
         * Destroy the node.
         */
        hammer_unref(&node->lock);
        hammer_flush_node(node);
        /* node is stale */
        hammer_rel_buffer(buffer, 0);
}

/*
 * Free space on-media associated with a B-Tree node.
 */
void
hammer_delete_node(hammer_transaction_t trans, hammer_node_t node)
{
        KKASSERT((node->flags & HAMMER_NODE_DELETED) == 0);
        node->flags |= HAMMER_NODE_DELETED;
        hammer_blockmap_free(trans, node->node_offset, sizeof(*node->ondisk));
}

/*
 * Passively cache a referenced hammer_node.  The caller may release
 * the node on return.
 */
void
hammer_cache_node(hammer_node_cache_t cache, hammer_node_t node)
{
        /*
         * If the node doesn't exist, or is being deleted, don't cache it!
         *
         * The node can only ever be NULL in the I/O failure path.
         */
        if (node == NULL || (node->flags & HAMMER_NODE_DELETED))
                return;
        if (cache->node == node)
                return;
        while (cache->node)
                hammer_uncache_node(cache);
        if (node->flags & HAMMER_NODE_DELETED)
                return;
        cache->node = node;
        TAILQ_INSERT_TAIL(&node->cache_list, cache, entry);
}

void
hammer_uncache_node(hammer_node_cache_t cache)
{
        hammer_node_t node;

        if ((node = cache->node) != NULL) {
                TAILQ_REMOVE(&node->cache_list, cache, entry);
                cache->node = NULL;
                if (TAILQ_EMPTY(&node->cache_list))
                        hammer_flush_node(node);
        }
}

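/*
 * Illustrative sketch (not compiled): passive caching.  A cache entry
 * holds no reference, so reacquisition goes through
 * hammer_ref_node_safe(), which revalidates and references the node.
 * The function name below is hypothetical.
 */
#if 0
static void
example_cached_reacquire(hammer_transaction_t trans,
                         hammer_node_cache_t cache, hammer_node_t node)
{
        int error;

        hammer_cache_node(cache, node);         /* passive, no ref held */
        /* ... the node may be released and reused in the meantime ... */
        node = hammer_ref_node_safe(trans, cache, &error);
        if (node) {
                /* ... node->ondisk is valid here ... */
                hammer_rel_node(node);
        }
}
#endif
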
/*
 * Remove a node's cache references and destroy the node if it has no
 * other references or backing store.
 */
void
hammer_flush_node(hammer_node_t node)
{
        hammer_node_cache_t cache;
        hammer_buffer_t buffer;
        hammer_mount_t hmp = node->hmp;

        while ((cache = TAILQ_FIRST(&node->cache_list)) != NULL) {
                TAILQ_REMOVE(&node->cache_list, cache, entry);
                cache->node = NULL;
        }
        if (node->lock.refs == 0 && node->ondisk == NULL) {
                KKASSERT((node->flags & HAMMER_NODE_NEEDSCRC) == 0);
                RB_REMOVE(hammer_nod_rb_tree, &node->hmp->rb_nods_root, node);
                if ((buffer = node->buffer) != NULL) {
                        node->buffer = NULL;
                        TAILQ_REMOVE(&buffer->clist, node, entry);
                        /* buffer is unreferenced because ondisk is NULL */
                }
                --hammer_count_nodes;
                kfree(node, hmp->m_misc);
        }
}

/*
 * Flush passively cached B-Tree nodes associated with this buffer.
 * This is only called when the buffer is about to be destroyed, so
 * none of the nodes should have any references.  The buffer is locked.
 *
 * We may be interlocked with the buffer.
 */
void
hammer_flush_buffer_nodes(hammer_buffer_t buffer)
{
        hammer_node_t node;

        while ((node = TAILQ_FIRST(&buffer->clist)) != NULL) {
                KKASSERT(node->ondisk == NULL);
                KKASSERT((node->flags & HAMMER_NODE_NEEDSCRC) == 0);

                if (node->lock.refs == 0) {
                        hammer_ref(&node->lock);
                        node->flags |= HAMMER_NODE_FLUSH;
                        hammer_rel_node(node);
                } else {
                        KKASSERT(node->loading != 0);
                        KKASSERT(node->buffer != NULL);
                        buffer = node->buffer;
                        node->buffer = NULL;
                        TAILQ_REMOVE(&buffer->clist, node, entry);
                        /* buffer is unreferenced because ondisk is NULL */
                }
        }
}


/************************************************************************
 *                              ALLOCATORS                              *
 ************************************************************************/

/*
 * Allocate a B-Tree node.
 */
hammer_node_t
hammer_alloc_btree(hammer_transaction_t trans, hammer_off_t hint, int *errorp)
{
        hammer_buffer_t buffer = NULL;
        hammer_node_t node = NULL;
        hammer_off_t node_offset;

        node_offset = hammer_blockmap_alloc(trans, HAMMER_ZONE_BTREE_INDEX,
                                            sizeof(struct hammer_node_ondisk),
                                            hint, errorp);
        if (*errorp == 0) {
                node = hammer_get_node(trans, node_offset, 1, errorp);
                hammer_modify_node_noundo(trans, node);
                bzero(node->ondisk, sizeof(*node->ondisk));
                hammer_modify_node_done(node);
        }
        if (buffer)
                hammer_rel_buffer(buffer, 0);
        return(node);
}

/*
 * Allocate data.  If the address of a data buffer is supplied then
 * any prior non-NULL *data_bufferp will be released and *data_bufferp
 * will be set to the related buffer.  The caller must release it when
 * finally done.  The initial *data_bufferp should be set to NULL by
 * the caller.
 *
 * The caller is responsible for making hammer_modify*() calls on the
 * *data_bufferp.
 */
void *
hammer_alloc_data(hammer_transaction_t trans, int32_t data_len,
                  u_int16_t rec_type, hammer_off_t *data_offsetp,
                  struct hammer_buffer **data_bufferp,
                  hammer_off_t hint, int *errorp)
{
        void *data;
        int zone;

        /*
         * Allocate data
         */
        if (data_len) {
                switch(rec_type) {
                case HAMMER_RECTYPE_INODE:
                case HAMMER_RECTYPE_DIRENTRY:
                case HAMMER_RECTYPE_EXT:
                case HAMMER_RECTYPE_FIX:
                case HAMMER_RECTYPE_PFS:
                        zone = HAMMER_ZONE_META_INDEX;
                        break;
                case HAMMER_RECTYPE_DATA:
                case HAMMER_RECTYPE_DB:
                        if (data_len <= HAMMER_BUFSIZE / 2) {
                                zone = HAMMER_ZONE_SMALL_DATA_INDEX;
                        } else {
                                data_len = (data_len + HAMMER_BUFMASK) &
                                           ~HAMMER_BUFMASK;
                                zone = HAMMER_ZONE_LARGE_DATA_INDEX;
                        }
                        break;
                default:
                        panic("hammer_alloc_data: rec_type %04x unknown",
                              rec_type);
                        zone = 0;       /* NOT REACHED */
                        break;
                }
                *data_offsetp = hammer_blockmap_alloc(trans, zone, data_len,
                                                      hint, errorp);
        } else {
                *data_offsetp = 0;
        }
        if (*errorp == 0 && data_bufferp) {
                if (data_len) {
                        data = hammer_bread_ext(trans->hmp, *data_offsetp,
                                                data_len, errorp, data_bufferp);
                } else {
                        data = NULL;
                }
        } else {
                data = NULL;
        }
        return(data);
}

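/*
 * Illustrative sketch (not compiled): allocating record data.  The
 * *data_bufferp must start NULL, and the caller both issues the
 * hammer_modify*() calls and releases the buffer when done.  The
 * function name and parameters are hypothetical.
 */
#if 0
static void
example_alloc_data(hammer_transaction_t trans, int32_t data_len,
                   hammer_off_t hint)
{
        struct hammer_buffer *data_buffer = NULL;  /* must start NULL */
        hammer_off_t data_offset;
        void *data;
        int error;

        data = hammer_alloc_data(trans, data_len, HAMMER_RECTYPE_DATA,
                                 &data_offset, &data_buffer, hint, &error);
        if (data) {
                /* hammer_modify_buffer() before writing, then _done() */
                /* ... fill in the returned data area ... */
        }
        if (data_buffer)
                hammer_rel_buffer(data_buffer, 0);
}
#endif
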
/*
 * Sync dirty buffers to the media and clean-up any loose ends.
 *
 * These functions do not start the flusher going, they simply
 * queue everything up to the flusher.
 */
static int hammer_sync_scan1(struct mount *mp, struct vnode *vp, void *data);
static int hammer_sync_scan2(struct mount *mp, struct vnode *vp, void *data);

int
hammer_queue_inodes_flusher(hammer_mount_t hmp, int waitfor)
{
        struct hammer_sync_info info;

        info.error = 0;
        info.waitfor = waitfor;
        if (waitfor == MNT_WAIT) {
                vmntvnodescan(hmp->mp, VMSC_GETVP|VMSC_ONEPASS,
                              hammer_sync_scan1, hammer_sync_scan2, &info);
        } else {
                vmntvnodescan(hmp->mp, VMSC_GETVP|VMSC_ONEPASS|VMSC_NOWAIT,
                              hammer_sync_scan1, hammer_sync_scan2, &info);
        }
        return(info.error);
}

/*
 * Filesystem sync.  If doing a synchronous sync make a second pass on
 * the vnodes in case any were already flushing during the first pass,
 * and activate the flusher twice (the second time brings the UNDO FIFO's
 * start position up to the end position after the first call).
 */
int
hammer_sync_hmp(hammer_mount_t hmp, int waitfor)
{
        struct hammer_sync_info info;

        info.error = 0;
        info.waitfor = MNT_NOWAIT;
        vmntvnodescan(hmp->mp, VMSC_GETVP|VMSC_NOWAIT,
                      hammer_sync_scan1, hammer_sync_scan2, &info);
        if (info.error == 0 && waitfor == MNT_WAIT) {
                info.waitfor = waitfor;
                vmntvnodescan(hmp->mp, VMSC_GETVP,
                              hammer_sync_scan1, hammer_sync_scan2, &info);
        }
        if (waitfor == MNT_WAIT) {
                hammer_flusher_sync(hmp);
                hammer_flusher_sync(hmp);
        } else {
                hammer_flusher_async(hmp, NULL);
                hammer_flusher_async(hmp, NULL);
        }
        return(info.error);
}

static int
hammer_sync_scan1(struct mount *mp, struct vnode *vp, void *data)
{
        struct hammer_inode *ip;

        ip = VTOI(vp);
        if (vp->v_type == VNON || ip == NULL ||
            ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
             RB_EMPTY(&vp->v_rbdirty_tree))) {
                return(-1);
        }
        return(0);
}

static int
hammer_sync_scan2(struct mount *mp, struct vnode *vp, void *data)
{
        struct hammer_sync_info *info = data;
        struct hammer_inode *ip;
        int error;

        ip = VTOI(vp);
        if (vp->v_type == VNON || vp->v_type == VBAD ||
            ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
             RB_EMPTY(&vp->v_rbdirty_tree))) {
                return(0);
        }
        error = VOP_FSYNC(vp, MNT_NOWAIT, 0);
        if (error)
                info->error = error;
        return(0);
}