/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_ondisk.c,v 1.76 2008/08/29 20:19:08 dillon Exp $
 */
/*
 * Manage HAMMER's on-disk structures.  These routines are primarily
 * responsible for interfacing with the kernel's I/O subsystem and for
 * managing in-memory structures.
 */

#include "hammer.h"
#include <sys/fcntl.h>
#include <sys/nlookup.h>
#include <sys/buf.h>
#include <sys/buf2.h>

static void hammer_free_volume(hammer_volume_t volume);
static int hammer_load_volume(hammer_volume_t volume);
static int hammer_load_buffer(hammer_buffer_t buffer, int isnew);
static int hammer_load_node(hammer_transaction_t trans,
			hammer_node_t node, int isnew);
static void _hammer_rel_node(hammer_node_t node, int locked);

static int
hammer_vol_rb_compare(hammer_volume_t vol1, hammer_volume_t vol2)
{
	if (vol1->vol_no < vol2->vol_no)
		return(-1);
	if (vol1->vol_no > vol2->vol_no)
		return(1);
	return(0);
}

/*
 * hammer_buffer structures are indexed via their zoneX_offset, not
 * their zone2_offset.
 */
static int
hammer_buf_rb_compare(hammer_buffer_t buf1, hammer_buffer_t buf2)
{
	if (buf1->zoneX_offset < buf2->zoneX_offset)
		return(-1);
	if (buf1->zoneX_offset > buf2->zoneX_offset)
		return(1);
	return(0);
}

static int
hammer_nod_rb_compare(hammer_node_t node1, hammer_node_t node2)
{
	if (node1->node_offset < node2->node_offset)
		return(-1);
	if (node1->node_offset > node2->node_offset)
		return(1);
	return(0);
}

RB_GENERATE2(hammer_vol_rb_tree, hammer_volume, rb_node,
	     hammer_vol_rb_compare, int32_t, vol_no);
RB_GENERATE2(hammer_buf_rb_tree, hammer_buffer, rb_node,
	     hammer_buf_rb_compare, hammer_off_t, zoneX_offset);
RB_GENERATE2(hammer_nod_rb_tree, hammer_node, rb_node,
	     hammer_nod_rb_compare, hammer_off_t, node_offset);

/************************************************************************
 *				VOLUMES					*
 ************************************************************************
 *
 * Load a HAMMER volume by name.  Returns 0 on success or a positive error
 * code on failure.  Volumes must be loaded at mount time; get_volume() will
 * not load a new volume.
 *
 * Calls made to hammer_load_volume() are single-threaded.
 */
int
hammer_install_volume(struct hammer_mount *hmp, const char *volname,
		      struct vnode *devvp)
{
	struct mount *mp;
	hammer_volume_t volume;
	struct hammer_volume_ondisk *ondisk;
	struct nlookupdata nd;
	struct buf *bp = NULL;
	int error;
	int ronly;
	int setmp = 0;

	mp = hmp->mp;
	ronly = ((mp->mnt_flag & MNT_RDONLY) ? 1 : 0);

	/*
	 * Allocate a volume structure
	 */
	++hammer_count_volumes;
	volume = kmalloc(sizeof(*volume), hmp->m_misc, M_WAITOK|M_ZERO);
	volume->vol_name = kstrdup(volname, hmp->m_misc);
	volume->io.hmp = hmp;	/* bootstrap */
	hammer_io_init(&volume->io, volume, HAMMER_STRUCTURE_VOLUME);
	volume->io.offset = 0LL;
	volume->io.bytes = HAMMER_BUFSIZE;

	/*
	 * Get the device vnode
	 */
	if (devvp == NULL) {
		error = nlookup_init(&nd, volume->vol_name, UIO_SYSSPACE, NLC_FOLLOW);
		if (error == 0)
			error = nlookup(&nd);
		if (error == 0)
			error = cache_vref(&nd.nl_nch, nd.nl_cred, &volume->devvp);
		nlookup_done(&nd);
	} else {
		error = 0;
		volume->devvp = devvp;
	}

	if (error == 0) {
		if (vn_isdisk(volume->devvp, &error)) {
			error = vfs_mountedon(volume->devvp);
		}
	}
	if (error == 0 && vcount(volume->devvp) > 0)
		error = EBUSY;
	if (error == 0) {
		vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
		error = vinvalbuf(volume->devvp, V_SAVE, 0, 0);
		if (error == 0) {
			error = VOP_OPEN(volume->devvp,
					 (ronly ? FREAD : FREAD|FWRITE),
					 FSCRED, NULL);
		}
		vn_unlock(volume->devvp);
	}
	if (error) {
		hammer_free_volume(volume);
		return(error);
	}
	volume->devvp->v_rdev->si_mountpoint = mp;
	setmp = 1;

	/*
	 * Extract the volume number from the volume header and do various
	 * sanity checks.
	 */
	error = bread(volume->devvp, 0LL, HAMMER_BUFSIZE, &bp);
	if (error)
		goto late_failure;
	ondisk = (void *)bp->b_data;
	if (ondisk->vol_signature != HAMMER_FSBUF_VOLUME) {
		kprintf("hammer_mount: volume %s has an invalid header\n",
			volume->vol_name);
		error = EFTYPE;
		goto late_failure;
	}
	volume->vol_no = ondisk->vol_no;
	volume->buffer_base = ondisk->vol_buf_beg;
	volume->vol_flags = ondisk->vol_flags;
	volume->nblocks = ondisk->vol_nblocks;
	volume->maxbuf_off = HAMMER_ENCODE_RAW_BUFFER(volume->vol_no,
				    ondisk->vol_buf_end - ondisk->vol_buf_beg);
	volume->maxraw_off = ondisk->vol_buf_end;

	if (RB_EMPTY(&hmp->rb_vols_root)) {
		hmp->fsid = ondisk->vol_fsid;
	} else if (bcmp(&hmp->fsid, &ondisk->vol_fsid, sizeof(uuid_t))) {
		kprintf("hammer_mount: volume %s's fsid does not match "
			"other volumes\n", volume->vol_name);
		error = EFTYPE;
		goto late_failure;
	}

	/*
	 * Insert the volume structure into the red-black tree.
	 */
	if (RB_INSERT(hammer_vol_rb_tree, &hmp->rb_vols_root, volume)) {
		kprintf("hammer_mount: volume %s has a duplicate vol_no %d\n",
			volume->vol_name, volume->vol_no);
		error = EEXIST;
	}

	/*
	 * Set the root volume.  HAMMER special cases the rootvol structure.
	 * We do not hold a ref because this would prevent related I/O
	 * from being flushed.
	 */
	if (error == 0 && ondisk->vol_rootvol == ondisk->vol_no) {
		hmp->rootvol = volume;
		hmp->nvolumes = ondisk->vol_count;
		if (bp) {
			brelse(bp);
			bp = NULL;
		}
		hmp->mp->mnt_stat.f_blocks += ondisk->vol0_stat_bigblocks *
			(HAMMER_LARGEBLOCK_SIZE / HAMMER_BUFSIZE);
		hmp->mp->mnt_vstat.f_blocks += ondisk->vol0_stat_bigblocks *
			(HAMMER_LARGEBLOCK_SIZE / HAMMER_BUFSIZE);
	}
late_failure:
	if (bp)
		brelse(bp);
	if (error) {
		/*vinvalbuf(volume->devvp, V_SAVE, 0, 0);*/
		if (setmp)
			volume->devvp->v_rdev->si_mountpoint = NULL;
		VOP_CLOSE(volume->devvp, ronly ? FREAD : FREAD|FWRITE);
		hammer_free_volume(volume);
	}
	return (error);
}

/*
 * This is called for each volume when updating the mount point from
 * read-write to read-only or vice versa.
 */
int
hammer_adjust_volume_mode(hammer_volume_t volume, void *data __unused)
{
	if (volume->devvp) {
		vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
		if (volume->io.hmp->ronly) {
			/* do not call vinvalbuf */
			VOP_OPEN(volume->devvp, FREAD, FSCRED, NULL);
			VOP_CLOSE(volume->devvp, FREAD|FWRITE);
		} else {
			/* do not call vinvalbuf */
			VOP_OPEN(volume->devvp, FREAD|FWRITE, FSCRED, NULL);
			VOP_CLOSE(volume->devvp, FREAD);
		}
		vn_unlock(volume->devvp);
	}
	return(0);
}

/*
 * Unload and free a HAMMER volume.  Must return >= 0 to continue the scan,
 * so returns -1 on failure.
 */
int
hammer_unload_volume(hammer_volume_t volume, void *data __unused)
{
	hammer_mount_t hmp = volume->io.hmp;
	int ronly = ((hmp->mp->mnt_flag & MNT_RDONLY) ? 1 : 0);

	/*
	 * Clean up the root volume pointer, which is held unlocked in hmp.
	 */
	if (hmp->rootvol == volume)
		hmp->rootvol = NULL;

	/*
	 * We must not flush a dirty buffer to disk on umount.  It should
	 * have already been dealt with by the flusher, or we may be in
	 * catastrophic failure.
	 */
	hammer_io_clear_modify(&volume->io, 1);
	volume->io.waitdep = 1;

	/*
	 * Clean up the persistent ref ioerror might have on the volume.
	 */
	if (volume->io.ioerror)
		hammer_io_clear_error_noassert(&volume->io);

	/*
	 * This should release the bp.  Releasing the volume with flush set
	 * implies the interlock is set.
	 */
	hammer_ref_interlock_true(&volume->io.lock);
	hammer_rel_volume(volume, 1);
	KKASSERT(volume->io.bp == NULL);

	/*
	 * There should be no references on the volume, no clusters, and
	 * no super-clusters.
	 */
	KKASSERT(hammer_norefs(&volume->io.lock));

	volume->ondisk = NULL;
	if (volume->devvp) {
		if (volume->devvp->v_rdev &&
		    volume->devvp->v_rdev->si_mountpoint == hmp->mp
		) {
			volume->devvp->v_rdev->si_mountpoint = NULL;
		}
		if (ronly) {
			/*
			 * Make sure we don't sync anything to disk if we
			 * are in read-only mode (1) or critically-errored
			 * (2).  Note that there may be dirty buffers in
			 * normal read-only mode from crash recovery.
			 */
			vinvalbuf(volume->devvp, 0, 0, 0);
			VOP_CLOSE(volume->devvp, FREAD);
		} else {
			/*
			 * Normal termination, save any dirty buffers
			 * (XXX there really shouldn't be any).
			 */
			vinvalbuf(volume->devvp, V_SAVE, 0, 0);
			VOP_CLOSE(volume->devvp, FREAD|FWRITE);
		}
	}

	/*
	 * Destroy the structure
	 */
	RB_REMOVE(hammer_vol_rb_tree, &hmp->rb_vols_root, volume);
	hammer_free_volume(volume);
	return(0);
}

static
void
hammer_free_volume(hammer_volume_t volume)
{
	hammer_mount_t hmp = volume->io.hmp;

	if (volume->vol_name) {
		kfree(volume->vol_name, hmp->m_misc);
		volume->vol_name = NULL;
	}
	if (volume->devvp) {
		vrele(volume->devvp);
		volume->devvp = NULL;
	}
	--hammer_count_volumes;
	kfree(volume, hmp->m_misc);
}

/*
 * Get a HAMMER volume.  The volume must already exist.
 */
hammer_volume_t
hammer_get_volume(struct hammer_mount *hmp, int32_t vol_no, int *errorp)
{
	struct hammer_volume *volume;

	/*
	 * Locate the volume structure
	 */
	volume = RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, vol_no);
	if (volume == NULL) {
		*errorp = ENOENT;
		return(NULL);
	}

	/*
	 * Reference the volume, load/check the data on the 0->1 transition.
	 * hammer_load_volume() will dispose of the interlock on return,
	 * and also clean up the ref count on error.
	 */
	if (hammer_ref_interlock(&volume->io.lock)) {
		*errorp = hammer_load_volume(volume);
		if (*errorp)
			volume = NULL;
	} else {
		KKASSERT(volume->ondisk);
		*errorp = 0;
	}
	return(volume);
}

int
hammer_ref_volume(hammer_volume_t volume)
{
	int error;

	/*
	 * Reference the volume and deal with the check condition used to
	 * load its ondisk info.
	 */
	if (hammer_ref_interlock(&volume->io.lock)) {
		error = hammer_load_volume(volume);
	} else {
		KKASSERT(volume->ondisk);
		error = 0;
	}
	return (error);
}

hammer_volume_t
hammer_get_root_volume(struct hammer_mount *hmp, int *errorp)
{
	hammer_volume_t volume;

	volume = hmp->rootvol;
	KKASSERT(volume != NULL);

	/*
	 * Reference the volume and deal with the check condition used to
	 * load its ondisk info.
	 */
	if (hammer_ref_interlock(&volume->io.lock)) {
		*errorp = hammer_load_volume(volume);
		if (*errorp)
			volume = NULL;
	} else {
		KKASSERT(volume->ondisk);
		*errorp = 0;
	}
	return (volume);
}

/*
 * Load a volume's on-disk information.  The volume must be referenced and
 * the interlock is held on call.  The interlock will be released on return.
 * The reference will also be released on return if an error occurs.
 */
static int
hammer_load_volume(hammer_volume_t volume)
{
	int error;

	if (volume->ondisk == NULL) {
		error = hammer_io_read(volume->devvp, &volume->io,
				       HAMMER_BUFSIZE);
		if (error == 0) {
			volume->ondisk = (void *)volume->io.bp->b_data;
			hammer_ref_interlock_done(&volume->io.lock);
		} else {
			hammer_rel_volume(volume, 1);
		}
	} else {
		error = 0;
	}
	return(error);
}

/*
 * Release a previously acquired reference on the volume.
 *
 * Volumes are not unloaded from memory during normal operation.
 */
void
hammer_rel_volume(hammer_volume_t volume, int locked)
{
	struct buf *bp;

	if (hammer_rel_interlock(&volume->io.lock, locked)) {
		volume->ondisk = NULL;
		bp = hammer_io_release(&volume->io, locked);
		hammer_rel_interlock_done(&volume->io.lock, locked);
		if (bp)
			brelse(bp);
	}
}

int
hammer_mountcheck_volumes(struct hammer_mount *hmp)
{
	hammer_volume_t vol;
	int i;

	for (i = 0; i < hmp->nvolumes; ++i) {
		vol = RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, i);
		if (vol == NULL)
			return(EINVAL);
	}
	return(0);
}
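
/*
 * Usage sketch (illustrative only, not part of the original source; the
 * caller-supplied hmp and vol_no are placeholders): consumers of the
 * volume API above typically pair hammer_get_volume() with
 * hammer_rel_volume() and check the returned pointer before touching
 * the ondisk header:
 *
 *	hammer_volume_t volume;
 *	int error;
 *
 *	volume = hammer_get_volume(hmp, vol_no, &error);
 *	if (volume) {
 *		... inspect volume->ondisk ...
 *		hammer_rel_volume(volume, 0);
 *	}
 */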

/************************************************************************
 *				BUFFERS					*
 ************************************************************************
 *
 * Manage buffers.  Currently most blockmap-backed zones are direct-mapped
 * to zone-2 buffer offsets, without a translation stage.  However, the
 * hammer_buffer structure is indexed by its zoneX_offset, not its
 * zone2_offset.
 *
 * The proper zone must be maintained throughout the code-base all the way
 * through to the big-block allocator, or routines like hammer_del_buffers()
 * will not be able to locate all potentially conflicting buffers.
 */

/*
 * Helper function returns whether a zone offset can be directly translated
 * to a raw buffer index or not.  Really only the volume and undo zones
 * can't be directly translated.  Volumes are special-cased and undo zones
 * shouldn't be accessed via aliases in read-only mode.
 *
 * This function is ONLY used to detect aliased zones during a read-only
 * mount.
 */
static __inline int
hammer_direct_zone(hammer_off_t buf_offset)
{
	switch(HAMMER_ZONE_DECODE(buf_offset)) {
	case HAMMER_ZONE_RAW_BUFFER_INDEX:
	case HAMMER_ZONE_FREEMAP_INDEX:
	case HAMMER_ZONE_BTREE_INDEX:
	case HAMMER_ZONE_META_INDEX:
	case HAMMER_ZONE_LARGE_DATA_INDEX:
	case HAMMER_ZONE_SMALL_DATA_INDEX:
		return(1);
	default:
		return(0);
	}
	/* NOT REACHED */
}

hammer_buffer_t
hammer_get_buffer(hammer_mount_t hmp, hammer_off_t buf_offset,
		  int bytes, int isnew, int *errorp)
{
	hammer_buffer_t buffer;
	hammer_volume_t volume;
	hammer_off_t zone2_offset;
	hammer_io_type_t iotype;
	int vol_no;
	int zone;

	buf_offset &= ~HAMMER_BUFMASK64;
again:
	/*
	 * Shortcut if the buffer is already cached
	 */
	buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root, buf_offset);
	if (buffer) {
		/*
		 * Once refed the ondisk field will not be cleared by
		 * any other action.  Shortcut the operation if the
		 * ondisk structure is valid.
		 */
found_aliased:
		if (hammer_ref_interlock(&buffer->io.lock) == 0) {
			hammer_io_advance(&buffer->io);
			KKASSERT(buffer->ondisk);
			*errorp = 0;
			return(buffer);
		}

		/*
		 * 0->1 transition or deferred 0->1 transition (CHECK),
		 * interlock now held.  Shortcut if ondisk is already
		 * assigned.
		 */
		++hammer_count_refedbufs;
		if (buffer->ondisk) {
			hammer_io_advance(&buffer->io);
			hammer_ref_interlock_done(&buffer->io.lock);
			*errorp = 0;
			return(buffer);
		}

		/*
		 * The buffer is no longer loose if it has a ref, and
		 * cannot become loose once it gains a ref.  Loose
		 * buffers will never be in a modified state.  This should
		 * only occur on the 0->1 transition of refs.
		 *
		 * lose_list can be modified via a biodone() interrupt
		 * so the io_token must be held.
		 */
		if (buffer->io.mod_list == &hmp->lose_list) {
			lwkt_gettoken(&hmp->io_token);
			if (buffer->io.mod_list == &hmp->lose_list) {
				TAILQ_REMOVE(buffer->io.mod_list, &buffer->io,
					     mod_entry);
				buffer->io.mod_list = NULL;
				KKASSERT(buffer->io.modified == 0);
			}
			lwkt_reltoken(&hmp->io_token);
		}
		goto found;
	} else if (hmp->ronly && hammer_direct_zone(buf_offset)) {
		/*
		 * If this is a read-only mount there could be an alias
		 * in the raw-zone.  If there is we use that buffer instead.
		 *
		 * rw mounts will not have aliases.  Also note when going
		 * from ro -> rw the recovered raw buffers are flushed and
		 * reclaimed, so again there will not be any aliases once
		 * the mount is rw.
		 */
		buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root,
				   (buf_offset & ~HAMMER_OFF_ZONE_MASK) |
				   HAMMER_ZONE_RAW_BUFFER);
		if (buffer) {
			kprintf("HAMMER: recovered aliased %016jx\n",
				(intmax_t)buf_offset);
			goto found_aliased;
		}
	}

	/*
	 * What is the buffer class?
	 */
	zone = HAMMER_ZONE_DECODE(buf_offset);

	switch(zone) {
	case HAMMER_ZONE_LARGE_DATA_INDEX:
	case HAMMER_ZONE_SMALL_DATA_INDEX:
		iotype = HAMMER_STRUCTURE_DATA_BUFFER;
		break;
	case HAMMER_ZONE_UNDO_INDEX:
		iotype = HAMMER_STRUCTURE_UNDO_BUFFER;
		break;
	case HAMMER_ZONE_META_INDEX:
	default:
		/*
		 * NOTE: inode data and directory entries are placed in this
		 * zone.  inode atime/mtime is updated in-place and thus
		 * buffers containing inodes must be synchronized as
		 * meta-buffers, same as buffers containing B-Tree info.
		 */
		iotype = HAMMER_STRUCTURE_META_BUFFER;
		break;
	}

	/*
	 * Handle blockmap offset translations
	 */
	if (zone >= HAMMER_ZONE_BTREE_INDEX) {
		zone2_offset = hammer_blockmap_lookup(hmp, buf_offset, errorp);
	} else if (zone == HAMMER_ZONE_UNDO_INDEX) {
		zone2_offset = hammer_undo_lookup(hmp, buf_offset, errorp);
	} else {
		KKASSERT(zone == HAMMER_ZONE_RAW_BUFFER_INDEX);
		zone2_offset = buf_offset;
		*errorp = 0;
	}
	if (*errorp)
		return(NULL);

	/*
	 * NOTE: zone2_offset and maxbuf_off are both full zone-2 offset
	 * specifications.
	 */
	KKASSERT((zone2_offset & HAMMER_OFF_ZONE_MASK) ==
		 HAMMER_ZONE_RAW_BUFFER);
	vol_no = HAMMER_VOL_DECODE(zone2_offset);
	volume = hammer_get_volume(hmp, vol_no, errorp);
	if (volume == NULL)
		return(NULL);

	KKASSERT(zone2_offset < volume->maxbuf_off);

	/*
	 * Allocate a new buffer structure.  We will check for races later.
	 */
	++hammer_count_buffers;
	buffer = kmalloc(sizeof(*buffer), hmp->m_misc,
			 M_WAITOK|M_ZERO|M_USE_RESERVE);
	buffer->zone2_offset = zone2_offset;
	buffer->zoneX_offset = buf_offset;

	hammer_io_init(&buffer->io, volume, iotype);
	buffer->io.offset = volume->ondisk->vol_buf_beg +
			    (zone2_offset & HAMMER_OFF_SHORT_MASK);
	buffer->io.bytes = bytes;
	TAILQ_INIT(&buffer->clist);
	hammer_ref_interlock_true(&buffer->io.lock);

	/*
	 * Insert the buffer into the RB tree and handle late collisions.
	 */
	if (RB_INSERT(hammer_buf_rb_tree, &hmp->rb_bufs_root, buffer)) {
		hammer_rel_volume(volume, 0);
		buffer->io.volume = NULL;			/* safety */
		if (hammer_rel_interlock(&buffer->io.lock, 1))	/* safety */
			hammer_rel_interlock_done(&buffer->io.lock, 1);
		--hammer_count_buffers;
		kfree(buffer, hmp->m_misc);
		goto again;
	}
	++hammer_count_refedbufs;
found:

	/*
	 * The buffer is referenced and interlocked.  Load the buffer
	 * if necessary.  hammer_load_buffer() deals with the interlock
	 * and, if an error is returned, also deals with the ref.
	 */
	if (buffer->ondisk == NULL) {
		*errorp = hammer_load_buffer(buffer, isnew);
		if (*errorp)
			buffer = NULL;
	} else {
		hammer_io_advance(&buffer->io);
		hammer_ref_interlock_done(&buffer->io.lock);
		*errorp = 0;
	}
	return(buffer);
}

/*
 * This is used by the direct-read code to deal with large-data buffers
 * created by the reblocker and mirror-write code.  The direct-read code
 * bypasses the HAMMER buffer subsystem and so any aliased dirty or
 * write-running hammer buffers must be fully synced to disk before we
 * can issue the direct-read.
 *
 * This code path is not considered critical as only the reblocker and
 * mirror-write code will create large-data buffers via the HAMMER buffer
 * subsystem.  They do that because they operate at the B-Tree level and
 * do not access the vnode/inode structures.
 */
void
hammer_sync_buffers(hammer_mount_t hmp, hammer_off_t base_offset, int bytes)
{
	hammer_buffer_t buffer;
	int error;

	KKASSERT((base_offset & HAMMER_OFF_ZONE_MASK) ==
		 HAMMER_ZONE_LARGE_DATA);

	while (bytes > 0) {
		buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root,
				   base_offset);
		if (buffer && (buffer->io.modified || buffer->io.running)) {
			error = hammer_ref_buffer(buffer);
			if (error == 0) {
				hammer_io_wait(&buffer->io);
				if (buffer->io.modified) {
					hammer_io_write_interlock(&buffer->io);
					hammer_io_flush(&buffer->io, 0);
					hammer_io_done_interlock(&buffer->io);
					hammer_io_wait(&buffer->io);
				}
				hammer_rel_buffer(buffer, 0);
			}
		}
		base_offset += HAMMER_BUFSIZE;
		bytes -= HAMMER_BUFSIZE;
	}
}

/*
 * Destroy all buffers covering the specified zoneX offset range.  This
 * is called when the related blockmap layer2 entry is freed or when
 * a direct write bypasses our buffer/buffer-cache subsystem.
 *
 * The buffers may be referenced by the caller itself.  Setting reclaim
 * will cause the buffer to be destroyed when its ref count reaches zero.
 *
 * Return 0 on success, EAGAIN if some buffers could not be destroyed due
 * to additional references held by other threads, or some other (typically
 * fatal) error.
 */
int
hammer_del_buffers(hammer_mount_t hmp, hammer_off_t base_offset,
		   hammer_off_t zone2_offset, int bytes,
		   int report_conflicts)
{
	hammer_buffer_t buffer;
	hammer_volume_t volume;
	int vol_no;
	int error;
	int ret_error;

	vol_no = HAMMER_VOL_DECODE(zone2_offset);
	volume = hammer_get_volume(hmp, vol_no, &ret_error);
	KKASSERT(ret_error == 0);

	while (bytes > 0) {
		buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root,
				   base_offset);
		if (buffer) {
			error = hammer_ref_buffer(buffer);
			if (hammer_debug_general & 0x20000) {
				kprintf("hammer: delbufr %016jx "
					"rerr=%d 1ref=%d\n",
					(intmax_t)buffer->zoneX_offset,
					error,
					hammer_oneref(&buffer->io.lock));
			}
			if (error == 0 && !hammer_oneref(&buffer->io.lock)) {
				error = EAGAIN;
				hammer_rel_buffer(buffer, 0);
			}
			if (error == 0) {
				KKASSERT(buffer->zone2_offset == zone2_offset);
				hammer_io_clear_modify(&buffer->io, 1);
				buffer->io.reclaim = 1;
				buffer->io.waitdep = 1;
				KKASSERT(buffer->io.volume == volume);
				hammer_rel_buffer(buffer, 0);
			}
		} else {
			error = hammer_io_inval(volume, zone2_offset);
		}
		if (error) {
			ret_error = error;
			if (report_conflicts ||
			    (hammer_debug_general & 0x8000)) {
				kprintf("hammer_del_buffers: unable to "
					"invalidate %016llx buffer=%p rep=%d\n",
					(long long)base_offset,
					buffer, report_conflicts);
			}
		}
		base_offset += HAMMER_BUFSIZE;
		zone2_offset += HAMMER_BUFSIZE;
		bytes -= HAMMER_BUFSIZE;
	}
	hammer_rel_volume(volume, 0);
	return (ret_error);
}

/*
 * Given a referenced and interlocked buffer load/validate the data.
 *
 * The buffer interlock will be released on return.  If an error is
 * returned the buffer reference will also be released (and the buffer
 * pointer will thus be stale).
 */
static int
hammer_load_buffer(hammer_buffer_t buffer, int isnew)
{
	hammer_volume_t volume;
	int error;

	/*
	 * Load the buffer's on-disk info
	 */
	volume = buffer->io.volume;

	if (hammer_debug_io & 0x0004) {
		kprintf("load_buffer %016llx %016llx isnew=%d od=%p\n",
			(long long)buffer->zoneX_offset,
			(long long)buffer->zone2_offset,
			isnew, buffer->ondisk);
	}

	if (buffer->ondisk == NULL) {
		/*
		 * Issue the read or generate a new buffer.  When reading
		 * the limit argument controls any read-ahead clustering
		 * hammer_io_read() is allowed to do.
		 *
		 * We cannot read-ahead in the large-data zone and we cannot
		 * cross a largeblock boundary as the next largeblock might
		 * use a different buffer size.
		 */
		if (isnew) {
			error = hammer_io_new(volume->devvp, &buffer->io);
		} else if ((buffer->zoneX_offset & HAMMER_OFF_ZONE_MASK) ==
			   HAMMER_ZONE_LARGE_DATA) {
			error = hammer_io_read(volume->devvp, &buffer->io,
					       buffer->io.bytes);
		} else {
			hammer_off_t limit;

			limit = (buffer->zone2_offset +
				 HAMMER_LARGEBLOCK_MASK64) &
				~HAMMER_LARGEBLOCK_MASK64;
			limit -= buffer->zone2_offset;
			error = hammer_io_read(volume->devvp, &buffer->io,
					       limit);
		}
		if (error == 0)
			buffer->ondisk = (void *)buffer->io.bp->b_data;
	} else if (isnew) {
		error = hammer_io_new(volume->devvp, &buffer->io);
	} else {
		error = 0;
	}
	if (error == 0) {
		hammer_io_advance(&buffer->io);
		hammer_ref_interlock_done(&buffer->io.lock);
	} else {
		hammer_rel_buffer(buffer, 1);
	}
	return (error);
}

/*
 * NOTE: Called from RB_SCAN, must return >= 0 for scan to continue.
 * This routine is only called during unmount or when a volume is
 * removed.
 *
 * If data != NULL, it specifies a volume whose buffers should
 * be unloaded.
 */
int
hammer_unload_buffer(hammer_buffer_t buffer, void *data)
{
	struct hammer_volume *volume = (struct hammer_volume *) data;

	/*
	 * If volume != NULL we are only interested in unloading buffers
	 * associated with a particular volume.
	 */
	if (volume != NULL && volume != buffer->io.volume)
		return 0;

	/*
	 * Clean up the persistent ref ioerror might have on the buffer
	 * and acquire a ref.  Expect a 0->1 transition.
	 */
	if (buffer->io.ioerror) {
		hammer_io_clear_error_noassert(&buffer->io);
		--hammer_count_refedbufs;
	}
	hammer_ref_interlock_true(&buffer->io.lock);
	++hammer_count_refedbufs;

	/*
	 * We must not flush a dirty buffer to disk on umount.  It should
	 * have already been dealt with by the flusher, or we may be in
	 * catastrophic failure.
	 *
	 * We must set waitdep to ensure that a running buffer is waited
	 * on and released prior to us trying to unload the volume.
	 */
	hammer_io_clear_modify(&buffer->io, 1);
	hammer_flush_buffer_nodes(buffer);
	buffer->io.waitdep = 1;
	hammer_rel_buffer(buffer, 1);
	return(0);
}

/*
 * Reference a buffer that is either already referenced or via a specially
 * handled pointer (aka cursor->buffer).
 */
int
hammer_ref_buffer(hammer_buffer_t buffer)
{
	hammer_mount_t hmp;
	int error;
	int locked;

	/*
	 * Acquire a ref, plus the buffer will be interlocked on the
	 * 0->1 transition.
	 */
	locked = hammer_ref_interlock(&buffer->io.lock);
	hmp = buffer->io.hmp;

	/*
	 * At this point a biodone() will not touch the buffer other than
	 * incidental bits.  However, lose_list can be modified via
	 * a biodone() interrupt.
	 *
	 * No longer loose.  lose_list requires the io_token.
	 */
	if (buffer->io.mod_list == &hmp->lose_list) {
		lwkt_gettoken(&hmp->io_token);
		if (buffer->io.mod_list == &hmp->lose_list) {
			TAILQ_REMOVE(buffer->io.mod_list, &buffer->io,
				     mod_entry);
			buffer->io.mod_list = NULL;
		}
		lwkt_reltoken(&hmp->io_token);
	}

	if (locked) {
		++hammer_count_refedbufs;
		error = hammer_load_buffer(buffer, 0);
		/* NOTE: on error the buffer pointer is stale */
	} else {
		error = 0;
	}
	return(error);
}

/*
 * Release a reference on the buffer.  On the 1->0 transition the
 * underlying IO will be released but the data reference is left
 * cached.
 *
 * Only destroy the structure itself if the related buffer cache buffer
 * was disassociated from it.  This ties the management of the structure
 * to the buffer cache subsystem.  buffer->ondisk determines whether the
 * embedded io is referenced or not.
 */
void
hammer_rel_buffer(hammer_buffer_t buffer, int locked)
{
	hammer_volume_t volume;
	hammer_mount_t hmp;
	struct buf *bp = NULL;
	int freeme = 0;

	hmp = buffer->io.hmp;

	if (hammer_rel_interlock(&buffer->io.lock, locked) == 0)
		return;

	/*
	 * hammer_count_refedbufs accounting.  Decrement if we are in
	 * the error path or if CHECK is clear.
	 *
	 * If we are not in the error path and CHECK is set the caller
	 * probably just did a hammer_ref() and didn't account for it,
	 * so we don't account for the loss here.
	 */
	if (locked || (buffer->io.lock.refs & HAMMER_REFS_CHECK) == 0)
		--hammer_count_refedbufs;

	/*
	 * If the caller locked us or the normal release transitions
	 * from 1->0 (and acquired the lock) attempt to release the
	 * io.  If the caller locked us we tell hammer_io_release()
	 * to flush (which would be the unload or failure path).
	 */
	bp = hammer_io_release(&buffer->io, locked);

	/*
	 * If the buffer has no bp association and no refs we can destroy
	 * it.
	 *
	 * NOTE: It is impossible for any associated B-Tree nodes to have
	 * refs if the buffer has no additional refs.
	 */
	if (buffer->io.bp == NULL && hammer_norefs(&buffer->io.lock)) {
		RB_REMOVE(hammer_buf_rb_tree,
			  &buffer->io.hmp->rb_bufs_root,
			  buffer);
		volume = buffer->io.volume;
		buffer->io.volume = NULL;	/* sanity */
		hammer_rel_volume(volume, 0);
		hammer_io_clear_modlist(&buffer->io);
		hammer_flush_buffer_nodes(buffer);
		KKASSERT(TAILQ_EMPTY(&buffer->clist));
		freeme = 1;
	}

	/*
	 * Cleanup
	 */
	hammer_rel_interlock_done(&buffer->io.lock, locked);
	if (bp)
		brelse(bp);
	if (freeme) {
		--hammer_count_buffers;
		kfree(buffer, hmp->m_misc);
	}
}

/*
 * Access the filesystem buffer containing the specified hammer offset.
 * buf_offset is a conglomeration of the volume number and vol_buf_beg
 * relative buffer offset.  It must also have bit 55 set to be valid.
 * (see hammer_off_t in hammer_disk.h).
 *
 * Any prior buffer in *bufferp will be released and replaced by the
 * requested buffer.
 *
 * NOTE: The buffer is indexed via its zoneX_offset but we allow the
 * passed cached *bufferp to match against either zoneX or zone2.
 */
static __inline
void *
_hammer_bread(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
	      int *errorp, struct hammer_buffer **bufferp)
{
	hammer_buffer_t buffer;
	int32_t xoff = (int32_t)buf_offset & HAMMER_BUFMASK;

	buf_offset &= ~HAMMER_BUFMASK64;
	KKASSERT((buf_offset & HAMMER_OFF_ZONE_MASK) != 0);

	buffer = *bufferp;
	if (buffer == NULL || (buffer->zone2_offset != buf_offset &&
			       buffer->zoneX_offset != buf_offset)) {
		if (buffer)
			hammer_rel_buffer(buffer, 0);
		buffer = hammer_get_buffer(hmp, buf_offset, bytes, 0, errorp);
		*bufferp = buffer;
	} else {
		*errorp = 0;
	}

	/*
	 * Return a pointer to the buffer data.
	 */
	if (buffer == NULL)
		return(NULL);
	else
		return((char *)buffer->ondisk + xoff);
}

void *
hammer_bread(hammer_mount_t hmp, hammer_off_t buf_offset,
	     int *errorp, struct hammer_buffer **bufferp)
{
	return(_hammer_bread(hmp, buf_offset, HAMMER_BUFSIZE, errorp, bufferp));
}

void *
hammer_bread_ext(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
		 int *errorp, struct hammer_buffer **bufferp)
{
	bytes = (bytes + HAMMER_BUFMASK) & ~HAMMER_BUFMASK;
	return(_hammer_bread(hmp, buf_offset, bytes, errorp, bufferp));
}
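
/*
 * Usage sketch (illustrative only, not part of the original source; hmp
 * and buf_offset are caller-supplied placeholders): callers typically
 * thread a cached *bufferp through consecutive hammer_bread() calls and
 * release it once when finished, letting _hammer_bread() swap buffers
 * as the offsets move:
 *
 *	hammer_buffer_t buffer = NULL;
 *	void *data;
 *	int error;
 *
 *	data = hammer_bread(hmp, buf_offset, &error, &buffer);
 *	if (data) {
 *		... read the on-media structure at data ...
 *	}
 *	if (buffer)
 *		hammer_rel_buffer(buffer, 0);
 */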

/*
 * Access the filesystem buffer containing the specified hammer offset.
 * No disk read operation occurs.  The result buffer may contain garbage.
 *
 * Any prior buffer in *bufferp will be released and replaced by the
 * requested buffer.
 *
 * This function marks the buffer dirty but does not increment its
 * modify_refs count.
 */
static __inline
void *
_hammer_bnew(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
	     int *errorp, struct hammer_buffer **bufferp)
{
	hammer_buffer_t buffer;
	int32_t xoff = (int32_t)buf_offset & HAMMER_BUFMASK;

	buf_offset &= ~HAMMER_BUFMASK64;

	buffer = *bufferp;
	if (buffer == NULL || (buffer->zone2_offset != buf_offset &&
			       buffer->zoneX_offset != buf_offset)) {
		if (buffer)
			hammer_rel_buffer(buffer, 0);
		buffer = hammer_get_buffer(hmp, buf_offset, bytes, 1, errorp);
		*bufferp = buffer;
	} else {
		*errorp = 0;
	}

	/*
	 * Return a pointer to the buffer data.
	 */
	if (buffer == NULL)
		return(NULL);
	else
		return((char *)buffer->ondisk + xoff);
}

void *
hammer_bnew(hammer_mount_t hmp, hammer_off_t buf_offset,
	    int *errorp, struct hammer_buffer **bufferp)
{
	return(_hammer_bnew(hmp, buf_offset, HAMMER_BUFSIZE, errorp, bufferp));
}

void *
hammer_bnew_ext(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
		int *errorp, struct hammer_buffer **bufferp)
{
	bytes = (bytes + HAMMER_BUFMASK) & ~HAMMER_BUFMASK;
	return(_hammer_bnew(hmp, buf_offset, bytes, errorp, bufferp));
}

/************************************************************************
 *				NODES					*
 ************************************************************************
 *
 * Manage B-Tree nodes.  B-Tree nodes represent the primary indexing
 * method used by the HAMMER filesystem.
 *
 * Unlike other HAMMER structures, a hammer_node can be PASSIVELY
 * associated with its buffer, and will only reference the buffer while
 * the node itself is referenced.
 *
 * A hammer_node can also be passively associated with other HAMMER
 * structures, such as inodes, while retaining 0 references.  These
 * associations can be cleared backwards using a pointer-to-pointer in
 * the hammer_node.
 *
 * This allows the HAMMER implementation to cache hammer_nodes long-term
 * and short-cut a great deal of the infrastructure's complexity.  In
 * most cases a cached node can be reacquired without having to dip into
 * either the buffer or cluster management code.
 *
 * The caller must pass a referenced cluster on call and will retain
 * ownership of the reference on return.  The node will acquire its own
 * additional references, if necessary.
 */
hammer_node_t
hammer_get_node(hammer_transaction_t trans, hammer_off_t node_offset,
		int isnew, int *errorp)
{
	hammer_mount_t hmp = trans->hmp;
	hammer_node_t node;
	int doload;

	KKASSERT((node_offset & HAMMER_OFF_ZONE_MASK) == HAMMER_ZONE_BTREE);

	/*
	 * Locate the structure, allocating one if necessary.
	 */
again:
	node = RB_LOOKUP(hammer_nod_rb_tree, &hmp->rb_nods_root, node_offset);
	if (node == NULL) {
		++hammer_count_nodes;
		node = kmalloc(sizeof(*node), hmp->m_misc, M_WAITOK|M_ZERO|M_USE_RESERVE);
		node->node_offset = node_offset;
		node->hmp = hmp;
		TAILQ_INIT(&node->cursor_list);
		TAILQ_INIT(&node->cache_list);
		if (RB_INSERT(hammer_nod_rb_tree, &hmp->rb_nods_root, node)) {
			--hammer_count_nodes;
			kfree(node, hmp->m_misc);
			goto again;
		}
		doload = hammer_ref_interlock_true(&node->lock);
	} else {
		doload = hammer_ref_interlock(&node->lock);
	}
	if (doload) {
		*errorp = hammer_load_node(trans, node, isnew);
		trans->flags |= HAMMER_TRANSF_DIDIO;
		if (*errorp)
			node = NULL;
	} else {
		KKASSERT(node->ondisk);
		*errorp = 0;
		hammer_io_advance(&node->buffer->io);
	}
	return(node);
}

/*
 * Reference an already-referenced node.  0->1 transitions should assert
 * so we do not have to deal with hammer_ref() setting CHECK.
 */
void
hammer_ref_node(hammer_node_t node)
{
	KKASSERT(hammer_isactive(&node->lock) && node->ondisk != NULL);
	hammer_ref(&node->lock);
}

/*
 * Load a node's on-disk data reference.  Called with the node referenced
 * and interlocked.
 *
 * On return the node interlock will be unlocked.  If a non-zero error code
 * is returned the node will also be dereferenced (and the caller's pointer
 * will be stale).
 */
static int
hammer_load_node(hammer_transaction_t trans, hammer_node_t node, int isnew)
{
	hammer_buffer_t buffer;
	hammer_off_t buf_offset;
	int error;

	error = 0;
	if (node->ondisk == NULL) {
		/*
		 * This is a little confusing but the gist is that
		 * node->buffer determines whether the node is on
		 * the buffer's clist and node->ondisk determines
		 * whether the buffer is referenced.
		 *
		 * We could be racing a buffer release, in which case
		 * node->buffer may become NULL while we are blocked
		 * referencing the buffer.
		 */
		if ((buffer = node->buffer) != NULL) {
			error = hammer_ref_buffer(buffer);
			if (error == 0 && node->buffer == NULL) {
				TAILQ_INSERT_TAIL(&buffer->clist,
						  node, entry);
				node->buffer = buffer;
			}
		} else {
			buf_offset = node->node_offset & ~HAMMER_BUFMASK64;
			buffer = hammer_get_buffer(node->hmp, buf_offset,
						   HAMMER_BUFSIZE, 0, &error);
			if (buffer) {
				KKASSERT(error == 0);
				TAILQ_INSERT_TAIL(&buffer->clist,
						  node, entry);
				node->buffer = buffer;
			}
		}
		if (error)
			goto failed;
		node->ondisk = (void *)((char *)buffer->ondisk +
					(node->node_offset & HAMMER_BUFMASK));

		/*
		 * Check CRC.  NOTE: Neither flag is set and the CRC is not
		 * generated on new B-Tree nodes.
		 */
		if (isnew == 0 &&
		    (node->flags & HAMMER_NODE_CRCANY) == 0) {
			if (hammer_crc_test_btree(node->ondisk) == 0) {
				if (hammer_debug_critical)
					Debugger("CRC FAILED: B-TREE NODE");
				node->flags |= HAMMER_NODE_CRCBAD;
			} else {
				node->flags |= HAMMER_NODE_CRCGOOD;
			}
		}
	}
	if (node->flags & HAMMER_NODE_CRCBAD) {
		if (trans->flags & HAMMER_TRANSF_CRCDOM)
			error = EDOM;
		else
			error = EIO;
	}
failed:
	if (error) {
		_hammer_rel_node(node, 1);
	} else {
		hammer_ref_interlock_done(&node->lock);
	}
	return (error);
}

/*
 * Safely reference a node, interlock against flushes via the IO subsystem.
 */
hammer_node_t
hammer_ref_node_safe(hammer_transaction_t trans, hammer_node_cache_t cache,
		     int *errorp)
{
	hammer_node_t node;
	int doload;

	node = cache->node;
	if (node != NULL) {
		doload = hammer_ref_interlock(&node->lock);
		if (doload) {
			*errorp = hammer_load_node(trans, node, 0);
			if (*errorp)
				node = NULL;
		} else {
			KKASSERT(node->ondisk);
			if (node->flags & HAMMER_NODE_CRCBAD) {
				if (trans->flags & HAMMER_TRANSF_CRCDOM)
					*errorp = EDOM;
				else
					*errorp = EIO;
				_hammer_rel_node(node, 0);
				node = NULL;
			} else {
				*errorp = 0;
			}
		}
	} else {
		*errorp = ENOENT;
	}
	return(node);
}

/*
 * Release a hammer_node.  On the last release the node dereferences
 * its underlying buffer and may or may not be destroyed.
 *
 * If locked is non-zero the passed node has been interlocked by the
 * caller and we are in the failure/unload path, otherwise it has not and
 * we are doing a normal release.
 *
 * This function will dispose of the interlock and the reference.
 * On return the node pointer is stale.
 */
void
_hammer_rel_node(hammer_node_t node, int locked)
{
	hammer_buffer_t buffer;

	/*
	 * Deref the node.  If this isn't the 1->0 transition we're basically
	 * done.  If locked is non-zero this function will just deref the
	 * locked node and return TRUE, otherwise it will deref the locked
	 * node and either lock and return TRUE on the 1->0 transition or
	 * not lock and return FALSE.
	 */
	if (hammer_rel_interlock(&node->lock, locked) == 0)
		return;

	/*
	 * Either locked was non-zero and we are interlocked, or the
	 * hammer_rel_interlock() call returned non-zero and we are
	 * interlocked.
	 *
	 * The ref-count must still be decremented if locked != 0 so
	 * the cleanup required still varies a bit.
	 *
	 * hammer_flush_node() when called with 1 or 2 will dispose of
	 * the lock and possible ref-count.
	 */
	if (node->ondisk == NULL) {
		hammer_flush_node(node, locked + 1);
		/* node is stale now */
		return;
	}

	/*
	 * Do not disassociate the node from the buffer if it represents
	 * a modified B-Tree node that still needs its crc to be generated.
	 */
	if (node->flags & HAMMER_NODE_NEEDSCRC) {
		hammer_rel_interlock_done(&node->lock, locked);
		return;
	}

	/*
	 * Do final cleanups and then either destroy the node or leave it
	 * passively cached.  The buffer reference is removed regardless.
	 */
	buffer = node->buffer;
	node->ondisk = NULL;

	if ((node->flags & HAMMER_NODE_FLUSH) == 0) {
		/*
		 * Normal release.
		 */
		hammer_rel_interlock_done(&node->lock, locked);
	} else {
		/*
		 * Destroy the node.
		 */
		hammer_flush_node(node, locked + 1);
		/* node is stale */
	}
	hammer_rel_buffer(buffer, 0);
}

void
hammer_rel_node(hammer_node_t node)
{
	_hammer_rel_node(node, 0);
}
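
/*
 * Usage sketch (illustrative only, not part of the original source; trans
 * and node_offset are caller-supplied placeholders): B-Tree consumers
 * acquire a node with hammer_get_node(), inspect node->ondisk while the
 * reference is held, and drop it with hammer_rel_node():
 *
 *	hammer_node_t node;
 *	int error;
 *
 *	node = hammer_get_node(trans, node_offset, 0, &error);
 *	if (node) {
 *		... examine node->ondisk ...
 *		hammer_rel_node(node);
 *	}
 */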

/*
 * Free space on-media associated with a B-Tree node.
 */
void
hammer_delete_node(hammer_transaction_t trans, hammer_node_t node)
{
	KKASSERT((node->flags & HAMMER_NODE_DELETED) == 0);
	node->flags |= HAMMER_NODE_DELETED;
	hammer_blockmap_free(trans, node->node_offset, sizeof(*node->ondisk));
}

/*
 * Passively cache a referenced hammer_node.  The caller may release
 * the node on return.
 */
void
hammer_cache_node(hammer_node_cache_t cache, hammer_node_t node)
{
	/*
	 * If the node doesn't exist, or is being deleted, don't cache it!
	 *
	 * The node can only ever be NULL in the I/O failure path.
	 */
	if (node == NULL || (node->flags & HAMMER_NODE_DELETED))
		return;
	if (cache->node == node)
		return;
	while (cache->node)
		hammer_uncache_node(cache);
	if (node->flags & HAMMER_NODE_DELETED)
		return;
	cache->node = node;
	TAILQ_INSERT_TAIL(&node->cache_list, cache, entry);
}

void
hammer_uncache_node(hammer_node_cache_t cache)
{
	hammer_node_t node;

	if ((node = cache->node) != NULL) {
		TAILQ_REMOVE(&node->cache_list, cache, entry);
		cache->node = NULL;
		if (TAILQ_EMPTY(&node->cache_list))
			hammer_flush_node(node, 0);
	}
}

/*
 * Remove a node's cache references and destroy the node if it has no
 * other references or backing store.
 *
 *	locked == 0	Normal unlocked operation
 *	locked == 1	Call hammer_rel_interlock_done(..., 0);
 *	locked == 2	Call hammer_rel_interlock_done(..., 1);
 *
 * XXX for now this isn't even close to being MPSAFE so the refs check
 *     is sufficient.
 */
void
hammer_flush_node(hammer_node_t node, int locked)
{
	hammer_node_cache_t cache;
	hammer_buffer_t buffer;
	hammer_mount_t hmp = node->hmp;
	int dofree;

	while ((cache = TAILQ_FIRST(&node->cache_list)) != NULL) {
		TAILQ_REMOVE(&node->cache_list, cache, entry);
		cache->node = NULL;
	}

	/*
	 * NOTE: refs is predisposed if another thread is blocking and
	 *	 will be larger than 0 in that case.  We aren't MPSAFE
	 *	 here.
	 */
	if (node->ondisk == NULL && hammer_norefs(&node->lock)) {
		KKASSERT((node->flags & HAMMER_NODE_NEEDSCRC) == 0);
		RB_REMOVE(hammer_nod_rb_tree, &node->hmp->rb_nods_root, node);
		if ((buffer = node->buffer) != NULL) {
			node->buffer = NULL;
			TAILQ_REMOVE(&buffer->clist, node, entry);
			/* buffer is unreferenced because ondisk is NULL */
		}
		dofree = 1;
	} else {
		dofree = 0;
	}

	/*
	 * Deal with the interlock if locked == 1 or locked == 2.
	 */
	if (locked)
		hammer_rel_interlock_done(&node->lock, locked - 1);

	/*
	 * Destroy if requested
	 */
	if (dofree) {
		--hammer_count_nodes;
		kfree(node, hmp->m_misc);
	}
}

/*
 * Flush passively cached B-Tree nodes associated with this buffer.
 * This is only called when the buffer is about to be destroyed, so
 * none of the nodes should have any references.  The buffer is locked.
 *
 * We may be interlocked with the buffer.
 */
void
hammer_flush_buffer_nodes(hammer_buffer_t buffer)
{
	hammer_node_t node;

	while ((node = TAILQ_FIRST(&buffer->clist)) != NULL) {
		KKASSERT(node->ondisk == NULL);
		KKASSERT((node->flags & HAMMER_NODE_NEEDSCRC) == 0);

		if (hammer_try_interlock_norefs(&node->lock)) {
			hammer_ref(&node->lock);
			node->flags |= HAMMER_NODE_FLUSH;
			_hammer_rel_node(node, 1);
		} else {
			KKASSERT(node->buffer != NULL);
			buffer = node->buffer;
			node->buffer = NULL;
			TAILQ_REMOVE(&buffer->clist, node, entry);
			/* buffer is unreferenced because ondisk is NULL */
		}
	}
}


/************************************************************************
 *				ALLOCATORS				*
 ************************************************************************/

/*
 * Allocate a B-Tree node.
 */
hammer_node_t
hammer_alloc_btree(hammer_transaction_t trans, hammer_off_t hint, int *errorp)
{
	hammer_buffer_t buffer = NULL;
	hammer_node_t node = NULL;
	hammer_off_t node_offset;

	node_offset = hammer_blockmap_alloc(trans, HAMMER_ZONE_BTREE_INDEX,
					    sizeof(struct hammer_node_ondisk),
					    hint, errorp);
	if (*errorp == 0) {
		node = hammer_get_node(trans, node_offset, 1, errorp);
		hammer_modify_node_noundo(trans, node);
		bzero(node->ondisk, sizeof(*node->ondisk));
		hammer_modify_node_done(node);
	}
	if (buffer)
		hammer_rel_buffer(buffer, 0);
	return(node);
}

/*
 * Allocate data.  If the address of a data buffer is supplied then
 * any prior non-NULL *data_bufferp will be released and *data_bufferp
 * will be set to the related buffer.  The caller must release it when
 * finally done.  The initial *data_bufferp should be set to NULL by
 * the caller.
 *
 * The caller is responsible for making hammer_modify*() calls on the
 * *data_bufferp.
 */
void *
hammer_alloc_data(hammer_transaction_t trans, int32_t data_len,
		  u_int16_t rec_type, hammer_off_t *data_offsetp,
		  struct hammer_buffer **data_bufferp,
		  hammer_off_t hint, int *errorp)
{
	void *data;
	int zone;

	/*
	 * Allocate data
	 */
	if (data_len) {
		switch(rec_type) {
		case HAMMER_RECTYPE_INODE:
		case HAMMER_RECTYPE_DIRENTRY:
		case HAMMER_RECTYPE_EXT:
		case HAMMER_RECTYPE_FIX:
		case HAMMER_RECTYPE_PFS:
		case HAMMER_RECTYPE_SNAPSHOT:
		case HAMMER_RECTYPE_CONFIG:
			zone = HAMMER_ZONE_META_INDEX;
			break;
		case HAMMER_RECTYPE_DATA:
		case HAMMER_RECTYPE_DB:
			if (data_len <= HAMMER_BUFSIZE / 2) {
				zone = HAMMER_ZONE_SMALL_DATA_INDEX;
			} else {
				data_len = (data_len + HAMMER_BUFMASK) &
					   ~HAMMER_BUFMASK;
				zone = HAMMER_ZONE_LARGE_DATA_INDEX;
			}
			break;
		default:
			panic("hammer_alloc_data: rec_type %04x unknown",
			      rec_type);
			zone = 0;	/* NOT REACHED */
			break;
		}
		*data_offsetp = hammer_blockmap_alloc(trans, zone, data_len,
						      hint, errorp);
	} else {
		*data_offsetp = 0;
	}
	if (*errorp == 0 && data_bufferp) {
		if (data_len) {
			data = hammer_bread_ext(trans->hmp, *data_offsetp,
						data_len, errorp, data_bufferp);
		} else {
			data = NULL;
		}
	} else {
		data = NULL;
	}
	return(data);
}
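
/*
 * Usage sketch (illustrative only, not part of the original source; it
 * assumes the hammer_modify_buffer()/hammer_modify_buffer_done() helpers
 * declared in hammer.h, and trans, data_len, hint and src are caller
 * placeholders): a caller passes a NULL-initialized data_buffer, brackets
 * its stores with the modify calls, and releases the buffer when done:
 *
 *	struct hammer_buffer *data_buffer = NULL;
 *	hammer_off_t data_offset;
 *	void *data;
 *	int error;
 *
 *	data = hammer_alloc_data(trans, data_len, HAMMER_RECTYPE_DATA,
 *				 &data_offset, &data_buffer, hint, &error);
 *	if (data) {
 *		hammer_modify_buffer(trans, data_buffer, NULL, 0);
 *		bcopy(src, data, data_len);
 *		hammer_modify_buffer_done(data_buffer);
 *	}
 *	if (data_buffer)
 *		hammer_rel_buffer(data_buffer, 0);
 */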

/*
 * Sync dirty buffers to the media and clean up any loose ends.
 *
 * These functions do not start the flusher going, they simply
 * queue everything up to the flusher.
 */
static int hammer_sync_scan1(struct mount *mp, struct vnode *vp, void *data);
static int hammer_sync_scan2(struct mount *mp, struct vnode *vp, void *data);

int
hammer_queue_inodes_flusher(hammer_mount_t hmp, int waitfor)
{
	struct hammer_sync_info info;

	info.error = 0;
	info.waitfor = waitfor;
	if (waitfor == MNT_WAIT) {
		vmntvnodescan(hmp->mp, VMSC_GETVP|VMSC_ONEPASS,
			      hammer_sync_scan1, hammer_sync_scan2, &info);
	} else {
		vmntvnodescan(hmp->mp, VMSC_GETVP|VMSC_ONEPASS|VMSC_NOWAIT,
			      hammer_sync_scan1, hammer_sync_scan2, &info);
	}
	return(info.error);
}

/*
 * Filesystem sync.  If doing a synchronous sync make a second pass on
 * the vnodes in case any were already flushing during the first pass,
 * and activate the flusher twice (the second time brings the UNDO FIFO's
 * start position up to the end position after the first call).
 *
 * If doing a lazy sync make just one pass on the vnode list, ignoring
 * any new vnodes added to the list while the sync is in progress.
 */
int
hammer_sync_hmp(hammer_mount_t hmp, int waitfor)
{
	struct hammer_sync_info info;
	int flags;

	flags = VMSC_GETVP;
	if (waitfor & MNT_LAZY)
		flags |= VMSC_ONEPASS;

	info.error = 0;
	info.waitfor = MNT_NOWAIT;
	vmntvnodescan(hmp->mp, flags | VMSC_NOWAIT,
		      hammer_sync_scan1, hammer_sync_scan2, &info);

	if (info.error == 0 && (waitfor & MNT_WAIT)) {
		info.waitfor = waitfor;
		vmntvnodescan(hmp->mp, flags,
			      hammer_sync_scan1, hammer_sync_scan2, &info);
	}
	if (waitfor == MNT_WAIT) {
		hammer_flusher_sync(hmp);
		hammer_flusher_sync(hmp);
	} else {
		hammer_flusher_async(hmp, NULL);
		hammer_flusher_async(hmp, NULL);
	}
	return(info.error);
}

static int
hammer_sync_scan1(struct mount *mp, struct vnode *vp, void *data)
{
	struct hammer_inode *ip;

	ip = VTOI(vp);
	if (vp->v_type == VNON || ip == NULL ||
	    ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
	     RB_EMPTY(&vp->v_rbdirty_tree))) {
		return(-1);
	}
	return(0);
}

static int
hammer_sync_scan2(struct mount *mp, struct vnode *vp, void *data)
{
	struct hammer_sync_info *info = data;
	struct hammer_inode *ip;
	int error;

	ip = VTOI(vp);
	if (vp->v_type == VNON || vp->v_type == VBAD ||
	    ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
	     RB_EMPTY(&vp->v_rbdirty_tree))) {
		return(0);
	}
	error = VOP_FSYNC(vp, MNT_NOWAIT, 0);
	if (error)
		info->error = error;
	return(0);
}