/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_io.c,v 1.26 2008/04/25 21:49:49 dillon Exp $
 */
/*
 * IO Primitives and buffer cache management
 *
 * All major data-tracking structures in HAMMER contain a struct hammer_io
 * which is used to manage their backing store.  We use filesystem buffers
 * for backing store and we leave them passively associated with their
 * HAMMER structures.
 *
 * If the kernel tries to release a passively associated buf which we cannot
 * yet let go we set B_LOCKED in the buffer and then actively release it
 * later when we can.
 */

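/*
 * A minimal sketch (for orientation only; the authoritative definition
 * lives in hammer.h) of the hammer_io fields this file manipulates.
 * Field types are approximate:
 *
 *	struct hammer_io {
 *		struct worklist	 worklist;	-- b_dep linkage to the bp
 *		struct hammer_lock lock;	-- refs govern disassociation
 *		enum hammer_io_type type;	-- volume/meta/undo/data
 *		hammer_mount_t	 hmp;		-- owning mount
 *		TAILQ_ENTRY(hammer_io) mod_entry; -- dirty-queue linkage
 *		struct hammer_io_list *mod_list; -- dirty queue (or lose_list)
 *		struct buf	 *bp;		-- passively associated buffer
 *		int64_t		 offset;	-- device offset for bread()
 *		int		 modify_refs;	-- modifications in progress
 *		u_int		 modified : 1;	-- dirty, must be flushed
 *		u_int		 released : 1;	-- bp ownership given back
 *		u_int		 running : 1;	-- write I/O in progress
 *		u_int		 waiting : 1;	-- someone in hammer_io_wait()
 *		u_int		 flush : 1;	-- explicit flush requested
 *		u_int		 waitdep : 1;	-- wait for I/O on release
 *	};
 */
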
#include "hammer.h"
#include <sys/fcntl.h>
#include <sys/nlookup.h>
#include <sys/buf.h>
#include <sys/buf2.h>

static void hammer_io_modify(hammer_io_t io, int count);
static void hammer_io_deallocate(struct buf *bp);

/*
 * Initialize a new, already-zero'd hammer_io structure, or reinitialize
 * an existing hammer_io structure which may have switched to another type.
 */
void
hammer_io_init(hammer_io_t io, hammer_mount_t hmp, enum hammer_io_type type)
{
	io->hmp = hmp;
	io->type = type;
}

void
hammer_io_reinit(hammer_io_t io, enum hammer_io_type type)
{
	hammer_mount_t hmp = io->hmp;

	if (io->modified) {
		KKASSERT(io->mod_list != NULL);
		if (io->mod_list == &hmp->volu_list ||
		    io->mod_list == &hmp->meta_list) {
			--hmp->locked_dirty_count;
		}
		TAILQ_REMOVE(io->mod_list, io, mod_entry);
		io->mod_list = NULL;
	}
	io->type = type;
	if (io->modified) {
		switch(io->type) {
		case HAMMER_STRUCTURE_VOLUME:
			io->mod_list = &hmp->volu_list;
			++hmp->locked_dirty_count;
			break;
		case HAMMER_STRUCTURE_META_BUFFER:
			io->mod_list = &hmp->meta_list;
			++hmp->locked_dirty_count;
			break;
		case HAMMER_STRUCTURE_UNDO_BUFFER:
			io->mod_list = &hmp->undo_list;
			break;
		case HAMMER_STRUCTURE_DATA_BUFFER:
			io->mod_list = &hmp->data_list;
			break;
		}
		TAILQ_INSERT_TAIL(io->mod_list, io, mod_entry);
	}
}

/*
 * Helper routine to disassociate a buffer cache buffer from an I/O
 * structure.  Called with the io structure exclusively locked.
 *
 * The io may have 0 or 1 references depending on who called us.  The
 * caller is responsible for dealing with the refs.
 *
 * This call can only be made when no action is required on the buffer.
 * HAMMER must own the buffer (released == 0) since we mess around with it.
 */
static void
hammer_io_disassociate(hammer_io_structure_t iou, int elseit)
{
	struct buf *bp = iou->io.bp;

	KKASSERT(iou->io.modified == 0);
	buf_dep_init(bp);
	iou->io.bp = NULL;
	bp->b_flags &= ~B_LOCKED;
	if (elseit) {
		KKASSERT(iou->io.released == 0);
		iou->io.released = 1;
		bqrelse(bp);
	} else {
		KKASSERT(iou->io.released);
	}

	switch(iou->io.type) {
	case HAMMER_STRUCTURE_VOLUME:
		iou->volume.ondisk = NULL;
		break;
	case HAMMER_STRUCTURE_DATA_BUFFER:
	case HAMMER_STRUCTURE_META_BUFFER:
	case HAMMER_STRUCTURE_UNDO_BUFFER:
		iou->buffer.ondisk = NULL;
		break;
	}
}

/*
 * Wait for any physical IO to complete
 */
static void
hammer_io_wait(hammer_io_t io)
{
	if (io->running) {
		crit_enter();
		tsleep_interlock(io);
		io->waiting = 1;
		for (;;) {
			tsleep(io, 0, "hmrflw", 0);
			if (io->running == 0)
				break;
			tsleep_interlock(io);
			io->waiting = 1;
			if (io->running == 0)
				break;
		}
		crit_exit();
	}
}

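/*
 * Note on the loop above: tsleep_interlock() queues us on the sleep
 * channel before io->waiting is set and io->running is re-tested, so a
 * wakeup() issued by hammer_io_complete() in that window is not lost.
 * The critical section keeps the setup atomic against preemption by
 * the I/O completion on this cpu.
 */
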
/*
 * Load bp for a HAMMER structure.  The io must be exclusively locked by
 * the caller.
 */
int
hammer_io_read(struct vnode *devvp, struct hammer_io *io)
{
	struct buf *bp;
	int error;

	if ((bp = io->bp) == NULL) {
		error = bread(devvp, io->offset, HAMMER_BUFSIZE, &io->bp);
		if (error == 0) {
			bp = io->bp;
			bp->b_ops = &hammer_bioops;
			LIST_INSERT_HEAD(&bp->b_dep, &io->worklist, node);
			BUF_KERNPROC(bp);
		}
		KKASSERT(io->modified == 0);
		KKASSERT(io->running == 0);
		KKASSERT(io->waiting == 0);
		io->released = 0;	/* we hold an active lock on bp */
	} else {
		error = 0;
	}
	return(error);
}

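/*
 * Typical usage, as a hedged sketch (the real call sites are the volume
 * and buffer load paths in hammer_ondisk.c; the locals here are
 * illustrative only):
 *
 *	buffer->io.offset = <device offset of the buffer>;
 *	error = hammer_io_read(volume->devvp, &buffer->io);
 *	if (error == 0)
 *		buffer->ondisk = (void *)buffer->io.bp->b_data;
 *
 * On success the bp is actively held (io->released == 0) and wired to
 * the io via b_dep/worklist, so the bioops callbacks below fire for it.
 */
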
/*
 * Similar to hammer_io_read() but returns a zero'd out buffer instead.
 * Must be called with the IO exclusively locked.
 *
 * vfs_bio_clrbuf() is kinda nasty, enforce serialization against background
 * I/O by forcing the buffer to not be in a released state before calling
 * it.
 *
 * This function will also mark the IO as modified but it will not
 * increment the modify_refs count.
 */
int
hammer_io_new(struct vnode *devvp, struct hammer_io *io)
{
	struct buf *bp;

	if ((bp = io->bp) == NULL) {
		io->bp = getblk(devvp, io->offset, HAMMER_BUFSIZE, 0, 0);
		bp = io->bp;
		bp->b_ops = &hammer_bioops;
		LIST_INSERT_HEAD(&bp->b_dep, &io->worklist, node);
		io->released = 0;
		KKASSERT(io->running == 0);
		io->waiting = 0;
		BUF_KERNPROC(bp);
	} else {
		if (io->released) {
			regetblk(bp);
			BUF_KERNPROC(bp);
			io->released = 0;
		}
	}
	hammer_io_modify(io, 0);
	vfs_bio_clrbuf(bp);
	return(0);
}

/*
 * This routine is called on the last reference to a hammer structure.
 * The io is usually locked exclusively (but may not be during unmount).
 *
 * This routine is responsible for the disposition of the buffer cache
 * buffer backing the IO.  Only pure-data and undo buffers can be handed
 * back to the kernel.  Volume and meta-data buffers must be retained
 * by HAMMER until explicitly flushed by the backend.
 */
void
hammer_io_release(struct hammer_io *io)
{
	struct buf *bp;

	if ((bp = io->bp) == NULL)
		return;

	/*
	 * Try to flush a dirty IO to disk if asked to by the
	 * caller or if the kernel tried to flush the buffer in the past.
	 *
	 * Kernel-initiated flushes are only allowed for pure-data buffers.
	 * Meta-data and volume buffers can only be flushed explicitly
	 * by HAMMER.
	 */
	if (io->modified) {
		if (io->flush) {
			hammer_io_flush(io);
		} else if (bp->b_flags & B_LOCKED) {
			switch(io->type) {
			case HAMMER_STRUCTURE_DATA_BUFFER:
			case HAMMER_STRUCTURE_UNDO_BUFFER:
				hammer_io_flush(io);
				break;
			default:
				break;
			}
		} /* else no explicit request to flush the buffer */
	}

	/*
	 * Wait for the IO to complete if asked to.
	 */
	if (io->waitdep && io->running) {
		hammer_io_wait(io);
	}

	/*
	 * Return control of the buffer to the kernel (with the proviso
	 * that our bioops can override kernel decisions with regards to
	 * the buffer).
	 */
	if (io->flush && io->modified == 0 && io->running == 0) {
		/*
		 * Always disassociate the bp if an explicit flush
		 * was requested and the IO completed with no error
		 * (so unmount can really clean up the structure).
		 */
		if (io->released) {
			regetblk(bp);
			BUF_KERNPROC(bp);
			io->released = 0;
		}
		hammer_io_disassociate((hammer_io_structure_t)io, 1);
	} else if (io->modified) {
		/*
		 * Only certain IO types can be released to the kernel.
		 * volume and meta-data IO types must be explicitly flushed
		 * by HAMMER.
		 */
		switch(io->type) {
		case HAMMER_STRUCTURE_DATA_BUFFER:
		case HAMMER_STRUCTURE_UNDO_BUFFER:
			if (io->released == 0) {
				io->released = 1;
				bdwrite(bp);
			}
			break;
		default:
			break;
		}
	} else if (io->released == 0) {
		/*
		 * Clean buffers can be generally released to the kernel.
		 * We leave the bp passively associated with the HAMMER
		 * structure and use bioops to disconnect it later on
		 * if the kernel wants to discard the buffer.
		 */
		io->released = 1;
		bqrelse(bp);
	}
}

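/*
 * Summary of the disposition paths above (a reading aid, not new policy):
 *
 *	explicit flush done, clean, idle  -> disassociate the bp entirely
 *	still modified, data/undo type    -> bdwrite() (kernel may write)
 *	still modified, volume/meta type  -> keep bp, flusher must write
 *	clean, bp actively held           -> bqrelse() (passive association)
 */
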
/*
 * This routine is called with a locked IO when a flush is desired and
 * no other references to the structure exist other than ours.  This
 * routine is ONLY called when HAMMER believes it is safe to flush a
 * potentially modified buffer out.
 */
void
hammer_io_flush(struct hammer_io *io)
{
	struct buf *bp;

	/*
	 * Degenerate case - nothing to flush if nothing is dirty.
	 */
	if (io->modified == 0) {
		io->flush = 0;
		return;
	}

	KKASSERT(io->bp);
	KKASSERT(io->modify_refs == 0);

	/*
	 * Acquire exclusive access to the bp and then clear the modified
	 * state of the buffer prior to issuing I/O to interlock any
	 * modifications made while the I/O is in progress.  This shouldn't
	 * happen anyway but losing data would be worse.  The modified bit
	 * will be rechecked after the IO completes.
	 *
	 * This is only legal when lock.refs == 1 (otherwise we might clear
	 * the modified bit while there are still users of the cluster
	 * modifying the data).
	 *
	 * Do this before potentially blocking so any attempt to modify the
	 * ondisk while we are blocked blocks waiting for us.
	 */
	KKASSERT(io->mod_list != NULL);
	if (io->mod_list == &io->hmp->volu_list ||
	    io->mod_list == &io->hmp->meta_list) {
		--io->hmp->locked_dirty_count;
	}
	TAILQ_REMOVE(io->mod_list, io, mod_entry);
	io->mod_list = NULL;
	io->modified = 0;
	io->flush = 0;
	bp = io->bp;

	/*
	 * Acquire ownership (released variable set for clarity)
	 */
	if (io->released) {
		regetblk(bp);
		/* BUF_KERNPROC(io->bp); */
		io->released = 0;
	}

	/*
	 * Transfer ownership to the kernel and initiate I/O.
	 */
	io->released = 1;
	io->running = 1;
	bawrite(bp);
}

/************************************************************************
 *				BUFFER DIRTYING				*
 ************************************************************************
 *
 * These routines deal with dependencies created when IO buffers get
 * modified.  The caller must call hammer_modify_*() on a referenced
 * HAMMER structure prior to modifying its on-disk data.
 *
 * Any intent to modify an IO buffer acquires the related bp and imposes
 * various write ordering dependencies.
 */

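/*
 * The caller-side protocol, as a hedged sketch (the element and field
 * names here are made up; the real call sites live in the b-tree and
 * blockmap code):
 *
 *	hammer_modify_buffer(trans, buffer, &elm->leaf, sizeof(elm->leaf));
 *	elm->leaf.base.key = new_key;		<- mutate the ondisk image
 *	hammer_modify_buffer_done(buffer);
 *
 * The modify call re-acquires the bp if necessary, queues the io on the
 * appropriate mod_list, generates an UNDO record covering the stated
 * range, and bumps modify_refs; the done call drops modify_refs so
 * hammer_io_flush() (which asserts modify_refs == 0) can proceed.
 */
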
/*
 * Mark a HAMMER structure as undergoing modification.  Meta-data buffers
 * are locked until the flusher can deal with them, pure data buffers
 * can be written out.
 */
static
void
hammer_io_modify(hammer_io_t io, int count)
{
	struct hammer_mount *hmp = io->hmp;

	/*
	 * Shortcut if nothing to do.
	 */
	KKASSERT(io->lock.refs != 0 && io->bp != NULL);
	io->modify_refs += count;
	if (io->modified && io->released == 0)
		return;

	hammer_lock_ex(&io->lock);
	if (io->modified == 0) {
		KKASSERT(io->mod_list == NULL);
		switch(io->type) {
		case HAMMER_STRUCTURE_VOLUME:
			io->mod_list = &hmp->volu_list;
			++hmp->locked_dirty_count;
			break;
		case HAMMER_STRUCTURE_META_BUFFER:
			io->mod_list = &hmp->meta_list;
			++hmp->locked_dirty_count;
			break;
		case HAMMER_STRUCTURE_UNDO_BUFFER:
			io->mod_list = &hmp->undo_list;
			break;
		case HAMMER_STRUCTURE_DATA_BUFFER:
			io->mod_list = &hmp->data_list;
			break;
		}
		TAILQ_INSERT_TAIL(io->mod_list, io, mod_entry);
		io->modified = 1;
	}
	if (io->released) {
		regetblk(io->bp);
		BUF_KERNPROC(io->bp);
		io->released = 0;
		KKASSERT(io->modified != 0);
	}
	hammer_unlock(&io->lock);
}

static __inline
void
hammer_io_modify_done(hammer_io_t io)
{
	KKASSERT(io->modify_refs > 0);
	--io->modify_refs;
}

void
hammer_modify_volume(hammer_transaction_t trans, hammer_volume_t volume,
		     void *base, int len)
{
	hammer_io_modify(&volume->io, 1);

	if (len) {
		intptr_t rel_offset = (intptr_t)base - (intptr_t)volume->ondisk;
		KKASSERT((rel_offset & ~(intptr_t)HAMMER_BUFMASK) == 0);
		hammer_generate_undo(trans, &volume->io,
			HAMMER_ENCODE_RAW_VOLUME(volume->vol_no, rel_offset),
			base, len);
	}
}

/*
 * Caller intends to modify a buffer's ondisk structure.  The related
 * cluster must be marked open prior to being able to flush the modified
 * buffer so get that I/O going now.
 */
void
hammer_modify_buffer(hammer_transaction_t trans, hammer_buffer_t buffer,
		     void *base, int len)
{
	hammer_io_modify(&buffer->io, 1);
	if (len) {
		intptr_t rel_offset = (intptr_t)base - (intptr_t)buffer->ondisk;
		KKASSERT((rel_offset & ~(intptr_t)HAMMER_BUFMASK) == 0);
		hammer_generate_undo(trans, &buffer->io,
				     buffer->zone2_offset + rel_offset,
				     base, len);
	}
}

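/*
 * Note on the undo addressing above: rel_offset is the byte offset of
 * the modified range within the backing buffer, and the KKASSERT on
 * HAMMER_BUFMASK enforces that the range starts inside that buffer.
 * Volumes encode the undo address from (vol_no, rel_offset) via
 * HAMMER_ENCODE_RAW_VOLUME; buffers add rel_offset to their zone-2
 * (raw buffer) offset.
 */
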
void
hammer_modify_volume_done(hammer_volume_t volume)
{
	hammer_io_modify_done(&volume->io);
}

void
hammer_modify_buffer_done(hammer_buffer_t buffer)
{
	hammer_io_modify_done(&buffer->io);
}

/*
 * Mark an entity as not being dirty any more -- this usually occurs when
 * the governing a-list has freed the entire entity.
 *
 * XXX
 */
void
hammer_io_clear_modify(struct hammer_io *io)
{
#if 0
	struct buf *bp;

	io->modified = 0;
	XXX mod_list/entry
	if ((bp = io->bp) != NULL) {
		if (io->released) {
			regetblk(bp);
			/* BUF_KERNPROC(io->bp); */
		} else {
			io->released = 1;
		}
		if (io->modified == 0) {
			kprintf("hammer_io_clear_modify: cleared %p\n", io);
			bundirty(bp);
			bqrelse(bp);
		} else {
			bdwrite(bp);
		}
	}
#endif
}

/************************************************************************
 *				HAMMER_BIOOPS				*
 ************************************************************************
 *
 */

/*
 * Pre-IO initiation kernel callback - cluster build only
 */
static void
hammer_io_start(struct buf *bp)
{
}

/*
 * Post-IO completion kernel callback
 *
 * NOTE: HAMMER may modify a buffer after initiating I/O.  The modified bit
 * may also be set if we were marking a cluster header open.  Only remove
 * our dependency if the modified bit is clear.
 */
static void
hammer_io_complete(struct buf *bp)
{
	union hammer_io_structure *iou = (void *)LIST_FIRST(&bp->b_dep);

	KKASSERT(iou->io.released == 1);

	/*
	 * If no lock references remain and we can acquire the IO lock and
	 * someone at some point wanted us to flush (B_LOCKED test), then
	 * try to dispose of the IO.
	 */
	iou->io.running = 0;
	if (iou->io.waiting) {
		iou->io.waiting = 0;
		wakeup(iou);
	}

	/*
	 * Someone wanted us to flush, try to clean out the buffer.
	 */
	if ((bp->b_flags & B_LOCKED) && iou->io.lock.refs == 0) {
		KKASSERT(iou->io.modified == 0);
		bp->b_flags &= ~B_LOCKED;
		hammer_io_deallocate(bp);
		/* structure may be dead now */
	}
}

/*
 * Callback from kernel when it wishes to deallocate a passively
 * associated structure.  This mostly occurs with clean buffers
 * but it may be possible for a holding structure to be marked dirty
 * while its buffer is passively associated.
 *
 * If we cannot disassociate we set B_LOCKED to prevent the buffer
 * from getting reused.
 *
 * WARNING: Because this can be called directly by getnewbuf we cannot
 * recurse into the tree.  If a bp cannot be immediately disassociated
 * our only recourse is to set B_LOCKED.
 */
static void
hammer_io_deallocate(struct buf *bp)
{
	hammer_io_structure_t iou = (void *)LIST_FIRST(&bp->b_dep);

	KKASSERT((bp->b_flags & B_LOCKED) == 0 && iou->io.running == 0);
	if (iou->io.lock.refs > 0 || iou->io.modified) {
		/*
		 * It is not legal to disassociate a modified buffer.  This
		 * case really shouldn't ever occur.
		 */
		bp->b_flags |= B_LOCKED;
	} else {
		/*
		 * Disassociate the BP.  If the io has no refs left we
		 * have to add it to the loose list.
		 */
		hammer_io_disassociate(iou, 0);
		if (iou->io.bp == NULL &&
		    iou->io.type != HAMMER_STRUCTURE_VOLUME) {
			kprintf("ADD LOOSE %p\n", &iou->io);
			KKASSERT(iou->io.mod_list == NULL);
			iou->io.mod_list = &iou->io.hmp->lose_list;
			TAILQ_INSERT_TAIL(iou->io.mod_list, &iou->io, mod_entry);
		}
	}
}

static int
hammer_io_fsync(struct vnode *vp)
{
	return(0);
}

/*
 * NOTE: will not be called unless we tell the kernel about the
 * bioops.  Unused... we use the mount's VFS_SYNC instead.
 */
static int
hammer_io_sync(struct mount *mp)
{
	return(0);
}

static void
hammer_io_movedeps(struct buf *bp1, struct buf *bp2)
{
}

/*
 * I/O pre-check for reading and writing.  HAMMER only uses this for
 * B_CACHE buffers so checkread just shouldn't happen, but if it does
 * allow it.
 *
 * Writing is a different case.  We don't want the kernel to try to write
 * out a buffer that HAMMER may be modifying passively or which has a
 * dependency.  In addition, kernel-demanded writes can only proceed for
 * certain types of buffers (i.e. UNDO and DATA types).  Other dirty
 * buffer types can only be explicitly written by the flusher.
 *
 * checkwrite will only be called for bdwrite()n buffers.  If we return
 * success the kernel is guaranteed to initiate the buffer write.
 */
static int
hammer_io_checkread(struct buf *bp)
{
	return(0);
}

static int
hammer_io_checkwrite(struct buf *bp)
{
	hammer_io_t io = (void *)LIST_FIRST(&bp->b_dep);

	/*
	 * We can only clear the modified bit if the IO is not currently
	 * undergoing modification.  Otherwise we may miss changes.
	 */
	if (io->modify_refs == 0 && io->modified) {
		KKASSERT(io->mod_list != NULL);
		if (io->mod_list == &io->hmp->volu_list ||
		    io->mod_list == &io->hmp->meta_list) {
			--io->hmp->locked_dirty_count;
		}
		TAILQ_REMOVE(io->mod_list, io, mod_entry);
		io->mod_list = NULL;
		io->modified = 0;
	}
	return(0);
}

/*
 * Return non-zero if the caller should flush the structure associated
 * with this io sub-structure.
 */
int
hammer_io_checkflush(struct hammer_io *io)
{
	if (io->bp == NULL || (io->bp->b_flags & B_LOCKED)) {
		return(1);
	}
	return(0);
}

/*
 * Return non-zero if we wish to delay the kernel's attempt to flush
 * this buffer to disk.
 */
static int
hammer_io_countdeps(struct buf *bp, int n)
{
	return(0);
}

struct bio_ops hammer_bioops = {
	.io_start	= hammer_io_start,
	.io_complete	= hammer_io_complete,
	.io_deallocate	= hammer_io_deallocate,
	.io_fsync	= hammer_io_fsync,
	.io_sync	= hammer_io_sync,
	.io_movedeps	= hammer_io_movedeps,
	.io_countdeps	= hammer_io_countdeps,
	.io_checkread	= hammer_io_checkread,
	.io_checkwrite	= hammer_io_checkwrite,
};

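/*
 * Wiring note: these bioops take effect per-buffer, not globally.  A
 * buffer opts in when hammer_io_read()/hammer_io_new() set
 * bp->b_ops = &hammer_bioops and link io->worklist onto bp->b_dep;
 * from then on the kernel routes deallocate/checkwrite/complete
 * decisions for that bp through the callbacks above.
 */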