HAMMER 41B/Many: Cleanup.
[dragonfly.git] / sys / vfs / hammer / hammer_io.c
/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_io.c,v 1.30 2008/05/06 00:21:08 dillon Exp $
 */
/*
 * IO Primitives and buffer cache management
 *
 * All major data-tracking structures in HAMMER contain a struct hammer_io
 * which is used to manage their backing store.  We use filesystem buffers
 * for backing store and we leave them passively associated with their
 * HAMMER structures.
 *
 * If the kernel tries to release a passively associated buf which we cannot
 * yet let go we set B_LOCKED in the buffer and then actively release it
 * later when we can.
 */

#include "hammer.h"
#include <sys/fcntl.h>
#include <sys/nlookup.h>
#include <sys/buf.h>
#include <sys/buf2.h>

static void hammer_io_modify(hammer_io_t io, int count);
static void hammer_io_deallocate(struct buf *bp);

/*
 * Initialize a new, already-zero'd hammer_io structure, or reinitialize
 * an existing hammer_io structure which may have switched to another type.
 */
void
hammer_io_init(hammer_io_t io, hammer_mount_t hmp, enum hammer_io_type type)
{
        io->hmp = hmp;
        io->type = type;
}
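
/*
 * Illustrative sketch (hypothetical caller, not part of this file): a
 * freshly allocated, zero'd structure binds its embedded hammer_io and
 * aims it at a disk offset before issuing I/O:
 *
 *      hammer_io_init(&buffer->io, hmp, HAMMER_STRUCTURE_DATA_BUFFER);
 *      buffer->io.offset = disk_offset;        (hypothetical field setup)
 *
 * The io starts out clean and has no backing bp until hammer_io_read()
 * or hammer_io_new() attaches one.
 */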
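/*
 * Reinitialize an io which may have switched to another structure type.
 * If the io is marked modified, its mod_list linkage and the mount's
 * locked-dirty accounting are migrated to match the new type.
 */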
void
hammer_io_reinit(hammer_io_t io, enum hammer_io_type type)
{
        hammer_mount_t hmp = io->hmp;

        if (io->modified) {
                KKASSERT(io->mod_list != NULL);
                if (io->mod_list == &hmp->volu_list ||
                    io->mod_list == &hmp->meta_list) {
                        --hmp->locked_dirty_count;
                        --hammer_count_dirtybufs;
                }
                TAILQ_REMOVE(io->mod_list, io, mod_entry);
                io->mod_list = NULL;
        }
        io->type = type;
        if (io->modified) {
                switch(io->type) {
                case HAMMER_STRUCTURE_VOLUME:
                        io->mod_list = &hmp->volu_list;
                        ++hmp->locked_dirty_count;
                        ++hammer_count_dirtybufs;
                        break;
                case HAMMER_STRUCTURE_META_BUFFER:
                        io->mod_list = &hmp->meta_list;
                        ++hmp->locked_dirty_count;
                        ++hammer_count_dirtybufs;
                        break;
                case HAMMER_STRUCTURE_UNDO_BUFFER:
                        io->mod_list = &hmp->undo_list;
                        break;
                case HAMMER_STRUCTURE_DATA_BUFFER:
                        io->mod_list = &hmp->data_list;
                        break;
                }
                TAILQ_INSERT_TAIL(io->mod_list, io, mod_entry);
        }
}

/*
 * Helper routine to disassociate a buffer cache buffer from an I/O
 * structure.  Called with the io structure exclusively locked.
 *
 * The io may have 0 or 1 references depending on who called us.  The
 * caller is responsible for dealing with the refs.
 *
 * This call can only be made when no action is required on the buffer.
 * HAMMER must own the buffer (released == 0) since we mess around with it.
 */
static void
hammer_io_disassociate(hammer_io_structure_t iou, int elseit)
{
        struct buf *bp = iou->io.bp;

        KKASSERT(iou->io.modified == 0);
        buf_dep_init(bp);
        iou->io.bp = NULL;
        bp->b_flags &= ~B_LOCKED;
        if (elseit) {
                KKASSERT(iou->io.released == 0);
                iou->io.released = 1;
                bqrelse(bp);
        } else {
                KKASSERT(iou->io.released);
        }

        switch(iou->io.type) {
        case HAMMER_STRUCTURE_VOLUME:
                iou->volume.ondisk = NULL;
                break;
        case HAMMER_STRUCTURE_DATA_BUFFER:
        case HAMMER_STRUCTURE_META_BUFFER:
        case HAMMER_STRUCTURE_UNDO_BUFFER:
                iou->buffer.ondisk = NULL;
                break;
        }
}

/*
 * Wait for any physical IO to complete
 */
static void
hammer_io_wait(hammer_io_t io)
{
        if (io->running) {
                crit_enter();
                tsleep_interlock(io);
                io->waiting = 1;
                for (;;) {
                        tsleep(io, 0, "hmrflw", 0);
                        if (io->running == 0)
                                break;
                        tsleep_interlock(io);
                        io->waiting = 1;
                        if (io->running == 0)
                                break;
                }
                crit_exit();
        }
}
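
/*
 * Note on the loop above: tsleep_interlock() is re-armed and io->waiting
 * re-set before io->running is rechecked, so a wakeup() issued from
 * hammer_io_complete() between the recheck and the next tsleep() cannot
 * be lost.  The crit_enter()/crit_exit() bracket keeps the interlock
 * arming and the flag updates atomic with respect to I/O completion.
 */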

/*
 * Load bp for a HAMMER structure.  The io must be exclusively locked by
 * the caller.
 */
int
hammer_io_read(struct vnode *devvp, struct hammer_io *io)
{
        struct buf *bp;
        int error;

        if ((bp = io->bp) == NULL) {
                error = bread(devvp, io->offset, HAMMER_BUFSIZE, &io->bp);
                if (error == 0) {
                        bp = io->bp;
                        bp->b_ops = &hammer_bioops;
                        LIST_INSERT_HEAD(&bp->b_dep, &io->worklist, node);
                        BUF_KERNPROC(bp);
                }
                KKASSERT(io->modified == 0);
                KKASSERT(io->running == 0);
                KKASSERT(io->waiting == 0);
                io->released = 0;       /* we hold an active lock on bp */
        } else {
                error = 0;
        }
        return(error);
}
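
/*
 * Illustrative usage sketch (hypothetical caller, not shown in this
 * file; the field setup is an example only):
 *
 *      buffer->io.offset = disk_offset;
 *      error = hammer_io_read(devvp, &buffer->io);
 *      if (error == 0)
 *              buffer->ondisk = (void *)buffer->io.bp->b_data;
 *
 * On success the io holds an active reference to the bp (released == 0)
 * and the structure's ondisk pointer can be aimed at the buffer data.
 */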

/*
 * Similar to hammer_io_read() but returns a zero'd out buffer instead.
 * Must be called with the IO exclusively locked.
 *
 * vfs_bio_clrbuf() is kinda nasty; enforce serialization against
 * background I/O by forcing the buffer out of the released state before
 * calling it.
 *
 * This function will also mark the IO as modified but it will not
 * increment the modify_refs count.
 */
int
hammer_io_new(struct vnode *devvp, struct hammer_io *io)
{
        struct buf *bp;

        if ((bp = io->bp) == NULL) {
                io->bp = getblk(devvp, io->offset, HAMMER_BUFSIZE, 0, 0);
                bp = io->bp;
                bp->b_ops = &hammer_bioops;
                LIST_INSERT_HEAD(&bp->b_dep, &io->worklist, node);
                io->released = 0;
                KKASSERT(io->running == 0);
                io->waiting = 0;
                BUF_KERNPROC(bp);
        } else {
                if (io->released) {
                        regetblk(bp);
                        BUF_KERNPROC(bp);
                        io->released = 0;
                }
        }
        hammer_io_modify(io, 0);
        vfs_bio_clrbuf(bp);
        return(0);
}

/*
 * This routine is called on the last reference to a hammer structure.
 * The io is usually locked exclusively (but may not be during unmount).
 *
 * This routine is responsible for the disposition of the buffer cache
 * buffer backing the IO.  Only pure-data and undo buffers can be handed
 * back to the kernel.  Volume and meta-data buffers must be retained
 * by HAMMER until explicitly flushed by the backend.
 */
void
hammer_io_release(struct hammer_io *io)
{
        struct buf *bp;

        if ((bp = io->bp) == NULL)
                return;

        /*
         * Try to flush a dirty IO to disk if asked to by the
         * caller or if the kernel tried to flush the buffer in the past.
         *
         * Kernel-initiated flushes are only allowed for pure-data buffers.
         * Meta-data and volume buffers can only be flushed explicitly
         * by HAMMER.
         */
        if (io->modified) {
                if (io->flush) {
                        hammer_io_flush(io);
                } else if (bp->b_flags & B_LOCKED) {
                        switch(io->type) {
                        case HAMMER_STRUCTURE_DATA_BUFFER:
                        case HAMMER_STRUCTURE_UNDO_BUFFER:
                                hammer_io_flush(io);
                                break;
                        default:
                                break;
                        }
                } /* else no explicit request to flush the buffer */
        }

        /*
         * Wait for the IO to complete if asked to.
         */
        if (io->waitdep && io->running) {
                hammer_io_wait(io);
        }

        /*
         * Return control of the buffer to the kernel (with the proviso
         * that our bioops can override kernel decisions with regard to
         * the buffer).
         */
        if (io->flush && io->modified == 0 && io->running == 0) {
                /*
                 * Always disassociate the bp if an explicit flush
                 * was requested and the IO completed with no error
                 * (so unmount can really clean up the structure).
                 */
                if (io->released) {
                        regetblk(bp);
                        BUF_KERNPROC(bp);
                        io->released = 0;
                }
                hammer_io_disassociate((hammer_io_structure_t)io, 1);
        } else if (io->modified) {
                /*
                 * Only certain IO types can be released to the kernel.
                 * Volume and meta-data IO types must be explicitly flushed
                 * by HAMMER.
                 */
                switch(io->type) {
                case HAMMER_STRUCTURE_DATA_BUFFER:
                case HAMMER_STRUCTURE_UNDO_BUFFER:
                        if (io->released == 0) {
                                io->released = 1;
                                bdwrite(bp);
                        }
                        break;
                default:
                        break;
                }
        } else if (io->released == 0) {
                /*
                 * Clean buffers can be generally released to the kernel.
                 * We leave the bp passively associated with the HAMMER
                 * structure and use bioops to disconnect it later on
                 * if the kernel wants to discard the buffer.
                 */
                io->released = 1;
                bqrelse(bp);
        }
}
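
/*
 * Disposition summary for hammer_io_release(), restating the logic
 * above:
 *
 *      dirty, flush requested          flush now (any type)
 *      dirty, B_LOCKED                 flush now (DATA/UNDO only)
 *      dirty, no flush request         DATA/UNDO handed to bdwrite(),
 *                                      VOLUME/META retained for the backend
 *      clean, flush requested          bp fully disassociated
 *      clean, no flush request         bp bqrelse()d, passively associated
 */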

/*
 * This routine is called with a locked IO when a flush is desired and
 * no other references to the structure exist other than ours.  This
 * routine is ONLY called when HAMMER believes it is safe to flush a
 * potentially modified buffer out.
 */
void
hammer_io_flush(struct hammer_io *io)
{
        struct buf *bp;

        /*
         * Degenerate case - nothing to flush if nothing is dirty.
         */
        if (io->modified == 0) {
                io->flush = 0;
                return;
        }

        KKASSERT(io->bp);
        KKASSERT(io->modify_refs == 0);

        /*
         * Acquire ownership of the bp, particularly before we clear our
         * modified flag.
         *
         * We are going to bawrite() this bp.  Don't leave a window where
         * io->released is set; we actually own the bp rather than our
         * buffer.
         */
        bp = io->bp;
        if (io->released) {
                regetblk(bp);
                /* BUF_KERNPROC(io->bp); */
                /* io->released = 0; */
                KKASSERT(io->released);
                KKASSERT(io->bp == bp);
        }
        io->released = 1;

        /*
         * Acquire exclusive access to the bp and then clear the modified
         * state of the buffer prior to issuing I/O to interlock any
         * modifications made while the I/O is in progress.  This shouldn't
         * happen anyway but losing data would be worse.  The modified bit
         * will be rechecked after the IO completes.
         *
         * This is only legal when lock.refs == 1 (otherwise we might clear
         * the modified bit while there are still users of the cluster
         * modifying the data).
         *
         * Do this before potentially blocking so any attempt to modify the
         * ondisk while we are blocked blocks waiting for us.
         */
        KKASSERT(io->mod_list != NULL);
        if (io->mod_list == &io->hmp->volu_list ||
            io->mod_list == &io->hmp->meta_list) {
                --io->hmp->locked_dirty_count;
                --hammer_count_dirtybufs;
        }
        TAILQ_REMOVE(io->mod_list, io, mod_entry);
        io->mod_list = NULL;
        io->modified = 0;
        io->flush = 0;

        /*
         * Transfer ownership to the kernel and initiate I/O.
         */
        io->running = 1;
        ++io->hmp->io_running_count;
        bawrite(bp);
}

/************************************************************************
 *                           BUFFER DIRTYING                            *
 ************************************************************************
 *
 * These routines deal with dependencies created when IO buffers get
 * modified.  The caller must call hammer_modify_*() on a referenced
 * HAMMER structure prior to modifying its on-disk data.
 *
 * Any intent to modify an IO buffer acquires the related bp and imposes
 * various write ordering dependencies.
 */

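/*
 * Illustrative sketch of the modify bracket (hypothetical caller; the
 * field name is an example only):
 *
 *      hammer_modify_buffer(trans, buffer, &ondisk->field,
 *                           sizeof(ondisk->field));
 *      ondisk->field = new_value;
 *      hammer_modify_buffer_done(buffer);
 *
 * The modify call dirties the io, generates an UNDO record covering the
 * range about to change, and raises modify_refs so the modified bit
 * cannot be cleared out from under the change (see
 * hammer_io_checkwrite()).
 */
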
/*
 * Mark a HAMMER structure as undergoing modification.  Meta-data buffers
 * are locked until the flusher can deal with them; pure data buffers
 * can be written out.
 */
static
void
hammer_io_modify(hammer_io_t io, int count)
{
        struct hammer_mount *hmp = io->hmp;

        /*
         * Shortcut if nothing to do.
         */
        KKASSERT(io->lock.refs != 0 && io->bp != NULL);
        io->modify_refs += count;
        if (io->modified && io->released == 0)
                return;

        hammer_lock_ex(&io->lock);
        if (io->modified == 0) {
                KKASSERT(io->mod_list == NULL);
                switch(io->type) {
                case HAMMER_STRUCTURE_VOLUME:
                        io->mod_list = &hmp->volu_list;
                        ++hmp->locked_dirty_count;
                        ++hammer_count_dirtybufs;
                        break;
                case HAMMER_STRUCTURE_META_BUFFER:
                        io->mod_list = &hmp->meta_list;
                        ++hmp->locked_dirty_count;
                        ++hammer_count_dirtybufs;
                        break;
                case HAMMER_STRUCTURE_UNDO_BUFFER:
                        io->mod_list = &hmp->undo_list;
                        break;
                case HAMMER_STRUCTURE_DATA_BUFFER:
                        io->mod_list = &hmp->data_list;
                        break;
                }
                TAILQ_INSERT_TAIL(io->mod_list, io, mod_entry);
                io->modified = 1;
        }
        if (io->released) {
                regetblk(io->bp);
                BUF_KERNPROC(io->bp);
                io->released = 0;
                KKASSERT(io->modified != 0);
        }
        hammer_unlock(&io->lock);
}

static __inline
void
hammer_io_modify_done(hammer_io_t io)
{
        KKASSERT(io->modify_refs > 0);
        --io->modify_refs;
}

void
hammer_modify_volume(hammer_transaction_t trans, hammer_volume_t volume,
                     void *base, int len)
{
        hammer_io_modify(&volume->io, 1);

        if (len) {
                intptr_t rel_offset = (intptr_t)base - (intptr_t)volume->ondisk;
                KKASSERT((rel_offset & ~(intptr_t)HAMMER_BUFMASK) == 0);
                hammer_generate_undo(trans, &volume->io,
                        HAMMER_ENCODE_RAW_VOLUME(volume->vol_no, rel_offset),
                        base, len);
        }
}

/*
 * Caller intends to modify a buffer's ondisk structure.  The related
 * cluster must be marked open prior to being able to flush the modified
 * buffer so get that I/O going now.
 */
void
hammer_modify_buffer(hammer_transaction_t trans, hammer_buffer_t buffer,
                     void *base, int len)
{
        hammer_io_modify(&buffer->io, 1);
        if (len) {
                intptr_t rel_offset = (intptr_t)base - (intptr_t)buffer->ondisk;
                KKASSERT((rel_offset & ~(intptr_t)HAMMER_BUFMASK) == 0);
                hammer_generate_undo(trans, &buffer->io,
                                     buffer->zone2_offset + rel_offset,
                                     base, len);
        }
}

void
hammer_modify_volume_done(hammer_volume_t volume)
{
        hammer_io_modify_done(&volume->io);
}

void
hammer_modify_buffer_done(hammer_buffer_t buffer)
{
        hammer_io_modify_done(&buffer->io);
}

/*
 * Mark an entity as not being dirty any more -- this usually occurs when
 * the governing a-list has freed the entire entity.
 *
 * XXX
 */
void
hammer_io_clear_modify(struct hammer_io *io)
{
#if 0
        struct buf *bp;

        io->modified = 0;
        XXX mod_list/entry
        if ((bp = io->bp) != NULL) {
                if (io->released) {
                        regetblk(bp);
                        /* BUF_KERNPROC(io->bp); */
                } else {
                        io->released = 1;
                }
                if (io->modified == 0) {
                        hkprintf("hammer_io_clear_modify: cleared %p\n", io);
                        bundirty(bp);
                        bqrelse(bp);
                } else {
                        bdwrite(bp);
                }
        }
#endif
}

/************************************************************************
 *                            HAMMER_BIOOPS                             *
 ************************************************************************
 *
 */

/*
 * Pre-IO initiation kernel callback - cluster build only
 */
static void
hammer_io_start(struct buf *bp)
{
}

/*
 * Post-IO completion kernel callback
 *
 * NOTE: HAMMER may modify a buffer after initiating I/O.  The modified bit
 * may also be set if we were marking a cluster header open.  Only remove
 * our dependency if the modified bit is clear.
 */
static void
hammer_io_complete(struct buf *bp)
{
        union hammer_io_structure *iou = (void *)LIST_FIRST(&bp->b_dep);

        KKASSERT(iou->io.released == 1);

        if (iou->io.running) {
                if (--iou->io.hmp->io_running_count == 0)
                        wakeup(&iou->io.hmp->io_running_count);
                KKASSERT(iou->io.hmp->io_running_count >= 0);
                iou->io.running = 0;
        }

        /*
         * Wake up anyone waiting for the I/O to drain.
         */
        if (iou->io.waiting) {
                iou->io.waiting = 0;
                wakeup(iou);
        }

        /*
         * If no lock references remain and someone at some point wanted
         * us to flush (B_LOCKED test), then try to dispose of the IO.
         */
        if ((bp->b_flags & B_LOCKED) && iou->io.lock.refs == 0) {
                KKASSERT(iou->io.modified == 0);
                bp->b_flags &= ~B_LOCKED;
                hammer_io_deallocate(bp);
                /* structure may be dead now */
        }
}

/*
 * Callback from kernel when it wishes to deallocate a passively
 * associated structure.  This mostly occurs with clean buffers
 * but it may be possible for a holding structure to be marked dirty
 * while its buffer is passively associated.
 *
 * If we cannot disassociate we set B_LOCKED to prevent the buffer
 * from getting reused.
 *
 * WARNING: Because this can be called directly by getnewbuf we cannot
 * recurse into the tree.  If a bp cannot be immediately disassociated
 * our only recourse is to set B_LOCKED.
 */
static void
hammer_io_deallocate(struct buf *bp)
{
        hammer_io_structure_t iou = (void *)LIST_FIRST(&bp->b_dep);

        KKASSERT((bp->b_flags & B_LOCKED) == 0 && iou->io.running == 0);
        if (iou->io.lock.refs > 0 || iou->io.modified) {
                /*
                 * It is not legal to disassociate a modified buffer.  This
                 * case really shouldn't ever occur.
                 */
                bp->b_flags |= B_LOCKED;
        } else {
                /*
                 * Disassociate the BP.  If the io has no refs left we
                 * have to add it to the loose list.
                 */
                hammer_io_disassociate(iou, 0);
                if (iou->io.bp == NULL &&
                    iou->io.type != HAMMER_STRUCTURE_VOLUME) {
                        KKASSERT(iou->io.mod_list == NULL);
                        iou->io.mod_list = &iou->io.hmp->lose_list;
                        TAILQ_INSERT_TAIL(iou->io.mod_list, &iou->io,
                                          mod_entry);
                }
        }
}

static int
hammer_io_fsync(struct vnode *vp)
{
        return(0);
}

/*
 * NOTE: will not be called unless we tell the kernel about the
 * bioops.  Unused... we use the mount's VFS_SYNC instead.
 */
static int
hammer_io_sync(struct mount *mp)
{
        return(0);
}

static void
hammer_io_movedeps(struct buf *bp1, struct buf *bp2)
{
}

/*
 * I/O pre-check for reading and writing.  HAMMER only uses this for
 * B_CACHE buffers so checkread just shouldn't happen, but if it does
 * allow it.
 *
 * Writing is a different case.  We don't want the kernel to try to write
 * out a buffer that HAMMER may be modifying passively or which has a
 * dependency.  In addition, kernel-demanded writes can only proceed for
 * certain types of buffers (i.e. UNDO and DATA types).  Other dirty
 * buffer types can only be explicitly written by the flusher.
 *
 * checkwrite will only be called for bdwrite()n buffers.  If we return
 * success the kernel is guaranteed to initiate the buffer write.
 */
static int
hammer_io_checkread(struct buf *bp)
{
        return(0);
}

static int
hammer_io_checkwrite(struct buf *bp)
{
        hammer_io_t io = (void *)LIST_FIRST(&bp->b_dep);

        /*
         * This shouldn't happen under normal operation.
         */
        if (io->type == HAMMER_STRUCTURE_VOLUME ||
            io->type == HAMMER_STRUCTURE_META_BUFFER) {
                if (!panicstr)
                        panic("hammer_io_checkwrite: illegal buffer");
                hkprintf("x");
                bp->b_flags |= B_LOCKED;
                return(1);
        }

        /*
         * We can only clear the modified bit if the IO is not currently
         * undergoing modification.  Otherwise we may miss changes.
         */
        if (io->modify_refs == 0 && io->modified) {
                KKASSERT(io->mod_list != NULL);
                if (io->mod_list == &io->hmp->volu_list ||
                    io->mod_list == &io->hmp->meta_list) {
                        --io->hmp->locked_dirty_count;
                        --hammer_count_dirtybufs;
                }
                TAILQ_REMOVE(io->mod_list, io, mod_entry);
                io->mod_list = NULL;
                io->modified = 0;
        }

        /*
         * The kernel is going to start the IO, set io->running.
         */
        KKASSERT(io->running == 0);
        io->running = 1;
        ++io->hmp->io_running_count;
        return(0);
}

/*
 * Return non-zero if the caller should flush the structure associated
 * with this io sub-structure.
 */
int
hammer_io_checkflush(struct hammer_io *io)
{
        if (io->bp == NULL || (io->bp->b_flags & B_LOCKED)) {
                return(1);
        }
        return(0);
}

/*
 * Return non-zero if we wish to delay the kernel's attempt to flush
 * this buffer to disk.
 */
static int
hammer_io_countdeps(struct buf *bp, int n)
{
        return(0);
}

struct bio_ops hammer_bioops = {
        .io_start       = hammer_io_start,
        .io_complete    = hammer_io_complete,
        .io_deallocate  = hammer_io_deallocate,
        .io_fsync       = hammer_io_fsync,
        .io_sync        = hammer_io_sync,
        .io_movedeps    = hammer_io_movedeps,
        .io_countdeps   = hammer_io_countdeps,
        .io_checkread   = hammer_io_checkread,
        .io_checkwrite  = hammer_io_checkwrite,
};
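
/*
 * Note: these bioops take effect for a given buffer only after the bp is
 * bound to HAMMER, which hammer_io_read() and hammer_io_new() above do
 * via:
 *
 *      bp->b_ops = &hammer_bioops;
 *      LIST_INSERT_HEAD(&bp->b_dep, &io->worklist, node);
 *
 * Thereafter the kernel consults io_checkwrite() before initiating a
 * write and io_deallocate() before recycling the buffer, which is how
 * the passive association described at the top of this file is policed.
 */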