/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_io.c,v 1.32 2008/05/18 01:48:50 dillon Exp $
 */
/*
 * IO Primitives and buffer cache management
 *
 * All major data-tracking structures in HAMMER contain a struct hammer_io
 * which is used to manage their backing store.  We use filesystem buffers
 * for backing store and we leave them passively associated with their
 * HAMMER structures.
 *
 * If the kernel tries to release a passively associated buf which we cannot
 * yet let go we set B_LOCKED in the buffer and then actively release it
 * later when we can.
 */
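
/*
 * Rough sketch of the typical life cycle of an io (illustrative only;
 * exact call sites vary and 'trans', 'limit', 'base', 'len' and 'flush'
 * are caller-supplied):
 *
 *	hammer_io_init(&buffer->io, hmp, HAMMER_STRUCTURE_DATA_BUFFER);
 *	hammer_io_read(volume->devvp, &buffer->io, limit);
 *	hammer_modify_buffer(trans, buffer, base, len);
 *	... modify buffer->ondisk ...
 *	hammer_modify_buffer_done(buffer);
 *	hammer_io_release(&buffer->io, flush);
 */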

#include "hammer.h"
#include <sys/fcntl.h>
#include <sys/nlookup.h>
#include <sys/buf.h>
#include <sys/buf2.h>

static void hammer_io_modify(hammer_io_t io, int count);
static void hammer_io_deallocate(struct buf *bp);

/*
 * Initialize a new, already-zero'd hammer_io structure, or reinitialize
 * an existing hammer_io structure which may have switched to another type.
 */
void
hammer_io_init(hammer_io_t io, hammer_mount_t hmp, enum hammer_io_type type)
{
	io->hmp = hmp;
	io->type = type;
}

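/*
 * Reinitialize an io structure as a different type.  If the io is on a
 * modified-buffer list it is removed from the old list (with the related
 * dirty-buffer accounting adjusted), retyped, and reinserted on the list
 * appropriate to the new type.
 */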
void
hammer_io_reinit(hammer_io_t io, enum hammer_io_type type)
{
	hammer_mount_t hmp = io->hmp;

	if (io->modified) {
		KKASSERT(io->mod_list != NULL);
		if (io->mod_list == &hmp->volu_list ||
		    io->mod_list == &hmp->meta_list) {
			--hmp->locked_dirty_count;
			--hammer_count_dirtybufs;
		}
		TAILQ_REMOVE(io->mod_list, io, mod_entry);
		io->mod_list = NULL;
	}
	io->type = type;
	if (io->modified) {
		switch(io->type) {
		case HAMMER_STRUCTURE_VOLUME:
			io->mod_list = &hmp->volu_list;
			++hmp->locked_dirty_count;
			++hammer_count_dirtybufs;
			break;
		case HAMMER_STRUCTURE_META_BUFFER:
			io->mod_list = &hmp->meta_list;
			++hmp->locked_dirty_count;
			++hammer_count_dirtybufs;
			break;
		case HAMMER_STRUCTURE_UNDO_BUFFER:
			io->mod_list = &hmp->undo_list;
			break;
		case HAMMER_STRUCTURE_DATA_BUFFER:
			io->mod_list = &hmp->data_list;
			break;
		}
		TAILQ_INSERT_TAIL(io->mod_list, io, mod_entry);
	}
}

/*
 * Helper routine to disassociate a buffer cache buffer from an I/O
 * structure.  Called with the io structure exclusively locked.
 *
 * The io may have 0 or 1 references depending on who called us.  The
 * caller is responsible for dealing with the refs.
 *
 * This call can only be made when no action is required on the buffer.
 * HAMMER must own the buffer (released == 0) since we mess around with it.
 */
static void
hammer_io_disassociate(hammer_io_structure_t iou, int elseit)
{
	struct buf *bp = iou->io.bp;

	KKASSERT(iou->io.modified == 0);
	buf_dep_init(bp);
	iou->io.bp = NULL;
	bp->b_flags &= ~B_LOCKED;

	if (elseit) {
		KKASSERT(iou->io.released == 0);
		iou->io.released = 1;
		bqrelse(bp);
	} else {
		KKASSERT(iou->io.released);
	}

	switch(iou->io.type) {
	case HAMMER_STRUCTURE_VOLUME:
		iou->volume.ondisk = NULL;
		break;
	case HAMMER_STRUCTURE_DATA_BUFFER:
	case HAMMER_STRUCTURE_META_BUFFER:
	case HAMMER_STRUCTURE_UNDO_BUFFER:
		iou->buffer.ondisk = NULL;
		break;
	}
}

/*
 * Wait for any physical IO to complete
 */
static void
hammer_io_wait(hammer_io_t io)
{
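	/*
	 * The tsleep_interlock()/io->waiting handshake closes the race
	 * against hammer_io_complete(), which clears io->waiting and
	 * issues the wakeup when the IO finishes.
	 */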
	if (io->running) {
		crit_enter();
		tsleep_interlock(io);
		io->waiting = 1;
		for (;;) {
			tsleep(io, 0, "hmrflw", 0);
			if (io->running == 0)
				break;
			tsleep_interlock(io);
			io->waiting = 1;
			if (io->running == 0)
				break;
		}
		crit_exit();
	}
}

#define HAMMER_MAXRA	4

/*
 * Load bp for a HAMMER structure.  The io must be exclusively locked by
 * the caller.
 *
 * Generally speaking HAMMER assumes either an optimized layout or that
 * typical access patterns will be close to the original layout when the
 * information was written.  For this reason we try to cluster all reads.
 */
int
hammer_io_read(struct vnode *devvp, struct hammer_io *io, hammer_off_t limit)
{
	struct buf *bp;
	int error;

	if ((bp = io->bp) == NULL) {
187#if 1
		error = cluster_read(devvp, limit, io->offset,
				     HAMMER_BUFSIZE, MAXBSIZE, 16, &io->bp);
#else
		error = bread(devvp, io->offset, HAMMER_BUFSIZE, &io->bp);
#endif

		if (error == 0) {
			bp = io->bp;
			bp->b_ops = &hammer_bioops;
			LIST_INSERT_HEAD(&bp->b_dep, &io->worklist, node);
			BUF_KERNPROC(bp);
		}
		KKASSERT(io->modified == 0);
		KKASSERT(io->running == 0);
		KKASSERT(io->waiting == 0);
		io->released = 0;	/* we hold an active lock on bp */
	} else {
		error = 0;
	}
	return(error);
}

/*
 * Similar to hammer_io_read() but returns a zero'd out buffer instead.
 * Must be called with the IO exclusively locked.
 *
 * vfs_bio_clrbuf() is kinda nasty, enforce serialization against background
 * I/O by forcing the buffer to not be in a released state before calling
 * it.
 *
 * This function will also mark the IO as modified but it will not
 * increment the modify_refs count.
 */
int
hammer_io_new(struct vnode *devvp, struct hammer_io *io)
{
	struct buf *bp;

	if ((bp = io->bp) == NULL) {
		io->bp = getblk(devvp, io->offset, HAMMER_BUFSIZE, 0, 0);
		bp = io->bp;
		bp->b_ops = &hammer_bioops;
		LIST_INSERT_HEAD(&bp->b_dep, &io->worklist, node);
		io->released = 0;
		KKASSERT(io->running == 0);
		io->waiting = 0;
		BUF_KERNPROC(bp);
	} else {
		if (io->released) {
			regetblk(bp);
			BUF_KERNPROC(bp);
			io->released = 0;
		}
	}
	hammer_io_modify(io, 0);
	vfs_bio_clrbuf(bp);
	return(0);
}

/*
 * This routine is called on the last reference to a hammer structure.
 * The io is usually locked exclusively (but may not be during unmount).
 *
 * This routine is responsible for the disposition of the buffer cache
 * buffer backing the IO.  Only pure-data and undo buffers can be handed
 * back to the kernel.  Volume and meta-data buffers must be retained
 * by HAMMER until explicitly flushed by the backend.
 */
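/*
 * Case summary (condensed from the code below):
 *
 *	modified && flush			-> hammer_io_flush()
 *	modified && B_LOCKED && data/undo type	-> hammer_io_flush()
 *	flush && clean && !running		-> disassociate bp from the io
 *	modified && data/undo type		-> bdwrite() the bp
 *	otherwise				-> bqrelse(), leaving the bp
 *						   passively associated
 */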
void
hammer_io_release(struct hammer_io *io, int flush)
{
	struct buf *bp;

	if ((bp = io->bp) == NULL)
		return;

	/*
	 * Try to flush a dirty IO to disk if asked to by the
	 * caller or if the kernel tried to flush the buffer in the past.
	 *
	 * Kernel-initiated flushes are only allowed for pure-data buffers.
	 * Meta-data and volume buffers can only be flushed explicitly
	 * by HAMMER.
	 */
	if (io->modified) {
		if (flush) {
			hammer_io_flush(io);
		} else if (bp->b_flags & B_LOCKED) {
			switch(io->type) {
			case HAMMER_STRUCTURE_DATA_BUFFER:
			case HAMMER_STRUCTURE_UNDO_BUFFER:
				hammer_io_flush(io);
				break;
			default:
				break;
			}
		} /* else no explicit request to flush the buffer */
	}

	/*
	 * Wait for the IO to complete if asked to.
	 */
	if (io->waitdep && io->running) {
		hammer_io_wait(io);
	}

	/*
	 * Return control of the buffer to the kernel (with the proviso
	 * that our bioops can override kernel decisions with regard to
	 * the buffer).
	 */
	if (flush && io->modified == 0 && io->running == 0) {
		/*
		 * Always disassociate the bp if an explicit flush
		 * was requested and the IO completed with no error
		 * (so unmount can really clean up the structure).
		 */
		if (io->released) {
			regetblk(bp);
			BUF_KERNPROC(bp);
			io->released = 0;
		}
		hammer_io_disassociate((hammer_io_structure_t)io, 1);
	} else if (io->modified) {
		/*
		 * Only certain IO types can be released to the kernel.
		 * volume and meta-data IO types must be explicitly flushed
		 * by HAMMER.
		 */
		switch(io->type) {
		case HAMMER_STRUCTURE_DATA_BUFFER:
		case HAMMER_STRUCTURE_UNDO_BUFFER:
			if (io->released == 0) {
				io->released = 1;
				bdwrite(bp);
			}
			break;
		default:
			break;
		}
	} else if (io->released == 0) {
		/*
		 * Clean buffers can be generally released to the kernel.
		 * We leave the bp passively associated with the HAMMER
		 * structure and use bioops to disconnect it later on
		 * if the kernel wants to discard the buffer.
		 */
		io->released = 1;
		bqrelse(bp);
	}
}

/*
 * This routine is called with a locked IO when a flush is desired and
 * no other references to the structure exist other than ours.  This
 * routine is ONLY called when HAMMER believes it is safe to flush a
 * potentially modified buffer out.
 */
void
hammer_io_flush(struct hammer_io *io)
{
	struct buf *bp;

	/*
	 * Degenerate case - nothing to flush if nothing is dirty.
	 */
	if (io->modified == 0) {
		return;
	}

	KKASSERT(io->bp);
	KKASSERT(io->modify_refs == 0);

	/*
	 * Acquire ownership of the bp, particularly before we clear our
	 * modified flag.
	 *
	 * We are going to bawrite() this bp.  Don't leave a window where
	 * io->released is set, we actually own the bp rather than our
	 * buffer.
	 */
	bp = io->bp;
	if (io->released) {
		regetblk(bp);
		/* BUF_KERNPROC(io->bp); */
		/* io->released = 0; */
		KKASSERT(io->released);
		KKASSERT(io->bp == bp);
	}
	io->released = 1;

	/*
	 * Acquire exclusive access to the bp and then clear the modified
	 * state of the buffer prior to issuing I/O to interlock any
	 * modifications made while the I/O is in progress.  This shouldn't
	 * happen anyway but losing data would be worse.  The modified bit
	 * will be rechecked after the IO completes.
	 *
	 * This is only legal when lock.refs == 1 (otherwise we might clear
	 * the modified bit while there are still users of the cluster
	 * modifying the data).
	 *
	 * Do this before potentially blocking so any attempt to modify the
	 * ondisk while we are blocked blocks waiting for us.
	 */
	KKASSERT(io->mod_list != NULL);
	if (io->mod_list == &io->hmp->volu_list ||
	    io->mod_list == &io->hmp->meta_list) {
		--io->hmp->locked_dirty_count;
		--hammer_count_dirtybufs;
	}
	TAILQ_REMOVE(io->mod_list, io, mod_entry);
	io->mod_list = NULL;
	io->modified = 0;

	/*
	 * Transfer ownership to the kernel and initiate I/O.
	 */
	io->running = 1;
	++io->hmp->io_running_count;
	bawrite(bp);
}

/************************************************************************
 *				BUFFER DIRTYING				*
 ************************************************************************
 *
 * These routines deal with dependencies created when IO buffers get
 * modified.  The caller must call hammer_modify_*() on a referenced
 * HAMMER structure prior to modifying its on-disk data.
 *
 * Any intent to modify an IO buffer acquires the related bp and imposes
 * various write ordering dependencies.
 */
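/*
 * Typical usage (illustrative sketch; 'field' stands in for whatever
 * member of the ondisk structure is being changed):
 *
 *	hammer_modify_buffer(trans, buffer, &buffer->ondisk->field,
 *			     sizeof(buffer->ondisk->field));
 *	buffer->ondisk->field = new_value;
 *	hammer_modify_buffer_done(buffer);
 */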

/*
 * Mark a HAMMER structure as undergoing modification.  Meta-data buffers
 * are locked until the flusher can deal with them; pure data buffers
 * can be written out.
 */
static
void
hammer_io_modify(hammer_io_t io, int count)
{
	struct hammer_mount *hmp = io->hmp;

	/*
	 * Shortcut if nothing to do.
	 */
	KKASSERT(io->lock.refs != 0 && io->bp != NULL);
	io->modify_refs += count;
	if (io->modified && io->released == 0)
		return;

	hammer_lock_ex(&io->lock);
	if (io->modified == 0) {
		KKASSERT(io->mod_list == NULL);
		switch(io->type) {
		case HAMMER_STRUCTURE_VOLUME:
			io->mod_list = &hmp->volu_list;
			++hmp->locked_dirty_count;
			++hammer_count_dirtybufs;
			break;
		case HAMMER_STRUCTURE_META_BUFFER:
			io->mod_list = &hmp->meta_list;
			++hmp->locked_dirty_count;
			++hammer_count_dirtybufs;
			break;
		case HAMMER_STRUCTURE_UNDO_BUFFER:
			io->mod_list = &hmp->undo_list;
			break;
		case HAMMER_STRUCTURE_DATA_BUFFER:
			io->mod_list = &hmp->data_list;
			break;
		}
		TAILQ_INSERT_TAIL(io->mod_list, io, mod_entry);
		io->modified = 1;
	}
	if (io->released) {
		regetblk(io->bp);
		BUF_KERNPROC(io->bp);
		io->released = 0;
		KKASSERT(io->modified != 0);
	}
	hammer_unlock(&io->lock);
}

static __inline
void
hammer_io_modify_done(hammer_io_t io)
{
	KKASSERT(io->modify_refs > 0);
	--io->modify_refs;
}

/*
 * Caller intends to modify a volume's ondisk structure.
 *
 * This is only allowed if we are the flusher or we have a ref on the
 * sync_lock.
 */
void
hammer_modify_volume(hammer_transaction_t trans, hammer_volume_t volume,
		     void *base, int len)
{
	KKASSERT(trans == NULL || trans->sync_lock_refs > 0);

	hammer_io_modify(&volume->io, 1);
	if (len) {
		intptr_t rel_offset = (intptr_t)base - (intptr_t)volume->ondisk;
		KKASSERT((rel_offset & ~(intptr_t)HAMMER_BUFMASK) == 0);
		hammer_generate_undo(trans, &volume->io,
			HAMMER_ENCODE_RAW_VOLUME(volume->vol_no, rel_offset),
			base, len);
	}
}

/*
 * Caller intends to modify a buffer's ondisk structure.
 *
 * This is only allowed if we are the flusher or we have a ref on the
 * sync_lock.
 */
void
hammer_modify_buffer(hammer_transaction_t trans, hammer_buffer_t buffer,
		     void *base, int len)
{
	KKASSERT(trans == NULL || trans->sync_lock_refs > 0);

	hammer_io_modify(&buffer->io, 1);
	if (len) {
		intptr_t rel_offset = (intptr_t)base - (intptr_t)buffer->ondisk;
		KKASSERT((rel_offset & ~(intptr_t)HAMMER_BUFMASK) == 0);
		hammer_generate_undo(trans, &buffer->io,
				     buffer->zone2_offset + rel_offset,
				     base, len);
	}
}

void
hammer_modify_volume_done(hammer_volume_t volume)
{
	hammer_io_modify_done(&volume->io);
}

void
hammer_modify_buffer_done(hammer_buffer_t buffer)
{
	hammer_io_modify_done(&buffer->io);
}

/*
 * Mark an entity as not being dirty any more -- this usually occurs when
 * the governing a-list has freed the entire entity.
 *
 * XXX (currently a no-op; the old implementation is retained under #if 0)
 */
void
hammer_io_clear_modify(struct hammer_io *io)
{
#if 0
	struct buf *bp;

	io->modified = 0;
	XXX mod_list/entry
	if ((bp = io->bp) != NULL) {
		if (io->released) {
			regetblk(bp);
			/* BUF_KERNPROC(io->bp); */
		} else {
			io->released = 1;
		}
		if (io->modified == 0) {
			hkprintf("hammer_io_clear_modify: cleared %p\n", io);
			bundirty(bp);
			bqrelse(bp);
		} else {
			bdwrite(bp);
		}
	}
#endif
}

/************************************************************************
 *				HAMMER_BIOOPS				*
 ************************************************************************
 *
 */

/*
 * Pre-IO initiation kernel callback - cluster build only
 */
static void
hammer_io_start(struct buf *bp)
{
}

/*
 * Post-IO completion kernel callback
 *
 * NOTE: HAMMER may modify a buffer after initiating I/O.  The modified bit
 * may also be set if we were marking a cluster header open.  Only remove
 * our dependency if the modified bit is clear.
 */
static void
hammer_io_complete(struct buf *bp)
{
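	/*
	 * Recover the hammer_io from the buffer's dependency list; the
	 * worklist node was inserted at the head of b_dep when the bp
	 * was associated in hammer_io_read()/hammer_io_new().
	 */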
	union hammer_io_structure *iou = (void *)LIST_FIRST(&bp->b_dep);

	KKASSERT(iou->io.released == 1);

	if (iou->io.running) {
		if (--iou->io.hmp->io_running_count == 0)
			wakeup(&iou->io.hmp->io_running_count);
		KKASSERT(iou->io.hmp->io_running_count >= 0);
		iou->io.running = 0;
	}

	/*
	 * If no lock references remain and we can acquire the IO lock and
	 * someone at some point wanted us to flush (B_LOCKED test), then
	 * try to dispose of the IO.
	 */
	if (iou->io.waiting) {
		iou->io.waiting = 0;
		wakeup(iou);
	}

	/*
	 * Someone wanted us to flush, try to clean out the buffer.
	 */
	if ((bp->b_flags & B_LOCKED) && iou->io.lock.refs == 0) {
		KKASSERT(iou->io.modified == 0);
		bp->b_flags &= ~B_LOCKED;
		hammer_io_deallocate(bp);
		/* structure may be dead now */
	}
}

/*
 * Callback from kernel when it wishes to deallocate a passively
 * associated structure.  This mostly occurs with clean buffers
 * but it may be possible for a holding structure to be marked dirty
 * while its buffer is passively associated.
 *
 * If we cannot disassociate we set B_LOCKED to prevent the buffer
 * from getting reused.
 *
 * WARNING: Because this can be called directly by getnewbuf we cannot
 * recurse into the tree.  If a bp cannot be immediately disassociated
 * our only recourse is to set B_LOCKED.
 */
static void
hammer_io_deallocate(struct buf *bp)
{
	hammer_io_structure_t iou = (void *)LIST_FIRST(&bp->b_dep);

	KKASSERT((bp->b_flags & B_LOCKED) == 0 && iou->io.running == 0);
	if (iou->io.lock.refs > 0 || iou->io.modified) {
		/*
		 * It is not legal to disassociate a modified buffer.  This
		 * case really shouldn't ever occur.
		 */
		bp->b_flags |= B_LOCKED;
	} else {
		/*
		 * Disassociate the BP.  If the io has no refs left we
		 * have to add it to the loose list.
		 */
		hammer_io_disassociate(iou, 0);
		if (iou->io.bp == NULL &&
		    iou->io.type != HAMMER_STRUCTURE_VOLUME) {
			KKASSERT(iou->io.mod_list == NULL);
			iou->io.mod_list = &iou->io.hmp->lose_list;
			TAILQ_INSERT_TAIL(iou->io.mod_list, &iou->io,
					  mod_entry);
		}
	}
}

static int
hammer_io_fsync(struct vnode *vp)
{
	return(0);
}

/*
 * NOTE: will not be called unless we tell the kernel about the
 * bioops.  Unused... we use the mount's VFS_SYNC instead.
 */
static int
hammer_io_sync(struct mount *mp)
{
	return(0);
}

static void
hammer_io_movedeps(struct buf *bp1, struct buf *bp2)
{
}

/*
 * I/O pre-check for reading and writing.  HAMMER only uses this for
 * B_CACHE buffers so checkread just shouldn't happen, but if it does
 * allow it.
 *
 * Writing is a different case.  We don't want the kernel to try to write
 * out a buffer that HAMMER may be modifying passively or which has a
 * dependency.  In addition, kernel-demanded writes can only proceed for
 * certain types of buffers (i.e. UNDO and DATA types).  Other dirty
 * buffer types can only be explicitly written by the flusher.
 *
 * checkwrite will only be called for bdwrite()n buffers.  If we return
 * success the kernel is guaranteed to initiate the buffer write.
 */
static int
hammer_io_checkread(struct buf *bp)
{
	return(0);
}

static int
hammer_io_checkwrite(struct buf *bp)
{
	hammer_io_t io = (void *)LIST_FIRST(&bp->b_dep);

	/*
	 * This shouldn't happen under normal operation.
	 */
	if (io->type == HAMMER_STRUCTURE_VOLUME ||
	    io->type == HAMMER_STRUCTURE_META_BUFFER) {
		if (!panicstr)
			panic("hammer_io_checkwrite: illegal buffer");
		hkprintf("x");
		bp->b_flags |= B_LOCKED;
		return(1);
	}

	/*
	 * We can only clear the modified bit if the IO is not currently
	 * undergoing modification.  Otherwise we may miss changes.
	 */
	if (io->modify_refs == 0 && io->modified) {
		KKASSERT(io->mod_list != NULL);
		if (io->mod_list == &io->hmp->volu_list ||
		    io->mod_list == &io->hmp->meta_list) {
			--io->hmp->locked_dirty_count;
			--hammer_count_dirtybufs;
		}
		TAILQ_REMOVE(io->mod_list, io, mod_entry);
		io->mod_list = NULL;
		io->modified = 0;
	}

	/*
	 * The kernel is going to start the IO, set io->running.
	 */
	KKASSERT(io->running == 0);
	io->running = 1;
	++io->hmp->io_running_count;
	return(0);
}

/*
 * Return non-zero if the caller should flush the structure associated
 * with this io sub-structure.
 */
int
hammer_io_checkflush(struct hammer_io *io)
{
	if (io->bp == NULL || (io->bp->b_flags & B_LOCKED)) {
		return(1);
	}
	return(0);
}

/*
 * Return non-zero if we wish to delay the kernel's attempt to flush
 * this buffer to disk.
 */
static int
hammer_io_countdeps(struct buf *bp, int n)
{
	return(0);
}
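
/*
 * The kernel's buffer cache calls back into HAMMER through this ops
 * table; it is attached to each buffer via bp->b_ops when the bp is
 * associated in hammer_io_read()/hammer_io_new().
 */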
struct bio_ops hammer_bioops = {
	.io_start	= hammer_io_start,
	.io_complete	= hammer_io_complete,
	.io_deallocate	= hammer_io_deallocate,
	.io_fsync	= hammer_io_fsync,
	.io_sync	= hammer_io_sync,
	.io_movedeps	= hammer_io_movedeps,
	.io_countdeps	= hammer_io_countdeps,
	.io_checkread	= hammer_io_checkread,
	.io_checkwrite	= hammer_io_checkwrite,
};