HAMMER VFS - Improve saturated write performance (2).
[dragonfly.git] / sys / vfs / hammer / hammer_io.c
/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * IO Primitives and buffer cache management
 *
 * All major data-tracking structures in HAMMER contain a struct hammer_io
 * which is used to manage their backing store.  We use filesystem buffers
 * for backing store and we leave them passively associated with their
 * HAMMER structures.
 *
 * If the kernel tries to destroy a passively associated buf which we cannot
 * yet let go we set B_LOCKED in the buffer and then actively release it
 * later when we can.
 *
 * The io_token is required for anything which might race bioops and bio_done
 * callbacks, with one exception: A successful hammer_try_interlock_norefs().
 * The fs_token will be held in all other cases.
 */

#include "hammer.h"
#include <sys/fcntl.h>
#include <sys/nlookup.h>
#include <sys/buf.h>
#include <sys/buf2.h>

static void hammer_io_modify(hammer_io_t io, int count);
static void hammer_io_deallocate(struct buf *bp);
#if 0
static void hammer_io_direct_read_complete(struct bio *nbio);
#endif
static void hammer_io_direct_write_complete(struct bio *nbio);
static int hammer_io_direct_uncache_callback(hammer_inode_t ip, void *data);
static void hammer_io_set_modlist(struct hammer_io *io);
static void hammer_io_flush_mark(hammer_volume_t volume);

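/*
 * Red-black tree of modified IO structures.  The comparison routine below
 * builds a sort key with the volume-relative (short) offset in the high
 * bits and the volume number in the low 8 bits, presumably so dirty
 * buffers are visited in roughly ascending device offset order.
 */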
static int
hammer_mod_rb_compare(hammer_io_t io1, hammer_io_t io2)
{
	hammer_off_t io1_offset;
	hammer_off_t io2_offset;

	io1_offset = ((io1->offset & HAMMER_OFF_SHORT_MASK) << 8) |
		     HAMMER_VOL_DECODE(io1->offset);
	io2_offset = ((io2->offset & HAMMER_OFF_SHORT_MASK) << 8) |
		     HAMMER_VOL_DECODE(io2->offset);

	if (io1_offset < io2_offset)
		return(-1);
	if (io1_offset > io2_offset)
		return(1);
	return(0);
}

RB_GENERATE(hammer_mod_rb_tree, hammer_io, rb_node, hammer_mod_rb_compare);

/*
 * Initialize a new, already-zero'd hammer_io structure, or reinitialize
 * an existing hammer_io structure which may have switched to another type.
 */
void
hammer_io_init(hammer_io_t io, hammer_volume_t volume, enum hammer_io_type type)
{
	io->volume = volume;
	io->hmp = volume->io.hmp;
	io->type = type;
}

66325755 99/*
fbc6e32a 100 * Helper routine to disassociate a buffer cache buffer from an I/O
77912481 101 * structure. The io must be interlocked and marked appropriately for
b0aab9b9 102 * reclamation.
055f5ff8 103 *
b0aab9b9
MD
104 * The io must be in a released state with the io->bp owned and
105 * locked by the caller of this function. When not called from an
106 * io_deallocate() this cannot race an io_deallocate() since the
107 * kernel would be unable to get the buffer lock in that case.
77912481
MD
108 * (The released state in this case means we own the bp, not the
109 * hammer_io structure).
110 *
111 * The io may have 0 or 1 references depending on who called us. The
112 * caller is responsible for dealing with the refs.
b0aab9b9 113 *
055f5ff8 114 * This call can only be made when no action is required on the buffer.
ecca949a 115 *
77912481
MD
116 * This function is guaranteed not to race against anything because we
117 * own both the io lock and the bp lock and are interlocked with no
118 * references.
66325755
MD
119 */
120static void
ecca949a 121hammer_io_disassociate(hammer_io_structure_t iou)
66325755 122{
055f5ff8 123 struct buf *bp = iou->io.bp;
66325755 124
ecca949a 125 KKASSERT(iou->io.released);
b58c6388 126 KKASSERT(iou->io.modified == 0);
af209b0f 127 KKASSERT(LIST_FIRST(&bp->b_dep) == (void *)iou);
4d75d829 128 buf_dep_init(bp);
055f5ff8 129 iou->io.bp = NULL;
9f5097dc
MD
130
131 /*
132 * If the buffer was locked someone wanted to get rid of it.
133 */
a99b9ea2 134 if (bp->b_flags & B_LOCKED) {
b0aab9b9 135 atomic_add_int(&hammer_count_io_locked, -1);
9f5097dc 136 bp->b_flags &= ~B_LOCKED;
a99b9ea2 137 }
ecca949a
MD
138 if (iou->io.reclaim) {
139 bp->b_flags |= B_NOCACHE|B_RELBUF;
140 iou->io.reclaim = 0;
055f5ff8 141 }
66325755 142
055f5ff8 143 switch(iou->io.type) {
66325755 144 case HAMMER_STRUCTURE_VOLUME:
055f5ff8 145 iou->volume.ondisk = NULL;
66325755 146 break;
10a5d1ba
MD
147 case HAMMER_STRUCTURE_DATA_BUFFER:
148 case HAMMER_STRUCTURE_META_BUFFER:
149 case HAMMER_STRUCTURE_UNDO_BUFFER:
055f5ff8 150 iou->buffer.ondisk = NULL;
66325755 151 break;
eddadaee
MD
152 case HAMMER_STRUCTURE_DUMMY:
153 panic("hammer_io_disassociate: bad io type");
154 break;
66325755 155 }
fbc6e32a
MD
156}
157
158/*
055f5ff8 159 * Wait for any physical IO to complete
ae8e83e6
MD
160 *
161 * XXX we aren't interlocked against a spinlock or anything so there
162 * is a small window in the interlock / io->running == 0 test.
fbc6e32a 163 */
1b0ab2c3 164void
055f5ff8 165hammer_io_wait(hammer_io_t io)
fbc6e32a 166{
055f5ff8 167 if (io->running) {
b0aab9b9
MD
168 hammer_mount_t hmp = io->hmp;
169
170 lwkt_gettoken(&hmp->io_token);
171 while (io->running) {
ae8e83e6
MD
172 io->waiting = 1;
173 tsleep_interlock(io, 0);
b0aab9b9
MD
174 if (io->running)
175 tsleep(io, PINTERLOCKED, "hmrflw", hz);
055f5ff8 176 }
b0aab9b9 177 lwkt_reltoken(&hmp->io_token);
66325755
MD
178 }
179}
180
/*
 * Wait for all currently queued HAMMER-initiated I/Os to complete.
 *
 * This is not supposed to count direct I/Os but some can leak
 * through (for non-full-sized direct I/Os).
 */
187void
eddadaee 188hammer_io_wait_all(hammer_mount_t hmp, const char *ident, int doflush)
af209b0f 189{
eddadaee
MD
190 struct hammer_io iodummy;
191 hammer_io_t io;
192
193 /*
194 * Degenerate case, no I/O is running
195 */
b0aab9b9 196 lwkt_gettoken(&hmp->io_token);
eddadaee 197 if (TAILQ_EMPTY(&hmp->iorun_list)) {
b0aab9b9 198 lwkt_reltoken(&hmp->io_token);
eddadaee
MD
199 if (doflush)
200 hammer_io_flush_sync(hmp);
201 return;
202 }
203 bzero(&iodummy, sizeof(iodummy));
204 iodummy.type = HAMMER_STRUCTURE_DUMMY;
205
206 /*
207 * Add placemarker and then wait until it becomes the head of
208 * the list.
209 */
210 TAILQ_INSERT_TAIL(&hmp->iorun_list, &iodummy, iorun_entry);
211 while (TAILQ_FIRST(&hmp->iorun_list) != &iodummy) {
212 tsleep(&iodummy, 0, ident, 0);
213 }
214
215 /*
216 * Chain in case several placemarkers are present.
217 */
218 TAILQ_REMOVE(&hmp->iorun_list, &iodummy, iorun_entry);
219 io = TAILQ_FIRST(&hmp->iorun_list);
220 if (io && io->type == HAMMER_STRUCTURE_DUMMY)
221 wakeup(io);
b0aab9b9 222 lwkt_reltoken(&hmp->io_token);
eddadaee
MD
223
224 if (doflush)
225 hammer_io_flush_sync(hmp);
af209b0f
MD
226}
227
/*
 * Clear a flagged error condition on an I/O buffer.  The caller must hold
 * its own ref on the buffer.
 */
232void
233hammer_io_clear_error(struct hammer_io *io)
234{
77912481
MD
235 hammer_mount_t hmp = io->hmp;
236
237 lwkt_gettoken(&hmp->io_token);
2faf0737
MD
238 if (io->ioerror) {
239 io->ioerror = 0;
250aec18
MD
240 hammer_rel(&io->lock);
241 KKASSERT(hammer_isactive(&io->lock));
2faf0737 242 }
77912481
MD
243 lwkt_reltoken(&hmp->io_token);
244}
245
246void
247hammer_io_clear_error_noassert(struct hammer_io *io)
248{
249 hammer_mount_t hmp = io->hmp;
250
251 lwkt_gettoken(&hmp->io_token);
252 if (io->ioerror) {
253 io->ioerror = 0;
254 hammer_rel(&io->lock);
255 }
256 lwkt_reltoken(&hmp->io_token);
2faf0737
MD
257}
258
b8a41159
MD
259/*
260 * This is an advisory function only which tells the buffer cache
261 * the bp is not a meta-data buffer, even though it is backed by
262 * a block device.
263 *
264 * This is used by HAMMER's reblocking code to avoid trying to
265 * swapcache the filesystem's data when it is read or written
266 * by the reblocking code.
b0aab9b9
MD
267 *
268 * The caller has a ref on the buffer preventing the bp from
269 * being disassociated from it.
b8a41159
MD
270 */
271void
272hammer_io_notmeta(hammer_buffer_t buffer)
273{
b0aab9b9
MD
274 if ((buffer->io.bp->b_flags & B_NOTMETA) == 0) {
275 hammer_mount_t hmp = buffer->io.hmp;
276
277 lwkt_gettoken(&hmp->io_token);
278 buffer->io.bp->b_flags |= B_NOTMETA;
279 lwkt_reltoken(&hmp->io_token);
280 }
b8a41159
MD
281}
282
/*
 * Load bp for a HAMMER structure.  The io must be exclusively locked by
 * the caller.
 *
 * This routine is mostly used on meta-data and small-data blocks.  Generally
 * speaking HAMMER assumes some locality of reference and will cluster.
 *
 * Note that the caller (hammer_ondisk.c) may place further restrictions
 * on clusterability via the limit (in bytes).  Typically large-data
 * zones cannot be clustered due to their mixed buffer sizes.  This is
 * not an issue since such clustering occurs in hammer_vnops at the
 * regular file layer, whereas this is the buffered block device layer.
 *
 * No I/O callbacks can occur while we hold the buffer locked.
 */
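/*
 * Illustrative call (sketch only, not copied from a real call site):
 *
 *	error = hammer_io_read(volume->devvp, &buffer->io, limit);
 *
 * where "limit" bounds how far cluster_read() may read ahead.
 */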
298int
b7de8aa5 299hammer_io_read(struct vnode *devvp, struct hammer_io *io, int limit)
66325755
MD
300{
301 struct buf *bp;
2f85fa4d 302 int error;
66325755
MD
303
304 if ((bp = io->bp) == NULL) {
b0aab9b9 305 atomic_add_int(&hammer_count_io_running_read, io->bytes);
b7de8aa5
MD
306 if (hammer_cluster_enable && limit > io->bytes) {
307 error = cluster_read(devvp, io->offset + limit,
ce0138a6
MD
308 io->offset, io->bytes,
309 HAMMER_CLUSTER_SIZE,
364c022c
MD
310 HAMMER_CLUSTER_SIZE,
311 &io->bp);
ce0138a6
MD
312 } else {
313 error = bread(devvp, io->offset, io->bytes, &io->bp);
314 }
315 hammer_stats_disk_read += io->bytes;
b0aab9b9 316 atomic_add_int(&hammer_count_io_running_read, -io->bytes);
cdb6e4e6
MD
317
318 /*
319 * The code generally assumes b_ops/b_dep has been set-up,
320 * even if we error out here.
321 */
322 bp = io->bp;
24c8374a
MD
323 if ((hammer_debug_io & 0x0001) && (bp->b_flags & B_IODEBUG)) {
324 const char *metatype;
325
326 switch(io->type) {
327 case HAMMER_STRUCTURE_VOLUME:
328 metatype = "volume";
329 break;
330 case HAMMER_STRUCTURE_META_BUFFER:
331 switch(((struct hammer_buffer *)io)->
332 zoneX_offset & HAMMER_OFF_ZONE_MASK) {
333 case HAMMER_ZONE_BTREE:
334 metatype = "btree";
335 break;
336 case HAMMER_ZONE_META:
337 metatype = "meta";
338 break;
339 case HAMMER_ZONE_FREEMAP:
340 metatype = "freemap";
341 break;
342 default:
343 metatype = "meta?";
344 break;
345 }
346 break;
347 case HAMMER_STRUCTURE_DATA_BUFFER:
348 metatype = "data";
349 break;
350 case HAMMER_STRUCTURE_UNDO_BUFFER:
351 metatype = "undo";
352 break;
353 default:
354 metatype = "unknown";
355 break;
356 }
357 kprintf("doff %016jx %s\n",
358 (intmax_t)bp->b_bio2.bio_offset,
359 metatype);
360 }
361 bp->b_flags &= ~B_IODEBUG;
cdb6e4e6
MD
362 bp->b_ops = &hammer_bioops;
363 KKASSERT(LIST_FIRST(&bp->b_dep) == NULL);
b0aab9b9
MD
364
365 /* io->worklist is locked by the io lock */
cdb6e4e6
MD
366 LIST_INSERT_HEAD(&bp->b_dep, &io->worklist, node);
367 BUF_KERNPROC(bp);
10a5d1ba
MD
368 KKASSERT(io->modified == 0);
369 KKASSERT(io->running == 0);
370 KKASSERT(io->waiting == 0);
66325755
MD
371 io->released = 0; /* we hold an active lock on bp */
372 } else {
373 error = 0;
374 }
375 return(error);
376}
377
378/*
379 * Similar to hammer_io_read() but returns a zero'd out buffer instead.
10a5d1ba
MD
380 * Must be called with the IO exclusively locked.
381 *
66325755 382 * vfs_bio_clrbuf() is kinda nasty, enforce serialization against background
10a5d1ba
MD
383 * I/O by forcing the buffer to not be in a released state before calling
384 * it.
055f5ff8 385 *
10a5d1ba
MD
386 * This function will also mark the IO as modified but it will not
387 * increment the modify_refs count.
b0aab9b9
MD
388 *
389 * No I/O callbacks can occur while we hold the buffer locked.
66325755
MD
390 */
391int
392hammer_io_new(struct vnode *devvp, struct hammer_io *io)
393{
394 struct buf *bp;
395
396 if ((bp = io->bp) == NULL) {
4a2796f3 397 io->bp = getblk(devvp, io->offset, io->bytes, 0, 0);
66325755
MD
398 bp = io->bp;
399 bp->b_ops = &hammer_bioops;
af209b0f 400 KKASSERT(LIST_FIRST(&bp->b_dep) == NULL);
b0aab9b9
MD
401
402 /* io->worklist is locked by the io lock */
66325755 403 LIST_INSERT_HEAD(&bp->b_dep, &io->worklist, node);
055f5ff8 404 io->released = 0;
10a5d1ba 405 KKASSERT(io->running == 0);
055f5ff8 406 io->waiting = 0;
66325755
MD
407 BUF_KERNPROC(bp);
408 } else {
409 if (io->released) {
410 regetblk(bp);
66325755 411 BUF_KERNPROC(bp);
d113fda1 412 io->released = 0;
66325755
MD
413 }
414 }
10a5d1ba 415 hammer_io_modify(io, 0);
66325755
MD
416 vfs_bio_clrbuf(bp);
417 return(0);
418}
419
420/*
0e8bd897
MD
421 * Advance the activity count on the underlying buffer because
422 * HAMMER does not getblk/brelse on every access.
b0aab9b9
MD
423 *
424 * The io->bp cannot go away while the buffer is referenced.
0e8bd897
MD
425 */
426void
427hammer_io_advance(struct hammer_io *io)
428{
429 if (io->bp)
430 buf_act_advance(io->bp);
431}
432
433/*
47637bff 434 * Remove potential device level aliases against buffers managed by high level
362ec2dc
MD
435 * vnodes. Aliases can also be created due to mixed buffer sizes or via
436 * direct access to the backing store device.
e469566b
MD
437 *
438 * This is nasty because the buffers are also VMIO-backed. Even if a buffer
439 * does not exist its backing VM pages might, and we have to invalidate
440 * those as well or a getblk() will reinstate them.
362ec2dc
MD
441 *
442 * Buffer cache buffers associated with hammer_buffers cannot be
443 * invalidated.
47637bff 444 */
362ec2dc 445int
47637bff
MD
446hammer_io_inval(hammer_volume_t volume, hammer_off_t zone2_offset)
447{
cebe9493 448 hammer_io_structure_t iou;
b0aab9b9 449 hammer_mount_t hmp;
47637bff
MD
450 hammer_off_t phys_offset;
451 struct buf *bp;
362ec2dc 452 int error;
47637bff 453
b0aab9b9
MD
454 hmp = volume->io.hmp;
455 lwkt_gettoken(&hmp->io_token);
456
	/*
	 * If a device buffer already exists for the specified physical
	 * offset use that, otherwise instantiate a buffer to cover any
	 * related VM pages, set B_NOCACHE, and brelse().
	 */
47637bff
MD
462 phys_offset = volume->ondisk->vol_buf_beg +
463 (zone2_offset & HAMMER_OFF_SHORT_MASK);
3b98d912
MD
464 if ((bp = findblk(volume->devvp, phys_offset, 0)) != NULL)
465 bremfree(bp);
e469566b
MD
466 else
467 bp = getblk(volume->devvp, phys_offset, HAMMER_BUFSIZE, 0, 0);
b0aab9b9 468
e469566b 469 if ((iou = (void *)LIST_FIRST(&bp->b_dep)) != NULL) {
362ec2dc 470#if 0
5c8d05e2 471 hammer_ref(&iou->io.lock);
e469566b
MD
472 hammer_io_clear_modify(&iou->io, 1);
473 bundirty(bp);
e83ca595
MD
474 iou->io.released = 0;
475 BUF_KERNPROC(bp);
e469566b 476 iou->io.reclaim = 1;
77912481 477 iou->io.waitdep = 1; /* XXX this is a fs_token field */
250aec18 478 KKASSERT(hammer_isactive(&iou->io.lock) == 1);
5c8d05e2
MD
479 hammer_rel_buffer(&iou->buffer, 0);
480 /*hammer_io_deallocate(bp);*/
362ec2dc 481#endif
04b04ca6 482 bqrelse(bp);
362ec2dc 483 error = EAGAIN;
e469566b
MD
484 } else {
485 KKASSERT((bp->b_flags & B_LOCKED) == 0);
486 bundirty(bp);
487 bp->b_flags |= B_NOCACHE|B_RELBUF;
e83ca595 488 brelse(bp);
362ec2dc 489 error = 0;
47637bff 490 }
b0aab9b9 491 lwkt_reltoken(&hmp->io_token);
362ec2dc 492 return(error);
47637bff
MD
493}
494
/*
 * This routine is called on the last reference to a hammer structure.
 * The io must be interlocked with a refcount of zero.  The hammer structure
 * will remain interlocked on return.
 *
 * This routine may return a non-NULL bp to the caller for disposal.
 * The caller typically brelse()'s the bp.
 *
 * The bp may or may not still be passively associated with the IO.  It
 * will remain passively associated if it is unreleasable (e.g. a modified
 * meta-data buffer).
 *
 * The only requirement here is that modified meta-data and volume-header
 * buffers may NOT be disassociated from the IO structure, and consequently
 * we also leave such buffers actively associated with the IO if they already
 * are (since the kernel can't do anything with them anyway).  Only the
 * flusher is allowed to write such buffers out.  Modified pure-data and
 * undo buffers are returned to the kernel but left passively associated
 * so we can track when the kernel writes the bp out.
 */
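/*
 * Caller-side sketch (illustrative only): a typical caller is expected to
 * dispose of the returned bp itself, e.g.
 *
 *	bp = hammer_io_release(&buffer->io, flush);
 *	if (bp)
 *		brelse(bp);
 */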
ecca949a 515struct buf *
09ac686b 516hammer_io_release(struct hammer_io *io, int flush)
66325755 517{
9f5097dc 518 union hammer_io_structure *iou = (void *)io;
66325755
MD
519 struct buf *bp;
520
055f5ff8 521 if ((bp = io->bp) == NULL)
ecca949a 522 return(NULL);
fbc6e32a 523
	/*
	 * Try to flush a dirty IO to disk if asked to by the
	 * caller or if the kernel tried to flush the buffer in the past.
	 *
	 * Kernel-initiated flushes are only allowed for pure-data buffers.
	 * Meta-data and volume buffers can only be flushed explicitly
	 * by HAMMER.
	 */
10a5d1ba 532 if (io->modified) {
09ac686b 533 if (flush) {
710733a6 534 hammer_io_flush(io, 0);
10a5d1ba
MD
535 } else if (bp->b_flags & B_LOCKED) {
536 switch(io->type) {
537 case HAMMER_STRUCTURE_DATA_BUFFER:
710733a6
MD
538 hammer_io_flush(io, 0);
539 break;
10a5d1ba 540 case HAMMER_STRUCTURE_UNDO_BUFFER:
710733a6 541 hammer_io_flush(io, hammer_undo_reclaim(io));
10a5d1ba
MD
542 break;
543 default:
544 break;
545 }
546 } /* else no explicit request to flush the buffer */
547 }
fbc6e32a 548
055f5ff8 549 /*
5c8d05e2
MD
550 * Wait for the IO to complete if asked to. This occurs when
551 * the buffer must be disposed of definitively during an umount
552 * or buffer invalidation.
055f5ff8 553 */
b58c6388 554 if (io->waitdep && io->running) {
055f5ff8
MD
555 hammer_io_wait(io);
556 }
557
	/*
	 * Return control of the buffer to the kernel (with the proviso
	 * that our bioops can override kernel decisions with regard to
	 * the buffer).
	 */
cebe9493 563 if ((flush || io->reclaim) && io->modified == 0 && io->running == 0) {
10a5d1ba
MD
564 /*
565 * Always disassociate the bp if an explicit flush
566 * was requested and the IO completed with no error
567 * (so unmount can really clean up the structure).
568 */
055f5ff8 569 if (io->released) {
b3deaf57 570 regetblk(bp);
46fe7ae1 571 BUF_KERNPROC(bp);
ecca949a
MD
572 } else {
573 io->released = 1;
055f5ff8 574 }
ecca949a
MD
575 hammer_io_disassociate((hammer_io_structure_t)io);
576 /* return the bp */
055f5ff8 577 } else if (io->modified) {
10a5d1ba 578 /*
ecca949a
MD
579 * Only certain IO types can be released to the kernel if
580 * the buffer has been modified.
581 *
582 * volume and meta-data IO types may only be explicitly
583 * flushed by HAMMER.
10a5d1ba
MD
584 */
585 switch(io->type) {
586 case HAMMER_STRUCTURE_DATA_BUFFER:
587 case HAMMER_STRUCTURE_UNDO_BUFFER:
588 if (io->released == 0) {
589 io->released = 1;
590 bdwrite(bp);
591 }
592 break;
593 default:
594 break;
055f5ff8 595 }
ecca949a 596 bp = NULL; /* bp left associated */
055f5ff8 597 } else if (io->released == 0) {
10a5d1ba
MD
598 /*
599 * Clean buffers can be generally released to the kernel.
600 * We leave the bp passively associated with the HAMMER
601 * structure and use bioops to disconnect it later on
602 * if the kernel wants to discard the buffer.
ecca949a
MD
603 *
604 * We can steal the structure's ownership of the bp.
10a5d1ba 605 */
ecca949a 606 io->released = 1;
9f5097dc 607 if (bp->b_flags & B_LOCKED) {
ecca949a
MD
608 hammer_io_disassociate(iou);
609 /* return the bp */
9f5097dc 610 } else {
cebe9493 611 if (io->reclaim) {
ecca949a
MD
612 hammer_io_disassociate(iou);
613 /* return the bp */
cebe9493 614 } else {
ecca949a 615 /* return the bp (bp passively associated) */
cebe9493 616 }
9f5097dc 617 }
19b97e01
MD
618 } else {
		/*
		 * A released buffer is passively associated with our
		 * hammer_io structure.  The kernel cannot destroy it
		 * without making a bioops call.  If the kernel (B_LOCKED)
		 * or we (reclaim) requested that the buffer be destroyed
		 * we destroy it, otherwise we do a quick get/release to
		 * reset its position in the kernel's LRU list.
		 *
		 * Leaving the buffer passively associated allows us to
		 * use the kernel's LRU buffer flushing mechanisms rather
		 * than rolling our own.
		 *
		 * XXX there are two ways of doing this.  We can re-acquire
		 * and passively release to reset the LRU, or not.
		 */
af209b0f 634 if (io->running == 0) {
19b97e01 635 regetblk(bp);
cebe9493 636 if ((bp->b_flags & B_LOCKED) || io->reclaim) {
ecca949a
MD
637 hammer_io_disassociate(iou);
638 /* return the bp */
9f5097dc 639 } else {
ecca949a 640 /* return the bp (bp passively associated) */
9f5097dc 641 }
ecca949a
MD
642 } else {
643 /*
644 * bp is left passively associated but we do not
645 * try to reacquire it. Interactions with the io
646 * structure will occur on completion of the bp's
647 * I/O.
648 */
649 bp = NULL;
19b97e01 650 }
66325755 651 }
ecca949a 652 return(bp);
66325755
MD
653}
654
/*
 * This routine is called with a locked IO when a flush is desired and
 * no other references to the structure exist other than ours.  This
 * routine is ONLY called when HAMMER believes it is safe to flush a
 * potentially modified buffer out.
 *
 * The locked io or io reference prevents a flush from being initiated
 * by the kernel.
 */
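/*
 * Editorial note: the "reclaim" argument appears to ask that the bp be
 * B_LOCKED and thrown away once the write completes, e.g. for UNDO
 * buffers which hammer_undo_reclaim() indicates will not be reused.
 */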
664void
710733a6 665hammer_io_flush(struct hammer_io *io, int reclaim)
fbc6e32a
MD
666{
667 struct buf *bp;
77912481 668 hammer_mount_t hmp;
fbc6e32a 669
055f5ff8 670 /*
10a5d1ba 671 * Degenerate case - nothing to flush if nothing is dirty.
055f5ff8 672 */
b0aab9b9 673 if (io->modified == 0)
055f5ff8
MD
674 return;
675
676 KKASSERT(io->bp);
9f5097dc 677 KKASSERT(io->modify_refs <= 0);
055f5ff8 678
	/*
	 * Acquire ownership of the bp, particularly before we clear our
	 * modified flag.
	 *
	 * We are going to bawrite() this bp.  Don't leave a window where
	 * io->released is set; we actually own the bp rather than our
	 * buffer.
	 *
	 * The io_token should not be required here as only
	 */
77912481 689 hmp = io->hmp;
77062c8a
MD
690 bp = io->bp;
691 if (io->released) {
692 regetblk(bp);
693 /* BUF_KERNPROC(io->bp); */
694 /* io->released = 0; */
695 KKASSERT(io->released);
696 KKASSERT(io->bp == bp);
b0aab9b9
MD
697 } else {
698 io->released = 1;
77062c8a 699 }
77062c8a 700
710733a6
MD
701 if (reclaim) {
702 io->reclaim = 1;
703 if ((bp->b_flags & B_LOCKED) == 0) {
704 bp->b_flags |= B_LOCKED;
b0aab9b9 705 atomic_add_int(&hammer_count_io_locked, 1);
710733a6
MD
706 }
707 }
708
77062c8a 709 /*
10a5d1ba
MD
710 * Acquire exclusive access to the bp and then clear the modified
711 * state of the buffer prior to issuing I/O to interlock any
712 * modifications made while the I/O is in progress. This shouldn't
713 * happen anyway but losing data would be worse. The modified bit
714 * will be rechecked after the IO completes.
715 *
4a2796f3
MD
716 * NOTE: This call also finalizes the buffer's content (inval == 0).
717 *
b33e2cc0
MD
718 * This is only legal when lock.refs == 1 (otherwise we might clear
719 * the modified bit while there are still users of the cluster
720 * modifying the data).
721 *
b33e2cc0
MD
722 * Do this before potentially blocking so any attempt to modify the
723 * ondisk while we are blocked blocks waiting for us.
724 */
5c8d05e2 725 hammer_ref(&io->lock);
4a2796f3 726 hammer_io_clear_modify(io, 0);
250aec18 727 hammer_rel(&io->lock);
bcac4bbb 728
6367d0f9
MD
729 if (hammer_debug_io & 0x0002)
730 kprintf("hammer io_write %016jx\n", bp->b_bio1.bio_offset);
731
bcac4bbb 732 /*
10a5d1ba 733 * Transfer ownership to the kernel and initiate I/O.
b0aab9b9
MD
734 *
735 * NOTE: We do not hold io_token so an atomic op is required to
736 * update io_running_space.
10a5d1ba 737 */
055f5ff8 738 io->running = 1;
77912481 739 atomic_add_int(&hmp->io_running_space, io->bytes);
b0aab9b9 740 atomic_add_int(&hammer_count_io_running_write, io->bytes);
77912481
MD
741 lwkt_gettoken(&hmp->io_token);
742 TAILQ_INSERT_TAIL(&hmp->iorun_list, io, iorun_entry);
743 lwkt_reltoken(&hmp->io_token);
055f5ff8 744 bawrite(bp);
748efb59 745 hammer_io_flush_mark(io->volume);
fbc6e32a
MD
746}
747
/************************************************************************
 *				BUFFER DIRTYING				*
 ************************************************************************
 *
 * These routines deal with dependencies created when IO buffers get
 * modified.  The caller must call hammer_modify_*() on a referenced
 * HAMMER structure prior to modifying its on-disk data.
 *
 * Any intent to modify an IO buffer acquires the related bp and imposes
 * various write ordering dependencies.
 */
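/*
 * Typical modify sequence (illustrative sketch; the field names are made up):
 *
 *	hammer_modify_buffer(trans, buffer, &elm->field, sizeof(elm->field));
 *	elm->field = new_value;
 *	hammer_modify_buffer_done(buffer);
 *
 * The modify call generates UNDO for the range and bumps modify_refs; the
 * _done call drops modify_refs and wakes any hammer_io_write_interlock()
 * waiter.
 */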
055f5ff8
MD
759
760/*
10a5d1ba
MD
761 * Mark a HAMMER structure as undergoing modification. Meta-data buffers
762 * are locked until the flusher can deal with them, pure data buffers
763 * can be written out.
77912481
MD
764 *
765 * The referenced io prevents races.
055f5ff8 766 */
10a5d1ba 767static
b58c6388 768void
10a5d1ba 769hammer_io_modify(hammer_io_t io, int count)
0b075555 770{
46fe7ae1 771 /*
9f5097dc
MD
772 * io->modify_refs must be >= 0
773 */
774 while (io->modify_refs < 0) {
775 io->waitmod = 1;
776 tsleep(io, 0, "hmrmod", 0);
777 }
778
779 /*
46fe7ae1
MD
780 * Shortcut if nothing to do.
781 */
250aec18 782 KKASSERT(hammer_isactive(&io->lock) && io->bp != NULL);
10a5d1ba 783 io->modify_refs += count;
b58c6388
MD
784 if (io->modified && io->released == 0)
785 return;
46fe7ae1 786
77912481
MD
787 /*
788 * NOTE: It is important not to set the modified bit
789 * until after we have acquired the bp or we risk
790 * racing against checkwrite.
791 */
46fe7ae1 792 hammer_lock_ex(&io->lock);
46fe7ae1
MD
793 if (io->released) {
794 regetblk(io->bp);
795 BUF_KERNPROC(io->bp);
796 io->released = 0;
77912481
MD
797 }
798 if (io->modified == 0) {
799 hammer_io_set_modlist(io);
800 io->modified = 1;
46fe7ae1 801 }
46fe7ae1 802 hammer_unlock(&io->lock);
055f5ff8
MD
803}
804
10a5d1ba
MD
805static __inline
806void
807hammer_io_modify_done(hammer_io_t io)
808{
809 KKASSERT(io->modify_refs > 0);
810 --io->modify_refs;
9f5097dc
MD
811 if (io->modify_refs == 0 && io->waitmod) {
812 io->waitmod = 0;
813 wakeup(io);
814 }
815}
816
77912481
MD
817/*
818 * The write interlock blocks other threads trying to modify a buffer
819 * (they block in hammer_io_modify()) after us, or blocks us while other
820 * threads are in the middle of modifying a buffer.
821 *
822 * The caller also has a ref on the io, however if we are not careful
823 * we will race bioops callbacks (checkwrite). To deal with this
824 * we must at least acquire and release the io_token, and it is probably
825 * better to hold it through the setting of modify_refs.
826 */
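/*
 * Usage sketch (illustrative; the real callers live elsewhere, e.g. in the
 * bulk-data write path):
 *
 *	hammer_io_write_interlock(&buffer->io);
 *	bcopy(src, ptr, data_len);
 *	hammer_io_done_interlock(&buffer->io);
 */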
9f5097dc
MD
827void
828hammer_io_write_interlock(hammer_io_t io)
829{
77912481
MD
830 hammer_mount_t hmp = io->hmp;
831
832 lwkt_gettoken(&hmp->io_token);
9f5097dc
MD
833 while (io->modify_refs != 0) {
834 io->waitmod = 1;
835 tsleep(io, 0, "hmrmod", 0);
836 }
837 io->modify_refs = -1;
77912481 838 lwkt_reltoken(&hmp->io_token);
9f5097dc
MD
839}
840
841void
842hammer_io_done_interlock(hammer_io_t io)
843{
844 KKASSERT(io->modify_refs == -1);
845 io->modify_refs = 0;
846 if (io->waitmod) {
847 io->waitmod = 0;
848 wakeup(io);
849 }
10a5d1ba
MD
850}
851
2f85fa4d
MD
852/*
853 * Caller intends to modify a volume's ondisk structure.
854 *
855 * This is only allowed if we are the flusher or we have a ref on the
856 * sync_lock.
857 */
055f5ff8 858void
36f82b23
MD
859hammer_modify_volume(hammer_transaction_t trans, hammer_volume_t volume,
860 void *base, int len)
055f5ff8 861{
2f85fa4d 862 KKASSERT (trans == NULL || trans->sync_lock_refs > 0);
055f5ff8 863
2f85fa4d 864 hammer_io_modify(&volume->io, 1);
47197d71
MD
865 if (len) {
866 intptr_t rel_offset = (intptr_t)base - (intptr_t)volume->ondisk;
867 KKASSERT((rel_offset & ~(intptr_t)HAMMER_BUFMASK) == 0);
02428fb6 868 hammer_generate_undo(trans,
47197d71
MD
869 HAMMER_ENCODE_RAW_VOLUME(volume->vol_no, rel_offset),
870 base, len);
871 }
0b075555
MD
872}
873
055f5ff8 874/*
2f85fa4d
MD
875 * Caller intends to modify a buffer's ondisk structure.
876 *
877 * This is only allowed if we are the flusher or we have a ref on the
878 * sync_lock.
055f5ff8 879 */
0b075555 880void
36f82b23
MD
881hammer_modify_buffer(hammer_transaction_t trans, hammer_buffer_t buffer,
882 void *base, int len)
46fe7ae1 883{
2f85fa4d
MD
884 KKASSERT (trans == NULL || trans->sync_lock_refs > 0);
885
10a5d1ba 886 hammer_io_modify(&buffer->io, 1);
47197d71
MD
887 if (len) {
888 intptr_t rel_offset = (intptr_t)base - (intptr_t)buffer->ondisk;
889 KKASSERT((rel_offset & ~(intptr_t)HAMMER_BUFMASK) == 0);
02428fb6 890 hammer_generate_undo(trans,
34d829f7 891 buffer->zone2_offset + rel_offset,
47197d71
MD
892 base, len);
893 }
46fe7ae1
MD
894}
895
10a5d1ba
MD
896void
897hammer_modify_volume_done(hammer_volume_t volume)
898{
899 hammer_io_modify_done(&volume->io);
900}
901
902void
903hammer_modify_buffer_done(hammer_buffer_t buffer)
904{
905 hammer_io_modify_done(&buffer->io);
906}
907
46fe7ae1 908/*
4a2796f3
MD
909 * Mark an entity as not being dirty any more and finalize any
910 * delayed adjustments to the buffer.
911 *
912 * Delayed adjustments are an important performance enhancement, allowing
913 * us to avoid recalculating B-Tree node CRCs over and over again when
914 * making bulk-modifications to the B-Tree.
915 *
916 * If inval is non-zero delayed adjustments are ignored.
5c8d05e2
MD
917 *
918 * This routine may dereference related btree nodes and cause the
919 * buffer to be dereferenced. The caller must own a reference on io.
61aeeb33
MD
920 */
921void
4a2796f3 922hammer_io_clear_modify(struct hammer_io *io, int inval)
61aeeb33 923{
77912481
MD
924 hammer_mount_t hmp;
925
926 /*
1afb73cf 927 * io_token is needed to avoid races on mod_root
77912481 928 */
4a2796f3
MD
929 if (io->modified == 0)
930 return;
77912481
MD
931 hmp = io->hmp;
932 lwkt_gettoken(&hmp->io_token);
933 if (io->modified == 0) {
934 lwkt_reltoken(&hmp->io_token);
935 return;
936 }
4a2796f3
MD
937
938 /*
939 * Take us off the mod-list and clear the modified bit.
940 */
1afb73cf
MD
941 KKASSERT(io->mod_root != NULL);
942 if (io->mod_root == &io->hmp->volu_root ||
943 io->mod_root == &io->hmp->meta_root) {
f5a07a7a 944 io->hmp->locked_dirty_space -= io->bytes;
b0aab9b9 945 atomic_add_int(&hammer_count_dirtybufspace, -io->bytes);
4a2796f3 946 }
1afb73cf
MD
947 RB_REMOVE(hammer_mod_rb_tree, io->mod_root, io);
948 io->mod_root = NULL;
4a2796f3
MD
949 io->modified = 0;
950
77912481
MD
951 lwkt_reltoken(&hmp->io_token);
952
4a2796f3
MD
953 /*
954 * If this bit is not set there are no delayed adjustments.
955 */
956 if (io->gencrc == 0)
957 return;
958 io->gencrc = 0;
959
960 /*
961 * Finalize requested CRCs. The NEEDSCRC flag also holds a reference
962 * on the node (& underlying buffer). Release the node after clearing
963 * the flag.
964 */
965 if (io->type == HAMMER_STRUCTURE_META_BUFFER) {
966 hammer_buffer_t buffer = (void *)io;
967 hammer_node_t node;
968
969restart:
970 TAILQ_FOREACH(node, &buffer->clist, entry) {
971 if ((node->flags & HAMMER_NODE_NEEDSCRC) == 0)
972 continue;
973 node->flags &= ~HAMMER_NODE_NEEDSCRC;
974 KKASSERT(node->ondisk);
975 if (inval == 0)
976 node->ondisk->crc = crc32(&node->ondisk->crc + 1, HAMMER_BTREE_CRCSIZE);
977 hammer_rel_node(node);
978 goto restart;
61aeeb33 979 }
cebe9493 980 }
5c8d05e2 981 /* caller must still have ref on io */
250aec18 982 KKASSERT(hammer_isactive(&io->lock));
cebe9493
MD
983}
984
985/*
986 * Clear the IO's modify list. Even though the IO is no longer modified
1afb73cf 987 * it may still be on the lose_root. This routine is called just before
cebe9493 988 * the governing hammer_buffer is destroyed.
b0aab9b9 989 *
1afb73cf 990 * mod_root requires io_token protection.
cebe9493
MD
991 */
992void
993hammer_io_clear_modlist(struct hammer_io *io)
994{
b0aab9b9
MD
995 hammer_mount_t hmp = io->hmp;
996
4a2796f3 997 KKASSERT(io->modified == 0);
1afb73cf 998 if (io->mod_root) {
b0aab9b9 999 lwkt_gettoken(&hmp->io_token);
1afb73cf
MD
1000 if (io->mod_root) {
1001 KKASSERT(io->mod_root == &io->hmp->lose_root);
1002 RB_REMOVE(hammer_mod_rb_tree, io->mod_root, io);
1003 io->mod_root = NULL;
b0aab9b9
MD
1004 }
1005 lwkt_reltoken(&hmp->io_token);
61aeeb33
MD
1006 }
1007}
1008
cdb6e4e6
MD
1009static void
1010hammer_io_set_modlist(struct hammer_io *io)
1011{
1012 struct hammer_mount *hmp = io->hmp;
1013
77912481 1014 lwkt_gettoken(&hmp->io_token);
1afb73cf 1015 KKASSERT(io->mod_root == NULL);
cdb6e4e6
MD
1016
1017 switch(io->type) {
1018 case HAMMER_STRUCTURE_VOLUME:
1afb73cf 1019 io->mod_root = &hmp->volu_root;
cdb6e4e6 1020 hmp->locked_dirty_space += io->bytes;
b0aab9b9 1021 atomic_add_int(&hammer_count_dirtybufspace, io->bytes);
cdb6e4e6
MD
1022 break;
1023 case HAMMER_STRUCTURE_META_BUFFER:
1afb73cf 1024 io->mod_root = &hmp->meta_root;
cdb6e4e6 1025 hmp->locked_dirty_space += io->bytes;
b0aab9b9 1026 atomic_add_int(&hammer_count_dirtybufspace, io->bytes);
cdb6e4e6
MD
1027 break;
1028 case HAMMER_STRUCTURE_UNDO_BUFFER:
1afb73cf 1029 io->mod_root = &hmp->undo_root;
cdb6e4e6
MD
1030 break;
1031 case HAMMER_STRUCTURE_DATA_BUFFER:
1afb73cf 1032 io->mod_root = &hmp->data_root;
cdb6e4e6 1033 break;
eddadaee 1034 case HAMMER_STRUCTURE_DUMMY:
1afb73cf
MD
1035 panic("hammer_io_set_modlist: bad io type");
1036 break; /* NOT REACHED */
1037 }
1038 if (RB_INSERT(hammer_mod_rb_tree, io->mod_root, io)) {
1039 panic("hammer_io_set_modlist: duplicate entry");
1040 /* NOT REACHED */
cdb6e4e6 1041 }
77912481 1042 lwkt_reltoken(&hmp->io_token);
cdb6e4e6
MD
1043}
1044
055f5ff8
MD
1045/************************************************************************
1046 * HAMMER_BIOOPS *
1047 ************************************************************************
1048 *
66325755
MD
1049 */
1050
1051/*
055f5ff8 1052 * Pre-IO initiation kernel callback - cluster build only
b0aab9b9
MD
1053 *
1054 * bioops callback - hold io_token
66325755 1055 */
66325755
MD
1056static void
1057hammer_io_start(struct buf *bp)
1058{
b0aab9b9 1059 /* nothing to do, so io_token not needed */
66325755
MD
1060}
1061
055f5ff8 1062/*
7bc5b8c2 1063 * Post-IO completion kernel callback - MAY BE CALLED FROM INTERRUPT!
b33e2cc0 1064 *
77912481
MD
1065 * NOTE: HAMMER may modify a data buffer after we have initiated write
1066 * I/O.
1067 *
1068 * NOTE: MPSAFE callback
b0aab9b9
MD
1069 *
1070 * bioops callback - hold io_token
055f5ff8 1071 */
66325755
MD
1072static void
1073hammer_io_complete(struct buf *bp)
1074{
055f5ff8 1075 union hammer_io_structure *iou = (void *)LIST_FIRST(&bp->b_dep);
ba298df1 1076 struct hammer_mount *hmp = iou->io.hmp;
eddadaee 1077 struct hammer_io *ionext;
055f5ff8 1078
b0aab9b9
MD
1079 lwkt_gettoken(&hmp->io_token);
1080
055f5ff8 1081 KKASSERT(iou->io.released == 1);
fbc6e32a 1082
bf3b416b
MD
1083 /*
1084 * Deal with people waiting for I/O to drain
1085 */
f90dde4c 1086 if (iou->io.running) {
		/*
		 * Deal with critical write errors.  Once a critical error
		 * has been flagged in hmp the UNDO FIFO will not be updated.
		 * That way crash recovery will give us a consistent
		 * filesystem.
		 *
		 * Because of this we can throw away failed UNDO buffers.  If
		 * we throw away META or DATA buffers we risk corrupting
		 * the now read-only version of the filesystem visible to
		 * the user.  Clear B_ERROR so the buffer is not re-dirtied
		 * by the kernel and ref the io so it doesn't get thrown
		 * away.
		 */
1100 if (bp->b_flags & B_ERROR) {
77912481 1101 lwkt_gettoken(&hmp->fs_token);
ba298df1 1102 hammer_critical_error(hmp, NULL, bp->b_error,
cdb6e4e6 1103 "while flushing meta-data");
77912481
MD
1104 lwkt_reltoken(&hmp->fs_token);
1105
cdb6e4e6
MD
1106 switch(iou->io.type) {
1107 case HAMMER_STRUCTURE_UNDO_BUFFER:
1108 break;
1109 default:
1110 if (iou->io.ioerror == 0) {
1111 iou->io.ioerror = 1;
cdb6e4e6
MD
1112 hammer_ref(&iou->io.lock);
1113 }
1114 break;
1115 }
1116 bp->b_flags &= ~B_ERROR;
1117 bundirty(bp);
1118#if 0
1119 hammer_io_set_modlist(&iou->io);
1120 iou->io.modified = 1;
1121#endif
1122 }
ce0138a6 1123 hammer_stats_disk_write += iou->io.bytes;
b0aab9b9
MD
1124 atomic_add_int(&hammer_count_io_running_write, -iou->io.bytes);
1125 atomic_add_int(&hmp->io_running_space, -iou->io.bytes);
ba298df1
MD
1126 if (hmp->io_running_wakeup &&
1127 hmp->io_running_space < hammer_limit_running_io / 2) {
1128 hmp->io_running_wakeup = 0;
1129 wakeup(&hmp->io_running_wakeup);
1130 }
1131 KKASSERT(hmp->io_running_space >= 0);
f90dde4c 1132 iou->io.running = 0;
eddadaee
MD
1133
1134 /*
1135 * Remove from iorun list and wakeup any multi-io waiter(s).
1136 */
ba298df1 1137 if (TAILQ_FIRST(&hmp->iorun_list) == &iou->io) {
eddadaee
MD
1138 ionext = TAILQ_NEXT(&iou->io, iorun_entry);
1139 if (ionext && ionext->type == HAMMER_STRUCTURE_DUMMY)
1140 wakeup(ionext);
1141 }
ba298df1 1142 TAILQ_REMOVE(&hmp->iorun_list, &iou->io, iorun_entry);
ce0138a6
MD
1143 } else {
1144 hammer_stats_disk_read += iou->io.bytes;
f90dde4c
MD
1145 }
1146
055f5ff8
MD
1147 if (iou->io.waiting) {
1148 iou->io.waiting = 0;
1149 wakeup(iou);
1150 }
1151
1152 /*
bf3b416b 1153 * If B_LOCKED is set someone wanted to deallocate the bp at some
250aec18
MD
1154 * point, try to do it now. The operation will fail if there are
1155 * refs or if hammer_io_deallocate() is unable to gain the
1156 * interlock.
055f5ff8 1157 */
250aec18 1158 if (bp->b_flags & B_LOCKED) {
b0aab9b9 1159 atomic_add_int(&hammer_count_io_locked, -1);
d5ef456e 1160 bp->b_flags &= ~B_LOCKED;
055f5ff8
MD
1161 hammer_io_deallocate(bp);
1162 /* structure may be dead now */
1163 }
b0aab9b9 1164 lwkt_reltoken(&hmp->io_token);
66325755
MD
1165}
1166
1167/*
1168 * Callback from kernel when it wishes to deallocate a passively
10a5d1ba
MD
1169 * associated structure. This mostly occurs with clean buffers
1170 * but it may be possible for a holding structure to be marked dirty
7bc5b8c2 1171 * while its buffer is passively associated. The caller owns the bp.
66325755
MD
1172 *
1173 * If we cannot disassociate we set B_LOCKED to prevent the buffer
1174 * from getting reused.
46fe7ae1
MD
1175 *
1176 * WARNING: Because this can be called directly by getnewbuf we cannot
1177 * recurse into the tree. If a bp cannot be immediately disassociated
1178 * our only recourse is to set B_LOCKED.
7bc5b8c2
MD
1179 *
1180 * WARNING: This may be called from an interrupt via hammer_io_complete()
b0aab9b9
MD
1181 *
1182 * bioops callback - hold io_token
66325755
MD
1183 */
1184static void
1185hammer_io_deallocate(struct buf *bp)
1186{
055f5ff8 1187 hammer_io_structure_t iou = (void *)LIST_FIRST(&bp->b_dep);
b0aab9b9
MD
1188 hammer_mount_t hmp;
1189
1190 hmp = iou->io.hmp;
1191
1192 lwkt_gettoken(&hmp->io_token);
66325755 1193
055f5ff8 1194 KKASSERT((bp->b_flags & B_LOCKED) == 0 && iou->io.running == 0);
250aec18
MD
1195 if (hammer_try_interlock_norefs(&iou->io.lock) == 0) {
1196 /*
1197 * We cannot safely disassociate a bp from a referenced
1198 * or interlocked HAMMER structure.
1199 */
1200 bp->b_flags |= B_LOCKED;
b0aab9b9 1201 atomic_add_int(&hammer_count_io_locked, 1);
250aec18 1202 } else if (iou->io.modified) {
10a5d1ba
MD
1203 /*
1204 * It is not legal to disassociate a modified buffer. This
1205 * case really shouldn't ever occur.
1206 */
055f5ff8 1207 bp->b_flags |= B_LOCKED;
b0aab9b9 1208 atomic_add_int(&hammer_count_io_locked, 1);
250aec18 1209 hammer_put_interlock(&iou->io.lock, 0);
055f5ff8 1210 } else {
10a5d1ba
MD
1211 /*
1212 * Disassociate the BP. If the io has no refs left we
b0aab9b9
MD
1213 * have to add it to the loose list. The kernel has
1214 * locked the buffer and therefore our io must be
1215 * in a released state.
10a5d1ba 1216 */
ecca949a
MD
1217 hammer_io_disassociate(iou);
1218 if (iou->io.type != HAMMER_STRUCTURE_VOLUME) {
1219 KKASSERT(iou->io.bp == NULL);
1afb73cf
MD
1220 KKASSERT(iou->io.mod_root == NULL);
1221 iou->io.mod_root = &hmp->lose_root;
1222 if (RB_INSERT(hammer_mod_rb_tree, iou->io.mod_root,
1223 &iou->io)) {
1224 panic("hammer_io_deallocate: duplicate entry");
1225 }
66325755 1226 }
250aec18 1227 hammer_put_interlock(&iou->io.lock, 1);
66325755 1228 }
b0aab9b9 1229 lwkt_reltoken(&hmp->io_token);
66325755
MD
1230}
1231
b0aab9b9
MD
1232/*
1233 * bioops callback - hold io_token
1234 */
66325755
MD
1235static int
1236hammer_io_fsync(struct vnode *vp)
1237{
b0aab9b9 1238 /* nothing to do, so io_token not needed */
66325755
MD
1239 return(0);
1240}
1241
1242/*
1243 * NOTE: will not be called unless we tell the kernel about the
1244 * bioops. Unused... we use the mount's VFS_SYNC instead.
b0aab9b9
MD
1245 *
1246 * bioops callback - hold io_token
66325755
MD
1247 */
1248static int
1249hammer_io_sync(struct mount *mp)
1250{
b0aab9b9 1251 /* nothing to do, so io_token not needed */
66325755
MD
1252 return(0);
1253}
1254
b0aab9b9
MD
1255/*
1256 * bioops callback - hold io_token
1257 */
66325755
MD
1258static void
1259hammer_io_movedeps(struct buf *bp1, struct buf *bp2)
1260{
b0aab9b9 1261 /* nothing to do, so io_token not needed */
66325755
MD
1262}
1263
/*
 * I/O pre-check for reading and writing.  HAMMER only uses this for
 * B_CACHE buffers so checkread just shouldn't happen, but if it does
 * allow it.
 *
 * Writing is a different case.  We don't want the kernel to try to write
 * out a buffer that HAMMER may be modifying passively or which has a
 * dependency.  In addition, kernel-demanded writes can only proceed for
 * certain types of buffers (i.e. UNDO and DATA types).  Other dirty
 * buffer types can only be explicitly written by the flusher.
 *
 * checkwrite will only be called for bdwrite()n buffers.  If we return
 * success the kernel is guaranteed to initiate the buffer write.
 *
 * bioops callback - hold io_token
 */
1278 * bioops callback - hold io_token
66325755
MD
1279 */
1280static int
1281hammer_io_checkread(struct buf *bp)
1282{
b0aab9b9 1283 /* nothing to do, so io_token not needed */
66325755
MD
1284 return(0);
1285}
1286
b0aab9b9 1287/*
77912481
MD
1288 * The kernel is asking us whether it can write out a dirty buffer or not.
1289 *
b0aab9b9
MD
1290 * bioops callback - hold io_token
1291 */
66325755
MD
1292static int
1293hammer_io_checkwrite(struct buf *bp)
1294{
10a5d1ba 1295 hammer_io_t io = (void *)LIST_FIRST(&bp->b_dep);
b0aab9b9 1296 hammer_mount_t hmp = io->hmp;
66325755 1297
77062c8a
MD
1298 /*
1299 * This shouldn't happen under normal operation.
1300 */
b0aab9b9 1301 lwkt_gettoken(&hmp->io_token);
77062c8a
MD
1302 if (io->type == HAMMER_STRUCTURE_VOLUME ||
1303 io->type == HAMMER_STRUCTURE_META_BUFFER) {
1304 if (!panicstr)
1305 panic("hammer_io_checkwrite: illegal buffer");
a99b9ea2
MD
1306 if ((bp->b_flags & B_LOCKED) == 0) {
1307 bp->b_flags |= B_LOCKED;
b0aab9b9 1308 atomic_add_int(&hammer_count_io_locked, 1);
a99b9ea2 1309 }
b0aab9b9 1310 lwkt_reltoken(&hmp->io_token);
77062c8a
MD
1311 return(1);
1312 }
c9b9e29d 1313
055f5ff8 1314 /*
77912481
MD
1315 * We have to be able to interlock the IO to safely modify any
1316 * of its fields without holding the fs_token. If we can't lock
1317 * it then we are racing someone.
1318 *
1319 * Our ownership of the bp lock prevents the io from being ripped
1320 * out from under us.
1321 */
1322 if (hammer_try_interlock_norefs(&io->lock) == 0) {
1323 bp->b_flags |= B_LOCKED;
1324 atomic_add_int(&hammer_count_io_locked, 1);
1325 lwkt_reltoken(&hmp->io_token);
1326 return(1);
1327 }
1328
1329 /*
1330 * The modified bit must be cleared prior to the initiation of
1331 * any IO (returning 0 initiates the IO). Because this is a
1332 * normal data buffer hammer_io_clear_modify() runs through a
1333 * simple degenerate case.
1334 *
1335 * Return 0 will cause the kernel to initiate the IO, and we
1336 * must normally clear the modified bit before we begin. If
1337 * the io has modify_refs we do not clear the modified bit,
1338 * otherwise we may miss changes.
5c8d05e2
MD
1339 *
1340 * Only data and undo buffers can reach here. These buffers do
1341 * not have terminal crc functions but we temporarily reference
1342 * the IO anyway, just in case.
b33e2cc0 1343 */
5c8d05e2
MD
1344 if (io->modify_refs == 0 && io->modified) {
1345 hammer_ref(&io->lock);
4a2796f3 1346 hammer_io_clear_modify(io, 0);
250aec18 1347 hammer_rel(&io->lock);
5c8d05e2
MD
1348 } else if (io->modified) {
1349 KKASSERT(io->type == HAMMER_STRUCTURE_DATA_BUFFER);
1350 }
f90dde4c
MD
1351
1352 /*
1353 * The kernel is going to start the IO, set io->running.
1354 */
1355 KKASSERT(io->running == 0);
1356 io->running = 1;
b0aab9b9
MD
1357 atomic_add_int(&io->hmp->io_running_space, io->bytes);
1358 atomic_add_int(&hammer_count_io_running_write, io->bytes);
eddadaee 1359 TAILQ_INSERT_TAIL(&io->hmp->iorun_list, io, iorun_entry);
b0aab9b9 1360
77912481 1361 hammer_put_interlock(&io->lock, 1);
b0aab9b9
MD
1362 lwkt_reltoken(&hmp->io_token);
1363
055f5ff8 1364 return(0);
66325755
MD
1365}
1366
8cd0a023 1367/*
66325755
MD
1368 * Return non-zero if we wish to delay the kernel's attempt to flush
1369 * this buffer to disk.
b0aab9b9
MD
1370 *
1371 * bioops callback - hold io_token
66325755
MD
1372 */
1373static int
1374hammer_io_countdeps(struct buf *bp, int n)
1375{
b0aab9b9 1376 /* nothing to do, so io_token not needed */
66325755
MD
1377 return(0);
1378}
1379
1380struct bio_ops hammer_bioops = {
1381 .io_start = hammer_io_start,
1382 .io_complete = hammer_io_complete,
1383 .io_deallocate = hammer_io_deallocate,
1384 .io_fsync = hammer_io_fsync,
1385 .io_sync = hammer_io_sync,
1386 .io_movedeps = hammer_io_movedeps,
1387 .io_countdeps = hammer_io_countdeps,
1388 .io_checkread = hammer_io_checkread,
1389 .io_checkwrite = hammer_io_checkwrite,
1390};
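/*
 * These callbacks only fire for buffers HAMMER has claimed by pointing
 * bp->b_ops at hammer_bioops and linking io->worklist onto bp->b_dep,
 * as done in hammer_io_read() and hammer_io_new() above.
 */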
1391
/************************************************************************
 *				DIRECT IO OPS				*
 ************************************************************************
 *
 * These functions operate directly on the buffer cache buffer associated
 * with a front-end vnode rather than a back-end device vnode.
 */
1399
1400/*
1401 * Read a buffer associated with a front-end vnode directly from the
1b0ab2c3
MD
1402 * disk media. The bio may be issued asynchronously. If leaf is non-NULL
1403 * we validate the CRC.
a99b9ea2 1404 *
1b0ab2c3
MD
1405 * We must check for the presence of a HAMMER buffer to handle the case
1406 * where the reblocker has rewritten the data (which it does via the HAMMER
1407 * buffer system, not via the high-level vnode buffer cache), but not yet
1408 * committed the buffer to the media.
47637bff
MD
1409 */
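/*
 * Offset translation sketch (assumed encoding, see hammer_disk.h): a
 * hammer_off_t is believed to pack the zone in the top 4 bits and the
 * volume number in the next 8; masking with HAMMER_OFF_SHORT_MASK leaves
 * the volume-relative byte offset, which added to vol_buf_beg yields the
 * raw device offset handed to vn_strategy().
 */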
1410int
1b0ab2c3
MD
1411hammer_io_direct_read(hammer_mount_t hmp, struct bio *bio,
1412 hammer_btree_leaf_elm_t leaf)
47637bff 1413{
1b0ab2c3 1414 hammer_off_t buf_offset;
47637bff
MD
1415 hammer_off_t zone2_offset;
1416 hammer_volume_t volume;
1417 struct buf *bp;
1418 struct bio *nbio;
1419 int vol_no;
1420 int error;
1421
1b0ab2c3
MD
1422 buf_offset = bio->bio_offset;
1423 KKASSERT((buf_offset & HAMMER_OFF_ZONE_MASK) ==
1424 HAMMER_ZONE_LARGE_DATA);
1425
1426 /*
1427 * The buffer cache may have an aliased buffer (the reblocker can
1428 * write them). If it does we have to sync any dirty data before
1429 * we can build our direct-read. This is a non-critical code path.
1430 */
1431 bp = bio->bio_buf;
1432 hammer_sync_buffers(hmp, buf_offset, bp->b_bufsize);
4a2796f3 1433
1b0ab2c3
MD
1434 /*
1435 * Resolve to a zone-2 offset. The conversion just requires
1436 * munging the top 4 bits but we want to abstract it anyway
1437 * so the blockmap code can verify the zone assignment.
1438 */
1439 zone2_offset = hammer_blockmap_lookup(hmp, buf_offset, &error);
1440 if (error)
1441 goto done;
43c665ae
MD
1442 KKASSERT((zone2_offset & HAMMER_OFF_ZONE_MASK) ==
1443 HAMMER_ZONE_RAW_BUFFER);
1444
1b0ab2c3
MD
1445 /*
1446 * Resolve volume and raw-offset for 3rd level bio. The
1447 * offset will be specific to the volume.
1448 */
43c665ae
MD
1449 vol_no = HAMMER_VOL_DECODE(zone2_offset);
1450 volume = hammer_get_volume(hmp, vol_no, &error);
1451 if (error == 0 && zone2_offset >= volume->maxbuf_off)
1452 error = EIO;
1453
47637bff 1454 if (error == 0) {
e469566b
MD
1455 /*
1456 * 3rd level bio
1457 */
43c665ae
MD
1458 nbio = push_bio(bio);
1459 nbio->bio_offset = volume->ondisk->vol_buf_beg +
e469566b 1460 (zone2_offset & HAMMER_OFF_SHORT_MASK);
1b0ab2c3
MD
1461#if 0
1462 /*
1463 * XXX disabled - our CRC check doesn't work if the OS
1464 * does bogus_page replacement on the direct-read.
1465 */
1466 if (leaf && hammer_verify_data) {
1467 nbio->bio_done = hammer_io_direct_read_complete;
1468 nbio->bio_caller_info1.uvalue32 = leaf->data_crc;
1469 }
1470#endif
ce0138a6 1471 hammer_stats_disk_read += bp->b_bufsize;
43c665ae 1472 vn_strategy(volume->devvp, nbio);
47637bff 1473 }
43c665ae 1474 hammer_rel_volume(volume, 0);
1b0ab2c3 1475done:
47637bff 1476 if (error) {
cebe9493 1477 kprintf("hammer_direct_read: failed @ %016llx\n",
973c11b9 1478 (long long)zone2_offset);
47637bff
MD
1479 bp->b_error = error;
1480 bp->b_flags |= B_ERROR;
1481 biodone(bio);
1482 }
1483 return(error);
1484}
1485
1b0ab2c3
MD
1486#if 0
/*
 * On completion of the BIO this callback must check the data CRC
 * and chain to the previous bio.
 *
 * MPSAFE - since we do not modify any hammer_records we do not need
 * io_token.
 *
 * NOTE: MPSAFE callback
 */
1496static
1497void
1498hammer_io_direct_read_complete(struct bio *nbio)
1499{
1500 struct bio *obio;
1501 struct buf *bp;
1502 u_int32_t rec_crc = nbio->bio_caller_info1.uvalue32;
1503
1504 bp = nbio->bio_buf;
1505 if (crc32(bp->b_data, bp->b_bufsize) != rec_crc) {
1506 kprintf("HAMMER: data_crc error @%016llx/%d\n",
1507 nbio->bio_offset, bp->b_bufsize);
fc73edd8
MD
1508 if (hammer_debug_critical)
1509 Debugger("data_crc on read");
1b0ab2c3
MD
1510 bp->b_flags |= B_ERROR;
1511 bp->b_error = EIO;
1512 }
1513 obio = pop_bio(nbio);
1514 biodone(obio);
1515}
1516#endif
1517
47637bff
MD
/*
 * Write a buffer associated with a front-end vnode directly to the
 * disk media.  The bio may be issued asynchronously.
 *
 * The BIO is associated with the specified record and RECG_DIRECT_IO
 * is set.  The record is added to its object.
 */
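/*
 * Two cases below: buffer-aligned, full-sized writes go directly to the
 * backing device via a pushed bio, while smaller writes are copied into a
 * normal HAMMER buffer with hammer_bread()/bcopy() and completed inline.
 */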
1525int
6362a262
MD
1526hammer_io_direct_write(hammer_mount_t hmp, struct bio *bio,
1527 hammer_record_t record)
47637bff 1528{
1b0ab2c3 1529 hammer_btree_leaf_elm_t leaf = &record->leaf;
0832c9bb 1530 hammer_off_t buf_offset;
47637bff
MD
1531 hammer_off_t zone2_offset;
1532 hammer_volume_t volume;
0832c9bb 1533 hammer_buffer_t buffer;
47637bff
MD
1534 struct buf *bp;
1535 struct bio *nbio;
0832c9bb 1536 char *ptr;
47637bff
MD
1537 int vol_no;
1538 int error;
1539
0832c9bb
MD
1540 buf_offset = leaf->data_offset;
1541
1542 KKASSERT(buf_offset > HAMMER_ZONE_BTREE);
47637bff
MD
1543 KKASSERT(bio->bio_buf->b_cmd == BUF_CMD_WRITE);
1544
	/*
	 * Issue or execute the I/O.  The new memory record must replace
	 * the old one before the I/O completes, otherwise a reacquisition of
	 * the buffer will load the old media data instead of the new.
	 */
0832c9bb 1550 if ((buf_offset & HAMMER_BUFMASK) == 0 &&
4a2796f3 1551 leaf->data_len >= HAMMER_BUFSIZE) {
0832c9bb
MD
1552 /*
1553 * We are using the vnode's bio to write directly to the
1554 * media, any hammer_buffer at the same zone-X offset will
1555 * now have stale data.
1556 */
1557 zone2_offset = hammer_blockmap_lookup(hmp, buf_offset, &error);
47637bff
MD
1558 vol_no = HAMMER_VOL_DECODE(zone2_offset);
1559 volume = hammer_get_volume(hmp, vol_no, &error);
1560
1561 if (error == 0 && zone2_offset >= volume->maxbuf_off)
1562 error = EIO;
1563 if (error == 0) {
0832c9bb 1564 bp = bio->bio_buf;
4a2796f3 1565 KKASSERT((bp->b_bufsize & HAMMER_BUFMASK) == 0);
e469566b 1566 /*
4a2796f3
MD
1567 hammer_del_buffers(hmp, buf_offset,
1568 zone2_offset, bp->b_bufsize);
e469566b 1569 */
1b0ab2c3 1570
43c665ae
MD
1571 /*
1572 * Second level bio - cached zone2 offset.
1b0ab2c3
MD
1573 *
1574 * (We can put our bio_done function in either the
1575 * 2nd or 3rd level).
43c665ae 1576 */
47637bff 1577 nbio = push_bio(bio);
43c665ae 1578 nbio->bio_offset = zone2_offset;
1b0ab2c3
MD
1579 nbio->bio_done = hammer_io_direct_write_complete;
1580 nbio->bio_caller_info1.ptr = record;
e469566b 1581 record->zone2_offset = zone2_offset;
77912481
MD
1582 record->gflags |= HAMMER_RECG_DIRECT_IO |
1583 HAMMER_RECG_DIRECT_INVAL;
43c665ae
MD
1584
1585 /*
1586 * Third level bio - raw offset specific to the
1587 * correct volume.
1588 */
1589 zone2_offset &= HAMMER_OFF_SHORT_MASK;
1590 nbio = push_bio(nbio);
47637bff 1591 nbio->bio_offset = volume->ondisk->vol_buf_beg +
0832c9bb 1592 zone2_offset;
ce0138a6 1593 hammer_stats_disk_write += bp->b_bufsize;
6362a262 1594 hammer_ip_replace_bulk(hmp, record);
47637bff 1595 vn_strategy(volume->devvp, nbio);
748efb59 1596 hammer_io_flush_mark(volume);
47637bff
MD
1597 }
1598 hammer_rel_volume(volume, 0);
0832c9bb 1599 } else {
1b0ab2c3
MD
1600 /*
1601 * Must fit in a standard HAMMER buffer. In this case all
77912481 1602 * consumers use the HAMMER buffer system and RECG_DIRECT_IO
1b0ab2c3
MD
1603 * does not need to be set-up.
1604 */
0832c9bb
MD
1605 KKASSERT(((buf_offset ^ (buf_offset + leaf->data_len - 1)) & ~HAMMER_BUFMASK64) == 0);
1606 buffer = NULL;
1607 ptr = hammer_bread(hmp, buf_offset, &error, &buffer);
1608 if (error == 0) {
0832c9bb 1609 bp = bio->bio_buf;
7bc5b8c2 1610 bp->b_flags |= B_AGE;
0832c9bb
MD
1611 hammer_io_modify(&buffer->io, 1);
1612 bcopy(bp->b_data, ptr, leaf->data_len);
1613 hammer_io_modify_done(&buffer->io);
7bc5b8c2 1614 hammer_rel_buffer(buffer, 0);
0832c9bb 1615 bp->b_resid = 0;
6362a262 1616 hammer_ip_replace_bulk(hmp, record);
0832c9bb
MD
1617 biodone(bio);
1618 }
47637bff 1619 }
6362a262 1620 if (error) {
		/*
		 * Major suckage occurred.  Also note: The record was
		 * never added to the tree so we do not have to worry
		 * about the backend.
		 */
cebe9493 1626 kprintf("hammer_direct_write: failed @ %016llx\n",
973c11b9 1627 (long long)leaf->data_offset);
47637bff
MD
1628 bp = bio->bio_buf;
1629 bp->b_resid = 0;
1630 bp->b_error = EIO;
1631 bp->b_flags |= B_ERROR;
1632 biodone(bio);
e469566b
MD
1633 record->flags |= HAMMER_RECF_DELETED_FE;
1634 hammer_rel_mem_record(record);
47637bff
MD
1635 }
1636 return(error);
1637}
1638
43c665ae 1639/*
1b0ab2c3
MD
1640 * On completion of the BIO this callback must disconnect
1641 * it from the hammer_record and chain to the previous bio.
cdb6e4e6
MD
1642 *
1643 * An I/O error forces the mount to read-only. Data buffers
1644 * are not B_LOCKED like meta-data buffers are, so we have to
1645 * throw the buffer away to prevent the kernel from retrying.
77912481
MD
1646 *
1647 * NOTE: MPSAFE callback, only modify fields we have explicit
1648 * access to (the bp and the record->gflags).
1b0ab2c3
MD
1649 */
1650static
1651void
1652hammer_io_direct_write_complete(struct bio *nbio)
1653{
1654 struct bio *obio;
e469566b 1655 struct buf *bp;
b0aab9b9
MD
1656 hammer_record_t record;
1657 hammer_mount_t hmp;
1658
1659 record = nbio->bio_caller_info1.ptr;
1660 KKASSERT(record != NULL);
1661 hmp = record->ip->hmp;
1662
1663 lwkt_gettoken(&hmp->io_token);
1b0ab2c3 1664
e469566b 1665 bp = nbio->bio_buf;
1b0ab2c3 1666 obio = pop_bio(nbio);
e469566b 1667 if (bp->b_flags & B_ERROR) {
77912481 1668 lwkt_gettoken(&hmp->fs_token);
b0aab9b9 1669 hammer_critical_error(hmp, record->ip,
e469566b 1670 bp->b_error,
cdb6e4e6 1671 "while writing bulk data");
77912481 1672 lwkt_reltoken(&hmp->fs_token);
e469566b 1673 bp->b_flags |= B_INVAL;
cdb6e4e6 1674 }
1b0ab2c3 1675 biodone(obio);
e469566b 1676
77912481
MD
1677 KKASSERT(record->gflags & HAMMER_RECG_DIRECT_IO);
1678 if (record->gflags & HAMMER_RECG_DIRECT_WAIT) {
1679 record->gflags &= ~(HAMMER_RECG_DIRECT_IO |
1680 HAMMER_RECG_DIRECT_WAIT);
de996e86 1681 /* record can disappear once DIRECT_IO flag is cleared */
1b0ab2c3 1682 wakeup(&record->flags);
de996e86 1683 } else {
77912481 1684 record->gflags &= ~HAMMER_RECG_DIRECT_IO;
de996e86 1685 /* record can disappear once DIRECT_IO flag is cleared */
1b0ab2c3 1686 }
b0aab9b9 1687 lwkt_reltoken(&hmp->io_token);
1b0ab2c3
MD
1688}
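
/*
 * Illustrative sketch: how the bio stack built by hammer_io_direct_write()
 * unwinds in the completion callback above.  This is a condensed fragment,
 * not compilable as-is; the variables are the ones used in the two
 * functions, and raw_offset stands in for the volume-relative offset
 * computed on the issue side.
 */
#if 0
	/* issue side (hammer_io_direct_write()) */
	nbio = push_bio(bio);			/* level 2: zone-2 offset */
	nbio->bio_offset = zone2_offset;
	nbio->bio_done = hammer_io_direct_write_complete;
	nbio = push_bio(nbio);			/* level 3: raw volume offset */
	nbio->bio_offset = raw_offset;		/* vol_buf_beg + short offset */
	vn_strategy(volume->devvp, nbio);

	/* completion side: nbio here is the level-2 bio carrying bio_done */
	obio = pop_bio(nbio);			/* back to the frontend's bio */
	biodone(obio);
#endif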
1689
1690
1691/*
1692 * This is called before a record is either committed to the B-Tree
e469566b 1693 * or destroyed, to resolve any associated direct-IO.
1b0ab2c3 1694 *
e469566b
MD
1695 * (1) We must wait for any direct-IO related to the record to complete.
1696 *
1697 * (2) We must remove any buffer cache aliases for data accessed via
1698 * leaf->data_offset or zone2_offset so non-direct-IO consumers
1699 * (the mirroring and reblocking code) do not see stale data.
1b0ab2c3
MD
1700 */
1701void
1702hammer_io_direct_wait(hammer_record_t record)
1703{
b0aab9b9
MD
1704 hammer_mount_t hmp = record->ip->hmp;
1705
e469566b
MD
1706 /*
1707 * Wait for I/O to complete
1708 */
77912481 1709 if (record->gflags & HAMMER_RECG_DIRECT_IO) {
b0aab9b9 1710 lwkt_gettoken(&hmp->io_token);
77912481
MD
1711 while (record->gflags & HAMMER_RECG_DIRECT_IO) {
1712 record->gflags |= HAMMER_RECG_DIRECT_WAIT;
e469566b
MD
1713 tsleep(&record->flags, 0, "hmdiow", 0);
1714 }
b0aab9b9 1715 lwkt_reltoken(&hmp->io_token);
e469566b
MD
1716 }
1717
1718 /*
362ec2dc
MD
1719 * Invalidate any related buffer cache aliases associated with the
1720 * backing device. This is needed because the buffer cache buffer
1721 * for file data is associated with the file vnode, not the backing
1722 * device vnode.
1723 *
1724 * XXX I do not think this case can occur any more now that
1725 * reservations ensure that all such buffers are removed before
1726 * an area can be reused.
e469566b 1727 */
77912481 1728 if (record->gflags & HAMMER_RECG_DIRECT_INVAL) {
e469566b 1729 KKASSERT(record->leaf.data_offset);
b0aab9b9 1730 hammer_del_buffers(hmp, record->leaf.data_offset,
362ec2dc
MD
1731 record->zone2_offset, record->leaf.data_len,
1732 1);
77912481 1733 record->gflags &= ~HAMMER_RECG_DIRECT_INVAL;
1b0ab2c3 1734 }
1b0ab2c3
MD
1735}
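
/*
 * Illustrative sketch: the DIRECT_IO / DIRECT_WAIT interlock between
 * hammer_io_direct_wait() and hammer_io_direct_write_complete(), shown
 * side by side.  Both sides run under hmp->io_token.  Note that the
 * wait channel is &record->flags even though the bits being tested live
 * in record->gflags; the address is only used as a sleep/wakeup token.
 */
#if 0
	/* waiter (hammer_io_direct_wait()) */
	while (record->gflags & HAMMER_RECG_DIRECT_IO) {
		record->gflags |= HAMMER_RECG_DIRECT_WAIT;
		tsleep(&record->flags, 0, "hmdiow", 0);
	}

	/* completion (hammer_io_direct_write_complete()) */
	if (record->gflags & HAMMER_RECG_DIRECT_WAIT) {
		record->gflags &= ~(HAMMER_RECG_DIRECT_IO |
				    HAMMER_RECG_DIRECT_WAIT);
		wakeup(&record->flags);		/* record may go away now */
	} else {
		record->gflags &= ~HAMMER_RECG_DIRECT_IO;
	}
#endif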
1736
1737/*
43c665ae
MD
1738 * This is called to remove the second-level cached zone-2 offset from
1739 * frontend buffer cache buffers, now stale due to a data relocation.
1740 * These offsets are generated by cluster_read() via VOP_BMAP, or directly
1741 * by hammer_vop_strategy_read().
1742 *
1743 * This is rather nasty because here we have something like the reblocker
1744 * scanning the raw B-Tree with no held references on anything, really,
1745 * other than a shared lock on the B-Tree node, and we have to access the
1746 * frontend's buffer cache to check for and clean out the association.
1747 * Specifically, if the reblocker is moving data on the disk, these cached
1748 * offsets will become invalid.
1749 *
1750 * Only data record types associated with the large-data zone are subject
1751 * to direct-io and need to be checked.
1752 *
1753 */
1754void
1755hammer_io_direct_uncache(hammer_mount_t hmp, hammer_btree_leaf_elm_t leaf)
1756{
1757 struct hammer_inode_info iinfo;
1758 int zone;
1759
1760 if (leaf->base.rec_type != HAMMER_RECTYPE_DATA)
1761 return;
1762 zone = HAMMER_ZONE_DECODE(leaf->data_offset);
1763 if (zone != HAMMER_ZONE_LARGE_DATA_INDEX)
1764 return;
1765 iinfo.obj_id = leaf->base.obj_id;
1766 iinfo.obj_asof = 0; /* unused */
1767 iinfo.obj_localization = leaf->base.localization &
5a930e66 1768 HAMMER_LOCALIZE_PSEUDOFS_MASK;
43c665ae
MD
1769 iinfo.u.leaf = leaf;
1770 hammer_scan_inode_snapshots(hmp, &iinfo,
1771 hammer_io_direct_uncache_callback,
1772 leaf);
1773}
1774
1775static int
1776hammer_io_direct_uncache_callback(hammer_inode_t ip, void *data)
1777{
1778 hammer_inode_info_t iinfo = data;
1779 hammer_off_t data_offset;
1780 hammer_off_t file_offset;
1781 struct vnode *vp;
1782 struct buf *bp;
1783 int blksize;
1784
1785 if (ip->vp == NULL)
1786 return(0);
1787 data_offset = iinfo->u.leaf->data_offset;
1788 file_offset = iinfo->u.leaf->base.key - iinfo->u.leaf->data_len;
1789 blksize = iinfo->u.leaf->data_len;
1790 KKASSERT((blksize & HAMMER_BUFMASK) == 0);
1791
9c90dba2
MD
1792 /*
1793 * Warning: FINDBLK_TEST returns stable storage but not stable
1794 * contents. It happens to be ok in this case.
1795 */
43c665ae
MD
1796 hammer_ref(&ip->lock);
1797 if (hammer_get_vnode(ip, &vp) == 0) {
b1c20cfa 1798 if ((bp = findblk(ip->vp, file_offset, FINDBLK_TEST)) != NULL &&
43c665ae
MD
1799 bp->b_bio2.bio_offset != NOOFFSET) {
1800 bp = getblk(ip->vp, file_offset, blksize, 0, 0);
1801 bp->b_bio2.bio_offset = NOOFFSET;
1802 brelse(bp);
1803 }
1804 vput(vp);
1805 }
1806 hammer_rel_inode(ip, 0);
1807 return(0);
1808}
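
/*
 * Illustrative sketch (simplified and hypothetical, not the actual
 * hammer_vop_strategy_read() logic): why resetting b_bio2.bio_offset to
 * NOOFFSET in the callback above is sufficient.  A frontend read only
 * short-circuits to the device when the cached zone-2 translation is
 * valid; once it is cleared the next read must re-resolve the offset
 * through the B-Tree / VOP_BMAP path and cannot hit the stale location.
 */
#if 0
	if (bp->b_bio2.bio_offset != NOOFFSET) {
		/* cached translation still valid -> direct device read */
		start_direct_read(bp);		/* hypothetical helper */
	} else {
		/* translation stale or unknown -> look it up again */
		resolve_offset_and_read(bp);	/* hypothetical helper */
	}
#endif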
47637bff 1809
748efb59
MD
1810
1811/*
1812 * This function is called when writes may have occurred on the volume,
1813 * indicating that the device may be holding cached writes.
1814 */
1815static void
1816hammer_io_flush_mark(hammer_volume_t volume)
1817{
77912481 1818 atomic_set_int(&volume->vol_flags, HAMMER_VOLF_NEEDFLUSH);
748efb59
MD
1819}
1820
1821/*
1822 * This function ensures that the device has flushed out any cached writes.
1823 */
1824void
1825hammer_io_flush_sync(hammer_mount_t hmp)
1826{
1827 hammer_volume_t volume;
1828 struct buf *bp_base = NULL;
1829 struct buf *bp;
1830
1831 RB_FOREACH(volume, hammer_vol_rb_tree, &hmp->rb_vols_root) {
1832 if (volume->vol_flags & HAMMER_VOLF_NEEDFLUSH) {
77912481
MD
1833 atomic_clear_int(&volume->vol_flags,
1834 HAMMER_VOLF_NEEDFLUSH);
748efb59
MD
1835 bp = getpbuf(NULL);
1836 bp->b_bio1.bio_offset = 0;
1837 bp->b_bufsize = 0;
1838 bp->b_bcount = 0;
1839 bp->b_cmd = BUF_CMD_FLUSH;
1840 bp->b_bio1.bio_caller_info1.cluster_head = bp_base;
ae8e83e6
MD
1841 bp->b_bio1.bio_done = biodone_sync;
1842 bp->b_bio1.bio_flags |= BIO_SYNC;
748efb59
MD
1843 bp_base = bp;
1844 vn_strategy(volume->devvp, &bp->b_bio1);
1845 }
1846 }
1847 while ((bp = bp_base) != NULL) {
1848 bp_base = bp->b_bio1.bio_caller_info1.cluster_head;
ae8e83e6 1849 biowait(&bp->b_bio1, "hmrFLS");
748efb59
MD
1850 relpbuf(bp, NULL);
1851 }
1852}
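
/*
 * Illustrative sketch: the same BUF_CMD_FLUSH request issued
 * synchronously to a single device.  hammer_io_flush_sync() above
 * instead chains one pbuf per volume through
 * bio_caller_info1.cluster_head so every device starts flushing before
 * any of them is waited on.
 */
#if 0
	bp = getpbuf(NULL);
	bp->b_bio1.bio_offset = 0;
	bp->b_bufsize = 0;
	bp->b_bcount = 0;
	bp->b_cmd = BUF_CMD_FLUSH;
	bp->b_bio1.bio_done = biodone_sync;
	bp->b_bio1.bio_flags |= BIO_SYNC;
	vn_strategy(volume->devvp, &bp->b_bio1);
	biowait(&bp->b_bio1, "hmrFLS");
	relpbuf(bp, NULL);
#endif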
ba298df1
MD
1853
1854/*
1855 * Limit the amount of backlog which we allow to build up
1856 */
1857void
1858hammer_io_limit_backlog(hammer_mount_t hmp)
1859{
1860 while (hmp->io_running_space > hammer_limit_running_io) {
1861 hmp->io_running_wakeup = 1;
1862 tsleep(&hmp->io_running_wakeup, 0, "hmiolm", hz / 10);
1863 }
1864}
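
/*
 * Illustrative sketch (an assumption, not verified against the rest of
 * this file): the counterpart wakeup is expected wherever
 * io_running_space is decremented on I/O completion.  The tsleep()
 * above is also bounded to hz / 10, so a missed wakeup only delays the
 * writer briefly rather than hanging it.
 */
#if 0
	hmp->io_running_space -= bytes;		/* 'bytes' is hypothetical */
	if (hmp->io_running_wakeup &&
	    hmp->io_running_space < hammer_limit_running_io) {
		hmp->io_running_wakeup = 0;
		wakeup(&hmp->io_running_wakeup);
	}
#endif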