[dragonfly.git] / sys / vfs / hammer / hammer_io.c
/*
 * Copyright (c) 2007-2008 The DragonFly Project. All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_io.c,v 1.55 2008/09/15 17:02:49 dillon Exp $
 */
/*
 * IO Primitives and buffer cache management
 *
 * All major data-tracking structures in HAMMER contain a struct hammer_io
 * which is used to manage their backing store. We use filesystem buffers
 * for backing store and we leave them passively associated with their
 * HAMMER structures.
 *
 * If the kernel tries to destroy a passively associated buf which we cannot
 * yet let go we set B_LOCKED in the buffer and then actively release it
 * later when we can.
 */

#include "hammer.h"
#include <sys/fcntl.h>
#include <sys/nlookup.h>
#include <sys/buf.h>
#include <sys/buf2.h>

static void hammer_io_modify(hammer_io_t io, int count);
static void hammer_io_deallocate(struct buf *bp);
#if 0
static void hammer_io_direct_read_complete(struct bio *nbio);
#endif
static void hammer_io_direct_write_complete(struct bio *nbio);
static int hammer_io_direct_uncache_callback(hammer_inode_t ip, void *data);
static void hammer_io_set_modlist(struct hammer_io *io);
static void hammer_io_flush_mark(hammer_volume_t volume);

/*
 * Initialize a new, already-zero'd hammer_io structure, or reinitialize
 * an existing hammer_io structure which may have switched to another type.
 */
void
hammer_io_init(hammer_io_t io, hammer_volume_t volume, enum hammer_io_type type)
{
	io->volume = volume;
	io->hmp = volume->io.hmp;
	io->type = type;
}

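/*
 * A typical caller embeds the hammer_io in a larger structure such as a
 * hammer_buffer or hammer_volume and, after zeroing that structure, does
 * something along the lines of:
 *
 *	hammer_io_init(&buffer->io, volume, HAMMER_STRUCTURE_DATA_BUFFER);
 *
 * (Illustrative sketch only; the actual call sites live elsewhere in the
 * HAMMER code.)
 */
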
/*
 * Determine if an io can be clustered for the storage cdev. We have to
 * be careful to avoid creating overlapping buffers.
 *
 * (1) Any clustering is limited to within a largeblock, since going into
 *     an adjacent largeblock will change the zone.
 *
 * (2) The large-data zone can contain mixed buffer sizes. Other zones
 *     contain only HAMMER_BUFSIZE sized buffers (16K).
 */
static int
hammer_io_clusterable(hammer_io_t io, hammer_off_t *limitp)
{
	hammer_buffer_t buffer;
	hammer_off_t eoz;

	/*
	 * Can't cluster non hammer_buffer_t's
	 */
	if (io->type != HAMMER_STRUCTURE_DATA_BUFFER &&
	    io->type != HAMMER_STRUCTURE_META_BUFFER &&
	    io->type != HAMMER_STRUCTURE_UNDO_BUFFER) {
		return(0);
	}

	/*
	 * We cannot cluster the large-data zone. This primarily targets
	 * the reblocker. The normal file handling code will still cluster
	 * file reads via file vnodes.
	 */
	buffer = (void *)io;
	if ((buffer->zoneX_offset & HAMMER_OFF_ZONE_MASK) ==
	    HAMMER_ZONE_LARGE_DATA) {
		return(0);
	}

	/*
	 * Do not allow the cluster operation to cross a largeblock
	 * boundary.
	 */
	eoz = (io->offset + HAMMER_LARGEBLOCK_SIZE64 - 1) &
	      ~HAMMER_LARGEBLOCK_MASK64;
	if (*limitp > eoz)
		*limitp = eoz;
	return(1);
}

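/*
 * (hammer_io_read() below consults hammer_io_clusterable() before issuing
 * a cluster_read() against the storage cdev, and falls back to a plain
 * bread() when clustering is not safe.)
 */
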
/*
 * Helper routine to disassociate a buffer cache buffer from an I/O
 * structure. The buffer is unlocked and marked appropriate for reclamation.
 *
 * The io may have 0 or 1 references depending on who called us. The
 * caller is responsible for dealing with the refs.
 *
 * This call can only be made when no action is required on the buffer.
 *
 * The caller must own the buffer and the IO must indicate that the
 * structure no longer owns it (io.released != 0).
 */
static void
hammer_io_disassociate(hammer_io_structure_t iou)
{
	struct buf *bp = iou->io.bp;

	KKASSERT(iou->io.released);
	KKASSERT(iou->io.modified == 0);
	KKASSERT(LIST_FIRST(&bp->b_dep) == (void *)iou);
	buf_dep_init(bp);
	iou->io.bp = NULL;

	/*
	 * If the buffer was locked someone wanted to get rid of it.
	 */
	if (bp->b_flags & B_LOCKED) {
		--hammer_count_io_locked;
		bp->b_flags &= ~B_LOCKED;
	}
	if (iou->io.reclaim) {
		bp->b_flags |= B_NOCACHE|B_RELBUF;
		iou->io.reclaim = 0;
	}

	switch(iou->io.type) {
	case HAMMER_STRUCTURE_VOLUME:
		iou->volume.ondisk = NULL;
		break;
	case HAMMER_STRUCTURE_DATA_BUFFER:
	case HAMMER_STRUCTURE_META_BUFFER:
	case HAMMER_STRUCTURE_UNDO_BUFFER:
		iou->buffer.ondisk = NULL;
		break;
	case HAMMER_STRUCTURE_DUMMY:
		panic("hammer_io_disassociate: bad io type");
		break;
	}
}

/*
 * Wait for any physical IO to complete
 *
 * XXX we aren't interlocked against a spinlock or anything so there
 * is a small window in the interlock / io->running == 0 test.
 */
void
hammer_io_wait(hammer_io_t io)
{
	if (io->running) {
		for (;;) {
			io->waiting = 1;
			tsleep_interlock(io, 0);
			if (io->running == 0)
				break;
			tsleep(io, PINTERLOCKED, "hmrflw", hz);
			if (io->running == 0)
				break;
		}
	}
}
196
af209b0f 197/*
eddadaee
MD
198 * Wait for all currently queued HAMMER-initiated I/Os to complete.
199 *
200 * This is not supposed to count direct I/O's but some can leak
201 * through (for non-full-sized direct I/Os).
af209b0f
MD
202 */
203void
eddadaee 204hammer_io_wait_all(hammer_mount_t hmp, const char *ident, int doflush)
af209b0f 205{
eddadaee
MD
206 struct hammer_io iodummy;
207 hammer_io_t io;
208
209 /*
210 * Degenerate case, no I/O is running
211 */
af209b0f 212 crit_enter();
eddadaee
MD
213 if (TAILQ_EMPTY(&hmp->iorun_list)) {
214 crit_exit();
215 if (doflush)
216 hammer_io_flush_sync(hmp);
217 return;
218 }
219 bzero(&iodummy, sizeof(iodummy));
220 iodummy.type = HAMMER_STRUCTURE_DUMMY;
221
222 /*
223 * Add placemarker and then wait until it becomes the head of
224 * the list.
225 */
226 TAILQ_INSERT_TAIL(&hmp->iorun_list, &iodummy, iorun_entry);
227 while (TAILQ_FIRST(&hmp->iorun_list) != &iodummy) {
228 tsleep(&iodummy, 0, ident, 0);
229 }
230
231 /*
232 * Chain in case several placemarkers are present.
233 */
234 TAILQ_REMOVE(&hmp->iorun_list, &iodummy, iorun_entry);
235 io = TAILQ_FIRST(&hmp->iorun_list);
236 if (io && io->type == HAMMER_STRUCTURE_DUMMY)
237 wakeup(io);
af209b0f 238 crit_exit();
eddadaee
MD
239
240 if (doflush)
241 hammer_io_flush_sync(hmp);
af209b0f
MD
242}
243
2faf0737
MD
244/*
 * Clear a flagged error condition on an I/O buffer. The caller must hold
246 * its own ref on the buffer.
247 */
248void
249hammer_io_clear_error(struct hammer_io *io)
250{
251 if (io->ioerror) {
252 io->ioerror = 0;
250aec18
MD
253 hammer_rel(&io->lock);
254 KKASSERT(hammer_isactive(&io->lock));
2faf0737
MD
255 }
256}
257
b8a41159
MD
258/*
259 * This is an advisory function only which tells the buffer cache
260 * the bp is not a meta-data buffer, even though it is backed by
261 * a block device.
262 *
263 * This is used by HAMMER's reblocking code to avoid trying to
264 * swapcache the filesystem's data when it is read or written
265 * by the reblocking code.
266 */
267void
268hammer_io_notmeta(hammer_buffer_t buffer)
269{
270 buffer->io.bp->b_flags |= B_NOTMETA;
271}
272
2faf0737 273
2f85fa4d
MD
274#define HAMMER_MAXRA 4
275
66325755 276/*
10a5d1ba
MD
277 * Load bp for a HAMMER structure. The io must be exclusively locked by
278 * the caller.
2f85fa4d 279 *
a99b9ea2
MD
280 * This routine is mostly used on meta-data and small-data blocks. Generally
281 * speaking HAMMER assumes some locality of reference and will cluster
282 * a 64K read.
af209b0f 283 *
364c022c
MD
284 * Note that the clustering which occurs here is clustering within the
285 * block device... typically meta-data and small-file data. Regular
286 * file clustering is different and handled in hammer_vnops.c
66325755
MD
287 */
288int
2f85fa4d 289hammer_io_read(struct vnode *devvp, struct hammer_io *io, hammer_off_t limit)
66325755
MD
290{
291 struct buf *bp;
2f85fa4d 292 int error;
66325755
MD
293
294 if ((bp = io->bp) == NULL) {
		hammer_count_io_running_read += io->bytes;
		if (hammer_cluster_enable &&
		    hammer_io_clusterable(io, &limit)) {
			error = cluster_read(devvp, limit,
					     io->offset, io->bytes,
					     HAMMER_CLUSTER_SIZE,
					     HAMMER_CLUSTER_SIZE,
					     &io->bp);
		} else {
			error = bread(devvp, io->offset, io->bytes, &io->bp);
		}
306 hammer_stats_disk_read += io->bytes;
f5a07a7a 307 hammer_count_io_running_read -= io->bytes;
cdb6e4e6
MD
308
309 /*
310 * The code generally assumes b_ops/b_dep has been set-up,
311 * even if we error out here.
312 */
313 bp = io->bp;
24c8374a
MD
314 if ((hammer_debug_io & 0x0001) && (bp->b_flags & B_IODEBUG)) {
315 const char *metatype;
316
317 switch(io->type) {
318 case HAMMER_STRUCTURE_VOLUME:
319 metatype = "volume";
320 break;
321 case HAMMER_STRUCTURE_META_BUFFER:
322 switch(((struct hammer_buffer *)io)->
323 zoneX_offset & HAMMER_OFF_ZONE_MASK) {
324 case HAMMER_ZONE_BTREE:
325 metatype = "btree";
326 break;
327 case HAMMER_ZONE_META:
328 metatype = "meta";
329 break;
330 case HAMMER_ZONE_FREEMAP:
331 metatype = "freemap";
332 break;
333 default:
334 metatype = "meta?";
335 break;
336 }
337 break;
338 case HAMMER_STRUCTURE_DATA_BUFFER:
339 metatype = "data";
340 break;
341 case HAMMER_STRUCTURE_UNDO_BUFFER:
342 metatype = "undo";
343 break;
344 default:
345 metatype = "unknown";
346 break;
347 }
348 kprintf("doff %016jx %s\n",
349 (intmax_t)bp->b_bio2.bio_offset,
350 metatype);
351 }
352 bp->b_flags &= ~B_IODEBUG;
cdb6e4e6
MD
353 bp->b_ops = &hammer_bioops;
354 KKASSERT(LIST_FIRST(&bp->b_dep) == NULL);
355 LIST_INSERT_HEAD(&bp->b_dep, &io->worklist, node);
356 BUF_KERNPROC(bp);
10a5d1ba
MD
357 KKASSERT(io->modified == 0);
358 KKASSERT(io->running == 0);
359 KKASSERT(io->waiting == 0);
66325755
MD
360 io->released = 0; /* we hold an active lock on bp */
361 } else {
362 error = 0;
363 }
364 return(error);
365}
366
367/*
368 * Similar to hammer_io_read() but returns a zero'd out buffer instead.
10a5d1ba
MD
369 * Must be called with the IO exclusively locked.
370 *
66325755 371 * vfs_bio_clrbuf() is kinda nasty, enforce serialization against background
10a5d1ba
MD
372 * I/O by forcing the buffer to not be in a released state before calling
373 * it.
055f5ff8 374 *
10a5d1ba
MD
375 * This function will also mark the IO as modified but it will not
376 * increment the modify_refs count.
66325755
MD
377 */
378int
379hammer_io_new(struct vnode *devvp, struct hammer_io *io)
380{
381 struct buf *bp;
382
383 if ((bp = io->bp) == NULL) {
4a2796f3 384 io->bp = getblk(devvp, io->offset, io->bytes, 0, 0);
66325755
MD
385 bp = io->bp;
386 bp->b_ops = &hammer_bioops;
af209b0f 387 KKASSERT(LIST_FIRST(&bp->b_dep) == NULL);
66325755 388 LIST_INSERT_HEAD(&bp->b_dep, &io->worklist, node);
055f5ff8 389 io->released = 0;
10a5d1ba 390 KKASSERT(io->running == 0);
055f5ff8 391 io->waiting = 0;
66325755
MD
392 BUF_KERNPROC(bp);
393 } else {
394 if (io->released) {
395 regetblk(bp);
66325755 396 BUF_KERNPROC(bp);
d113fda1 397 io->released = 0;
66325755
MD
398 }
399 }
10a5d1ba 400 hammer_io_modify(io, 0);
66325755
MD
401 vfs_bio_clrbuf(bp);
402 return(0);
403}
404
0e8bd897
MD
405/*
406 * Advance the activity count on the underlying buffer because
407 * HAMMER does not getblk/brelse on every access.
408 */
409void
410hammer_io_advance(struct hammer_io *io)
411{
412 if (io->bp)
413 buf_act_advance(io->bp);
414}
415
47637bff
MD
416/*
417 * Remove potential device level aliases against buffers managed by high level
362ec2dc
MD
418 * vnodes. Aliases can also be created due to mixed buffer sizes or via
419 * direct access to the backing store device.
e469566b
MD
420 *
421 * This is nasty because the buffers are also VMIO-backed. Even if a buffer
422 * does not exist its backing VM pages might, and we have to invalidate
423 * those as well or a getblk() will reinstate them.
362ec2dc
MD
424 *
425 * Buffer cache buffers associated with hammer_buffers cannot be
426 * invalidated.
47637bff 427 */
362ec2dc 428int
47637bff
MD
429hammer_io_inval(hammer_volume_t volume, hammer_off_t zone2_offset)
430{
cebe9493 431 hammer_io_structure_t iou;
47637bff
MD
432 hammer_off_t phys_offset;
433 struct buf *bp;
362ec2dc 434 int error;
47637bff
MD
435
436 phys_offset = volume->ondisk->vol_buf_beg +
437 (zone2_offset & HAMMER_OFF_SHORT_MASK);
4a2796f3 438 crit_enter();
b1c20cfa 439 if ((bp = findblk(volume->devvp, phys_offset, FINDBLK_TEST)) != NULL)
4a2796f3 440 bp = getblk(volume->devvp, phys_offset, bp->b_bufsize, 0, 0);
e469566b
MD
441 else
442 bp = getblk(volume->devvp, phys_offset, HAMMER_BUFSIZE, 0, 0);
443 if ((iou = (void *)LIST_FIRST(&bp->b_dep)) != NULL) {
362ec2dc 444#if 0
5c8d05e2 445 hammer_ref(&iou->io.lock);
e469566b
MD
446 hammer_io_clear_modify(&iou->io, 1);
447 bundirty(bp);
e83ca595
MD
448 iou->io.released = 0;
449 BUF_KERNPROC(bp);
e469566b 450 iou->io.reclaim = 1;
5c8d05e2 451 iou->io.waitdep = 1;
250aec18 452 KKASSERT(hammer_isactive(&iou->io.lock) == 1);
5c8d05e2
MD
453 hammer_rel_buffer(&iou->buffer, 0);
454 /*hammer_io_deallocate(bp);*/
362ec2dc 455#endif
04b04ca6 456 bqrelse(bp);
362ec2dc 457 error = EAGAIN;
e469566b
MD
458 } else {
459 KKASSERT((bp->b_flags & B_LOCKED) == 0);
460 bundirty(bp);
461 bp->b_flags |= B_NOCACHE|B_RELBUF;
e83ca595 462 brelse(bp);
362ec2dc 463 error = 0;
47637bff 464 }
4a2796f3 465 crit_exit();
362ec2dc 466 return(error);
47637bff
MD
467}
468
fbc6e32a 469/*
b3deaf57 470 * This routine is called on the last reference to a hammer structure.
250aec18
MD
471 * The io must be interlocked with a refcount of zero. The hammer structure
472 * will remain interlocked on return.
b3deaf57 473 *
 * This routine may return a non-NULL bp to the caller for disposal.
475 * The caller typically brelse()'s the bp.
476 *
477 * The bp may or may not still be passively associated with the IO. It
478 * will remain passively associated if it is unreleasable (e.g. a modified
479 * meta-data buffer).
ecca949a
MD
480 *
481 * The only requirement here is that modified meta-data and volume-header
482 * buffer may NOT be disassociated from the IO structure, and consequently
483 * we also leave such buffers actively associated with the IO if they already
484 * are (since the kernel can't do anything with them anyway). Only the
485 * flusher is allowed to write such buffers out. Modified pure-data and
486 * undo buffers are returned to the kernel but left passively associated
487 * so we can track when the kernel writes the bp out.
66325755 488 */
ecca949a 489struct buf *
09ac686b 490hammer_io_release(struct hammer_io *io, int flush)
66325755 491{
9f5097dc 492 union hammer_io_structure *iou = (void *)io;
66325755
MD
493 struct buf *bp;
494
055f5ff8 495 if ((bp = io->bp) == NULL)
ecca949a 496 return(NULL);
fbc6e32a 497
055f5ff8 498 /*
10a5d1ba
MD
499 * Try to flush a dirty IO to disk if asked to by the
500 * caller or if the kernel tried to flush the buffer in the past.
055f5ff8 501 *
10a5d1ba
MD
502 * Kernel-initiated flushes are only allowed for pure-data buffers.
503 * meta-data and volume buffers can only be flushed explicitly
504 * by HAMMER.
055f5ff8 505 */
10a5d1ba 506 if (io->modified) {
09ac686b 507 if (flush) {
710733a6 508 hammer_io_flush(io, 0);
10a5d1ba
MD
509 } else if (bp->b_flags & B_LOCKED) {
510 switch(io->type) {
511 case HAMMER_STRUCTURE_DATA_BUFFER:
710733a6
MD
512 hammer_io_flush(io, 0);
513 break;
10a5d1ba 514 case HAMMER_STRUCTURE_UNDO_BUFFER:
710733a6 515 hammer_io_flush(io, hammer_undo_reclaim(io));
10a5d1ba
MD
516 break;
517 default:
518 break;
519 }
520 } /* else no explicit request to flush the buffer */
521 }
fbc6e32a 522
055f5ff8 523 /*
5c8d05e2
MD
524 * Wait for the IO to complete if asked to. This occurs when
525 * the buffer must be disposed of definitively during an umount
526 * or buffer invalidation.
055f5ff8 527 */
b58c6388 528 if (io->waitdep && io->running) {
055f5ff8
MD
529 hammer_io_wait(io);
530 }
531
532 /*
 * Return control of the buffer to the kernel (with the proviso
534 * that our bioops can override kernel decisions with regards to
535 * the buffer).
055f5ff8 536 */
cebe9493 537 if ((flush || io->reclaim) && io->modified == 0 && io->running == 0) {
10a5d1ba
MD
538 /*
539 * Always disassociate the bp if an explicit flush
540 * was requested and the IO completed with no error
541 * (so unmount can really clean up the structure).
542 */
055f5ff8 543 if (io->released) {
b3deaf57 544 regetblk(bp);
46fe7ae1 545 BUF_KERNPROC(bp);
ecca949a
MD
546 } else {
547 io->released = 1;
055f5ff8 548 }
ecca949a
MD
549 hammer_io_disassociate((hammer_io_structure_t)io);
550 /* return the bp */
055f5ff8 551 } else if (io->modified) {
10a5d1ba 552 /*
ecca949a
MD
553 * Only certain IO types can be released to the kernel if
554 * the buffer has been modified.
555 *
556 * volume and meta-data IO types may only be explicitly
557 * flushed by HAMMER.
10a5d1ba
MD
558 */
559 switch(io->type) {
560 case HAMMER_STRUCTURE_DATA_BUFFER:
561 case HAMMER_STRUCTURE_UNDO_BUFFER:
562 if (io->released == 0) {
563 io->released = 1;
564 bdwrite(bp);
565 }
566 break;
567 default:
568 break;
055f5ff8 569 }
ecca949a 570 bp = NULL; /* bp left associated */
055f5ff8 571 } else if (io->released == 0) {
10a5d1ba
MD
572 /*
573 * Clean buffers can be generally released to the kernel.
574 * We leave the bp passively associated with the HAMMER
575 * structure and use bioops to disconnect it later on
576 * if the kernel wants to discard the buffer.
ecca949a
MD
577 *
578 * We can steal the structure's ownership of the bp.
10a5d1ba 579 */
ecca949a 580 io->released = 1;
9f5097dc 581 if (bp->b_flags & B_LOCKED) {
ecca949a
MD
582 hammer_io_disassociate(iou);
583 /* return the bp */
9f5097dc 584 } else {
cebe9493 585 if (io->reclaim) {
ecca949a
MD
586 hammer_io_disassociate(iou);
587 /* return the bp */
cebe9493 588 } else {
ecca949a 589 /* return the bp (bp passively associated) */
cebe9493 590 }
9f5097dc 591 }
19b97e01
MD
592 } else {
593 /*
af209b0f
MD
 * A released buffer is passively associated with our
595 * hammer_io structure. The kernel cannot destroy it
596 * without making a bioops call. If the kernel (B_LOCKED)
597 * or we (reclaim) requested that the buffer be destroyed
598 * we destroy it, otherwise we do a quick get/release to
599 * reset its position in the kernel's LRU list.
600 *
601 * Leaving the buffer passively associated allows us to
602 * use the kernel's LRU buffer flushing mechanisms rather
 * than rolling our own.
cb51be26
MD
604 *
605 * XXX there are two ways of doing this. We can re-acquire
606 * and passively release to reset the LRU, or not.
19b97e01 607 */
af209b0f 608 if (io->running == 0) {
19b97e01 609 regetblk(bp);
cebe9493 610 if ((bp->b_flags & B_LOCKED) || io->reclaim) {
ecca949a
MD
611 hammer_io_disassociate(iou);
612 /* return the bp */
9f5097dc 613 } else {
ecca949a 614 /* return the bp (bp passively associated) */
9f5097dc 615 }
ecca949a
MD
616 } else {
617 /*
618 * bp is left passively associated but we do not
619 * try to reacquire it. Interactions with the io
620 * structure will occur on completion of the bp's
621 * I/O.
622 */
623 bp = NULL;
19b97e01 624 }
66325755 625 }
ecca949a 626 return(bp);
66325755
MD
627}
628
fbc6e32a 629/*
630 * This routine is called with a locked IO when a flush is desired and
 * no other references to the structure exist other than ours. This
632 * routine is ONLY called when HAMMER believes it is safe to flush a
633 * potentially modified buffer out.
fbc6e32a
MD
634 */
635void
710733a6 636hammer_io_flush(struct hammer_io *io, int reclaim)
fbc6e32a
MD
637{
638 struct buf *bp;
fbc6e32a 639
055f5ff8 640 /*
10a5d1ba 641 * Degenerate case - nothing to flush if nothing is dirty.
055f5ff8 642 */
b58c6388 643 if (io->modified == 0) {
055f5ff8 644 return;
b58c6388 645 }
055f5ff8
MD
646
647 KKASSERT(io->bp);
9f5097dc 648 KKASSERT(io->modify_refs <= 0);
055f5ff8 649
77062c8a
MD
650 /*
651 * Acquire ownership of the bp, particularly before we clear our
652 * modified flag.
653 *
654 * We are going to bawrite() this bp. Don't leave a window where
 * io->released is set, we actually own the bp rather than our
656 * buffer.
657 */
658 bp = io->bp;
659 if (io->released) {
660 regetblk(bp);
661 /* BUF_KERNPROC(io->bp); */
662 /* io->released = 0; */
663 KKASSERT(io->released);
664 KKASSERT(io->bp == bp);
665 }
666 io->released = 1;
667
710733a6
MD
668 if (reclaim) {
669 io->reclaim = 1;
670 if ((bp->b_flags & B_LOCKED) == 0) {
671 bp->b_flags |= B_LOCKED;
672 ++hammer_count_io_locked;
673 }
674 }
675
b33e2cc0 676 /*
10a5d1ba
MD
677 * Acquire exclusive access to the bp and then clear the modified
678 * state of the buffer prior to issuing I/O to interlock any
679 * modifications made while the I/O is in progress. This shouldn't
680 * happen anyway but losing data would be worse. The modified bit
681 * will be rechecked after the IO completes.
682 *
4a2796f3
MD
683 * NOTE: This call also finalizes the buffer's content (inval == 0).
684 *
b33e2cc0
MD
685 * This is only legal when lock.refs == 1 (otherwise we might clear
686 * the modified bit while there are still users of the cluster
687 * modifying the data).
688 *
b33e2cc0
MD
689 * Do this before potentially blocking so any attempt to modify the
690 * ondisk while we are blocked blocks waiting for us.
691 */
5c8d05e2 692 hammer_ref(&io->lock);
4a2796f3 693 hammer_io_clear_modify(io, 0);
250aec18 694 hammer_rel(&io->lock);
bcac4bbb 695
6367d0f9
MD
696 if (hammer_debug_io & 0x0002)
697 kprintf("hammer io_write %016jx\n", bp->b_bio1.bio_offset);
698
10a5d1ba
MD
699 /*
700 * Transfer ownership to the kernel and initiate I/O.
701 */
055f5ff8 702 io->running = 1;
f5a07a7a 703 io->hmp->io_running_space += io->bytes;
eddadaee 704 TAILQ_INSERT_TAIL(&io->hmp->iorun_list, io, iorun_entry);
f5a07a7a 705 hammer_count_io_running_write += io->bytes;
055f5ff8 706 bawrite(bp);
748efb59 707 hammer_io_flush_mark(io->volume);
fbc6e32a
MD
708}
709
/************************************************************************
 *				BUFFER DIRTYING				*
 ************************************************************************
 *
 * These routines deal with dependencies created when IO buffers get
 * modified. The caller must call hammer_modify_*() on a referenced
 * HAMMER structure prior to modifying its on-disk data.
 *
 * Any intent to modify an IO buffer acquires the related bp and imposes
 * various write ordering dependencies.
 */

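/*
 * A typical modification sequence, using the helpers defined later in
 * this file, looks roughly like this (sketch only; the field and value
 * names are placeholders):
 *
 *	hammer_modify_buffer(trans, buffer, &ondisk->field, sizeof(ondisk->field));
 *	ondisk->field = new_value;
 *	hammer_modify_buffer_done(buffer);
 *
 * hammer_modify_buffer() generates the UNDO for the range and marks the
 * io modified; hammer_modify_buffer_done() drops the modify_refs count.
 */
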
722/*
723 * Mark a HAMMER structure as undergoing modification. Meta-data buffers
724 * are locked until the flusher can deal with them, pure data buffers
725 * can be written out.
055f5ff8 726 */
10a5d1ba 727static
b58c6388 728void
10a5d1ba 729hammer_io_modify(hammer_io_t io, int count)
0b075555 730{
9f5097dc
MD
731 /*
732 * io->modify_refs must be >= 0
733 */
734 while (io->modify_refs < 0) {
735 io->waitmod = 1;
736 tsleep(io, 0, "hmrmod", 0);
737 }
738
46fe7ae1
MD
739 /*
740 * Shortcut if nothing to do.
741 */
250aec18 742 KKASSERT(hammer_isactive(&io->lock) && io->bp != NULL);
10a5d1ba 743 io->modify_refs += count;
b58c6388
MD
744 if (io->modified && io->released == 0)
745 return;
46fe7ae1
MD
746
747 hammer_lock_ex(&io->lock);
10a5d1ba 748 if (io->modified == 0) {
cdb6e4e6 749 hammer_io_set_modlist(io);
10a5d1ba
MD
750 io->modified = 1;
751 }
46fe7ae1
MD
752 if (io->released) {
753 regetblk(io->bp);
754 BUF_KERNPROC(io->bp);
755 io->released = 0;
756 KKASSERT(io->modified != 0);
757 }
46fe7ae1 758 hammer_unlock(&io->lock);
055f5ff8
MD
759}
760
10a5d1ba
MD
761static __inline
762void
763hammer_io_modify_done(hammer_io_t io)
764{
765 KKASSERT(io->modify_refs > 0);
766 --io->modify_refs;
9f5097dc
MD
767 if (io->modify_refs == 0 && io->waitmod) {
768 io->waitmod = 0;
769 wakeup(io);
770 }
771}
772
773void
774hammer_io_write_interlock(hammer_io_t io)
775{
776 while (io->modify_refs != 0) {
777 io->waitmod = 1;
778 tsleep(io, 0, "hmrmod", 0);
779 }
780 io->modify_refs = -1;
781}
782
783void
784hammer_io_done_interlock(hammer_io_t io)
785{
786 KKASSERT(io->modify_refs == -1);
787 io->modify_refs = 0;
788 if (io->waitmod) {
789 io->waitmod = 0;
790 wakeup(io);
791 }
10a5d1ba
MD
792}
793
2f85fa4d
MD
794/*
795 * Caller intends to modify a volume's ondisk structure.
796 *
797 * This is only allowed if we are the flusher or we have a ref on the
798 * sync_lock.
799 */
055f5ff8 800void
36f82b23
MD
801hammer_modify_volume(hammer_transaction_t trans, hammer_volume_t volume,
802 void *base, int len)
055f5ff8 803{
2f85fa4d 804 KKASSERT (trans == NULL || trans->sync_lock_refs > 0);
055f5ff8 805
2f85fa4d 806 hammer_io_modify(&volume->io, 1);
47197d71
MD
807 if (len) {
808 intptr_t rel_offset = (intptr_t)base - (intptr_t)volume->ondisk;
809 KKASSERT((rel_offset & ~(intptr_t)HAMMER_BUFMASK) == 0);
02428fb6 810 hammer_generate_undo(trans,
47197d71
MD
811 HAMMER_ENCODE_RAW_VOLUME(volume->vol_no, rel_offset),
812 base, len);
813 }
0b075555
MD
814}
815
055f5ff8 816/*
2f85fa4d
MD
817 * Caller intends to modify a buffer's ondisk structure.
818 *
819 * This is only allowed if we are the flusher or we have a ref on the
820 * sync_lock.
055f5ff8 821 */
0b075555 822void
36f82b23
MD
823hammer_modify_buffer(hammer_transaction_t trans, hammer_buffer_t buffer,
824 void *base, int len)
46fe7ae1 825{
2f85fa4d
MD
826 KKASSERT (trans == NULL || trans->sync_lock_refs > 0);
827
10a5d1ba 828 hammer_io_modify(&buffer->io, 1);
47197d71
MD
829 if (len) {
830 intptr_t rel_offset = (intptr_t)base - (intptr_t)buffer->ondisk;
831 KKASSERT((rel_offset & ~(intptr_t)HAMMER_BUFMASK) == 0);
02428fb6 832 hammer_generate_undo(trans,
34d829f7 833 buffer->zone2_offset + rel_offset,
47197d71
MD
834 base, len);
835 }
46fe7ae1
MD
836}
837
10a5d1ba
MD
838void
839hammer_modify_volume_done(hammer_volume_t volume)
840{
841 hammer_io_modify_done(&volume->io);
842}
843
844void
845hammer_modify_buffer_done(hammer_buffer_t buffer)
846{
847 hammer_io_modify_done(&buffer->io);
848}
849
61aeeb33 850/*
4a2796f3
MD
851 * Mark an entity as not being dirty any more and finalize any
852 * delayed adjustments to the buffer.
853 *
854 * Delayed adjustments are an important performance enhancement, allowing
855 * us to avoid recalculating B-Tree node CRCs over and over again when
856 * making bulk-modifications to the B-Tree.
857 *
858 * If inval is non-zero delayed adjustments are ignored.
5c8d05e2
MD
859 *
860 * This routine may dereference related btree nodes and cause the
861 * buffer to be dereferenced. The caller must own a reference on io.
61aeeb33
MD
862 */
863void
4a2796f3 864hammer_io_clear_modify(struct hammer_io *io, int inval)
61aeeb33 865{
4a2796f3
MD
866 if (io->modified == 0)
867 return;
868
869 /*
870 * Take us off the mod-list and clear the modified bit.
871 */
872 KKASSERT(io->mod_list != NULL);
873 if (io->mod_list == &io->hmp->volu_list ||
874 io->mod_list == &io->hmp->meta_list) {
f5a07a7a
MD
875 io->hmp->locked_dirty_space -= io->bytes;
876 hammer_count_dirtybufspace -= io->bytes;
4a2796f3
MD
877 }
878 TAILQ_REMOVE(io->mod_list, io, mod_entry);
879 io->mod_list = NULL;
880 io->modified = 0;
881
882 /*
883 * If this bit is not set there are no delayed adjustments.
884 */
885 if (io->gencrc == 0)
886 return;
887 io->gencrc = 0;
888
889 /*
890 * Finalize requested CRCs. The NEEDSCRC flag also holds a reference
891 * on the node (& underlying buffer). Release the node after clearing
892 * the flag.
893 */
894 if (io->type == HAMMER_STRUCTURE_META_BUFFER) {
895 hammer_buffer_t buffer = (void *)io;
896 hammer_node_t node;
897
898restart:
899 TAILQ_FOREACH(node, &buffer->clist, entry) {
900 if ((node->flags & HAMMER_NODE_NEEDSCRC) == 0)
901 continue;
902 node->flags &= ~HAMMER_NODE_NEEDSCRC;
903 KKASSERT(node->ondisk);
904 if (inval == 0)
905 node->ondisk->crc = crc32(&node->ondisk->crc + 1, HAMMER_BTREE_CRCSIZE);
906 hammer_rel_node(node);
907 goto restart;
61aeeb33 908 }
cebe9493 909 }
5c8d05e2 910 /* caller must still have ref on io */
250aec18 911 KKASSERT(hammer_isactive(&io->lock));
cebe9493
MD
912}
913
914/*
915 * Clear the IO's modify list. Even though the IO is no longer modified
916 * it may still be on the lose_list. This routine is called just before
917 * the governing hammer_buffer is destroyed.
918 */
919void
920hammer_io_clear_modlist(struct hammer_io *io)
921{
4a2796f3 922 KKASSERT(io->modified == 0);
cebe9493 923 if (io->mod_list) {
a99b9ea2 924 crit_enter(); /* biodone race against list */
cebe9493
MD
925 KKASSERT(io->mod_list == &io->hmp->lose_list);
926 TAILQ_REMOVE(io->mod_list, io, mod_entry);
927 io->mod_list = NULL;
a99b9ea2 928 crit_exit();
61aeeb33
MD
929 }
930}
931
cdb6e4e6
MD
932static void
933hammer_io_set_modlist(struct hammer_io *io)
934{
935 struct hammer_mount *hmp = io->hmp;
936
937 KKASSERT(io->mod_list == NULL);
938
939 switch(io->type) {
940 case HAMMER_STRUCTURE_VOLUME:
941 io->mod_list = &hmp->volu_list;
942 hmp->locked_dirty_space += io->bytes;
943 hammer_count_dirtybufspace += io->bytes;
944 break;
945 case HAMMER_STRUCTURE_META_BUFFER:
946 io->mod_list = &hmp->meta_list;
947 hmp->locked_dirty_space += io->bytes;
948 hammer_count_dirtybufspace += io->bytes;
949 break;
950 case HAMMER_STRUCTURE_UNDO_BUFFER:
951 io->mod_list = &hmp->undo_list;
952 break;
953 case HAMMER_STRUCTURE_DATA_BUFFER:
954 io->mod_list = &hmp->data_list;
955 break;
eddadaee
MD
956 case HAMMER_STRUCTURE_DUMMY:
		panic("hammer_io_set_modlist: bad io type");
958 break;
cdb6e4e6
MD
959 }
960 TAILQ_INSERT_TAIL(io->mod_list, io, mod_entry);
961}
962
055f5ff8
MD
963/************************************************************************
964 * HAMMER_BIOOPS *
965 ************************************************************************
966 *
66325755
MD
967 */
968
969/*
055f5ff8 970 * Pre-IO initiation kernel callback - cluster build only
66325755 971 */
66325755
MD
972static void
973hammer_io_start(struct buf *bp)
974{
975}
976
055f5ff8 977/*
7bc5b8c2 978 * Post-IO completion kernel callback - MAY BE CALLED FROM INTERRUPT!
b33e2cc0
MD
979 *
980 * NOTE: HAMMER may modify a buffer after initiating I/O. The modified bit
981 * may also be set if we were marking a cluster header open. Only remove
982 * our dependancy if the modified bit is clear.
055f5ff8 983 */
66325755
MD
984static void
985hammer_io_complete(struct buf *bp)
986{
055f5ff8 987 union hammer_io_structure *iou = (void *)LIST_FIRST(&bp->b_dep);
ba298df1 988 struct hammer_mount *hmp = iou->io.hmp;
eddadaee 989 struct hammer_io *ionext;
055f5ff8
MD
990
991 KKASSERT(iou->io.released == 1);
fbc6e32a 992
bf3b416b
MD
993 /*
994 * Deal with people waiting for I/O to drain
995 */
f90dde4c 996 if (iou->io.running) {
cdb6e4e6
MD
997 /*
998 * Deal with critical write errors. Once a critical error
999 * has been flagged in hmp the UNDO FIFO will not be updated.
1000 * That way crash recover will give us a consistent
1001 * filesystem.
1002 *
1003 * Because of this we can throw away failed UNDO buffers. If
1004 * we throw away META or DATA buffers we risk corrupting
1005 * the now read-only version of the filesystem visible to
1006 * the user. Clear B_ERROR so the buffer is not re-dirtied
1007 * by the kernel and ref the io so it doesn't get thrown
1008 * away.
1009 */
1010 if (bp->b_flags & B_ERROR) {
ba298df1 1011 hammer_critical_error(hmp, NULL, bp->b_error,
cdb6e4e6
MD
1012 "while flushing meta-data");
1013 switch(iou->io.type) {
1014 case HAMMER_STRUCTURE_UNDO_BUFFER:
1015 break;
1016 default:
1017 if (iou->io.ioerror == 0) {
1018 iou->io.ioerror = 1;
cdb6e4e6
MD
1019 hammer_ref(&iou->io.lock);
1020 }
1021 break;
1022 }
1023 bp->b_flags &= ~B_ERROR;
1024 bundirty(bp);
1025#if 0
1026 hammer_io_set_modlist(&iou->io);
1027 iou->io.modified = 1;
1028#endif
1029 }
ce0138a6 1030 hammer_stats_disk_write += iou->io.bytes;
f5a07a7a 1031 hammer_count_io_running_write -= iou->io.bytes;
ba298df1
MD
1032 hmp->io_running_space -= iou->io.bytes;
1033 if (hmp->io_running_wakeup &&
1034 hmp->io_running_space < hammer_limit_running_io / 2) {
1035 hmp->io_running_wakeup = 0;
1036 wakeup(&hmp->io_running_wakeup);
1037 }
1038 KKASSERT(hmp->io_running_space >= 0);
f90dde4c 1039 iou->io.running = 0;
eddadaee
MD
1040
1041 /*
1042 * Remove from iorun list and wakeup any multi-io waiter(s).
1043 */
ba298df1 1044 if (TAILQ_FIRST(&hmp->iorun_list) == &iou->io) {
eddadaee
MD
1045 ionext = TAILQ_NEXT(&iou->io, iorun_entry);
1046 if (ionext && ionext->type == HAMMER_STRUCTURE_DUMMY)
1047 wakeup(ionext);
1048 }
ba298df1 1049 TAILQ_REMOVE(&hmp->iorun_list, &iou->io, iorun_entry);
ce0138a6
MD
1050 } else {
1051 hammer_stats_disk_read += iou->io.bytes;
f90dde4c
MD
1052 }
1053
055f5ff8
MD
1054 if (iou->io.waiting) {
1055 iou->io.waiting = 0;
1056 wakeup(iou);
1057 }
1058
1059 /*
bf3b416b 1060 * If B_LOCKED is set someone wanted to deallocate the bp at some
250aec18
MD
1061 * point, try to do it now. The operation will fail if there are
1062 * refs or if hammer_io_deallocate() is unable to gain the
1063 * interlock.
055f5ff8 1064 */
250aec18 1065 if (bp->b_flags & B_LOCKED) {
a99b9ea2 1066 --hammer_count_io_locked;
d5ef456e 1067 bp->b_flags &= ~B_LOCKED;
055f5ff8
MD
1068 hammer_io_deallocate(bp);
1069 /* structure may be dead now */
1070 }
66325755
MD
1071}
1072
1073/*
1074 * Callback from kernel when it wishes to deallocate a passively
10a5d1ba
MD
1075 * associated structure. This mostly occurs with clean buffers
1076 * but it may be possible for a holding structure to be marked dirty
7bc5b8c2 1077 * while its buffer is passively associated. The caller owns the bp.
66325755
MD
1078 *
1079 * If we cannot disassociate we set B_LOCKED to prevent the buffer
1080 * from getting reused.
46fe7ae1
MD
1081 *
1082 * WARNING: Because this can be called directly by getnewbuf we cannot
1083 * recurse into the tree. If a bp cannot be immediately disassociated
1084 * our only recourse is to set B_LOCKED.
7bc5b8c2
MD
1085 *
1086 * WARNING: This may be called from an interrupt via hammer_io_complete()
66325755
MD
1087 */
1088static void
1089hammer_io_deallocate(struct buf *bp)
1090{
055f5ff8 1091 hammer_io_structure_t iou = (void *)LIST_FIRST(&bp->b_dep);
66325755 1092
055f5ff8 1093 KKASSERT((bp->b_flags & B_LOCKED) == 0 && iou->io.running == 0);
250aec18
MD
1094 if (hammer_try_interlock_norefs(&iou->io.lock) == 0) {
1095 /*
1096 * We cannot safely disassociate a bp from a referenced
1097 * or interlocked HAMMER structure.
1098 */
1099 bp->b_flags |= B_LOCKED;
1100 ++hammer_count_io_locked;
1101 } else if (iou->io.modified) {
10a5d1ba
MD
1102 /*
1103 * It is not legal to disassociate a modified buffer. This
1104 * case really shouldn't ever occur.
1105 */
055f5ff8 1106 bp->b_flags |= B_LOCKED;
a99b9ea2 1107 ++hammer_count_io_locked;
250aec18 1108 hammer_put_interlock(&iou->io.lock, 0);
055f5ff8 1109 } else {
10a5d1ba
MD
1110 /*
1111 * Disassociate the BP. If the io has no refs left we
1112 * have to add it to the loose list.
1113 */
ecca949a
MD
1114 hammer_io_disassociate(iou);
1115 if (iou->io.type != HAMMER_STRUCTURE_VOLUME) {
1116 KKASSERT(iou->io.bp == NULL);
10a5d1ba 1117 KKASSERT(iou->io.mod_list == NULL);
a99b9ea2 1118 crit_enter(); /* biodone race against list */
10a5d1ba
MD
1119 iou->io.mod_list = &iou->io.hmp->lose_list;
1120 TAILQ_INSERT_TAIL(iou->io.mod_list, &iou->io, mod_entry);
a99b9ea2 1121 crit_exit();
66325755 1122 }
250aec18 1123 hammer_put_interlock(&iou->io.lock, 1);
66325755 1124 }
1125}
1126
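/*
 * bioops fsync callback. HAMMER implements this as a no-op stub; it
 * simply returns 0.
 */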
1127static int
1128hammer_io_fsync(struct vnode *vp)
1129{
1130 return(0);
1131}
1132
1133/*
1134 * NOTE: will not be called unless we tell the kernel about the
1135 * bioops. Unused... we use the mount's VFS_SYNC instead.
1136 */
1137static int
1138hammer_io_sync(struct mount *mp)
1139{
1140 return(0);
1141}
1142
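/*
 * bioops callback for moving dependencies from one buf to another.
 * HAMMER implements this as a no-op stub.
 */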
1143static void
1144hammer_io_movedeps(struct buf *bp1, struct buf *bp2)
1145{
1146}
1147
1148/*
1149 * I/O pre-check for reading and writing. HAMMER only uses this for
1150 * B_CACHE buffers so checkread just shouldn't happen, but if it does
1151 * allow it.
1152 *
fbc6e32a
MD
1153 * Writing is a different case. We don't want the kernel to try to write
1154 * out a buffer that HAMMER may be modifying passively or which has a
10a5d1ba
MD
 * dependency. In addition, kernel-demanded writes can only proceed for
1156 * certain types of buffers (i.e. UNDO and DATA types). Other dirty
1157 * buffer types can only be explicitly written by the flusher.
fbc6e32a 1158 *
10a5d1ba
MD
1159 * checkwrite will only be called for bdwrite()n buffers. If we return
1160 * success the kernel is guaranteed to initiate the buffer write.
66325755
MD
1161 */
1162static int
1163hammer_io_checkread(struct buf *bp)
1164{
1165 return(0);
1166}
1167
1168static int
1169hammer_io_checkwrite(struct buf *bp)
1170{
10a5d1ba 1171 hammer_io_t io = (void *)LIST_FIRST(&bp->b_dep);
66325755 1172
77062c8a
MD
1173 /*
1174 * This shouldn't happen under normal operation.
1175 */
1176 if (io->type == HAMMER_STRUCTURE_VOLUME ||
1177 io->type == HAMMER_STRUCTURE_META_BUFFER) {
1178 if (!panicstr)
1179 panic("hammer_io_checkwrite: illegal buffer");
a99b9ea2
MD
1180 if ((bp->b_flags & B_LOCKED) == 0) {
1181 bp->b_flags |= B_LOCKED;
1182 ++hammer_count_io_locked;
1183 }
77062c8a
MD
1184 return(1);
1185 }
c9b9e29d 1186
b33e2cc0 1187 /*
10a5d1ba
MD
1188 * We can only clear the modified bit if the IO is not currently
1189 * undergoing modification. Otherwise we may miss changes.
5c8d05e2
MD
1190 *
1191 * Only data and undo buffers can reach here. These buffers do
1192 * not have terminal crc functions but we temporarily reference
1193 * the IO anyway, just in case.
b33e2cc0 1194 */
5c8d05e2
MD
1195 if (io->modify_refs == 0 && io->modified) {
1196 hammer_ref(&io->lock);
4a2796f3 1197 hammer_io_clear_modify(io, 0);
250aec18 1198 hammer_rel(&io->lock);
5c8d05e2
MD
1199 } else if (io->modified) {
1200 KKASSERT(io->type == HAMMER_STRUCTURE_DATA_BUFFER);
1201 }
f90dde4c
MD
1202
1203 /*
1204 * The kernel is going to start the IO, set io->running.
1205 */
1206 KKASSERT(io->running == 0);
1207 io->running = 1;
f5a07a7a 1208 io->hmp->io_running_space += io->bytes;
eddadaee 1209 TAILQ_INSERT_TAIL(&io->hmp->iorun_list, io, iorun_entry);
f5a07a7a 1210 hammer_count_io_running_write += io->bytes;
055f5ff8 1211 return(0);
66325755
MD
1212}
1213
66325755
MD
1214/*
1215 * Return non-zero if we wish to delay the kernel's attempt to flush
1216 * this buffer to disk.
1217 */
1218static int
1219hammer_io_countdeps(struct buf *bp, int n)
1220{
1221 return(0);
1222}
1223
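/*
 * The bioops vector hooked into each HAMMER-owned buffer via bp->b_ops
 * (set in hammer_io_read() and hammer_io_new() above). The kernel calls
 * back through these functions for such buffers.
 */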
1224struct bio_ops hammer_bioops = {
1225 .io_start = hammer_io_start,
1226 .io_complete = hammer_io_complete,
1227 .io_deallocate = hammer_io_deallocate,
1228 .io_fsync = hammer_io_fsync,
1229 .io_sync = hammer_io_sync,
1230 .io_movedeps = hammer_io_movedeps,
1231 .io_countdeps = hammer_io_countdeps,
1232 .io_checkread = hammer_io_checkread,
1233 .io_checkwrite = hammer_io_checkwrite,
1234};
1235
47637bff
MD
1236/************************************************************************
1237 * DIRECT IO OPS *
1238 ************************************************************************
1239 *
1240 * These functions operate directly on the buffer cache buffer associated
 * with a front-end vnode rather than a back-end device vnode.
1242 */
1243
1244/*
1245 * Read a buffer associated with a front-end vnode directly from the
1b0ab2c3
MD
1246 * disk media. The bio may be issued asynchronously. If leaf is non-NULL
1247 * we validate the CRC.
a99b9ea2 1248 *
1b0ab2c3
MD
1249 * We must check for the presence of a HAMMER buffer to handle the case
1250 * where the reblocker has rewritten the data (which it does via the HAMMER
1251 * buffer system, not via the high-level vnode buffer cache), but not yet
1252 * committed the buffer to the media.
47637bff
MD
1253 */
1254int
1b0ab2c3
MD
1255hammer_io_direct_read(hammer_mount_t hmp, struct bio *bio,
1256 hammer_btree_leaf_elm_t leaf)
47637bff 1257{
1b0ab2c3 1258 hammer_off_t buf_offset;
47637bff
MD
1259 hammer_off_t zone2_offset;
1260 hammer_volume_t volume;
1261 struct buf *bp;
1262 struct bio *nbio;
1263 int vol_no;
1264 int error;
1265
1b0ab2c3
MD
1266 buf_offset = bio->bio_offset;
1267 KKASSERT((buf_offset & HAMMER_OFF_ZONE_MASK) ==
1268 HAMMER_ZONE_LARGE_DATA);
1269
1270 /*
1271 * The buffer cache may have an aliased buffer (the reblocker can
1272 * write them). If it does we have to sync any dirty data before
1273 * we can build our direct-read. This is a non-critical code path.
1274 */
1275 bp = bio->bio_buf;
1276 hammer_sync_buffers(hmp, buf_offset, bp->b_bufsize);
4a2796f3 1277
1b0ab2c3
MD
1278 /*
1279 * Resolve to a zone-2 offset. The conversion just requires
1280 * munging the top 4 bits but we want to abstract it anyway
1281 * so the blockmap code can verify the zone assignment.
1282 */
1283 zone2_offset = hammer_blockmap_lookup(hmp, buf_offset, &error);
1284 if (error)
1285 goto done;
43c665ae
MD
1286 KKASSERT((zone2_offset & HAMMER_OFF_ZONE_MASK) ==
1287 HAMMER_ZONE_RAW_BUFFER);
1288
1b0ab2c3
MD
1289 /*
1290 * Resolve volume and raw-offset for 3rd level bio. The
1291 * offset will be specific to the volume.
1292 */
43c665ae
MD
1293 vol_no = HAMMER_VOL_DECODE(zone2_offset);
1294 volume = hammer_get_volume(hmp, vol_no, &error);
1295 if (error == 0 && zone2_offset >= volume->maxbuf_off)
1296 error = EIO;
1297
47637bff 1298 if (error == 0) {
e469566b
MD
1299 /*
1300 * 3rd level bio
1301 */
43c665ae
MD
1302 nbio = push_bio(bio);
1303 nbio->bio_offset = volume->ondisk->vol_buf_beg +
e469566b 1304 (zone2_offset & HAMMER_OFF_SHORT_MASK);
1b0ab2c3
MD
1305#if 0
1306 /*
1307 * XXX disabled - our CRC check doesn't work if the OS
1308 * does bogus_page replacement on the direct-read.
1309 */
1310 if (leaf && hammer_verify_data) {
1311 nbio->bio_done = hammer_io_direct_read_complete;
1312 nbio->bio_caller_info1.uvalue32 = leaf->data_crc;
1313 }
1314#endif
ce0138a6 1315 hammer_stats_disk_read += bp->b_bufsize;
43c665ae 1316 vn_strategy(volume->devvp, nbio);
47637bff 1317 }
43c665ae 1318 hammer_rel_volume(volume, 0);
1b0ab2c3 1319done:
47637bff 1320 if (error) {
cebe9493 1321 kprintf("hammer_direct_read: failed @ %016llx\n",
973c11b9 1322 (long long)zone2_offset);
47637bff
MD
1323 bp->b_error = error;
1324 bp->b_flags |= B_ERROR;
1325 biodone(bio);
1326 }
1327 return(error);
1328}
1329
1b0ab2c3
MD
1330#if 0
1331/*
1332 * On completion of the BIO this callback must check the data CRC
1333 * and chain to the previous bio.
1334 */
1335static
1336void
1337hammer_io_direct_read_complete(struct bio *nbio)
1338{
1339 struct bio *obio;
1340 struct buf *bp;
1341 u_int32_t rec_crc = nbio->bio_caller_info1.uvalue32;
1342
1343 bp = nbio->bio_buf;
1344 if (crc32(bp->b_data, bp->b_bufsize) != rec_crc) {
1345 kprintf("HAMMER: data_crc error @%016llx/%d\n",
1346 nbio->bio_offset, bp->b_bufsize);
fc73edd8
MD
1347 if (hammer_debug_critical)
1348 Debugger("data_crc on read");
1b0ab2c3
MD
1349 bp->b_flags |= B_ERROR;
1350 bp->b_error = EIO;
1351 }
1352 obio = pop_bio(nbio);
1353 biodone(obio);
1354}
1355#endif
1356
47637bff
MD
1357/*
1358 * Write a buffer associated with a front-end vnode directly to the
1359 * disk media. The bio may be issued asynchronously.
1b0ab2c3
MD
1360 *
1361 * The BIO is associated with the specified record and RECF_DIRECT_IO
 * is set. The record is added to its object.
47637bff
MD
1363 */
1364int
6362a262
MD
1365hammer_io_direct_write(hammer_mount_t hmp, struct bio *bio,
1366 hammer_record_t record)
47637bff 1367{
1b0ab2c3 1368 hammer_btree_leaf_elm_t leaf = &record->leaf;
0832c9bb 1369 hammer_off_t buf_offset;
47637bff
MD
1370 hammer_off_t zone2_offset;
1371 hammer_volume_t volume;
0832c9bb 1372 hammer_buffer_t buffer;
47637bff
MD
1373 struct buf *bp;
1374 struct bio *nbio;
0832c9bb 1375 char *ptr;
47637bff
MD
1376 int vol_no;
1377 int error;
1378
0832c9bb
MD
1379 buf_offset = leaf->data_offset;
1380
1381 KKASSERT(buf_offset > HAMMER_ZONE_BTREE);
47637bff
MD
1382 KKASSERT(bio->bio_buf->b_cmd == BUF_CMD_WRITE);
1383
6362a262
MD
1384 /*
1385 * Issue or execute the I/O. The new memory record must replace
 * the old one before the I/O completes, otherwise a reacquisition of
1387 * the buffer will load the old media data instead of the new.
1388 */
0832c9bb 1389 if ((buf_offset & HAMMER_BUFMASK) == 0 &&
4a2796f3 1390 leaf->data_len >= HAMMER_BUFSIZE) {
0832c9bb
MD
1391 /*
1392 * We are using the vnode's bio to write directly to the
1393 * media, any hammer_buffer at the same zone-X offset will
1394 * now have stale data.
1395 */
1396 zone2_offset = hammer_blockmap_lookup(hmp, buf_offset, &error);
47637bff
MD
1397 vol_no = HAMMER_VOL_DECODE(zone2_offset);
1398 volume = hammer_get_volume(hmp, vol_no, &error);
1399
1400 if (error == 0 && zone2_offset >= volume->maxbuf_off)
1401 error = EIO;
1402 if (error == 0) {
0832c9bb 1403 bp = bio->bio_buf;
4a2796f3 1404 KKASSERT((bp->b_bufsize & HAMMER_BUFMASK) == 0);
e469566b 1405 /*
4a2796f3
MD
1406 hammer_del_buffers(hmp, buf_offset,
1407 zone2_offset, bp->b_bufsize);
e469566b 1408 */
1b0ab2c3 1409
43c665ae
MD
1410 /*
1411 * Second level bio - cached zone2 offset.
1b0ab2c3
MD
1412 *
1413 * (We can put our bio_done function in either the
1414 * 2nd or 3rd level).
43c665ae 1415 */
47637bff 1416 nbio = push_bio(bio);
43c665ae 1417 nbio->bio_offset = zone2_offset;
1b0ab2c3
MD
1418 nbio->bio_done = hammer_io_direct_write_complete;
1419 nbio->bio_caller_info1.ptr = record;
e469566b
MD
1420 record->zone2_offset = zone2_offset;
1421 record->flags |= HAMMER_RECF_DIRECT_IO |
1422 HAMMER_RECF_DIRECT_INVAL;
43c665ae
MD
1423
1424 /*
1425 * Third level bio - raw offset specific to the
1426 * correct volume.
1427 */
1428 zone2_offset &= HAMMER_OFF_SHORT_MASK;
1429 nbio = push_bio(nbio);
47637bff 1430 nbio->bio_offset = volume->ondisk->vol_buf_beg +
0832c9bb 1431 zone2_offset;
ce0138a6 1432 hammer_stats_disk_write += bp->b_bufsize;
6362a262 1433 hammer_ip_replace_bulk(hmp, record);
47637bff 1434 vn_strategy(volume->devvp, nbio);
748efb59 1435 hammer_io_flush_mark(volume);
47637bff
MD
1436 }
1437 hammer_rel_volume(volume, 0);
0832c9bb 1438 } else {
1b0ab2c3
MD
1439 /*
1440 * Must fit in a standard HAMMER buffer. In this case all
1441 * consumers use the HAMMER buffer system and RECF_DIRECT_IO
1442 * does not need to be set-up.
1443 */
0832c9bb
MD
1444 KKASSERT(((buf_offset ^ (buf_offset + leaf->data_len - 1)) & ~HAMMER_BUFMASK64) == 0);
1445 buffer = NULL;
1446 ptr = hammer_bread(hmp, buf_offset, &error, &buffer);
1447 if (error == 0) {
0832c9bb 1448 bp = bio->bio_buf;
7bc5b8c2 1449 bp->b_flags |= B_AGE;
0832c9bb
MD
1450 hammer_io_modify(&buffer->io, 1);
1451 bcopy(bp->b_data, ptr, leaf->data_len);
1452 hammer_io_modify_done(&buffer->io);
7bc5b8c2 1453 hammer_rel_buffer(buffer, 0);
0832c9bb 1454 bp->b_resid = 0;
6362a262 1455 hammer_ip_replace_bulk(hmp, record);
0832c9bb
MD
1456 biodone(bio);
1457 }
47637bff 1458 }
6362a262 1459 if (error) {
e469566b 1460 /*
 * Major suckage occurred. Also note: The record was
1462 * never added to the tree so we do not have to worry
1463 * about the backend.
e469566b 1464 */
cebe9493 1465 kprintf("hammer_direct_write: failed @ %016llx\n",
973c11b9 1466 (long long)leaf->data_offset);
47637bff
MD
1467 bp = bio->bio_buf;
1468 bp->b_resid = 0;
1469 bp->b_error = EIO;
1470 bp->b_flags |= B_ERROR;
1471 biodone(bio);
e469566b
MD
1472 record->flags |= HAMMER_RECF_DELETED_FE;
1473 hammer_rel_mem_record(record);
47637bff
MD
1474 }
1475 return(error);
1476}
1477
1b0ab2c3
MD
1478/*
1479 * On completion of the BIO this callback must disconnect
1480 * it from the hammer_record and chain to the previous bio.
cdb6e4e6
MD
1481 *
1482 * An I/O error forces the mount to read-only. Data buffers
1483 * are not B_LOCKED like meta-data buffers are, so we have to
1484 * throw the buffer away to prevent the kernel from retrying.
1b0ab2c3
MD
1485 */
1486static
1487void
1488hammer_io_direct_write_complete(struct bio *nbio)
1489{
1490 struct bio *obio;
e469566b 1491 struct buf *bp;
1b0ab2c3
MD
1492 hammer_record_t record = nbio->bio_caller_info1.ptr;
1493
e469566b 1494 bp = nbio->bio_buf;
1b0ab2c3 1495 obio = pop_bio(nbio);
e469566b 1496 if (bp->b_flags & B_ERROR) {
cdb6e4e6 1497 hammer_critical_error(record->ip->hmp, record->ip,
e469566b 1498 bp->b_error,
cdb6e4e6 1499 "while writing bulk data");
e469566b 1500 bp->b_flags |= B_INVAL;
cdb6e4e6 1501 }
1b0ab2c3 1502 biodone(obio);
e469566b
MD
1503
1504 KKASSERT(record != NULL);
1505 KKASSERT(record->flags & HAMMER_RECF_DIRECT_IO);
1b0ab2c3 1506 if (record->flags & HAMMER_RECF_DIRECT_WAIT) {
de996e86
MD
1507 record->flags &= ~(HAMMER_RECF_DIRECT_IO |
1508 HAMMER_RECF_DIRECT_WAIT);
1509 /* record can disappear once DIRECT_IO flag is cleared */
1b0ab2c3 1510 wakeup(&record->flags);
de996e86
MD
1511 } else {
1512 record->flags &= ~HAMMER_RECF_DIRECT_IO;
1513 /* record can disappear once DIRECT_IO flag is cleared */
1b0ab2c3
MD
1514 }
1515}
1516
1517
1518/*
1519 * This is called before a record is either committed to the B-Tree
e469566b 1520 * or destroyed, to resolve any associated direct-IO.
1b0ab2c3 1521 *
e469566b
MD
1522 * (1) We must wait for any direct-IO related to the record to complete.
1523 *
1524 * (2) We must remove any buffer cache aliases for data accessed via
1525 * leaf->data_offset or zone2_offset so non-direct-IO consumers
1526 * (the mirroring and reblocking code) do not see stale data.
1b0ab2c3
MD
1527 */
1528void
1529hammer_io_direct_wait(hammer_record_t record)
1530{
e469566b
MD
1531 /*
1532 * Wait for I/O to complete
1533 */
1534 if (record->flags & HAMMER_RECF_DIRECT_IO) {
1535 crit_enter();
1536 while (record->flags & HAMMER_RECF_DIRECT_IO) {
1537 record->flags |= HAMMER_RECF_DIRECT_WAIT;
1538 tsleep(&record->flags, 0, "hmdiow", 0);
1539 }
1540 crit_exit();
1541 }
1542
1543 /*
362ec2dc
MD
1544 * Invalidate any related buffer cache aliases associated with the
1545 * backing device. This is needed because the buffer cache buffer
1546 * for file data is associated with the file vnode, not the backing
1547 * device vnode.
1548 *
1549 * XXX I do not think this case can occur any more now that
1550 * reservations ensure that all such buffers are removed before
1551 * an area can be reused.
e469566b
MD
1552 */
1553 if (record->flags & HAMMER_RECF_DIRECT_INVAL) {
1554 KKASSERT(record->leaf.data_offset);
362ec2dc
MD
1555 hammer_del_buffers(record->ip->hmp, record->leaf.data_offset,
1556 record->zone2_offset, record->leaf.data_len,
1557 1);
e469566b 1558 record->flags &= ~HAMMER_RECF_DIRECT_INVAL;
1b0ab2c3 1559 }
1b0ab2c3
MD
1560}
1561
43c665ae
MD
1562/*
1563 * This is called to remove the second-level cached zone-2 offset from
1564 * frontend buffer cache buffers, now stale due to a data relocation.
1565 * These offsets are generated by cluster_read() via VOP_BMAP, or directly
1566 * by hammer_vop_strategy_read().
1567 *
1568 * This is rather nasty because here we have something like the reblocker
1569 * scanning the raw B-Tree with no held references on anything, really,
 * other than a shared lock on the B-Tree node, and we have to access the
1571 * frontend's buffer cache to check for and clean out the association.
1572 * Specifically, if the reblocker is moving data on the disk, these cached
1573 * offsets will become invalid.
1574 *
1575 * Only data record types associated with the large-data zone are subject
1576 * to direct-io and need to be checked.
1577 *
1578 */
1579void
1580hammer_io_direct_uncache(hammer_mount_t hmp, hammer_btree_leaf_elm_t leaf)
1581{
1582 struct hammer_inode_info iinfo;
1583 int zone;
1584
1585 if (leaf->base.rec_type != HAMMER_RECTYPE_DATA)
1586 return;
1587 zone = HAMMER_ZONE_DECODE(leaf->data_offset);
1588 if (zone != HAMMER_ZONE_LARGE_DATA_INDEX)
1589 return;
1590 iinfo.obj_id = leaf->base.obj_id;
1591 iinfo.obj_asof = 0; /* unused */
1592 iinfo.obj_localization = leaf->base.localization &
5a930e66 1593 HAMMER_LOCALIZE_PSEUDOFS_MASK;
43c665ae
MD
1594 iinfo.u.leaf = leaf;
1595 hammer_scan_inode_snapshots(hmp, &iinfo,
1596 hammer_io_direct_uncache_callback,
1597 leaf);
1598}
1599
1600static int
1601hammer_io_direct_uncache_callback(hammer_inode_t ip, void *data)
1602{
1603 hammer_inode_info_t iinfo = data;
1604 hammer_off_t data_offset;
1605 hammer_off_t file_offset;
1606 struct vnode *vp;
1607 struct buf *bp;
1608 int blksize;
1609
1610 if (ip->vp == NULL)
1611 return(0);
1612 data_offset = iinfo->u.leaf->data_offset;
1613 file_offset = iinfo->u.leaf->base.key - iinfo->u.leaf->data_len;
1614 blksize = iinfo->u.leaf->data_len;
1615 KKASSERT((blksize & HAMMER_BUFMASK) == 0);
1616
1617 hammer_ref(&ip->lock);
1618 if (hammer_get_vnode(ip, &vp) == 0) {
b1c20cfa 1619 if ((bp = findblk(ip->vp, file_offset, FINDBLK_TEST)) != NULL &&
43c665ae
MD
1620 bp->b_bio2.bio_offset != NOOFFSET) {
1621 bp = getblk(ip->vp, file_offset, blksize, 0, 0);
1622 bp->b_bio2.bio_offset = NOOFFSET;
1623 brelse(bp);
1624 }
1625 vput(vp);
1626 }
1627 hammer_rel_inode(ip, 0);
1628 return(0);
1629}
47637bff 1630
748efb59
MD
1631
1632/*
 * This function is called when writes may have occurred on the volume,
1634 * indicating that the device may be holding cached writes.
1635 */
1636static void
1637hammer_io_flush_mark(hammer_volume_t volume)
1638{
1639 volume->vol_flags |= HAMMER_VOLF_NEEDFLUSH;
1640}
1641
1642/*
1643 * This function ensures that the device has flushed any cached writes out.
1644 */
1645void
1646hammer_io_flush_sync(hammer_mount_t hmp)
1647{
1648 hammer_volume_t volume;
1649 struct buf *bp_base = NULL;
1650 struct buf *bp;
1651
1652 RB_FOREACH(volume, hammer_vol_rb_tree, &hmp->rb_vols_root) {
1653 if (volume->vol_flags & HAMMER_VOLF_NEEDFLUSH) {
1654 volume->vol_flags &= ~HAMMER_VOLF_NEEDFLUSH;
1655 bp = getpbuf(NULL);
1656 bp->b_bio1.bio_offset = 0;
1657 bp->b_bufsize = 0;
1658 bp->b_bcount = 0;
1659 bp->b_cmd = BUF_CMD_FLUSH;
1660 bp->b_bio1.bio_caller_info1.cluster_head = bp_base;
ae8e83e6
MD
1661 bp->b_bio1.bio_done = biodone_sync;
1662 bp->b_bio1.bio_flags |= BIO_SYNC;
748efb59
MD
1663 bp_base = bp;
1664 vn_strategy(volume->devvp, &bp->b_bio1);
1665 }
1666 }
1667 while ((bp = bp_base) != NULL) {
1668 bp_base = bp->b_bio1.bio_caller_info1.cluster_head;
ae8e83e6 1669 biowait(&bp->b_bio1, "hmrFLS");
748efb59
MD
1670 relpbuf(bp, NULL);
1671 }
1672}
ba298df1
MD
1673
1674/*
1675 * Limit the amount of backlog which we allow to build up
1676 */
1677void
1678hammer_io_limit_backlog(hammer_mount_t hmp)
1679{
1680 while (hmp->io_running_space > hammer_limit_running_io) {
1681 hmp->io_running_wakeup = 1;
1682 tsleep(&hmp->io_running_wakeup, 0, "hmiolm", hz / 10);
1683 }
1684}