hammer2 - update documentation, begin working on callback I/O
[dragonfly.git] / sys / vfs / hammer2 / hammer2_io.c
/*
 * Copyright (c) 2013-2014 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "hammer2.h"

/*
 * Implements an abstraction layer for synchronous and asynchronous
 * buffered device I/O.  Can be used for OS-abstraction but the main
 * purpose is to allow larger buffers to be used against hammer2_chain's
 * using smaller allocations, without causing deadlocks.
 */
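
/*
 * Example (illustrative sketch, not part of the original source, compiled
 * out): the typical synchronous read path layered on this API.  A caller
 * breads the device buffer backing a logical block, maps the data, and
 * then drops its ref.  Everything except the hammer2_io_*() calls defined
 * in this file is hypothetical.
 */
#if 0
static int
example_read(hammer2_mount_t *hmp, off_t lbase, int lsize)
{
	hammer2_io_t *dio;
	char *data;
	int error;

	error = hammer2_io_bread(hmp, lbase, lsize, &dio);
	if (error == 0) {
		data = hammer2_io_data(dio, lbase);	/* map lbase */
		/* ... consume lsize bytes at data ... */
	}
	hammer2_io_bqrelse(&dio);	/* drop ref, leave buffer cached */
	return (error);
}
#endif
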
static int hammer2_io_cleanup_callback(hammer2_io_t *dio, void *arg);

static int
hammer2_io_cmp(hammer2_io_t *io1, hammer2_io_t *io2)
{
	if (io2->pbase < io1->pbase)
		return(-1);
	if (io2->pbase > io1->pbase)
		return(1);
	return(0);
}

RB_PROTOTYPE2(hammer2_io_tree, hammer2_io, rbnode, hammer2_io_cmp, off_t);
RB_GENERATE2(hammer2_io_tree, hammer2_io, rbnode, hammer2_io_cmp,
		off_t, pbase);

struct hammer2_cleanupcb_info {
	struct hammer2_io_tree tmptree;
	int	count;
};

#define HAMMER2_DIO_INPROG	0x80000000
#define HAMMER2_DIO_GOOD	0x40000000	/* buf/bio is good */
#define HAMMER2_DIO_WAITING	0x20000000	/* iocb's queued */
#define HAMMER2_DIO_DIRTY	0x10000000	/* flush on last drop */

#define HAMMER2_DIO_MASK	0x0FFFFFFF

#define HAMMER2_GETBLK_GOOD	0
#define HAMMER2_GETBLK_QUEUED	1
#define HAMMER2_GETBLK_OWNED	2

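/*
 * Illustrative note (not part of the original source): dio->refs packs
 * the four state flags above together with a reference count in the low
 * 28 bits, which is why the code below manipulates it with atomic
 * compare-and-set loops.  A hypothetical decode helper, compiled out:
 */
#if 0
static __inline u_int
hammer2_dio_refcount(u_int refs)		/* hypothetical */
{
	return (refs & HAMMER2_DIO_MASK);	/* count sans flag bits */
}
#endif
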
/*
 * Allocate/Locate the requested dio, reference it, issue or queue iocb.
 */
void
hammer2_io_getblk(hammer2_mount_t *hmp, off_t lbase, int lsize,
		  hammer2_iocb_t *iocb)
{
	hammer2_io_t *dio;
	hammer2_io_t *xio;
	off_t pbase;
	off_t pmask;
	int psize = hammer2_devblksize(lsize);
	int refs;

	pmask = ~(hammer2_off_t)(psize - 1);

	KKASSERT((1 << (int)(lbase & HAMMER2_OFF_MASK_RADIX)) == lsize);
	lbase &= ~HAMMER2_OFF_MASK_RADIX;
	pbase = lbase & pmask;
	KKASSERT(pbase != 0 && ((lbase + lsize - 1) & pmask) == pbase);

	/*
	 * Access/Allocate the DIO
	 */
	spin_lock_shared(&hmp->io_spin);
	dio = RB_LOOKUP(hammer2_io_tree, &hmp->iotree, pbase);
	if (dio) {
		if ((atomic_fetchadd_int(&dio->refs, 1) &
		     HAMMER2_DIO_MASK) == 0) {
			atomic_add_int(&dio->hmp->iofree_count, -1);
		}
		spin_unlock_shared(&hmp->io_spin);
	} else {
		spin_unlock_shared(&hmp->io_spin);
		dio = kmalloc(sizeof(*dio), M_HAMMER2, M_INTWAIT | M_ZERO);
		dio->hmp = hmp;
		dio->pbase = pbase;
		dio->psize = psize;
		dio->refs = 1;
		spin_init(&dio->spin, "h2dio");
		TAILQ_INIT(&dio->iocbq);
		spin_lock(&hmp->io_spin);
		xio = RB_INSERT(hammer2_io_tree, &hmp->iotree, dio);
		if (xio == NULL) {
			atomic_add_int(&hammer2_dio_count, 1);
			spin_unlock(&hmp->io_spin);
		} else {
			if ((atomic_fetchadd_int(&xio->refs, 1) &
			     HAMMER2_DIO_MASK) == 0) {
				atomic_add_int(&xio->hmp->iofree_count, -1);
			}
			spin_unlock(&hmp->io_spin);
			kfree(dio, M_HAMMER2);
			dio = xio;
		}
	}

	/*
	 * Obtain/Validate the buffer.
	 */
	iocb->dio = dio;

	for (;;) {
		refs = dio->refs;
		cpu_ccfence();

		/*
		 * Issue the iocb immediately if the buffer is already good.
		 * Once set, GOOD cannot be cleared until refs drops to 0.
		 */
		if (refs & HAMMER2_DIO_GOOD) {
			iocb->callback(iocb);
			break;
		}

		/*
		 * Try to own the buffer.  If we cannot we queue the iocb.
		 */
		if (refs & HAMMER2_DIO_INPROG) {
			spin_lock(&dio->spin);
			if (atomic_cmpset_int(&dio->refs, refs,
					      refs | HAMMER2_DIO_WAITING)) {
				iocb->flags |= HAMMER2_IOCB_ONQ |
					       HAMMER2_IOCB_INPROG;
				TAILQ_INSERT_TAIL(&dio->iocbq, iocb, entry);
				spin_unlock(&dio->spin);
				break;
			}
			spin_unlock(&dio->spin);
			/* retry */
		} else {
			if (atomic_cmpset_int(&dio->refs, refs,
					      refs | HAMMER2_DIO_INPROG)) {
				iocb->flags |= HAMMER2_IOCB_INPROG;
				iocb->callback(iocb);
				break;
			}
			/* retry */
		}
		/* retry */
	}
	if (dio->act < 5)
		++dio->act;
}

/*
 * The iocb is done.
 */
void
hammer2_io_complete(hammer2_iocb_t *iocb)
{
	hammer2_io_t *dio = iocb->dio;
	uint32_t orefs;
	uint32_t nrefs;
	uint32_t oflags;
	uint32_t nflags;

	/*
	 * If IOCB_INPROG is not set then the completion was synchronous.
	 * We can set IOCB_DONE safely without having to worry about waiters.
	 * XXX
	 */
	if ((iocb->flags & HAMMER2_IOCB_INPROG) == 0) {
		iocb->flags |= HAMMER2_IOCB_DONE;
		return;
	}

	/*
	 * bp is held for all comers, make sure the lock is not owned by
	 * a particular thread.
	 */
	if (iocb->flags & HAMMER2_IOCB_DIDBP)
		BUF_KERNPROC(dio->bp);

	/*
	 * Set the GOOD bit on completion with no error if dio->bp is
	 * not NULL.  Only applicable if INPROG was set.
	 */
	if (dio->bp && iocb->error == 0)
		atomic_set_int(&dio->refs, HAMMER2_DIO_GOOD);

	for (;;) {
		oflags = iocb->flags;
		cpu_ccfence();
		nflags = oflags;
		nflags &= ~(HAMMER2_IOCB_DIDBP |
			    HAMMER2_IOCB_WAKEUP |
			    HAMMER2_IOCB_INPROG);
		nflags |= HAMMER2_IOCB_DONE;

		if (atomic_cmpset_int(&iocb->flags, oflags, nflags)) {
			if (oflags & HAMMER2_IOCB_WAKEUP)
				wakeup(iocb);
			/* SMP: iocb is now stale */
			break;
		}
	}
	iocb = NULL;

	/*
	 * Now finish up the dio.  If another iocb is pending chain to it,
	 * otherwise clear INPROG (and WAITING).
	 */
	for (;;) {
		orefs = dio->refs;
		nrefs = orefs & ~(HAMMER2_DIO_WAITING | HAMMER2_DIO_INPROG);

		if ((orefs & HAMMER2_DIO_WAITING) && TAILQ_FIRST(&dio->iocbq)) {
			spin_lock(&dio->spin);
			iocb = TAILQ_FIRST(&dio->iocbq);
			if (iocb) {
				TAILQ_REMOVE(&dio->iocbq, iocb, entry);
				spin_unlock(&dio->spin);
				iocb->callback(iocb);	/* chained */
				break;
			}
			spin_unlock(&dio->spin);
			/* retry */
		} else if (atomic_cmpset_int(&dio->refs, orefs, nrefs)) {
			break;
		} /* else retry */
	}
	/* SMP: dio is stale now */
}

/*
 * Wait for an iocb's I/O to complete (for IOCB_DONE to be set).
 */
void
hammer2_iocb_wait(hammer2_iocb_t *iocb)
{
	uint32_t oflags;
	uint32_t nflags;

	for (;;) {
		oflags = iocb->flags;
		cpu_ccfence();
		nflags = oflags | HAMMER2_IOCB_WAKEUP;
		if (oflags & HAMMER2_IOCB_DONE)
			break;
		tsleep_interlock(iocb, 0);
		if (atomic_cmpset_int(&iocb->flags, oflags, nflags)) {
			tsleep(iocb, PINTERLOCKED, "h2iocb", hz);
		}
	}
}
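
/*
 * Example (illustrative sketch, not part of the original source, compiled
 * out): a caller-side use of the iocb machinery above, mirroring the
 * synchronous wrapper pattern used by _hammer2_io_new() and
 * hammer2_io_bread() below.  The names my_callback and example_usage are
 * hypothetical.
 */
#if 0
static void
my_callback(hammer2_iocb_t *iocb)
{
	/* ... obtain/validate dio->bp here, cf. the real callbacks ... */
	hammer2_io_complete(iocb);	/* finish, chain any queued iocbs */
}

static void
example_usage(hammer2_mount_t *hmp, off_t lbase, int lsize)
{
	hammer2_iocb_t iocb;

	bzero(&iocb, sizeof(iocb));	/* cluster/chain/ptr NULL, flags 0 */
	iocb.callback = my_callback;
	iocb.lbase = lbase;
	iocb.lsize = lsize;
	hammer2_io_getblk(hmp, lbase, lsize, &iocb);
	if ((iocb.flags & HAMMER2_IOCB_DONE) == 0)
		hammer2_iocb_wait(&iocb);	/* block until DONE */
}
#endif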

/*
 * Release our ref on *diop, dispose of the underlying buffer, and flush
 * on last drop if it was dirty.
 */
void
hammer2_io_putblk(hammer2_io_t **diop)
{
	hammer2_mount_t *hmp;
	hammer2_io_t *dio;
	hammer2_iocb_t iocb;
	struct buf *bp;
	off_t peof;
	off_t pbase;
	int psize;
	int refs;

	dio = *diop;
	*diop = NULL;

	/*
	 * Drop refs, on 1->0 transition clear flags, set INPROG.
	 */
	for (;;) {
		refs = dio->refs;

		if ((refs & HAMMER2_DIO_MASK) == 1) {
			KKASSERT((refs & HAMMER2_DIO_INPROG) == 0);
			if (atomic_cmpset_int(&dio->refs, refs,
					      ((refs - 1) &
					       ~(HAMMER2_DIO_GOOD |
						 HAMMER2_DIO_DIRTY)) |
					      HAMMER2_DIO_INPROG)) {
				break;
			}
			/* retry */
		} else {
			if (atomic_cmpset_int(&dio->refs, refs, refs - 1))
				return;
			/* retry */
		}
	}

	/*
	 * We have set DIO_INPROG to gain control of the buffer and we have
	 * cleared DIO_GOOD to prevent other accessors from thinking it is
	 * still good.
	 *
	 * We can now dispose of the buffer, and should do it before calling
	 * io_complete() in case there's a race against a new reference
	 * which causes io_complete() to chain and instantiate the bp again.
	 */
	pbase = dio->pbase;
	psize = dio->psize;
	bp = dio->bp;
	dio->bp = NULL;

	if (refs & HAMMER2_DIO_GOOD) {
		KKASSERT(bp != NULL);
		if (refs & HAMMER2_DIO_DIRTY) {
			if (hammer2_cluster_enable) {
				peof = (pbase + HAMMER2_SEGMASK64) &
				       ~HAMMER2_SEGMASK64;
				cluster_write(bp, peof, psize, 4);
			} else {
				bp->b_flags |= B_CLUSTEROK;
				bdwrite(bp);
			}
		} else if (bp->b_flags & (B_ERROR | B_INVAL | B_RELBUF)) {
			brelse(bp);
		} else {
			bqrelse(bp);
		}
	} else if (bp) {
		if (refs & HAMMER2_DIO_DIRTY) {
			bdwrite(bp);
		} else {
			brelse(bp);
		}
	}

	/*
	 * The instant we call io_complete dio is a free agent again and
	 * can be ripped out from under us.
	 *
	 * We can clean up our final DIO_INPROG by simulating an iocb
	 * completion.
	 */
	hmp = dio->hmp;				/* extract fields */
	atomic_add_int(&hmp->iofree_count, 1);
	cpu_ccfence();

	iocb.dio = dio;
	iocb.flags = HAMMER2_IOCB_INPROG;
	hammer2_io_complete(&iocb);
	dio = NULL;				/* dio stale */

	/*
	 * We cache free buffers so re-use cases can use a shared lock, but
	 * if too many build up we have to clean them out.
	 */
	if (hmp->iofree_count > 1000) {
		struct hammer2_cleanupcb_info info;

		RB_INIT(&info.tmptree);
		spin_lock(&hmp->io_spin);
		if (hmp->iofree_count > 1000) {
			info.count = hmp->iofree_count / 2;
			RB_SCAN(hammer2_io_tree, &hmp->iotree, NULL,
				hammer2_io_cleanup_callback, &info);
		}
		spin_unlock(&hmp->io_spin);
		hammer2_io_cleanup(hmp, &info.tmptree);
	}
}

/*
 * Cleanup any dio's with (INPROG | refs) == 0.
 */
static
int
hammer2_io_cleanup_callback(hammer2_io_t *dio, void *arg)
{
	struct hammer2_cleanupcb_info *info = arg;
	hammer2_io_t *xio;

	if ((dio->refs & (HAMMER2_DIO_MASK | HAMMER2_DIO_INPROG)) == 0) {
		if (dio->act > 0) {
			--dio->act;
			return 0;
		}
		KKASSERT(dio->bp == NULL);
		RB_REMOVE(hammer2_io_tree, &dio->hmp->iotree, dio);
		xio = RB_INSERT(hammer2_io_tree, &info->tmptree, dio);
		KKASSERT(xio == NULL);
		if (--info->count <= 0)	/* limit scan */
			return(-1);
	}
	return 0;
}

void
hammer2_io_cleanup(hammer2_mount_t *hmp, struct hammer2_io_tree *tree)
{
	hammer2_io_t *dio;

	while ((dio = RB_ROOT(tree)) != NULL) {
		RB_REMOVE(hammer2_io_tree, tree, dio);
		KKASSERT(dio->bp == NULL &&
		    (dio->refs & (HAMMER2_DIO_MASK | HAMMER2_DIO_INPROG)) == 0);
		kfree(dio, M_HAMMER2);
		atomic_add_int(&hammer2_dio_count, -1);
		atomic_add_int(&hmp->iofree_count, -1);
	}
}

/*
 * Returns a pointer to the requested data.
 */
char *
hammer2_io_data(hammer2_io_t *dio, off_t lbase)
{
	struct buf *bp;
	int off;

	bp = dio->bp;
	KKASSERT(bp != NULL);
	off = (lbase & ~HAMMER2_OFF_MASK_RADIX) - bp->b_loffset;
	KKASSERT(off >= 0 && off < bp->b_bufsize);
	return(bp->b_data + off);
}
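
/*
 * Worked example (illustrative, hypothetical numbers, not part of the
 * original source): with a 64KB device buffer loaded at b_loffset
 * 0x10000 and a 16KB logical block at lbase 0x14000, off computes to
 * 0x14000 - 0x10000 = 0x4000 and the routine above returns
 * bp->b_data + 0x4000.
 */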

/*
 * Helpers for hammer2_io_new*() functions
 */
static
void
hammer2_iocb_new_callback(hammer2_iocb_t *iocb)
{
	hammer2_io_t *dio = iocb->dio;
	int gbctl = (iocb->flags & HAMMER2_IOCB_QUICK) ? GETBLK_NOWAIT : 0;

	/*
	 * If INPROG is not set the dio already has a good buffer and we
	 * can't mess with it other than zero the requested range.
	 *
	 * If INPROG is set it gets a bit messy.
	 */
	if (iocb->flags & HAMMER2_IOCB_INPROG) {
		if ((iocb->flags & HAMMER2_IOCB_READ) == 0) {
			if (iocb->lsize == dio->psize) {
				/*
				 * Fully covered buffer, try to optimize to
				 * avoid any I/O.
				 */
				if (dio->bp == NULL) {
					dio->bp = getblk(dio->hmp->devvp,
							 dio->pbase,
							 dio->psize,
							 gbctl, 0);
				}
				if (dio->bp) {
					vfs_bio_clrbuf(dio->bp);
					if (iocb->flags & HAMMER2_IOCB_QUICK) {
						dio->bp->b_flags |= B_CACHE;
						bqrelse(dio->bp);
						dio->bp = NULL;
					}
				}
			} else if (iocb->flags & HAMMER2_IOCB_QUICK) {
				/*
				 * Partial buffer, quick mode.  Do nothing.
				 */
			} else if (dio->bp == NULL ||
				   (dio->bp->b_flags & B_CACHE) == 0) {
				/*
				 * Partial buffer, normal mode, requires
				 * read-before-write.  Chain the read.
				 */
				if (dio->bp) {
					if (dio->refs & HAMMER2_DIO_DIRTY)
						bdwrite(dio->bp);
					else
						bqrelse(dio->bp);
					dio->bp = NULL;
				}
				iocb->flags |= HAMMER2_IOCB_READ;
				breadcb(dio->hmp->devvp,
					dio->pbase, dio->psize,
					hammer2_io_callback, iocb);
				return;
			} /* else buffer is good */
		}
	}
	if (dio->bp) {
		if (iocb->flags & HAMMER2_IOCB_ZERO)
			bzero(hammer2_io_data(dio, iocb->lbase), iocb->lsize);
		atomic_set_int(&dio->refs, HAMMER2_DIO_DIRTY);
	}
	hammer2_io_complete(iocb);
}

static
int
_hammer2_io_new(hammer2_mount_t *hmp, off_t lbase, int lsize,
		hammer2_io_t **diop, int flags)
{
	hammer2_iocb_t iocb;
	hammer2_io_t *dio;

	iocb.callback = hammer2_iocb_new_callback;
	iocb.cluster = NULL;
	iocb.chain = NULL;
	iocb.ptr = NULL;
	iocb.lbase = lbase;
	iocb.lsize = lsize;
	iocb.flags = flags;
	iocb.error = 0;
	hammer2_io_getblk(hmp, lbase, lsize, &iocb);
	if ((iocb.flags & HAMMER2_IOCB_DONE) == 0)
		hammer2_iocb_wait(&iocb);
	dio = *diop = iocb.dio;

	return (iocb.error);
}

int
hammer2_io_new(hammer2_mount_t *hmp, off_t lbase, int lsize,
	       hammer2_io_t **diop)
{
	return(_hammer2_io_new(hmp, lbase, lsize, diop, HAMMER2_IOCB_ZERO));
}

int
hammer2_io_newnz(hammer2_mount_t *hmp, off_t lbase, int lsize,
		 hammer2_io_t **diop)
{
	return(_hammer2_io_new(hmp, lbase, lsize, diop, 0));
}

int
hammer2_io_newq(hammer2_mount_t *hmp, off_t lbase, int lsize,
		hammer2_io_t **diop)
{
	return(_hammer2_io_new(hmp, lbase, lsize, diop, HAMMER2_IOCB_QUICK));
}

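/*
 * Usage note (illustrative, not part of the original source): per the
 * flags passed above, hammer2_io_new() returns a zero'd buffer,
 * hammer2_io_newnz() skips the bzero for callers that will overwrite
 * the full range, and hammer2_io_newq() uses IOCB_QUICK (GETBLK_NOWAIT,
 * no read-before-write) to avoid blocking.
 */
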
static
void
hammer2_iocb_bread_callback(hammer2_iocb_t *iocb)
{
	hammer2_io_t *dio = iocb->dio;
	off_t peof;
	int error;

	if (iocb->flags & HAMMER2_IOCB_INPROG) {
		if (hammer2_cluster_enable) {
			peof = (dio->pbase + HAMMER2_SEGMASK64) &
			       ~HAMMER2_SEGMASK64;
			error = cluster_read(dio->hmp->devvp, peof, dio->pbase,
					     dio->psize,
					     dio->psize, HAMMER2_PBUFSIZE*4,
					     &dio->bp);
		} else {
			error = bread(dio->hmp->devvp, dio->pbase,
				      dio->psize, &dio->bp);
		}
		if (error) {
			brelse(dio->bp);
			dio->bp = NULL;
		}
	}
	hammer2_io_complete(iocb);
}

int
hammer2_io_bread(hammer2_mount_t *hmp, off_t lbase, int lsize,
		 hammer2_io_t **diop)
{
	hammer2_iocb_t iocb;
	hammer2_io_t *dio;

	iocb.callback = hammer2_iocb_bread_callback;
	iocb.cluster = NULL;
	iocb.chain = NULL;
	iocb.ptr = NULL;
	iocb.lbase = lbase;
	iocb.lsize = lsize;
	iocb.flags = 0;
	iocb.error = 0;
	hammer2_io_getblk(hmp, lbase, lsize, &iocb);
	if ((iocb.flags & HAMMER2_IOCB_DONE) == 0)
		hammer2_iocb_wait(&iocb);
	dio = *diop = iocb.dio;

	return (iocb.error);
}

/*
 * System buf/bio async callback extracts the iocb and chains
 * to the iocb callback.
 */
void
hammer2_io_callback(struct bio *bio)
{
	struct buf *dbp = bio->bio_buf;
	hammer2_iocb_t *iocb = bio->bio_caller_info1.ptr;
	hammer2_io_t *dio;

	dio = iocb->dio;
	if ((bio->bio_flags & BIO_DONE) == 0)
		bpdone(dbp, 0);
	bio->bio_flags &= ~(BIO_DONE | BIO_SYNC);
	dio->bp = bio->bio_buf;
	iocb->callback(iocb);
}

void
hammer2_io_bawrite(hammer2_io_t **diop)
{
	atomic_set_int(&(*diop)->refs, HAMMER2_DIO_DIRTY);
	hammer2_io_putblk(diop);
}

void
hammer2_io_bdwrite(hammer2_io_t **diop)
{
	atomic_set_int(&(*diop)->refs, HAMMER2_DIO_DIRTY);
	hammer2_io_putblk(diop);
}

int
hammer2_io_bwrite(hammer2_io_t **diop)
{
	atomic_set_int(&(*diop)->refs, HAMMER2_DIO_DIRTY);
	hammer2_io_putblk(diop);
	return (0);	/* XXX */
}

void
hammer2_io_setdirty(hammer2_io_t *dio)
{
	atomic_set_int(&dio->refs, HAMMER2_DIO_DIRTY);
}

void
hammer2_io_setinval(hammer2_io_t *dio, u_int bytes)
{
	if ((u_int)dio->psize == bytes)
		dio->bp->b_flags |= B_INVAL | B_RELBUF;
}

void
hammer2_io_brelse(hammer2_io_t **diop)
{
	hammer2_io_putblk(diop);
}

void
hammer2_io_bqrelse(hammer2_io_t **diop)
{
	hammer2_io_putblk(diop);
}

int
hammer2_io_isdirty(hammer2_io_t *dio)
{
	return((dio->refs & HAMMER2_DIO_DIRTY) != 0);
}