kernel - Cluster fixes + Enable clustering for HAMMER1
[dragonfly.git] / sys / kern / vfs_cluster.c
/*-
 * Copyright (c) 1993
 *	The Regents of the University of California.  All rights reserved.
 * Modifications/enhancements:
 * 	Copyright (c) 1995 John S. Dyson.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)vfs_cluster.c	8.7 (Berkeley) 2/13/94
 * $FreeBSD: src/sys/kern/vfs_cluster.c,v 1.92.2.9 2001/11/18 07:10:59 dillon Exp $
 * $DragonFly: src/sys/kern/vfs_cluster.c,v 1.40 2008/07/14 03:09:00 dillon Exp $
 */

#include "opt_debug_cluster.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/resourcevar.h>
#include <sys/vmmeter.h>
#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <sys/sysctl.h>

#include <sys/buf2.h>
#include <vm/vm_page2.h>

#include <machine/limits.h>

#if defined(CLUSTERDEBUG)
#include <sys/sysctl.h>
static int rcluster= 0;
SYSCTL_INT(_debug, OID_AUTO, rcluster, CTLFLAG_RW, &rcluster, 0, "");
#endif

static MALLOC_DEFINE(M_SEGMENT, "cluster_save", "cluster_save buffer");

static struct cluster_save *
	cluster_collectbufs (struct vnode *vp, struct buf *last_bp,
			    int blksize);
static struct buf *
	cluster_rbuild (struct vnode *vp, off_t filesize, off_t loffset,
			off_t doffset, int blksize, int run,
			struct buf *fbp);
static void cluster_callback (struct bio *);
static void cluster_setram (struct buf *);
static int cluster_wbuild(struct vnode *vp, struct buf **bpp, int blksize,
			    off_t start_loffset, int bytes);

static int write_behind = 1;
SYSCTL_INT(_vfs, OID_AUTO, write_behind, CTLFLAG_RW, &write_behind, 0,
    "Cluster write-behind setting");
static int max_readahead = 2 * 1024 * 1024;
SYSCTL_INT(_vfs, OID_AUTO, max_readahead, CTLFLAG_RW, &max_readahead, 0,
    "Limit in bytes for desired cluster read-ahead");

extern vm_page_t bogus_page;

extern int cluster_pbuf_freecnt;

/*
 * This replaces bread.
 *
 * filesize	- read-ahead @ blksize will not cross this boundary
 * loffset	- loffset for returned *bpp
 * blksize	- blocksize for returned *bpp and read-ahead bps
 * minreq	- minimum (not a hard minimum) in bytes, typically reflects
 *		  a higher level uio resid.
 * maxreq	- maximum (sequential heuristic) in bytes (highest typ ~2MB)
 * bpp		- return buffer (*bpp) for (loffset,blksize)
 */
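/*
 * Illustrative sketch only (not part of the original source): a
 * filesystem read path might drive this interface roughly as follows,
 * where "eof", "blksize", "seqcount" and the uio are hypothetical
 * caller state.  On success the caller copies data out of (*bpp)->b_data
 * and then releases the buffer.
 *
 *	struct buf *bp = NULL;
 *	int error;
 *
 *	error = cluster_readx(vp, eof, loffset, blksize,
 *			      uio->uio_resid, seqcount * MAXBSIZE, &bp);
 */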
int
cluster_readx(struct vnode *vp, off_t filesize, off_t loffset,
	     int blksize, size_t minreq, size_t maxreq, struct buf **bpp)
{
	struct buf *bp, *rbp, *reqbp;
	off_t origoffset;
	off_t doffset;
	int error;
	int i;
	int maxra;
	int maxrbuild;

	error = 0;

	/*
	 * Calculate the desired read-ahead in blksize'd blocks (maxra).
	 * To do this we calculate maxreq.
	 *
	 * maxreq typically starts out as a sequential heuristic.  If the
	 * high level uio/resid is bigger (minreq), we pop maxreq up to
	 * minreq.  This represents the case where random I/O is being
	 * performed by userland issuing big read()'s.
	 *
	 * Then we limit maxreq to max_readahead to ensure it is a reasonable
	 * value.
	 *
	 * Finally we must ensure that (loffset + maxreq) does not cross the
	 * boundary (filesize) for the current blocksize.  If we allowed it
	 * to cross we could end up with buffers past the boundary with the
	 * wrong block size (HAMMER large-data areas use mixed block sizes).
	 * minreq is also absolutely limited to filesize.
	 */
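	/*
	 * Worked example (illustrative, not from the original comment):
	 * with the 2MB max_readahead default and a 16KB blksize, a large
	 * sequential read yields maxreq = 2MB and therefore
	 * maxra = 2MB / 16KB = 128 read-ahead blocks per pass.
	 */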
	if (maxreq < minreq)
		maxreq = minreq;
	/* minreq not used beyond this point */

	if (maxreq > max_readahead) {
		maxreq = max_readahead;
		if (maxreq > 16 * 1024 * 1024)
			maxreq = 16 * 1024 * 1024;
	}
	if (maxreq < blksize)
		maxreq = blksize;
	if (loffset + maxreq > filesize) {
		if (loffset > filesize)
			maxreq = 0;
		else
			maxreq = filesize - loffset;
	}

	maxra = (int)(maxreq / blksize);

	/*
	 * Get the requested block.
	 */
	if (*bpp)
		reqbp = bp = *bpp;
	else
		*bpp = reqbp = bp = getblk(vp, loffset, blksize, 0, 0);
	origoffset = loffset;

	/*
	 * Calculate the maximum cluster size for a single I/O, used
	 * by cluster_rbuild().
	 */
	maxrbuild = vmaxiosize(vp) / blksize;

	/*
	 * if it is in the cache, then check to see if the reads have been
	 * sequential.  If they have, then try some read-ahead, otherwise
	 * back-off on prospective read-aheads.
	 */
	if (bp->b_flags & B_CACHE) {
		/*
		 * Not sequential, do not do any read-ahead
		 */
		if (maxra <= 1)
			return 0;

		/*
		 * No read-ahead mark, do not do any read-ahead
		 * yet.
		 */
		if ((bp->b_flags & B_RAM) == 0)
			return 0;

		/*
		 * We hit a read-ahead-mark, figure out how much read-ahead
		 * to do (maxra) and where to start (loffset).
		 *
		 * Shortcut the scan.  Typically the way this works is that
		 * we've built up all the blocks inbetween except for the
		 * last in previous iterations, so if the second-to-last
		 * block is present we just skip ahead to it.
		 *
		 * This algorithm has O(1) cpu in the steady state no
		 * matter how large maxra is.
		 */
		bp->b_flags &= ~B_RAM;

		if (findblk(vp, loffset + (maxra - 2) * blksize, FINDBLK_TEST))
			i = maxra - 1;
		else
			i = 1;
		while (i < maxra) {
			if (findblk(vp, loffset + i * blksize,
				    FINDBLK_TEST) == NULL) {
				break;
			}
			++i;
		}

		/*
		 * We got everything or everything is in the cache, no
		 * point continuing.
		 */
		if (i >= maxra)
			return 0;
		maxra -= i;
		loffset += i * blksize;
		reqbp = bp = NULL;
	} else {
		__debugvar off_t firstread = bp->b_loffset;
		int nblks;

		/*
		 * Set-up synchronous read for bp.
		 */
		bp->b_cmd = BUF_CMD_READ;
		bp->b_bio1.bio_done = biodone_sync;
		bp->b_bio1.bio_flags |= BIO_SYNC;

		KASSERT(firstread != NOOFFSET,
			("cluster_read: no buffer offset"));

		/*
		 * nblks is our cluster_rbuild request size, limited
		 * primarily by the device.
		 */
		if ((nblks = maxra) > maxrbuild)
			nblks = maxrbuild;

		if (nblks > 1) {
			int burstbytes;

			error = VOP_BMAP(vp, loffset, &doffset,
					 &burstbytes, NULL, BUF_CMD_READ);
			if (error)
				goto single_block_read;
			if (nblks > burstbytes / blksize)
				nblks = burstbytes / blksize;
			if (doffset == NOOFFSET)
				goto single_block_read;
			if (nblks <= 1)
				goto single_block_read;

			bp = cluster_rbuild(vp, filesize, loffset,
					    doffset, blksize, nblks, bp);
			loffset += bp->b_bufsize;
			maxra -= bp->b_bufsize / blksize;
		} else {
single_block_read:
			/*
			 * If it isn't in the cache, then get a chunk from
			 * disk if sequential, otherwise just get the block.
			 */
			cluster_setram(bp);
			loffset += blksize;
			--maxra;
		}
	}

	/*
	 * If B_CACHE was not set issue bp.  bp will either be an
	 * asynchronous cluster buf or a synchronous single-buf.
	 * If it is a single buf it will be the same as reqbp.
	 *
	 * NOTE: Once an async cluster buf is issued bp becomes invalid.
	 */
	if (bp) {
#if defined(CLUSTERDEBUG)
		if (rcluster)
			kprintf("S(%012jx,%d,%d)\n",
			    (intmax_t)bp->b_loffset, bp->b_bcount, maxra);
#endif
		if ((bp->b_flags & B_CLUSTER) == 0)
			vfs_busy_pages(vp, bp);
		bp->b_flags &= ~(B_ERROR|B_INVAL);
		vn_strategy(vp, &bp->b_bio1);
		error = 0;
		/* bp invalid now */
	}

	/*
	 * If we have been doing sequential I/O, then do some read-ahead.
	 * The code above us should have positioned us at the next likely
	 * offset.
	 *
	 * Only mess with buffers which we can immediately lock.  HAMMER
	 * will do device-readahead irrespective of what the blocks
	 * represent.
	 */
	while (error == 0 && maxra > 0) {
		int burstbytes;
		int tmp_error;
		int nblks;

		rbp = getblk(vp, loffset, blksize,
			     GETBLK_SZMATCH|GETBLK_NOWAIT, 0);
		if (rbp == NULL)
			goto no_read_ahead;
		if ((rbp->b_flags & B_CACHE)) {
			bqrelse(rbp);
			goto no_read_ahead;
		}

		/*
		 * An error from the read-ahead bmap has nothing to do
		 * with the caller's original request.
		 */
		tmp_error = VOP_BMAP(vp, loffset, &doffset,
				     &burstbytes, NULL, BUF_CMD_READ);
		if (tmp_error || doffset == NOOFFSET) {
			rbp->b_flags |= B_INVAL;
			brelse(rbp);
			rbp = NULL;
			goto no_read_ahead;
		}
		if ((nblks = maxra) > maxrbuild)
			nblks = maxrbuild;
		if (nblks > burstbytes / blksize)
			nblks = burstbytes / blksize;

		/*
		 * rbp: async read
		 */
		rbp->b_cmd = BUF_CMD_READ;
		/*rbp->b_flags |= B_AGE*/;
		cluster_setram(rbp);

		if (nblks > 1) {
			rbp = cluster_rbuild(vp, filesize, loffset,
					     doffset, blksize,
					     nblks, rbp);
		} else {
			rbp->b_bio2.bio_offset = doffset;
		}

#if defined(CLUSTERDEBUG)
		if (rcluster) {
			if (bp) {
				kprintf("A+(%012jx,%d,%jd) "
					"doff=%012jx minr=%zd ra=%d\n",
				    (intmax_t)loffset, rbp->b_bcount,
				    (intmax_t)(loffset - origoffset),
				    (intmax_t)doffset, minreq, maxra);
			} else {
				kprintf("A-(%012jx,%d,%jd) "
					"doff=%012jx minr=%zd ra=%d\n",
				    (intmax_t)rbp->b_loffset, rbp->b_bcount,
				    (intmax_t)(loffset - origoffset),
				    (intmax_t)doffset, minreq, maxra);
			}
		}
#endif
		rbp->b_flags &= ~(B_ERROR|B_INVAL);

		if ((rbp->b_flags & B_CLUSTER) == 0)
			vfs_busy_pages(vp, rbp);
		BUF_KERNPROC(rbp);
		loffset += rbp->b_bufsize;
		maxra -= rbp->b_bufsize / blksize;
		vn_strategy(vp, &rbp->b_bio1);
		/* rbp invalid now */
	}

	/*
	 * Wait for our original buffer to complete its I/O.  reqbp will
	 * be NULL if the original buffer was B_CACHE.  We are returning
	 * (*bpp) which is the same as reqbp when reqbp != NULL.
	 */
no_read_ahead:
	if (reqbp) {
		KKASSERT(reqbp->b_bio1.bio_flags & BIO_SYNC);
		error = biowait(&reqbp->b_bio1, "clurd");
	}
	return (error);
}

/*
 * If blocks are contiguous on disk, use this to provide clustered
 * read ahead.  We will read as many blocks as possible sequentially
 * and then parcel them up into logical blocks in the buffer hash table.
 *
 * This function either returns a cluster buf or it returns fbp.  fbp is
 * already expected to be set up as a synchronous or asynchronous request.
 *
 * If a cluster buf is returned it will always be async.
 */
static struct buf *
cluster_rbuild(struct vnode *vp, off_t filesize, off_t loffset, off_t doffset,
	       int blksize, int run, struct buf *fbp)
{
	struct buf *bp, *tbp;
	off_t boffset;
	int i, j;
	int maxiosize = vmaxiosize(vp);

	/*
	 * avoid a division
	 */
	while (loffset + run * blksize > filesize) {
		--run;
	}

	tbp = fbp;
	tbp->b_bio2.bio_offset = doffset;
	if((tbp->b_flags & B_MALLOC) ||
	    ((tbp->b_flags & B_VMIO) == 0) || (run <= 1)) {
		return tbp;
	}

	bp = trypbuf_kva(&cluster_pbuf_freecnt);
	if (bp == NULL) {
		return tbp;
	}

	/*
	 * We are synthesizing a buffer out of vm_page_t's, but
	 * if the block size is not page aligned then the starting
	 * address may not be either.  Inherit the b_data offset
	 * from the original buffer.
	 */
	bp->b_data = (char *)((vm_offset_t)bp->b_data |
	    ((vm_offset_t)tbp->b_data & PAGE_MASK));
	bp->b_flags |= B_CLUSTER | B_VMIO;
	bp->b_cmd = BUF_CMD_READ;
	bp->b_bio1.bio_done = cluster_callback;		/* default to async */
	bp->b_bio1.bio_caller_info1.cluster_head = NULL;
	bp->b_bio1.bio_caller_info2.cluster_tail = NULL;
	bp->b_loffset = loffset;
	bp->b_bio2.bio_offset = doffset;
	KASSERT(bp->b_loffset != NOOFFSET,
		("cluster_rbuild: no buffer offset"));

	bp->b_bcount = 0;
	bp->b_bufsize = 0;
	bp->b_xio.xio_npages = 0;

	for (boffset = doffset, i = 0; i < run; ++i, boffset += blksize) {
		if (i) {
			if ((bp->b_xio.xio_npages * PAGE_SIZE) +
			    round_page(blksize) > maxiosize) {
				break;
			}

			/*
			 * Shortcut some checks and try to avoid buffers that
			 * would block in the lock.  The same checks have to
			 * be made again after we officially get the buffer.
			 */
			tbp = getblk(vp, loffset + i * blksize, blksize,
				     GETBLK_SZMATCH|GETBLK_NOWAIT, 0);
			if (tbp == NULL)
				break;
			for (j = 0; j < tbp->b_xio.xio_npages; j++) {
				if (tbp->b_xio.xio_pages[j]->valid)
					break;
			}
			if (j != tbp->b_xio.xio_npages) {
				bqrelse(tbp);
				break;
			}

			/*
			 * Stop scanning if the buffer is fully valid
			 * (marked B_CACHE), or locked (may be doing a
			 * background write), or if the buffer is not
			 * VMIO backed.  The clustering code can only deal
			 * with VMIO-backed buffers.
			 */
			if ((tbp->b_flags & (B_CACHE|B_LOCKED)) ||
			    (tbp->b_flags & B_VMIO) == 0 ||
			    (LIST_FIRST(&tbp->b_dep) != NULL &&
			     buf_checkread(tbp))
			) {
				bqrelse(tbp);
				break;
			}

			/*
			 * The buffer must be completely invalid in order to
			 * take part in the cluster.  If it is partially valid
			 * then we stop.
			 */
			for (j = 0;j < tbp->b_xio.xio_npages; j++) {
				if (tbp->b_xio.xio_pages[j]->valid)
					break;
			}
			if (j != tbp->b_xio.xio_npages) {
				bqrelse(tbp);
				break;
			}

			/*
			 * Set a read-ahead mark as appropriate
			 */
			if (i == 1 || i == (run - 1))
				cluster_setram(tbp);

			/*
			 * Depress the priority of buffers not explicitly
			 * requested.
			 */
			/* tbp->b_flags |= B_AGE; */

			/*
			 * Set the block number if it isn't set, otherwise
			 * if it is make sure it matches the block number we
			 * expect.
			 */
			if (tbp->b_bio2.bio_offset == NOOFFSET) {
				tbp->b_bio2.bio_offset = boffset;
			} else if (tbp->b_bio2.bio_offset != boffset) {
				brelse(tbp);
				break;
			}
		}

		/*
		 * The passed-in tbp (i == 0) will already be set up for
		 * async or sync operation.  All other tbp's acquired in
		 * our loop are set up for async operation.
		 */
		tbp->b_cmd = BUF_CMD_READ;
		BUF_KERNPROC(tbp);
		cluster_append(&bp->b_bio1, tbp);
		for (j = 0; j < tbp->b_xio.xio_npages; ++j) {
			vm_page_t m;

			m = tbp->b_xio.xio_pages[j];
			vm_page_busy_wait(m, FALSE, "clurpg");
			vm_page_io_start(m);
			vm_page_wakeup(m);
			vm_object_pip_add(m->object, 1);
			if ((bp->b_xio.xio_npages == 0) ||
			    (bp->b_xio.xio_pages[bp->b_xio.xio_npages-1] != m)) {
				bp->b_xio.xio_pages[bp->b_xio.xio_npages] = m;
				bp->b_xio.xio_npages++;
			}
			if ((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL)
				tbp->b_xio.xio_pages[j] = bogus_page;
		}
		/*
		 * XXX shouldn't this be += size for both, like in
		 * cluster_wbuild()?
		 *
		 * Don't inherit tbp->b_bufsize as it may be larger due to
		 * a non-page-aligned size.  Instead just aggregate using
		 * 'size'.
		 */
		if (tbp->b_bcount != blksize)
			kprintf("warning: tbp->b_bcount wrong %d vs %d\n", tbp->b_bcount, blksize);
		if (tbp->b_bufsize != blksize)
			kprintf("warning: tbp->b_bufsize wrong %d vs %d\n", tbp->b_bufsize, blksize);
		bp->b_bcount += blksize;
		bp->b_bufsize += blksize;
	}

	/*
	 * Fully valid pages in the cluster are already good and do not need
	 * to be re-read from disk.  Replace the page with bogus_page
	 */
	for (j = 0; j < bp->b_xio.xio_npages; j++) {
		if ((bp->b_xio.xio_pages[j]->valid & VM_PAGE_BITS_ALL) ==
		    VM_PAGE_BITS_ALL) {
			bp->b_xio.xio_pages[j] = bogus_page;
		}
	}
	if (bp->b_bufsize > bp->b_kvasize) {
		panic("cluster_rbuild: b_bufsize(%d) > b_kvasize(%d)",
		      bp->b_bufsize, bp->b_kvasize);
	}
	pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
		    (vm_page_t *)bp->b_xio.xio_pages, bp->b_xio.xio_npages);
	BUF_KERNPROC(bp);
	return (bp);
}

/*
 * Cleanup after a clustered read or write.
 * This is complicated by the fact that any of the buffers might have
 * extra memory (if there were no empty buffer headers at allocbuf time)
 * that we will need to shift around.
 *
 * The returned bio is &bp->b_bio1
 */
void
cluster_callback(struct bio *bio)
{
	struct buf *bp = bio->bio_buf;
	struct buf *tbp;
	int error = 0;

	/*
	 * Must propagate errors to all the components.  A short read (EOF)
	 * is a critical error.
	 */
	if (bp->b_flags & B_ERROR) {
		error = bp->b_error;
	} else if (bp->b_bcount != bp->b_bufsize) {
		panic("cluster_callback: unexpected EOF on cluster %p!", bio);
	}

	pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_xio.xio_npages);
	/*
	 * Move memory from the large cluster buffer into the component
	 * buffers and mark IO as done on these.  Since the memory map
	 * is the same, no actual copying is required.
	 */
	while ((tbp = bio->bio_caller_info1.cluster_head) != NULL) {
		bio->bio_caller_info1.cluster_head = tbp->b_cluster_next;
		if (error) {
			tbp->b_flags |= B_ERROR | B_IODEBUG;
			tbp->b_error = error;
		} else {
			tbp->b_dirtyoff = tbp->b_dirtyend = 0;
			tbp->b_flags &= ~(B_ERROR|B_INVAL);
			tbp->b_flags |= B_IODEBUG;
			/*
			 * XXX the bdwrite()/bqrelse() issued during
			 * cluster building clears B_RELBUF (see bqrelse()
			 * comment).  If direct I/O was specified, we have
			 * to restore it here to allow the buffer and VM
			 * to be freed.
			 */
			if (tbp->b_flags & B_DIRECT)
				tbp->b_flags |= B_RELBUF;
		}
		biodone(&tbp->b_bio1);
	}
	relpbuf(bp, &cluster_pbuf_freecnt);
}

/*
 * cluster_wbuild_wb:
 *
 *	Implement modified write build for cluster.
 *
 *	write_behind = 0	write behind disabled
 *	write_behind = 1	write behind normal (default)
 *	write_behind = 2	write behind backed-off
 */
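
/*
 * Example (illustrative): because write_behind is exported through
 * SYSCTL_INT() above, the policy can be changed at runtime, e.g.
 *
 *	sysctl vfs.write_behind=2
 *
 * selects the backed-off mode without a kernel rebuild.
 */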

static __inline int
cluster_wbuild_wb(struct vnode *vp, int blksize, off_t start_loffset, int len)
{
	int r = 0;

	switch(write_behind) {
	case 2:
		if (start_loffset < len)
			break;
		start_loffset -= len;
		/* fall through */
	case 1:
		r = cluster_wbuild(vp, NULL, blksize, start_loffset, len);
		/* fall through */
	default:
		/* fall through */
		break;
	}
	return(r);
}

/*
 * Do clustered write for FFS.
 *
 * Three cases:
 *	1. Write is not sequential (write asynchronously)
 *	Write is sequential:
 *	2.	beginning of cluster - begin cluster
 *	3.	middle of a cluster - add to cluster
 *	4.	end of a cluster - asynchronously write cluster
 */
void
cluster_write(struct buf *bp, off_t filesize, int blksize, int seqcount)
{
	struct vnode *vp;
	off_t loffset;
	int maxclen, cursize;
	int async;

	vp = bp->b_vp;
	if (vp->v_type == VREG)
		async = vp->v_mount->mnt_flag & MNT_ASYNC;
	else
		async = 0;
	loffset = bp->b_loffset;
	KASSERT(bp->b_loffset != NOOFFSET,
		("cluster_write: no buffer offset"));

	/* Initialize vnode to beginning of file. */
	if (loffset == 0)
		vp->v_lasta = vp->v_clen = vp->v_cstart = vp->v_lastw = 0;

	if (vp->v_clen == 0 || loffset != vp->v_lastw + blksize ||
	    bp->b_bio2.bio_offset == NOOFFSET ||
	    (bp->b_bio2.bio_offset != vp->v_lasta + blksize)) {
		maxclen = vmaxiosize(vp);
		if (vp->v_clen != 0) {
			/*
			 * Next block is not sequential.
			 *
			 * If we are not writing at end of file, the process
			 * seeked to another point in the file since its last
			 * write, or we have reached our maximum cluster size,
			 * then push the previous cluster.  Otherwise try
			 * reallocating to make it sequential.
			 *
			 * Change to algorithm: only push previous cluster if
			 * it was sequential from the point of view of the
			 * seqcount heuristic, otherwise leave the buffer
			 * intact so we can potentially optimize the I/O
			 * later on in the buf_daemon or update daemon
			 * flush.
			 */
			cursize = vp->v_lastw - vp->v_cstart + blksize;
			if (bp->b_loffset + blksize < filesize ||
			    loffset != vp->v_lastw + blksize || vp->v_clen <= cursize) {
				if (!async && seqcount > 0) {
					cluster_wbuild_wb(vp, blksize,
						vp->v_cstart, cursize);
				}
			} else {
				struct buf **bpp, **endbp;
				struct cluster_save *buflist;

				buflist = cluster_collectbufs(vp, bp, blksize);
				endbp = &buflist->bs_children
				    [buflist->bs_nchildren - 1];
				if (VOP_REALLOCBLKS(vp, buflist)) {
					/*
					 * Failed, push the previous cluster
					 * if *really* writing sequentially
					 * in the logical file (seqcount > 1),
					 * otherwise delay it in the hopes that
					 * the low level disk driver can
					 * optimize the write ordering.
					 */
					for (bpp = buflist->bs_children;
					     bpp < endbp; bpp++)
						brelse(*bpp);
					kfree(buflist, M_SEGMENT);
					if (seqcount > 1) {
						cluster_wbuild_wb(vp,
						    blksize, vp->v_cstart,
						    cursize);
					}
				} else {
					/*
					 * Succeeded, keep building cluster.
					 */
					for (bpp = buflist->bs_children;
					     bpp <= endbp; bpp++)
						bdwrite(*bpp);
					kfree(buflist, M_SEGMENT);
					vp->v_lastw = loffset;
					vp->v_lasta = bp->b_bio2.bio_offset;
					return;
				}
			}
		}
		/*
		 * Consider beginning a cluster.  If at end of file, make
		 * cluster as large as possible, otherwise find size of
		 * existing cluster.
		 */
		if ((vp->v_type == VREG) &&
		    bp->b_loffset + blksize < filesize &&
		    (bp->b_bio2.bio_offset == NOOFFSET) &&
		    (VOP_BMAP(vp, loffset, &bp->b_bio2.bio_offset, &maxclen, NULL, BUF_CMD_WRITE) ||
		     bp->b_bio2.bio_offset == NOOFFSET)) {
			bawrite(bp);
			vp->v_clen = 0;
			vp->v_lasta = bp->b_bio2.bio_offset;
			vp->v_cstart = loffset + blksize;
			vp->v_lastw = loffset;
			return;
		}
		if (maxclen > blksize)
			vp->v_clen = maxclen - blksize;
		else
			vp->v_clen = 0;
		if (!async && vp->v_clen == 0) { /* I/O not contiguous */
			vp->v_cstart = loffset + blksize;
			bawrite(bp);
		} else {	/* Wait for rest of cluster */
			vp->v_cstart = loffset;
			bdwrite(bp);
		}
	} else if (loffset == vp->v_cstart + vp->v_clen) {
		/*
		 * At end of cluster, write it out if seqcount tells us we
		 * are operating sequentially, otherwise let the buf or
		 * update daemon handle it.
		 */
		bdwrite(bp);
		if (seqcount > 1)
			cluster_wbuild_wb(vp, blksize, vp->v_cstart,
					  vp->v_clen + blksize);
		vp->v_clen = 0;
		vp->v_cstart = loffset + blksize;
	} else if (vm_page_count_severe()) {
		/*
		 * We are low on memory, get it going NOW
		 */
		bawrite(bp);
	} else {
		/*
		 * In the middle of a cluster, so just delay the I/O for now.
		 */
		bdwrite(bp);
	}
	vp->v_lastw = loffset;
	vp->v_lasta = bp->b_bio2.bio_offset;
}

/*
 * This is the clustered version of bawrite().  It works similarly to
 * cluster_write() except I/O on the buffer is guaranteed to occur.
 */
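/*
 * Illustrative use (hypothetical caller, not from the original source):
 * a flush path holding a dirty, clusterable buffer can substitute
 *
 *	bytes = cluster_awrite(bp);
 *
 * for bawrite(bp) to get write clustering while still guaranteeing that
 * the buffer itself is issued; the return value is the number of bytes
 * of I/O initiated.
 */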
int
cluster_awrite(struct buf *bp)
{
	int total;

	/*
	 * Don't bother if it isn't clusterable.
	 */
	if ((bp->b_flags & B_CLUSTEROK) == 0 ||
	    bp->b_vp == NULL ||
	    (bp->b_vp->v_flag & VOBJBUF) == 0) {
		total = bp->b_bufsize;
		bawrite(bp);
		return (total);
	}

	total = cluster_wbuild(bp->b_vp, &bp, bp->b_bufsize,
			       bp->b_loffset, vmaxiosize(bp->b_vp));
	if (bp)
		bawrite(bp);

	return total;
}

/*
 * This is an awful lot like cluster_rbuild...wish they could be combined.
 * The last lbn argument is the current block on which I/O is being
 * performed.  Check to see that it doesn't fall in the middle of
 * the current block (if last_bp == NULL).
 *
 * cluster_wbuild() normally does not guarantee anything.  If bpp is
 * non-NULL and cluster_wbuild() is able to incorporate it into the
 * I/O it will set *bpp to NULL, otherwise it will leave it alone and
 * the caller must dispose of *bpp.
 */
static int
cluster_wbuild(struct vnode *vp, struct buf **bpp,
	       int blksize, off_t start_loffset, int bytes)
{
	struct buf *bp, *tbp;
	int i, j;
	int totalwritten = 0;
	int must_initiate;
	int maxiosize = vmaxiosize(vp);

	while (bytes > 0) {
		/*
		 * If the buffer matches the passed locked & removed buffer
		 * we use the passed buffer (which might not be B_DELWRI).
		 *
		 * Otherwise locate the buffer and determine if it is
		 * compatible.
		 */
		if (bpp && (*bpp)->b_loffset == start_loffset) {
			tbp = *bpp;
			*bpp = NULL;
			bpp = NULL;
		} else {
			tbp = findblk(vp, start_loffset, FINDBLK_NBLOCK);
			if (tbp == NULL ||
			    (tbp->b_flags & (B_LOCKED | B_INVAL | B_DELWRI)) !=
			     B_DELWRI ||
			    (LIST_FIRST(&tbp->b_dep) && buf_checkwrite(tbp))) {
				if (tbp)
					BUF_UNLOCK(tbp);
				start_loffset += blksize;
				bytes -= blksize;
				continue;
			}
			bremfree(tbp);
		}
		KKASSERT(tbp->b_cmd == BUF_CMD_DONE);

		/*
		 * Extra memory in the buffer, punt on this buffer.
		 * XXX we could handle this in most cases, but we would
		 * have to push the extra memory down to after our max
		 * possible cluster size and then potentially pull it back
		 * up if the cluster was terminated prematurely--too much
		 * hassle.
		 */
		if (((tbp->b_flags & (B_CLUSTEROK|B_MALLOC)) != B_CLUSTEROK) ||
		    (tbp->b_bcount != tbp->b_bufsize) ||
		    (tbp->b_bcount != blksize) ||
		    (bytes == blksize) ||
		    ((bp = getpbuf_kva(&cluster_pbuf_freecnt)) == NULL)) {
			totalwritten += tbp->b_bufsize;
			bawrite(tbp);
			start_loffset += blksize;
			bytes -= blksize;
			continue;
		}

		/*
		 * Set up the pbuf.  Track our append point with b_bcount
		 * and b_bufsize.  b_bufsize is not used by the device but
		 * our caller uses it to loop clusters and we use it to
		 * detect a premature EOF on the block device.
		 */
		bp->b_bcount = 0;
		bp->b_bufsize = 0;
		bp->b_xio.xio_npages = 0;
		bp->b_loffset = tbp->b_loffset;
		bp->b_bio2.bio_offset = tbp->b_bio2.bio_offset;

		/*
		 * We are synthesizing a buffer out of vm_page_t's, but
		 * if the block size is not page aligned then the starting
		 * address may not be either.  Inherit the b_data offset
		 * from the original buffer.
		 */
		bp->b_data = (char *)((vm_offset_t)bp->b_data |
		    ((vm_offset_t)tbp->b_data & PAGE_MASK));
		bp->b_flags &= ~B_ERROR;
		bp->b_flags |= B_CLUSTER | B_BNOCLIP |
			(tbp->b_flags & (B_VMIO | B_NEEDCOMMIT));
		bp->b_bio1.bio_caller_info1.cluster_head = NULL;
		bp->b_bio1.bio_caller_info2.cluster_tail = NULL;

		/*
		 * From this location in the file, scan forward to see
		 * if there are buffers with adjacent data that need to
		 * be written as well.
		 *
		 * IO *must* be initiated on index 0 at this point
		 * (particularly when called from cluster_awrite()).
		 */
		for (i = 0; i < bytes; (i += blksize), (start_loffset += blksize)) {
			if (i == 0) {
				must_initiate = 1;
			} else {
				/*
				 * Not first buffer.
				 */
				must_initiate = 0;
				tbp = findblk(vp, start_loffset,
					      FINDBLK_NBLOCK);
				/*
				 * Buffer not found or could not be locked
				 * non-blocking.
				 */
				if (tbp == NULL)
					break;

				/*
				 * If it IS in core, but has different
				 * characteristics, then don't cluster
				 * with it.
				 */
				if ((tbp->b_flags & (B_VMIO | B_CLUSTEROK |
				     B_INVAL | B_DELWRI | B_NEEDCOMMIT))
				    != (B_DELWRI | B_CLUSTEROK |
				     (bp->b_flags & (B_VMIO | B_NEEDCOMMIT))) ||
				    (tbp->b_flags & B_LOCKED)
				) {
					BUF_UNLOCK(tbp);
					break;
				}

				/*
				 * Check that the combined cluster
				 * would make sense with regard to pages
				 * and would not be too large
				 *
				 * WARNING! buf_checkwrite() must be the last
				 *	    check made.  If it returns 0 then
				 *	    we must initiate the I/O.
				 */
				if ((tbp->b_bcount != blksize) ||
				  ((bp->b_bio2.bio_offset + i) !=
				    tbp->b_bio2.bio_offset) ||
				  ((tbp->b_xio.xio_npages + bp->b_xio.xio_npages) >
				    (maxiosize / PAGE_SIZE)) ||
				  (LIST_FIRST(&tbp->b_dep) &&
				   buf_checkwrite(tbp))
				) {
					BUF_UNLOCK(tbp);
					break;
				}
				if (LIST_FIRST(&tbp->b_dep))
					must_initiate = 1;
				/*
				 * Ok, it's passed all the tests,
				 * so remove it from the free list
				 * and mark it busy. We will use it.
				 */
				bremfree(tbp);
				KKASSERT(tbp->b_cmd == BUF_CMD_DONE);
			}

			/*
			 * If the IO is via the VM then we do some
			 * special VM hackery (yuck).  Since the buffer's
			 * block size may not be page-aligned it is possible
			 * for a page to be shared between two buffers.  We
			 * have to get rid of the duplication when building
			 * the cluster.
			 */
			if (tbp->b_flags & B_VMIO) {
				vm_page_t m;

				/*
				 * Try to avoid deadlocks with the VM system.
				 * However, we cannot abort the I/O if
				 * must_initiate is non-zero.
				 */
				if (must_initiate == 0) {
					for (j = 0;
					     j < tbp->b_xio.xio_npages;
					     ++j) {
						m = tbp->b_xio.xio_pages[j];
						if (m->flags & PG_BUSY) {
							bqrelse(tbp);
							goto finishcluster;
						}
					}
				}

				for (j = 0; j < tbp->b_xio.xio_npages; ++j) {
					m = tbp->b_xio.xio_pages[j];
					vm_page_busy_wait(m, FALSE, "clurpg");
					vm_page_io_start(m);
					vm_page_wakeup(m);
					vm_object_pip_add(m->object, 1);
					if ((bp->b_xio.xio_npages == 0) ||
					    (bp->b_xio.xio_pages[bp->b_xio.xio_npages - 1] != m)) {
						bp->b_xio.xio_pages[bp->b_xio.xio_npages] = m;
						bp->b_xio.xio_npages++;
					}
				}
			}
			bp->b_bcount += blksize;
			bp->b_bufsize += blksize;

			bundirty(tbp);
			tbp->b_flags &= ~B_ERROR;
			tbp->b_cmd = BUF_CMD_WRITE;
			BUF_KERNPROC(tbp);
			cluster_append(&bp->b_bio1, tbp);

			/*
			 * check for latent dependencies to be handled
			 */
			if (LIST_FIRST(&tbp->b_dep) != NULL)
				buf_start(tbp);
		}
	finishcluster:
		pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
			    (vm_page_t *)bp->b_xio.xio_pages,
			    bp->b_xio.xio_npages);
		if (bp->b_bufsize > bp->b_kvasize) {
			panic("cluster_wbuild: b_bufsize(%d) "
			      "> b_kvasize(%d)\n",
			      bp->b_bufsize, bp->b_kvasize);
		}
		totalwritten += bp->b_bufsize;
		bp->b_dirtyoff = 0;
		bp->b_dirtyend = bp->b_bufsize;
		bp->b_bio1.bio_done = cluster_callback;
		bp->b_cmd = BUF_CMD_WRITE;

		vfs_busy_pages(vp, bp);
		bsetrunningbufspace(bp, bp->b_bufsize);
		BUF_KERNPROC(bp);
		vn_strategy(vp, &bp->b_bio1);

		bytes -= i;
	}
	return totalwritten;
}

/*
 * Collect together all the buffers in a cluster.
 * Plus add one additional buffer.
 */
static struct cluster_save *
cluster_collectbufs(struct vnode *vp, struct buf *last_bp, int blksize)
{
	struct cluster_save *buflist;
	struct buf *bp;
	off_t loffset;
	int i, len;

	len = (int)(vp->v_lastw - vp->v_cstart + blksize) / blksize;
	buflist = kmalloc(sizeof(struct buf *) * (len + 1) + sizeof(*buflist),
			  M_SEGMENT, M_WAITOK);
	buflist->bs_nchildren = 0;
	buflist->bs_children = (struct buf **) (buflist + 1);
	for (loffset = vp->v_cstart, i = 0; i < len; (loffset += blksize), i++) {
		(void) bread(vp, loffset, last_bp->b_bcount, &bp);
		buflist->bs_children[i] = bp;
		if (bp->b_bio2.bio_offset == NOOFFSET) {
			VOP_BMAP(bp->b_vp, bp->b_loffset,
				 &bp->b_bio2.bio_offset,
				 NULL, NULL, BUF_CMD_WRITE);
		}
	}
	buflist->bs_children[i] = bp = last_bp;
	if (bp->b_bio2.bio_offset == NOOFFSET) {
		VOP_BMAP(bp->b_vp, bp->b_loffset, &bp->b_bio2.bio_offset,
			 NULL, NULL, BUF_CMD_WRITE);
	}
	buflist->bs_nchildren = i + 1;
	return (buflist);
}
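
/*
 * Append tbp to the cluster list anchored in the bio's
 * caller_info1/caller_info2 fields (cluster_head / cluster_tail).
 */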
void
cluster_append(struct bio *bio, struct buf *tbp)
{
	tbp->b_cluster_next = NULL;
	if (bio->bio_caller_info1.cluster_head == NULL) {
		bio->bio_caller_info1.cluster_head = tbp;
		bio->bio_caller_info2.cluster_tail = tbp;
	} else {
		bio->bio_caller_info2.cluster_tail->b_cluster_next = tbp;
		bio->bio_caller_info2.cluster_tail = tbp;
	}
}
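
/*
 * Mark a buffer as a read-ahead trigger: set B_RAM on the buf and,
 * if it has backing pages, PG_RAM on its first page.
 */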
static
void
cluster_setram (struct buf *bp)
{
	bp->b_flags |= B_RAM;
	if (bp->b_xio.xio_npages)
		vm_page_flag_set(bp->b_xio.xio_pages[0], PG_RAM);
}