kernel - Fix degenerate cluster_write() cases
[dragonfly.git] / sys / kern / vfs_cluster.c
1/*-
2 * Copyright (c) 1993
3 * The Regents of the University of California. All rights reserved.
4 * Modifications/enhancements:
5 * Copyright (c) 1995 John S. Dyson. All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed by the University of
18 * California, Berkeley and its contributors.
19 * 4. Neither the name of the University nor the names of its contributors
20 * may be used to endorse or promote products derived from this software
21 * without specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
27 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 * SUCH DAMAGE.
34 *
35 * @(#)vfs_cluster.c 8.7 (Berkeley) 2/13/94
36 * $FreeBSD: src/sys/kern/vfs_cluster.c,v 1.92.2.9 2001/11/18 07:10:59 dillon Exp $
b77cfc40 37 * $DragonFly: src/sys/kern/vfs_cluster.c,v 1.40 2008/07/14 03:09:00 dillon Exp $
38 */
39
40#include "opt_debug_cluster.h"
41
42#include <sys/param.h>
43#include <sys/systm.h>
44#include <sys/kernel.h>
45#include <sys/proc.h>
46#include <sys/buf.h>
47#include <sys/vnode.h>
48#include <sys/malloc.h>
49#include <sys/mount.h>
50#include <sys/resourcevar.h>
51#include <sys/vmmeter.h>
52#include <vm/vm.h>
53#include <vm/vm_object.h>
54#include <vm/vm_page.h>
55#include <sys/sysctl.h>
54341a3b 56
3020e3be 57#include <sys/buf2.h>
12e4aaff 58#include <vm/vm_page2.h>
984263bc 59
60#include <machine/limits.h>
61
62#if defined(CLUSTERDEBUG)
63#include <sys/sysctl.h>
64static int rcluster = 0;
65SYSCTL_INT(_debug, OID_AUTO, rcluster, CTLFLAG_RW, &rcluster, 0, "");
66#endif
67
d1cd9d97 68static MALLOC_DEFINE(M_SEGMENT, "cluster_save", "cluster_save buffer");
69
70static struct cluster_save *
54078292 71 cluster_collectbufs (struct vnode *vp, struct buf *last_bp,
e92ca23a 72 int blksize);
984263bc 73static struct buf *
54078292 74 cluster_rbuild (struct vnode *vp, off_t filesize, off_t loffset,
e92ca23a 75 off_t doffset, int blksize, int run,
ae8e83e6 76 struct buf *fbp);
81b5c339 77static void cluster_callback (struct bio *);
cf1bb2a8 78static void cluster_setram (struct buf *);
79static int cluster_wbuild(struct vnode *vp, struct buf **bpp, int blksize,
80 off_t start_loffset, int bytes);
81
82static int write_behind = 1;
83SYSCTL_INT(_vfs, OID_AUTO, write_behind, CTLFLAG_RW, &write_behind, 0,
84 "Cluster write-behind setting");
85static quad_t write_behind_minfilesize = 10 * 1024 * 1024;
86SYSCTL_QUAD(_vfs, OID_AUTO, write_behind_minfilesize, CTLFLAG_RW,
87 &write_behind_minfilesize, 0, "Cluster write-behind minimum file size");
364c022c 88static int max_readahead = 2 * 1024 * 1024;
89SYSCTL_INT(_vfs, OID_AUTO, max_readahead, CTLFLAG_RW, &max_readahead, 0,
90 "Limit in bytes for desired cluster read-ahead");
91
92extern vm_page_t bogus_page;
93
94extern int cluster_pbuf_freecnt;
95
96/*
984263bc 97 * This replaces bread.
98 *
99 * filesize - read-ahead @ blksize will not cross this boundary
100 * loffset - loffset for returned *bpp
101 * blksize - blocksize for returned *bpp and read-ahead bps
102 * minreq - minimum (not a hard minimum) in bytes, typically reflects
103 * a higher level uio resid.
104 * maxreq - maximum (sequential heuristic) in bytes (highest typically ~2MB)
105 * bpp - return buffer (*bpp) for (loffset,blksize)
106 */
107int
54341a3b 108cluster_readx(struct vnode *vp, off_t filesize, off_t loffset,
364c022c 109 int blksize, size_t minreq, size_t maxreq, struct buf **bpp)
110{
111 struct buf *bp, *rbp, *reqbp;
112 off_t origoffset;
113 off_t doffset;
114 int error;
984263bc 115 int i;
116 int maxra;
117 int maxrbuild;
118
119 error = 0;
120
121 /*
122 * Calculate the desired read-ahead in blksize'd blocks (maxra).
123 * To do this we calculate maxreq.
6b84c93e 124 *
125 * maxreq typically starts out as a sequential heuristic. If the
126 * high level uio/resid is bigger (minreq), we pop maxreq up to
127 * minreq. This represents the case where random I/O is being
128 * performed by userland issuing big read()'s.
6b84c93e 129 *
130 * Then we limit maxreq to max_readahead to ensure it is a reasonable
131 * value.
132 *
b28ad496 133 * Finally we must ensure that (loffset + maxreq) does not cross the
134 * boundary (filesize) for the current blocksize. If we allowed it
135 * to cross we could end up with buffers past the boundary with the
136 * wrong block size (HAMMER large-data areas use mixed block sizes).
b28ad496 137 * minreq is also absolutely limited to filesize.
984263bc 138 */
139 if (maxreq < minreq)
140 maxreq = minreq;
141 /* minreq not used beyond this point */
142
143 if (maxreq > max_readahead) {
144 maxreq = max_readahead;
145 if (maxreq > 16 * 1024 * 1024)
146 maxreq = 16 * 1024 * 1024;
147 }
148 if (maxreq < blksize)
149 maxreq = blksize;
150 if (loffset + maxreq > filesize) {
151 if (loffset > filesize)
152 maxreq = 0;
153 else
154 maxreq = filesize - loffset;
155 }
156
157 maxra = (int)(maxreq / blksize);
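	/*
	 * Worked example with the defaults above: maxreq is clamped to
	 * max_readahead (2MB), so with a 16KB blksize maxra becomes
	 * 2097152 / 16384 = 128 buffers of read-ahead (the 16KB block
	 * size is only illustrative, not assumed by the code).
	 */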
158
159 /*
ae8e83e6 160 * Get the requested block.
984263bc 161 */
162 if (*bpp)
163 reqbp = bp = *bpp;
164 else
165 *bpp = reqbp = bp = getblk(vp, loffset, blksize, 0, 0);
54078292 166 origoffset = loffset;
167
168 /*
169 * Calculate the maximum cluster size for a single I/O, used
170 * by cluster_rbuild().
171 */
172 maxrbuild = vmaxiosize(vp) / blksize;
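	/*
	 * Note that maxra (total desired read-ahead) may exceed maxrbuild
	 * (the most a single cluster buf can carry); in that case the
	 * read-ahead loop further below simply issues more than one
	 * cluster buf.
	 */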
173
174 /*
175 * if it is in the cache, then check to see if the reads have been
176 * sequential. If they have, then try some read-ahead, otherwise
177 * back-off on prospective read-aheads.
178 */
179 if (bp->b_flags & B_CACHE) {
180 /*
181 * Not sequential, do not do any read-ahead
182 */
364c022c 183 if (maxra <= 1)
984263bc 184 return 0;
185
186 /*
187 * No read-ahead mark, do not do any read-ahead
188 * yet.
189 */
190 if ((bp->b_flags & B_RAM) == 0)
984263bc 191 return 0;
b1c20cfa 192
193 /*
194 * We hit a read-ahead-mark, figure out how much read-ahead
195 * to do (maxra) and where to start (loffset).
196 *
197 * Shortcut the scan. Typically the way this works is that
198 * we've built up all the blocks in between except for the
199 * last in previous iterations, so if the second-to-last
200 * block is present we just skip ahead to it.
201 *
202 * This algorithm has O(1) cpu in the steady state no
203 * matter how large maxra is.
204 */
205 bp->b_flags &= ~B_RAM;
206
207 if (findblk(vp, loffset + (maxra - 2) * blksize, FINDBLK_TEST))
208 i = maxra - 1;
209 else
210 i = 1;
211 while (i < maxra) {
212 if (findblk(vp, loffset + i * blksize,
213 FINDBLK_TEST) == NULL) {
214 break;
984263bc 215 }
6b84c93e 216 ++i;
984263bc 217 }
218
219 /*
220 * We got everything or everything is in the cache, no
221 * point continuing.
222 */
223 if (i >= maxra)
224 return 0;
225
226 /*
227 * Calculate where to start the read-ahead and how much
228 * to do. Generally speaking we want to read-ahead by
229 * (maxra) when we've found a read-ahead mark. We do
230 * not want to reduce maxra here as it will cause
231 * successive read-ahead I/O's to be smaller and smaller.
232 *
233 * However, we have to make sure we don't break the
234 * filesize limitation for the clustered operation.
616dd1e9 235 */
6b84c93e 236 loffset += i * blksize;
984263bc 237 reqbp = bp = NULL;
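		/*
		 * With reqbp cleared the caller's request has already been
		 * satisfied from the cache; everything issued from here on
		 * is purely asynchronous read-ahead and is never biowait()'d
		 * on by this call.
		 */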
238
239 if (loffset >= filesize)
240 return 0;
241 if (loffset + maxra * blksize > filesize) {
242 maxreq = filesize - loffset;
243 maxra = (int)(maxreq / blksize);
244 }
984263bc 245 } else {
4d8329e1 246 __debugvar off_t firstread = bp->b_loffset;
54078292 247 int nblks;
984263bc 248
249 /*
250 * Set-up synchronous read for bp.
251 */
252 bp->b_cmd = BUF_CMD_READ;
253 bp->b_bio1.bio_done = biodone_sync;
254 bp->b_bio1.bio_flags |= BIO_SYNC;
255
256 KASSERT(firstread != NOOFFSET,
257 ("cluster_read: no buffer offset"));
54078292 258
259 /*
260 * nblks is our cluster_rbuild request size, limited
261 * primarily by the device.
262 */
263 if ((nblks = maxra) > maxrbuild)
264 nblks = maxrbuild;
265
266 if (nblks > 1) {
267 int burstbytes;
984263bc 268
269 error = VOP_BMAP(vp, loffset, &doffset,
270 &burstbytes, NULL, BUF_CMD_READ);
271 if (error)
272 goto single_block_read;
273 if (nblks > burstbytes / blksize)
274 nblks = burstbytes / blksize;
54078292 275 if (doffset == NOOFFSET)
984263bc 276 goto single_block_read;
364c022c 277 if (nblks <= 1)
984263bc 278 goto single_block_read;
984263bc 279
54078292 280 bp = cluster_rbuild(vp, filesize, loffset,
ae8e83e6 281 doffset, blksize, nblks, bp);
54078292 282 loffset += bp->b_bufsize;
364c022c 283 maxra -= bp->b_bufsize / blksize;
284 } else {
285single_block_read:
286 /*
364c022c 287 * If it isn't in the cache, then get a chunk from
288 * disk if sequential, otherwise just get the block.
289 */
cf1bb2a8 290 cluster_setram(bp);
e92ca23a 291 loffset += blksize;
364c022c 292 --maxra;
293 }
294 }
295
296 /*
297 * If B_CACHE was not set issue bp. bp will either be an
298 * asynchronous cluster buf or a synchronous single-buf.
299 * If it is a single buf it will be the same as reqbp.
300 *
301 * NOTE: Once an async cluster buf is issued bp becomes invalid.
302 */
303 if (bp) {
304#if defined(CLUSTERDEBUG)
305 if (rcluster)
306 kprintf("S(%012jx,%d,%d)\n",
307 (intmax_t)bp->b_loffset, bp->b_bcount, maxra);
984263bc 308#endif
309 if ((bp->b_flags & B_CLUSTER) == 0)
310 vfs_busy_pages(vp, bp);
984263bc 311 bp->b_flags &= ~(B_ERROR|B_INVAL);
81b5c339 312 vn_strategy(vp, &bp->b_bio1);
313 error = 0;
314 /* bp invalid now */
315 }
316
317 /*
bfda7080 318 * If we have been doing sequential I/O, then do some read-ahead.
319 * The code above us should have positioned us at the next likely
320 * offset.
321 *
322 * Only mess with buffers which we can immediately lock. HAMMER
323 * will do device-readahead irrespective of what the blocks
324 * represent.
984263bc 325 */
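	/*
	 * GETBLK_NOWAIT makes getblk() fail rather than block on a busy
	 * buffer, and GETBLK_SZMATCH is intended to reject an existing
	 * buffer whose size does not match blksize; either failure simply
	 * terminates the read-ahead pass below.
	 */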
364c022c 326 while (error == 0 && maxra > 0) {
bfda7080 327 int burstbytes;
ac7ffc8a 328 int tmp_error;
364c022c 329 int nblks;
bfda7080 330
331 rbp = getblk(vp, loffset, blksize,
332 GETBLK_SZMATCH|GETBLK_NOWAIT, 0);
333 if (rbp == NULL)
334 goto no_read_ahead;
bfda7080 335 if ((rbp->b_flags & B_CACHE)) {
984263bc 336 bqrelse(rbp);
337 goto no_read_ahead;
338 }
339
340 /*
341 * An error from the read-ahead bmap has nothing to do
342 * with the caller's original request.
343 */
344 tmp_error = VOP_BMAP(vp, loffset, &doffset,
345 &burstbytes, NULL, BUF_CMD_READ);
346 if (tmp_error || doffset == NOOFFSET) {
347 rbp->b_flags |= B_INVAL;
348 brelse(rbp);
349 rbp = NULL;
350 goto no_read_ahead;
351 }
352 if ((nblks = maxra) > maxrbuild)
353 nblks = maxrbuild;
354 if (nblks > burstbytes / blksize)
355 nblks = burstbytes / blksize;
bfda7080 356
357 /*
358 * rbp: async read
359 */
360 rbp->b_cmd = BUF_CMD_READ;
361 /*rbp->b_flags |= B_AGE*/;
362 cluster_setram(rbp);
ae8e83e6 363
364c022c 364 if (nblks > 1) {
bfda7080 365 rbp = cluster_rbuild(vp, filesize, loffset,
e92ca23a 366 doffset, blksize,
364c022c 367 nblks, rbp);
984263bc 368 } else {
369 rbp->b_bio2.bio_offset = doffset;
370 }
364c022c 371
984263bc 372#if defined(CLUSTERDEBUG)
bfda7080 373 if (rcluster) {
374 if (bp) {
375 kprintf("A+(%012jx,%d,%jd) "
376 "doff=%012jx minr=%zd ra=%d\n",
377 (intmax_t)loffset, rbp->b_bcount,
378 (intmax_t)(loffset - origoffset),
379 (intmax_t)doffset, minreq, maxra);
380 } else {
381 kprintf("A-(%012jx,%d,%jd) "
382 "doff=%012jx minr=%zd ra=%d\n",
383 (intmax_t)rbp->b_loffset, rbp->b_bcount,
384 (intmax_t)(loffset - origoffset),
385 (intmax_t)doffset, minreq, maxra);
386 }
bfda7080 387 }
984263bc 388#endif
bfda7080 389 rbp->b_flags &= ~(B_ERROR|B_INVAL);
10f3fee5 390
391 if ((rbp->b_flags & B_CLUSTER) == 0)
392 vfs_busy_pages(vp, rbp);
ae8e83e6 393 BUF_KERNPROC(rbp);
394 loffset += rbp->b_bufsize;
395 maxra -= rbp->b_bufsize / blksize;
bfda7080 396 vn_strategy(vp, &rbp->b_bio1);
ae8e83e6 397 /* rbp invalid now */
984263bc 398 }
bfda7080 399
400 /*
401 * Wait for our original buffer to complete its I/O. reqbp will
402 * be NULL if the original buffer was B_CACHE. We are returning
403 * (*bpp) which is the same as reqbp when reqbp != NULL.
404 */
405no_read_ahead:
406 if (reqbp) {
407 KKASSERT(reqbp->b_bio1.bio_flags & BIO_SYNC);
408 error = biowait(&reqbp->b_bio1, "clurd");
409 }
410 return (error);
411}
412
413/*
414 * If blocks are contiguous on disk, use this to provide clustered
415 * read ahead. We will read as many blocks as possible sequentially
416 * and then parcel them up into logical blocks in the buffer hash table.
417 *
418 * This function either returns a cluster buf or it returns fbp. fbp is
419 * already expected to be set up as a synchronous or asynchronous request.
420 *
421 * If a cluster buf is returned it will always be async.
422 */
423static struct buf *
424cluster_rbuild(struct vnode *vp, off_t filesize, off_t loffset, off_t doffset,
425 int blksize, int run, struct buf *fbp)
426{
427 struct buf *bp, *tbp;
428 off_t boffset;
429 int i, j;
2ec4b00d 430 int maxiosize = vmaxiosize(vp);
984263bc 431
2ec4b00d 432 /*
433 * avoid a division
434 */
e92ca23a 435 while (loffset + run * blksize > filesize) {
436 --run;
437 }
438
6260e485 439 tbp = fbp;
54078292 440 tbp->b_bio2.bio_offset = doffset;
441 if((tbp->b_flags & B_MALLOC) ||
442 ((tbp->b_flags & B_VMIO) == 0) || (run <= 1)) {
984263bc 443 return tbp;
10f3fee5 444 }
984263bc 445
9a82e536 446 bp = trypbuf_kva(&cluster_pbuf_freecnt);
ae8e83e6 447 if (bp == NULL) {
984263bc 448 return tbp;
ae8e83e6 449 }
450
451 /*
452 * We are synthesizing a buffer out of vm_page_t's, but
453 * if the block size is not page aligned then the starting
454 * address may not be either. Inherit the b_data offset
455 * from the original buffer.
456 */
457 bp->b_data = (char *)((vm_offset_t)bp->b_data |
458 ((vm_offset_t)tbp->b_data & PAGE_MASK));
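	/*
	 * For example, with 2KB blocks tbp->b_data may begin 2048 bytes
	 * into a page; carrying that page offset over keeps bp->b_data
	 * consistent with the page array mapped by pmap_qenter() below
	 * (the 2KB figure is only illustrative).
	 */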
ae8e83e6 459 bp->b_flags |= B_CLUSTER | B_VMIO;
10f3fee5 460 bp->b_cmd = BUF_CMD_READ;
ae8e83e6 461 bp->b_bio1.bio_done = cluster_callback; /* default to async */
462 bp->b_bio1.bio_caller_info1.cluster_head = NULL;
463 bp->b_bio1.bio_caller_info2.cluster_tail = NULL;
54078292 464 bp->b_loffset = loffset;
e92ca23a 465 bp->b_bio2.bio_offset = doffset;
466 KASSERT(bp->b_loffset != NOOFFSET,
467 ("cluster_rbuild: no buffer offset"));
984263bc 468
469 bp->b_bcount = 0;
470 bp->b_bufsize = 0;
54f51aeb 471 bp->b_xio.xio_npages = 0;
984263bc 472
e92ca23a 473 for (boffset = doffset, i = 0; i < run; ++i, boffset += blksize) {
10f3fee5 474 if (i) {
54f51aeb 475 if ((bp->b_xio.xio_npages * PAGE_SIZE) +
e92ca23a 476 round_page(blksize) > maxiosize) {
477 break;
478 }
479
480 /*
481 * Shortcut some checks and try to avoid buffers that
482 * would block in the lock. The same checks have to
483 * be made again after we officially get the buffer.
484 */
485 tbp = getblk(vp, loffset + i * blksize, blksize,
486 GETBLK_SZMATCH|GETBLK_NOWAIT, 0);
487 if (tbp == NULL)
488 break;
489 for (j = 0; j < tbp->b_xio.xio_npages; j++) {
490 if (tbp->b_xio.xio_pages[j]->valid)
491 break;
492 }
493 if (j != tbp->b_xio.xio_npages) {
494 bqrelse(tbp);
495 break;
496 }
497
498 /*
499 * Stop scanning if the buffer is fully valid
500 * (marked B_CACHE), or locked (may be doing a
501 * background write), or if the buffer is not
502 * VMIO backed. The clustering code can only deal
503 * with VMIO-backed buffers.
504 */
505 if ((tbp->b_flags & (B_CACHE|B_LOCKED)) ||
506 (tbp->b_flags & B_VMIO) == 0 ||
507 (LIST_FIRST(&tbp->b_dep) != NULL &&
508 buf_checkread(tbp))
509 ) {
510 bqrelse(tbp);
511 break;
512 }
513
514 /*
515 * The buffer must be completely invalid in order to
516 * take part in the cluster. If it is partially valid
517 * then we stop.
518 */
519 for (j = 0;j < tbp->b_xio.xio_npages; j++) {
520 if (tbp->b_xio.xio_pages[j]->valid)
521 break;
522 }
54f51aeb 523 if (j != tbp->b_xio.xio_npages) {
524 bqrelse(tbp);
525 break;
526 }
527
528 /*
529 * Set a read-ahead mark as appropriate. Always
530 * set the read-ahead mark at (run - 1). It is
531 * unclear why we were also setting it at i == 1.
984263bc 532 */
616dd1e9 533 if (/*i == 1 ||*/ i == (run - 1))
cf1bb2a8 534 cluster_setram(tbp);
535
536 /*
537 * Depress the priority of buffers not explicitly
538 * requested.
539 */
e92ca23a 540 /* tbp->b_flags |= B_AGE; */
541
542 /*
543 * Set the block number if it isn't set, otherwise
544 * if it is make sure it matches the block number we
545 * expect.
546 */
547 if (tbp->b_bio2.bio_offset == NOOFFSET) {
548 tbp->b_bio2.bio_offset = boffset;
549 } else if (tbp->b_bio2.bio_offset != boffset) {
550 brelse(tbp);
551 break;
552 }
553 }
ae8e83e6 554
984263bc 555 /*
556 * The passed-in tbp (i == 0) will already be set up for
557 * async or sync operation. All other tbp's acquire in
558 * our loop are set up for async operation.
984263bc 559 */
10f3fee5 560 tbp->b_cmd = BUF_CMD_READ;
984263bc 561 BUF_KERNPROC(tbp);
81b5c339 562 cluster_append(&bp->b_bio1, tbp);
54078292 563 for (j = 0; j < tbp->b_xio.xio_npages; ++j) {
984263bc 564 vm_page_t m;
b12defdc 565
54f51aeb 566 m = tbp->b_xio.xio_pages[j];
b12defdc 567 vm_page_busy_wait(m, FALSE, "clurpg");
984263bc 568 vm_page_io_start(m);
b12defdc 569 vm_page_wakeup(m);
984263bc 570 vm_object_pip_add(m->object, 1);
571 if ((bp->b_xio.xio_npages == 0) ||
572 (bp->b_xio.xio_pages[bp->b_xio.xio_npages-1] != m)) {
573 bp->b_xio.xio_pages[bp->b_xio.xio_npages] = m;
574 bp->b_xio.xio_npages++;
575 }
576 if ((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL)
54f51aeb 577 tbp->b_xio.xio_pages[j] = bogus_page;
578 }
579 /*
580 * XXX shouldn't this be += size for both, like in
581 * cluster_wbuild()?
582 *
583 * Don't inherit tbp->b_bufsize as it may be larger due to
584 * a non-page-aligned size. Instead just aggregate using
585 * 'size'.
586 */
587 if (tbp->b_bcount != blksize)
588 kprintf("warning: tbp->b_bcount wrong %d vs %d\n", tbp->b_bcount, blksize);
589 if (tbp->b_bufsize != blksize)
590 kprintf("warning: tbp->b_bufsize wrong %d vs %d\n", tbp->b_bufsize, blksize);
591 bp->b_bcount += blksize;
592 bp->b_bufsize += blksize;
593 }
594
595 /*
596 * Fully valid pages in the cluster are already good and do not need
597 * to be re-read from disk. Replace the page with bogus_page
598 */
599 for (j = 0; j < bp->b_xio.xio_npages; j++) {
600 if ((bp->b_xio.xio_pages[j]->valid & VM_PAGE_BITS_ALL) ==
984263bc 601 VM_PAGE_BITS_ALL) {
54f51aeb 602 bp->b_xio.xio_pages[j] = bogus_page;
603 }
604 }
312dcd01 605 if (bp->b_bufsize > bp->b_kvasize) {
54078292 606 panic("cluster_rbuild: b_bufsize(%d) > b_kvasize(%d)",
984263bc 607 bp->b_bufsize, bp->b_kvasize);
312dcd01 608 }
984263bc 609 pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
54f51aeb 610 (vm_page_t *)bp->b_xio.xio_pages, bp->b_xio.xio_npages);
ae8e83e6 611 BUF_KERNPROC(bp);
612 return (bp);
613}
614
615/*
616 * Cleanup after a clustered read or write.
617 * This is complicated by the fact that any of the buffers might have
618 * extra memory (if there were no empty buffer headers at allocbuf time)
619 * that we will need to shift around.
620 *
621 * The returned bio is &bp->b_bio1
622 */
623void
81b5c339 624cluster_callback(struct bio *bio)
984263bc 625{
626 struct buf *bp = bio->bio_buf;
627 struct buf *tbp;
628 int error = 0;
629
630 /*
631 * Must propagate errors to all the components. A short read (EOF)
632 * is a critical error.
984263bc 633 */
9a71d53f 634 if (bp->b_flags & B_ERROR) {
984263bc 635 error = bp->b_error;
636 } else if (bp->b_bcount != bp->b_bufsize) {
637 panic("cluster_callback: unexpected EOF on cluster %p!", bio);
638 }
984263bc 639
54f51aeb 640 pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_xio.xio_npages);
641 /*
642 * Move memory from the large cluster buffer into the component
643 * buffers and mark IO as done on these. Since the memory map
644 * is the same, no actual copying is required.
984263bc 645 */
646 while ((tbp = bio->bio_caller_info1.cluster_head) != NULL) {
647 bio->bio_caller_info1.cluster_head = tbp->b_cluster_next;
984263bc 648 if (error) {
24c8374a 649 tbp->b_flags |= B_ERROR | B_IODEBUG;
650 tbp->b_error = error;
651 } else {
652 tbp->b_dirtyoff = tbp->b_dirtyend = 0;
653 tbp->b_flags &= ~(B_ERROR|B_INVAL);
24c8374a 654 tbp->b_flags |= B_IODEBUG;
655 /*
656 * XXX the bdwrite()/bqrelse() issued during
657 * cluster building clears B_RELBUF (see bqrelse()
658 * comment). If direct I/O was specified, we have
659 * to restore it here to allow the buffer and VM
660 * to be freed.
661 */
662 if (tbp->b_flags & B_DIRECT)
663 tbp->b_flags |= B_RELBUF;
664 }
81b5c339 665 biodone(&tbp->b_bio1);
666 }
667 relpbuf(bp, &cluster_pbuf_freecnt);
668}
669
670/*
504ea70e 671 * Implement modified write build for cluster.
984263bc 672 *
673 * write_behind = 0 write behind disabled
674 * write_behind = 1 write behind normal (default)
675 * write_behind = 2 write behind backed-off
984263bc 676 *
677 * In addition, write_behind is only activated for files that have
678 * grown past a certain size (default 10MB). Otherwise temporary files
679 * wind up generating a lot of unnecessary disk I/O.
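 *
 * For example, with write_behind set to 2 the flush window is backed off
 * by one full cluster (len bytes), so the most recently written cluster is
 * left dirty in case more data is appended to it.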
984263bc 680 */
984263bc 681static __inline int
e92ca23a 682cluster_wbuild_wb(struct vnode *vp, int blksize, off_t start_loffset, int len)
683{
684 int r = 0;
685
686 switch(write_behind) {
687 case 2:
54078292 688 if (start_loffset < len)
984263bc 689 break;
54078292 690 start_loffset -= len;
691 /* fall through */
692 case 1:
693 if (vp->v_filesize >= write_behind_minfilesize) {
694 r = cluster_wbuild(vp, NULL, blksize,
695 start_loffset, len);
696 }
697 /* fall through */
698 default:
699 /* fall through */
700 break;
701 }
702 return(r);
703}
704
705/*
706 * Do clustered write for FFS.
707 *
708 * Four cases:
709 * 1. Write is not sequential (write asynchronously)
710 * Write is sequential:
711 * 2. beginning of cluster - begin cluster
712 * 3. middle of a cluster - add to cluster
713 * 4. end of a cluster - asynchronously write cluster
714 */
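/*
 * Cluster state is tracked in the vnode: v_cstart is the logical offset
 * where the cluster being collected begins, v_lastw the logical offset of
 * the last delayed-write buffer added, v_lasta the disk (bio2) offset of
 * that buffer (used to check physical contiguity), and v_clen is roughly
 * the byte offset past v_cstart at which the cluster is considered full.
 */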
715void
e92ca23a 716cluster_write(struct buf *bp, off_t filesize, int blksize, int seqcount)
717{
718 struct vnode *vp;
54078292 719 off_t loffset;
984263bc 720 int maxclen, cursize;
721 int async;
722
723 vp = bp->b_vp;
e92ca23a 724 if (vp->v_type == VREG)
984263bc 725 async = vp->v_mount->mnt_flag & MNT_ASYNC;
e92ca23a 726 else
984263bc 727 async = 0;
54078292 728 loffset = bp->b_loffset;
729 KASSERT(bp->b_loffset != NOOFFSET,
730 ("cluster_write: no buffer offset"));
731
732 /* Initialize vnode to beginning of file. */
54078292 733 if (loffset == 0)
734 vp->v_lasta = vp->v_clen = vp->v_cstart = vp->v_lastw = 0;
735
e92ca23a 736 if (vp->v_clen == 0 || loffset != vp->v_lastw + blksize ||
54078292 737 bp->b_bio2.bio_offset == NOOFFSET ||
e92ca23a 738 (bp->b_bio2.bio_offset != vp->v_lasta + blksize)) {
2ec4b00d 739 maxclen = vmaxiosize(vp);
740 if (vp->v_clen != 0) {
741 /*
742 * Next block is not sequential.
743 *
744 * If we are not writing at end of file, the process
745 * seeked to another point in the file since its last
746 * write, or we have reached our maximum cluster size,
747 * then push the previous cluster. Otherwise try
748 * reallocating to make it sequential.
749 *
750 * Change to algorithm: only push previous cluster if
751 * it was sequential from the point of view of the
752 * seqcount heuristic, otherwise leave the buffer
753 * intact so we can potentially optimize the I/O
754 * later on in the buf_daemon or update daemon
755 * flush.
756 */
e92ca23a 757 cursize = vp->v_lastw - vp->v_cstart + blksize;
9de13b88 758 if (bp->b_loffset + blksize < filesize ||
e92ca23a 759 loffset != vp->v_lastw + blksize || vp->v_clen <= cursize) {
984263bc 760 if (!async && seqcount > 0) {
e92ca23a 761 cluster_wbuild_wb(vp, blksize,
762 vp->v_cstart, cursize);
763 }
764 } else {
765 struct buf **bpp, **endbp;
766 struct cluster_save *buflist;
767
e92ca23a 768 buflist = cluster_collectbufs(vp, bp, blksize);
769 endbp = &buflist->bs_children
770 [buflist->bs_nchildren - 1];
771 if (VOP_REALLOCBLKS(vp, buflist)) {
772 /*
773 * Failed, push the previous cluster
774 * if *really* writing sequentially
775 * in the logical file (seqcount > 1),
776 * otherwise delay it in the hopes that
777 * the low level disk driver can
778 * optimize the write ordering.
779 */
780 for (bpp = buflist->bs_children;
781 bpp < endbp; bpp++)
782 brelse(*bpp);
efda3bd0 783 kfree(buflist, M_SEGMENT);
784 if (seqcount > 1) {
785 cluster_wbuild_wb(vp,
e92ca23a 786 blksize, vp->v_cstart,
787 cursize);
788 }
789 } else {
790 /*
791 * Succeeded, keep building cluster.
792 */
793 for (bpp = buflist->bs_children;
794 bpp <= endbp; bpp++)
795 bdwrite(*bpp);
efda3bd0 796 kfree(buflist, M_SEGMENT);
797 vp->v_lastw = loffset;
798 vp->v_lasta = bp->b_bio2.bio_offset;
799 return;
800 }
801 }
802 }
803 /*
804 * Consider beginning a cluster. If at end of file, make
805 * cluster as large as possible, otherwise find size of
806 * existing cluster.
807 */
808 if ((vp->v_type == VREG) &&
9de13b88 809 bp->b_loffset + blksize < filesize &&
54078292 810 (bp->b_bio2.bio_offset == NOOFFSET) &&
e92ca23a 811 (VOP_BMAP(vp, loffset, &bp->b_bio2.bio_offset, &maxclen, NULL, BUF_CMD_WRITE) ||
54078292 812 bp->b_bio2.bio_offset == NOOFFSET)) {
b642a6c1 813 bdwrite(bp);
984263bc 814 vp->v_clen = 0;
54078292 815 vp->v_lasta = bp->b_bio2.bio_offset;
e92ca23a 816 vp->v_cstart = loffset + blksize;
54078292 817 vp->v_lastw = loffset;
818 return;
819 }
820 if (maxclen > blksize)
821 vp->v_clen = maxclen - blksize;
822 else
823 vp->v_clen = 0;
824 if (!async && vp->v_clen == 0) { /* I/O not contiguous */
e92ca23a 825 vp->v_cstart = loffset + blksize;
b642a6c1 826 bdwrite(bp);
984263bc 827 } else { /* Wait for rest of cluster */
54078292 828 vp->v_cstart = loffset;
829 bdwrite(bp);
830 }
54078292 831 } else if (loffset == vp->v_cstart + vp->v_clen) {
832 /*
833 * At end of cluster, write it out if seqcount tells us we
834 * are operating sequentially, otherwise let the buf or
835 * update daemon handle it.
836 */
837 bdwrite(bp);
838 if (seqcount > 1)
839 cluster_wbuild_wb(vp, blksize, vp->v_cstart,
840 vp->v_clen + blksize);
984263bc 841 vp->v_clen = 0;
e92ca23a 842 vp->v_cstart = loffset + blksize;
843 } else if (vm_page_count_severe() &&
844 bp->b_loffset + blksize < filesize) {
984263bc 845 /*
846 * We are low on memory, get it going NOW. However, do not
847 * try to push out a partial block at the end of the file
848 * as this could lead to extremely non-optimal write activity.
849 */
850 bawrite(bp);
851 } else {
852 /*
853 * In the middle of a cluster, so just delay the I/O for now.
854 */
855 bdwrite(bp);
856 }
857 vp->v_lastw = loffset;
858 vp->v_lasta = bp->b_bio2.bio_offset;
859}
860
861/*
862 * This is the clustered version of bawrite(). It works similarly to
863 * cluster_write() except I/O on the buffer is guaranteed to occur.
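 *
 * The buffer is handed to cluster_wbuild() via &bp; if it is absorbed into
 * a cluster the pointer is set to NULL, otherwise the leftover buffer is
 * written with bawrite() below. The return value is the number of bytes
 * for which write I/O was initiated.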
864 */
865int
866cluster_awrite(struct buf *bp)
867{
868 int total;
869
870 /*
871 * Don't bother if it isn't clusterable.
872 */
873 if ((bp->b_flags & B_CLUSTEROK) == 0 ||
874 bp->b_vp == NULL ||
875 (bp->b_vp->v_flag & VOBJBUF) == 0) {
876 total = bp->b_bufsize;
877 bawrite(bp);
878 return (total);
879 }
880
881 total = cluster_wbuild(bp->b_vp, &bp, bp->b_bufsize,
882 bp->b_loffset, vmaxiosize(bp->b_vp));
883 if (bp)
884 bawrite(bp);
885
886 return total;
887}
888
889/*
890 * This is an awful lot like cluster_rbuild...wish they could be combined.
891 * The last lbn argument is the current block on which I/O is being
892 * performed. Check to see that it doesn't fall in the middle of
893 * the current block (if last_bp == NULL).
894 *
895 * cluster_wbuild() normally does not guarantee anything. If bpp is
896 * non-NULL and cluster_wbuild() is able to incorporate it into the
897 * I/O it will set *bpp to NULL, otherwise it will leave it alone and
898 * the caller must dispose of *bpp.
984263bc 899 */
900static int
901cluster_wbuild(struct vnode *vp, struct buf **bpp,
902 int blksize, off_t start_loffset, int bytes)
903{
904 struct buf *bp, *tbp;
e43a034f 905 int i, j;
984263bc 906 int totalwritten = 0;
9de13b88 907 int must_initiate;
2ec4b00d 908 int maxiosize = vmaxiosize(vp);
984263bc 909
54078292 910 while (bytes > 0) {
984263bc 911 /*
912 * If the buffer matches the passed locked & removed buffer
913 * we used the passed buffer (which might not be B_DELWRI).
914 *
915 * Otherwise locate the buffer and determine if it is
916 * compatible.
984263bc 917 */
918 if (bpp && (*bpp)->b_loffset == start_loffset) {
919 tbp = *bpp;
920 *bpp = NULL;
921 bpp = NULL;
922 } else {
923 tbp = findblk(vp, start_loffset, FINDBLK_NBLOCK);
924 if (tbp == NULL ||
925 (tbp->b_flags & (B_LOCKED | B_INVAL | B_DELWRI)) !=
926 B_DELWRI ||
927 (LIST_FIRST(&tbp->b_dep) && buf_checkwrite(tbp))) {
928 if (tbp)
929 BUF_UNLOCK(tbp);
930 start_loffset += blksize;
931 bytes -= blksize;
932 continue;
933 }
934 bremfree(tbp);
984263bc 935 }
10f3fee5 936 KKASSERT(tbp->b_cmd == BUF_CMD_DONE);
937
938 /*
939 * Extra memory in the buffer, punt on this buffer.
940 * XXX we could handle this in most cases, but we would
941 * have to push the extra memory down to after our max
942 * possible cluster size and then potentially pull it back
943 * up if the cluster was terminated prematurely--too much
944 * hassle.
945 */
946 if (((tbp->b_flags & (B_CLUSTEROK|B_MALLOC)) != B_CLUSTEROK) ||
947 (tbp->b_bcount != tbp->b_bufsize) ||
948 (tbp->b_bcount != blksize) ||
949 (bytes == blksize) ||
9a82e536 950 ((bp = getpbuf_kva(&cluster_pbuf_freecnt)) == NULL)) {
951 totalwritten += tbp->b_bufsize;
952 bawrite(tbp);
953 start_loffset += blksize;
954 bytes -= blksize;
955 continue;
956 }
957
958 /*
959 * Set up the pbuf. Track our append point with b_bcount
960 * and b_bufsize. b_bufsize is not used by the device but
961 * our caller uses it to loop clusters and we use it to
962 * detect a premature EOF on the block device.
984263bc 963 */
964 bp->b_bcount = 0;
965 bp->b_bufsize = 0;
54f51aeb 966 bp->b_xio.xio_npages = 0;
81b5c339 967 bp->b_loffset = tbp->b_loffset;
54078292 968 bp->b_bio2.bio_offset = tbp->b_bio2.bio_offset;
969
970 /*
971 * We are synthesizing a buffer out of vm_page_t's, but
972 * if the block size is not page aligned then the starting
973 * address may not be either. Inherit the b_data offset
974 * from the original buffer.
975 */
976 bp->b_data = (char *)((vm_offset_t)bp->b_data |
977 ((vm_offset_t)tbp->b_data & PAGE_MASK));
10f3fee5 978 bp->b_flags &= ~B_ERROR;
4414f2c9 979 bp->b_flags |= B_CLUSTER | B_BNOCLIP |
f2d7fcf0 980 (tbp->b_flags & (B_VMIO | B_NEEDCOMMIT));
981 bp->b_bio1.bio_caller_info1.cluster_head = NULL;
982 bp->b_bio1.bio_caller_info2.cluster_tail = NULL;
b1c20cfa 983
984 /*
985 * From this location in the file, scan forward to see
986 * if there are buffers with adjacent data that need to
987 * be written as well.
988 *
989 * IO *must* be initiated on index 0 at this point
990 * (particularly when called from cluster_awrite()).
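 *
 * The index 0 buffer has either been passed in locked via *bpp or was
 * bremfree()'d above, so it can no longer simply be unlocked and skipped;
 * it is written either as part of the cluster or on its own.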
984263bc 991 */
e92ca23a 992 for (i = 0; i < bytes; (i += blksize), (start_loffset += blksize)) {
993 if (i == 0) {
994 must_initiate = 1;
995 } else {
996 /*
997 * Not first buffer.
998 */
999 must_initiate = 0;
1000 tbp = findblk(vp, start_loffset,
1001 FINDBLK_NBLOCK);
984263bc 1002 /*
1003 * Buffer not found or could not be locked
1004 * non-blocking.
984263bc 1005 */
b1c20cfa 1006 if (tbp == NULL)
984263bc 1007 break;
1008
1009 /*
1010 * If it IS in core, but has different
1011 * characteristics, then don't cluster
1012 * with it.
1013 */
1014 if ((tbp->b_flags & (B_VMIO | B_CLUSTEROK |
1015 B_INVAL | B_DELWRI | B_NEEDCOMMIT))
1016 != (B_DELWRI | B_CLUSTEROK |
1017 (bp->b_flags & (B_VMIO | B_NEEDCOMMIT))) ||
9de13b88 1018 (tbp->b_flags & B_LOCKED)
1019 ) {
1020 BUF_UNLOCK(tbp);
1021 break;
1022 }
1023
1024 /*
1025 * Check that the combined cluster
1026 * would make sense with regard to pages
1027 * and would not be too large
1028 *
1029 * WARNING! buf_checkwrite() must be the last
1030 * check made. If it returns 0 then
1031 * we must initiate the I/O.
984263bc 1032 */
e92ca23a 1033 if ((tbp->b_bcount != blksize) ||
1034 ((bp->b_bio2.bio_offset + i) !=
1035 tbp->b_bio2.bio_offset) ||
54f51aeb 1036 ((tbp->b_xio.xio_npages + bp->b_xio.xio_npages) >
1037 (maxiosize / PAGE_SIZE)) ||
1038 (LIST_FIRST(&tbp->b_dep) &&
1039 buf_checkwrite(tbp))
1040 ) {
984263bc 1041 BUF_UNLOCK(tbp);
1042 break;
1043 }
1044 if (LIST_FIRST(&tbp->b_dep))
1045 must_initiate = 1;
1046 /*
1047 * Ok, it's passed all the tests,
1048 * so remove it from the free list
1049 * and mark it busy. We will use it.
1050 */
1051 bremfree(tbp);
10f3fee5 1052 KKASSERT(tbp->b_cmd == BUF_CMD_DONE);
9de13b88 1053 }
1054
1055 /*
1056 * If the IO is via the VM then we do some
1057 * special VM hackery (yuck). Since the buffer's
1058 * block size may not be page-aligned it is possible
1059 * for a page to be shared between two buffers. We
1060 * have to get rid of the duplication when building
1061 * the cluster.
1062 */
1063 if (tbp->b_flags & B_VMIO) {
1064 vm_page_t m;
1065
1066 /*
1067 * Try to avoid deadlocks with the VM system.
1068 * However, we cannot abort the I/O if
1069 * must_initiate is non-zero.
1070 */
1071 if (must_initiate == 0) {
1072 for (j = 0;
1073 j < tbp->b_xio.xio_npages;
1074 ++j) {
54f51aeb 1075 m = tbp->b_xio.xio_pages[j];
1076 if (m->flags & PG_BUSY) {
1077 bqrelse(tbp);
1078 goto finishcluster;
1079 }
1080 }
1081 }
1082
54078292 1083 for (j = 0; j < tbp->b_xio.xio_npages; ++j) {
54f51aeb 1084 m = tbp->b_xio.xio_pages[j];
b12defdc 1085 vm_page_busy_wait(m, FALSE, "clurpg");
984263bc 1086 vm_page_io_start(m);
b12defdc 1087 vm_page_wakeup(m);
984263bc 1088 vm_object_pip_add(m->object, 1);
1089 if ((bp->b_xio.xio_npages == 0) ||
1090 (bp->b_xio.xio_pages[bp->b_xio.xio_npages - 1] != m)) {
1091 bp->b_xio.xio_pages[bp->b_xio.xio_npages] = m;
1092 bp->b_xio.xio_npages++;
1093 }
1094 }
1095 }
1096 bp->b_bcount += blksize;
1097 bp->b_bufsize += blksize;
984263bc 1098
984263bc 1099 bundirty(tbp);
10f3fee5 1100 tbp->b_flags &= ~B_ERROR;
10f3fee5 1101 tbp->b_cmd = BUF_CMD_WRITE;
984263bc 1102 BUF_KERNPROC(tbp);
81b5c339 1103 cluster_append(&bp->b_bio1, tbp);
1104
1105 /*
1106 * check for latent dependencies to be handled
1107 */
1108 if (LIST_FIRST(&tbp->b_dep) != NULL)
1109 buf_start(tbp);
1110 }
1111 finishcluster:
1112 pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
1113 (vm_page_t *)bp->b_xio.xio_pages,
1114 bp->b_xio.xio_npages);
312dcd01 1115 if (bp->b_bufsize > bp->b_kvasize) {
1116 panic("cluster_wbuild: b_bufsize(%d) "
1117 "> b_kvasize(%d)\n",
1118 bp->b_bufsize, bp->b_kvasize);
312dcd01 1119 }
1120 totalwritten += bp->b_bufsize;
1121 bp->b_dirtyoff = 0;
1122 bp->b_dirtyend = bp->b_bufsize;
ae8e83e6 1123 bp->b_bio1.bio_done = cluster_callback;
10f3fee5 1124 bp->b_cmd = BUF_CMD_WRITE;
ae8e83e6 1125
10f3fee5 1126 vfs_busy_pages(vp, bp);
77912481 1127 bsetrunningbufspace(bp, bp->b_bufsize);
ae8e83e6 1128 BUF_KERNPROC(bp);
a8f169e2 1129 vn_strategy(vp, &bp->b_bio1);
984263bc 1130
54078292 1131 bytes -= i;
1132 }
1133 return totalwritten;
1134}
1135
1136/*
1137 * Collect together all the buffers in a cluster.
1138 * Plus add one additional buffer.
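 * The additional buffer is last_bp itself: the children array is filled
 * with the buffers from v_cstart through v_lastw (re-acquired via bread())
 * and last_bp is appended as the final child.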
1139 */
1140static struct cluster_save *
e92ca23a 1141cluster_collectbufs(struct vnode *vp, struct buf *last_bp, int blksize)
1142{
1143 struct cluster_save *buflist;
1144 struct buf *bp;
54078292 1145 off_t loffset;
1146 int i, len;
1147
e92ca23a 1148 len = (int)(vp->v_lastw - vp->v_cstart + blksize) / blksize;
77652cad 1149 buflist = kmalloc(sizeof(struct buf *) * (len + 1) + sizeof(*buflist),
54078292 1150 M_SEGMENT, M_WAITOK);
1151 buflist->bs_nchildren = 0;
1152 buflist->bs_children = (struct buf **) (buflist + 1);
e92ca23a 1153 for (loffset = vp->v_cstart, i = 0; i < len; (loffset += blksize), i++) {
54078292 1154 (void) bread(vp, loffset, last_bp->b_bcount, &bp);
984263bc 1155 buflist->bs_children[i] = bp;
54078292 1156 if (bp->b_bio2.bio_offset == NOOFFSET) {
08daea96 1157 VOP_BMAP(bp->b_vp, bp->b_loffset,
1158 &bp->b_bio2.bio_offset,
1159 NULL, NULL, BUF_CMD_WRITE);
54078292 1160 }
1161 }
1162 buflist->bs_children[i] = bp = last_bp;
54078292 1163 if (bp->b_bio2.bio_offset == NOOFFSET) {
1164 VOP_BMAP(bp->b_vp, bp->b_loffset, &bp->b_bio2.bio_offset,
1165 NULL, NULL, BUF_CMD_WRITE);
54078292 1166 }
1167 buflist->bs_nchildren = i + 1;
1168 return (buflist);
1169}
1170
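/*
 * Append tbp to a cluster pbuf's list of component buffers. The list is
 * singly linked through b_cluster_next with its head and tail kept in the
 * bio's caller_info fields; cluster_callback() walks it to complete each
 * component buffer when the cluster I/O finishes.
 */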
1171void
1172cluster_append(struct bio *bio, struct buf *tbp)
1173{
1174 tbp->b_cluster_next = NULL;
1175 if (bio->bio_caller_info1.cluster_head == NULL) {
1176 bio->bio_caller_info1.cluster_head = tbp;
1177 bio->bio_caller_info2.cluster_tail = tbp;
1178 } else {
1179 bio->bio_caller_info2.cluster_tail->b_cluster_next = tbp;
1180 bio->bio_caller_info2.cluster_tail = tbp;
1181 }
1182}
1183
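/*
 * Set the read-ahead mark on a buffer (B_RAM) and, if the buffer has VM
 * pages, on its first page (PG_RAM). cluster_readx() uses the B_RAM mark
 * on a cached buffer as its cue to issue the next round of read-ahead.
 */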
1184static
1185void
1186cluster_setram (struct buf *bp)
1187{
1188 bp->b_flags |= B_RAM;
1189 if (bp->b_xio.xio_npages)
1190 vm_page_flag_set(bp->b_xio.xio_pages[0], PG_RAM);
1191}