AMD64 - Refactor uio_resid and size_t assumptions.
[dragonfly.git] / sys / kern / vfs_cluster.c
/*-
 * Copyright (c) 1993
 *	The Regents of the University of California. All rights reserved.
 * Modifications/enhancements:
 * 	Copyright (c) 1995 John S. Dyson. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_cluster.c	8.7 (Berkeley) 2/13/94
 * $FreeBSD: src/sys/kern/vfs_cluster.c,v 1.92.2.9 2001/11/18 07:10:59 dillon Exp $
 * $DragonFly: src/sys/kern/vfs_cluster.c,v 1.40 2008/07/14 03:09:00 dillon Exp $
 */

#include "opt_debug_cluster.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/resourcevar.h>
#include <sys/vmmeter.h>
#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <sys/sysctl.h>
#include <sys/buf2.h>
#include <vm/vm_page2.h>

#include <machine/limits.h>

#if defined(CLUSTERDEBUG)
#include <sys/sysctl.h>
static int rcluster= 0;
SYSCTL_INT(_debug, OID_AUTO, rcluster, CTLFLAG_RW, &rcluster, 0, "");
#endif

static MALLOC_DEFINE(M_SEGMENT, "cluster_save", "cluster_save buffer");

static struct cluster_save *
	cluster_collectbufs (struct vnode *vp, struct buf *last_bp,
			     int blksize);
static struct buf *
	cluster_rbuild (struct vnode *vp, off_t filesize, off_t loffset,
			off_t doffset, int blksize, int run,
			struct buf *fbp);
static void cluster_callback (struct bio *);


static int write_behind = 1;
SYSCTL_INT(_vfs, OID_AUTO, write_behind, CTLFLAG_RW, &write_behind, 0, "");

extern vm_page_t bogus_page;

extern int cluster_pbuf_freecnt;

/*
 * Maximum number of blocks for read-ahead.
 */
#define MAXRA 32

/*
 * This replaces bread.
 */
int
cluster_read(struct vnode *vp, off_t filesize, off_t loffset,
	     int blksize, size_t resid, int seqcount, struct buf **bpp)
{
	struct buf *bp, *rbp, *reqbp;
	off_t origoffset;
	off_t doffset;
	int error;
	int i;
	int maxra, racluster;
	int totread;

	error = 0;
	totread = (resid > INT_MAX) ? INT_MAX : (int)resid;

	/*
	 * Try to limit the amount of read-ahead by a few
	 * ad-hoc parameters.  This needs work!!!
	 */
	racluster = vmaxiosize(vp) / blksize;
	maxra = 2 * racluster + (totread / blksize);
	if (maxra > MAXRA)
		maxra = MAXRA;
	if (maxra > nbuf/8)
		maxra = nbuf/8;

	/*
	 * Get the requested block.
	 */
	*bpp = reqbp = bp = getblk(vp, loffset, blksize, 0, 0);
	origoffset = loffset;

	/*
	 * if it is in the cache, then check to see if the reads have been
	 * sequential.  If they have, then try some read-ahead, otherwise
	 * back-off on prospective read-aheads.
	 */
	if (bp->b_flags & B_CACHE) {
		if (!seqcount) {
			return 0;
		} else if ((bp->b_flags & B_RAM) == 0) {
			return 0;
		} else {
			struct buf *tbp;
			bp->b_flags &= ~B_RAM;

			/*
			 * Set read-ahead-mark only if we can passively lock
			 * the buffer.  Note that with these flags the bp
			 * could very well exist even though NULL is returned.
			 */
			for (i = 1; i < maxra; i++) {
				tbp = findblk(vp, loffset + i * blksize,
					      FINDBLK_NBLOCK);
				if (tbp == NULL)
					break;
				if (((i % racluster) == (racluster - 1)) ||
				    (i == (maxra - 1))) {
					tbp->b_flags |= B_RAM;
				}
				BUF_UNLOCK(tbp);
			}
			if (i >= maxra)
				return 0;
			loffset += i * blksize;
		}
		reqbp = bp = NULL;
	} else {
		off_t firstread = bp->b_loffset;
		int nblks;

		/*
		 * Set-up synchronous read for bp.
		 */
		bp->b_cmd = BUF_CMD_READ;
		bp->b_bio1.bio_done = biodone_sync;
		bp->b_bio1.bio_flags |= BIO_SYNC;

		KASSERT(firstread != NOOFFSET,
			("cluster_read: no buffer offset"));
		if (firstread + totread > filesize)
			totread = (int)(filesize - firstread);
		nblks = totread / blksize;
		if (nblks) {
			int burstbytes;

			if (nblks > racluster)
				nblks = racluster;

			error = VOP_BMAP(vp, loffset, &doffset,
					 &burstbytes, NULL, BUF_CMD_READ);
			if (error)
				goto single_block_read;
			if (doffset == NOOFFSET)
				goto single_block_read;
			if (burstbytes < blksize * 2)
				goto single_block_read;
			if (nblks > burstbytes / blksize)
				nblks = burstbytes / blksize;

			bp = cluster_rbuild(vp, filesize, loffset,
					    doffset, blksize, nblks, bp);
			loffset += bp->b_bufsize;
		} else {
single_block_read:
			/*
			 * if it isn't in the cache, then get a chunk from
			 * disk if sequential, otherwise just get the block.
			 */
			bp->b_flags |= B_RAM;
			loffset += blksize;
		}
	}

	/*
	 * If B_CACHE was not set issue bp.  bp will either be an
	 * asynchronous cluster buf or a synchronous single-buf.
	 * If it is a single buf it will be the same as reqbp.
	 *
	 * NOTE: Once an async cluster buf is issued bp becomes invalid.
	 */
	if (bp) {
#if defined(CLUSTERDEBUG)
		if (rcluster)
			kprintf("S(%lld,%d,%d) ",
			    bp->b_loffset, bp->b_bcount, seqcount);
#endif
		if ((bp->b_flags & B_CLUSTER) == 0)
			vfs_busy_pages(vp, bp);
		bp->b_flags &= ~(B_ERROR|B_INVAL);
		vn_strategy(vp, &bp->b_bio1);
		error = 0;
		/* bp invalid now */
	}

	/*
	 * If we have been doing sequential I/O, then do some read-ahead.
	 *
	 * Only mess with buffers which we can immediately lock.  HAMMER
	 * will do device-readahead irrespective of what the blocks
	 * represent.
	 */
	rbp = NULL;
	if (!error &&
	    seqcount &&
	    loffset < origoffset + seqcount * blksize &&
	    loffset + blksize <= filesize
	) {
		int nblksread;
		int ntoread;
		int burstbytes;
		int tmp_error;

		rbp = getblk(vp, loffset, blksize,
			     GETBLK_SZMATCH|GETBLK_NOWAIT, 0);
		if (rbp == NULL)
			goto no_read_ahead;
		if ((rbp->b_flags & B_CACHE)) {
			bqrelse(rbp);
			goto no_read_ahead;
		}

		/*
		 * An error from the read-ahead bmap has nothing to do
		 * with the caller's original request.
		 */
		tmp_error = VOP_BMAP(vp, loffset, &doffset,
				     &burstbytes, NULL, BUF_CMD_READ);
		if (tmp_error || doffset == NOOFFSET) {
			rbp->b_flags |= B_INVAL;
			brelse(rbp);
			rbp = NULL;
			goto no_read_ahead;
		}
		ntoread = burstbytes / blksize;
		nblksread = (totread + blksize - 1) / blksize;
		if (seqcount < nblksread)
			seqcount = nblksread;
		if (ntoread > seqcount)
			ntoread = seqcount;

		/*
		 * rbp: async read
		 */
		rbp->b_cmd = BUF_CMD_READ;
		rbp->b_flags |= B_RAM/* | B_AGE*/;

		if (burstbytes) {
			rbp = cluster_rbuild(vp, filesize, loffset,
					     doffset, blksize,
					     ntoread, rbp);
		} else {
			rbp->b_bio2.bio_offset = doffset;
		}
#if defined(CLUSTERDEBUG)
		if (rcluster) {
			if (bp)
				kprintf("A+(%lld,%d,%lld,%d) ",
				    rbp->b_loffset, rbp->b_bcount,
				    rbp->b_loffset - origoffset,
				    seqcount);
			else
				kprintf("A(%lld,%d,%lld,%d) ",
				    rbp->b_loffset, rbp->b_bcount,
				    rbp->b_loffset - origoffset,
				    seqcount);
		}
#endif
		rbp->b_flags &= ~(B_ERROR|B_INVAL);

		if ((rbp->b_flags & B_CLUSTER) == 0)
			vfs_busy_pages(vp, rbp);
		BUF_KERNPROC(rbp);
		vn_strategy(vp, &rbp->b_bio1);
		/* rbp invalid now */
	}

	/*
	 * Wait for our original buffer to complete its I/O.  reqbp will
	 * be NULL if the original buffer was B_CACHE.  We are returning
	 * (*bpp) which is the same as reqbp when reqbp != NULL.
	 */
no_read_ahead:
	if (reqbp) {
		KKASSERT(reqbp->b_bio1.bio_flags & BIO_SYNC);
		error = biowait(&reqbp->b_bio1, "clurd");
	}
	return (error);
}
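
/*
 * Illustrative sketch (editorial addition, not part of the original
 * source): how a filesystem read path might choose between bread() and
 * cluster_read().  The function name example_read, the constant
 * EXAMPLE_BSIZE and the 'filesize' argument are assumptions made only
 * for the example; error and EOF handling is simplified.
 */
#if 0
static int
example_read(struct vnode *vp, struct uio *uio, int ioflag, off_t filesize)
{
	struct buf *bp;
	off_t loffset = uio->uio_offset - (uio->uio_offset % EXAMPLE_BSIZE);
	int seqcount = ioflag >> IO_SEQSHIFT;
	int error;

	if (seqcount > 1) {
		/* sequential access: let the cluster code do read-ahead */
		error = cluster_read(vp, filesize, loffset, EXAMPLE_BSIZE,
				     uio->uio_resid, seqcount, &bp);
	} else {
		/* random access: a plain single-block read */
		error = bread(vp, loffset, EXAMPLE_BSIZE, &bp);
	}
	if (error) {
		brelse(bp);
		return (error);
	}
	error = uiomove(bp->b_data, EXAMPLE_BSIZE, uio);
	bqrelse(bp);
	return (error);
}
#endif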

/*
 * If blocks are contiguous on disk, use this to provide clustered
 * read ahead.  We will read as many blocks as possible sequentially
 * and then parcel them up into logical blocks in the buffer hash table.
 *
 * This function either returns a cluster buf or it returns fbp.  fbp is
 * already expected to be set up as a synchronous or asynchronous request.
 *
 * If a cluster buf is returned it will always be async.
 */
static struct buf *
cluster_rbuild(struct vnode *vp, off_t filesize, off_t loffset, off_t doffset,
	       int blksize, int run, struct buf *fbp)
{
	struct buf *bp, *tbp;
	off_t boffset;
	int i, j;
	int maxiosize = vmaxiosize(vp);

	/*
	 * avoid a division
	 */
	while (loffset + run * blksize > filesize) {
		--run;
	}

	tbp = fbp;
	tbp->b_bio2.bio_offset = doffset;
	if((tbp->b_flags & B_MALLOC) ||
	   ((tbp->b_flags & B_VMIO) == 0) || (run <= 1)) {
		return tbp;
	}

	bp = trypbuf(&cluster_pbuf_freecnt);
	if (bp == NULL) {
		return tbp;
	}

	/*
	 * We are synthesizing a buffer out of vm_page_t's, but
	 * if the block size is not page aligned then the starting
	 * address may not be either.  Inherit the b_data offset
	 * from the original buffer.
	 */
	bp->b_data = (char *)((vm_offset_t)bp->b_data |
	    ((vm_offset_t)tbp->b_data & PAGE_MASK));
	bp->b_flags |= B_CLUSTER | B_VMIO;
	bp->b_cmd = BUF_CMD_READ;
	bp->b_bio1.bio_done = cluster_callback;		/* default to async */
	bp->b_bio1.bio_caller_info1.cluster_head = NULL;
	bp->b_bio1.bio_caller_info2.cluster_tail = NULL;
	bp->b_loffset = loffset;
	bp->b_bio2.bio_offset = doffset;
	KASSERT(bp->b_loffset != NOOFFSET,
		("cluster_rbuild: no buffer offset"));

	bp->b_bcount = 0;
	bp->b_bufsize = 0;
	bp->b_xio.xio_npages = 0;

	for (boffset = doffset, i = 0; i < run; ++i, boffset += blksize) {
		if (i) {
			if ((bp->b_xio.xio_npages * PAGE_SIZE) +
			    round_page(blksize) > maxiosize) {
				break;
			}

			/*
			 * Shortcut some checks and try to avoid buffers that
			 * would block in the lock.  The same checks have to
			 * be made again after we officially get the buffer.
			 */
			tbp = getblk(vp, loffset + i * blksize, blksize,
				     GETBLK_SZMATCH|GETBLK_NOWAIT, 0);
			if (tbp == NULL)
				break;
			for (j = 0; j < tbp->b_xio.xio_npages; j++) {
				if (tbp->b_xio.xio_pages[j]->valid)
					break;
			}
			if (j != tbp->b_xio.xio_npages) {
				bqrelse(tbp);
				break;
			}

			/*
			 * Stop scanning if the buffer is fully valid
			 * (marked B_CACHE), or locked (may be doing a
			 * background write), or if the buffer is not
			 * VMIO backed.  The clustering code can only deal
			 * with VMIO-backed buffers.
			 */
			if ((tbp->b_flags & (B_CACHE|B_LOCKED)) ||
			    (tbp->b_flags & B_VMIO) == 0 ||
			    (LIST_FIRST(&tbp->b_dep) != NULL &&
			     buf_checkread(tbp))
			) {
				bqrelse(tbp);
				break;
			}

			/*
			 * The buffer must be completely invalid in order to
			 * take part in the cluster.  If it is partially valid
			 * then we stop.
			 */
			for (j = 0; j < tbp->b_xio.xio_npages; j++) {
				if (tbp->b_xio.xio_pages[j]->valid)
					break;
			}
			if (j != tbp->b_xio.xio_npages) {
				bqrelse(tbp);
				break;
			}

			/*
			 * Set a read-ahead mark as appropriate
			 */
			if (i == 1 || i == (run - 1))
				tbp->b_flags |= B_RAM;

			/*
			 * Depress the priority of buffers not explicitly
			 * requested.
			 */
			/* tbp->b_flags |= B_AGE; */

			/*
			 * Set the block number if it isn't set, otherwise
			 * if it is make sure it matches the block number we
			 * expect.
			 */
			if (tbp->b_bio2.bio_offset == NOOFFSET) {
				tbp->b_bio2.bio_offset = boffset;
			} else if (tbp->b_bio2.bio_offset != boffset) {
				brelse(tbp);
				break;
			}
		}

		/*
		 * The passed-in tbp (i == 0) will already be set up for
		 * async or sync operation.  All other tbp's acquired in
		 * our loop are set up for async operation.
		 */
		tbp->b_cmd = BUF_CMD_READ;
		BUF_KERNPROC(tbp);
		cluster_append(&bp->b_bio1, tbp);
		for (j = 0; j < tbp->b_xio.xio_npages; ++j) {
			vm_page_t m;
			m = tbp->b_xio.xio_pages[j];
			vm_page_io_start(m);
			vm_object_pip_add(m->object, 1);
			if ((bp->b_xio.xio_npages == 0) ||
			    (bp->b_xio.xio_pages[bp->b_xio.xio_npages-1] != m)) {
				bp->b_xio.xio_pages[bp->b_xio.xio_npages] = m;
				bp->b_xio.xio_npages++;
			}
			if ((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL)
				tbp->b_xio.xio_pages[j] = bogus_page;
		}
		/*
		 * XXX shouldn't this be += blksize for both, like in
		 * cluster_wbuild()?
		 *
		 * Don't inherit tbp->b_bufsize as it may be larger due to
		 * a non-page-aligned size.  Instead just aggregate using
		 * 'blksize'.
		 */
		if (tbp->b_bcount != blksize)
			kprintf("warning: tbp->b_bcount wrong %d vs %d\n", tbp->b_bcount, blksize);
		if (tbp->b_bufsize != blksize)
			kprintf("warning: tbp->b_bufsize wrong %d vs %d\n", tbp->b_bufsize, blksize);
		bp->b_bcount += blksize;
		bp->b_bufsize += blksize;
	}

	/*
	 * Fully valid pages in the cluster are already good and do not need
	 * to be re-read from disk.  Replace the page with bogus_page
	 */
	for (j = 0; j < bp->b_xio.xio_npages; j++) {
		if ((bp->b_xio.xio_pages[j]->valid & VM_PAGE_BITS_ALL) ==
		    VM_PAGE_BITS_ALL) {
			bp->b_xio.xio_pages[j] = bogus_page;
		}
	}
	if (bp->b_bufsize > bp->b_kvasize) {
		panic("cluster_rbuild: b_bufsize(%d) > b_kvasize(%d)",
		      bp->b_bufsize, bp->b_kvasize);
	}
	pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
		    (vm_page_t *)bp->b_xio.xio_pages, bp->b_xio.xio_npages);
	BUF_KERNPROC(bp);
	return (bp);
}
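
/*
 * Note (editorial): the cluster buf returned above is a pbuf whose
 * b_data maps the component buffers' pages back-to-back via
 * pmap_qenter().  Pages that are already fully valid are replaced by
 * bogus_page so the device overwrites scratch memory rather than good
 * data.  Completion is fanned back out to the component buffers by
 * cluster_callback() below.
 */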

/*
 * Cleanup after a clustered read or write.
 * This is complicated by the fact that any of the buffers might have
 * extra memory (if there were no empty buffer headers at allocbuf time)
 * that we will need to shift around.
 *
 * The returned bio is &bp->b_bio1
 */
void
cluster_callback(struct bio *bio)
{
	struct buf *bp = bio->bio_buf;
	struct buf *tbp;
	int error = 0;

	/*
	 * Must propagate errors to all the components.  A short read (EOF)
	 * is a critical error.
	 */
	if (bp->b_flags & B_ERROR) {
		error = bp->b_error;
	} else if (bp->b_bcount != bp->b_bufsize) {
		panic("cluster_callback: unexpected EOF on cluster %p!", bio);
	}

	pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_xio.xio_npages);
	/*
	 * Move memory from the large cluster buffer into the component
	 * buffers and mark IO as done on these.  Since the memory map
	 * is the same, no actual copying is required.
	 */
	while ((tbp = bio->bio_caller_info1.cluster_head) != NULL) {
		bio->bio_caller_info1.cluster_head = tbp->b_cluster_next;
		if (error) {
			tbp->b_flags |= B_ERROR;
			tbp->b_error = error;
		} else {
			tbp->b_dirtyoff = tbp->b_dirtyend = 0;
			tbp->b_flags &= ~(B_ERROR|B_INVAL);
			/*
			 * XXX the bdwrite()/bqrelse() issued during
			 * cluster building clears B_RELBUF (see bqrelse()
			 * comment).  If direct I/O was specified, we have
			 * to restore it here to allow the buffer and VM
			 * to be freed.
			 */
			if (tbp->b_flags & B_DIRECT)
				tbp->b_flags |= B_RELBUF;
		}
		biodone(&tbp->b_bio1);
	}
	relpbuf(bp, &cluster_pbuf_freecnt);
}

/*
 * cluster_wbuild_wb:
 *
 *	Implement modified write build for cluster.
 *
 *	write_behind = 0	write behind disabled
 *	write_behind = 1	write behind normal (default)
 *	write_behind = 2	write behind backed-off
 */

static __inline int
cluster_wbuild_wb(struct vnode *vp, int blksize, off_t start_loffset, int len)
{
	int r = 0;

	switch(write_behind) {
	case 2:
		if (start_loffset < len)
			break;
		start_loffset -= len;
		/* fall through */
	case 1:
		r = cluster_wbuild(vp, blksize, start_loffset, len);
		/* fall through */
	default:
		/* fall through */
		break;
	}
	return(r);
}
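
/*
 * Editorial example of the write_behind modes handled above, assuming a
 * request with start_loffset = 1MB and len = 64KB: mode 1 issues
 * cluster_wbuild() on [1MB, 1MB + 64KB); mode 2 first backs the window
 * off by 'len' and flushes [1MB - 64KB, 1MB), deferring the most
 * recently dirtied range; mode 0 flushes nothing.
 */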

/*
 * Do clustered write for FFS.
 *
 * Four cases:
 *	1.	Write is not sequential (write asynchronously)
 *	Write is sequential:
 *	2.	beginning of cluster - begin cluster
 *	3.	middle of a cluster - add to cluster
 *	4.	end of a cluster - asynchronously write cluster
 */
void
cluster_write(struct buf *bp, off_t filesize, int blksize, int seqcount)
{
	struct vnode *vp;
	off_t loffset;
	int maxclen, cursize;
	int async;

	vp = bp->b_vp;
	if (vp->v_type == VREG)
		async = vp->v_mount->mnt_flag & MNT_ASYNC;
	else
		async = 0;
	loffset = bp->b_loffset;
	KASSERT(bp->b_loffset != NOOFFSET,
		("cluster_write: no buffer offset"));

	/* Initialize vnode to beginning of file. */
	if (loffset == 0)
		vp->v_lasta = vp->v_clen = vp->v_cstart = vp->v_lastw = 0;

	if (vp->v_clen == 0 || loffset != vp->v_lastw + blksize ||
	    bp->b_bio2.bio_offset == NOOFFSET ||
	    (bp->b_bio2.bio_offset != vp->v_lasta + blksize)) {
		maxclen = vmaxiosize(vp);
		if (vp->v_clen != 0) {
			/*
			 * Next block is not sequential.
			 *
			 * If we are not writing at end of file, the process
			 * sought to another point in the file since its last
			 * write, or we have reached our maximum cluster size,
			 * then push the previous cluster.  Otherwise try
			 * reallocating to make it sequential.
			 *
			 * Change to algorithm: only push previous cluster if
			 * it was sequential from the point of view of the
			 * seqcount heuristic, otherwise leave the buffer
			 * intact so we can potentially optimize the I/O
			 * later on in the buf_daemon or update daemon
			 * flush.
			 */
			cursize = vp->v_lastw - vp->v_cstart + blksize;
			if (bp->b_loffset + blksize != filesize ||
			    loffset != vp->v_lastw + blksize || vp->v_clen <= cursize) {
				if (!async && seqcount > 0) {
					cluster_wbuild_wb(vp, blksize,
						vp->v_cstart, cursize);
				}
			} else {
				struct buf **bpp, **endbp;
				struct cluster_save *buflist;

				buflist = cluster_collectbufs(vp, bp, blksize);
				endbp = &buflist->bs_children
				    [buflist->bs_nchildren - 1];
				if (VOP_REALLOCBLKS(vp, buflist)) {
					/*
					 * Failed, push the previous cluster
					 * if *really* writing sequentially
					 * in the logical file (seqcount > 1),
					 * otherwise delay it in the hopes that
					 * the low level disk driver can
					 * optimize the write ordering.
					 */
					for (bpp = buflist->bs_children;
					     bpp < endbp; bpp++)
						brelse(*bpp);
					kfree(buflist, M_SEGMENT);
					if (seqcount > 1) {
						cluster_wbuild_wb(vp,
						    blksize, vp->v_cstart,
						    cursize);
					}
				} else {
					/*
					 * Succeeded, keep building cluster.
					 */
					for (bpp = buflist->bs_children;
					     bpp <= endbp; bpp++)
						bdwrite(*bpp);
					kfree(buflist, M_SEGMENT);
					vp->v_lastw = loffset;
					vp->v_lasta = bp->b_bio2.bio_offset;
					return;
				}
			}
		}
		/*
		 * Consider beginning a cluster.  If at end of file, make
		 * cluster as large as possible, otherwise find size of
		 * existing cluster.
		 */
		if ((vp->v_type == VREG) &&
		    bp->b_loffset + blksize != filesize &&
		    (bp->b_bio2.bio_offset == NOOFFSET) &&
		    (VOP_BMAP(vp, loffset, &bp->b_bio2.bio_offset, &maxclen, NULL, BUF_CMD_WRITE) ||
		     bp->b_bio2.bio_offset == NOOFFSET)) {
			bawrite(bp);
			vp->v_clen = 0;
			vp->v_lasta = bp->b_bio2.bio_offset;
			vp->v_cstart = loffset + blksize;
			vp->v_lastw = loffset;
			return;
		}
		if (maxclen > blksize)
			vp->v_clen = maxclen - blksize;
		else
			vp->v_clen = 0;
		if (!async && vp->v_clen == 0) {	/* I/O not contiguous */
			vp->v_cstart = loffset + blksize;
			bawrite(bp);
		} else {			/* Wait for rest of cluster */
			vp->v_cstart = loffset;
			bdwrite(bp);
		}
	} else if (loffset == vp->v_cstart + vp->v_clen) {
		/*
		 * At end of cluster, write it out if seqcount tells us we
		 * are operating sequentially, otherwise let the buf or
		 * update daemon handle it.
		 */
		bdwrite(bp);
		if (seqcount > 1)
			cluster_wbuild_wb(vp, blksize, vp->v_cstart,
					  vp->v_clen + blksize);
		vp->v_clen = 0;
		vp->v_cstart = loffset + blksize;
	} else if (vm_page_count_severe()) {
		/*
		 * We are low on memory, get it going NOW
		 */
		bawrite(bp);
	} else {
		/*
		 * In the middle of a cluster, so just delay the I/O for now.
		 */
		bdwrite(bp);
	}
	vp->v_lastw = loffset;
	vp->v_lasta = bp->b_bio2.bio_offset;
}
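
/*
 * Illustrative sketch (editorial addition, not part of the original
 * source): a filesystem write path deciding between bwrite(), bawrite()
 * and cluster_write().  example_write_complete, EXAMPLE_BSIZE and
 * 'newfilesize' are assumed names used only for this example.
 */
#if 0
static void
example_write_complete(struct buf *bp, int ioflag, off_t newfilesize)
{
	int seqcount = ioflag >> IO_SEQSHIFT;

	if (ioflag & IO_SYNC) {
		/* caller wants the data on stable storage before returning */
		bwrite(bp);
	} else if (vm_page_count_severe()) {
		/* memory pressure: start the write immediately */
		bawrite(bp);
	} else {
		/* normal case: let the clustering heuristics decide */
		cluster_write(bp, newfilesize, EXAMPLE_BSIZE, seqcount);
	}
}
#endif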


/*
 * This is an awful lot like cluster_rbuild...wish they could be combined.
 * Write out 'bytes' worth of dirty buffers starting at 'start_loffset',
 * clustering contiguous, VMIO-backed delayed-write buffers into larger
 * writes where possible.  Returns the number of bytes of buffers issued.
 */
int
cluster_wbuild(struct vnode *vp, int blksize, off_t start_loffset, int bytes)
{
	struct buf *bp, *tbp;
	int i, j;
	int totalwritten = 0;
	int maxiosize = vmaxiosize(vp);

	while (bytes > 0) {
		/*
		 * If the buffer is not delayed-write (i.e. dirty), or it
		 * is delayed-write but either locked or inval, it cannot
		 * partake in the clustered write.
		 */
		tbp = findblk(vp, start_loffset, FINDBLK_NBLOCK);
		if (tbp == NULL ||
		    (tbp->b_flags & (B_LOCKED | B_INVAL | B_DELWRI)) != B_DELWRI ||
		    (LIST_FIRST(&tbp->b_dep) && buf_checkwrite(tbp))) {
			if (tbp)
				BUF_UNLOCK(tbp);
			start_loffset += blksize;
			bytes -= blksize;
			continue;
		}
		bremfree(tbp);
		KKASSERT(tbp->b_cmd == BUF_CMD_DONE);

		/*
		 * Extra memory in the buffer, punt on this buffer.
		 * XXX we could handle this in most cases, but we would
		 * have to push the extra memory down to after our max
		 * possible cluster size and then potentially pull it back
		 * up if the cluster was terminated prematurely--too much
		 * hassle.
		 */
		if (((tbp->b_flags & (B_CLUSTEROK|B_MALLOC)) != B_CLUSTEROK) ||
		    (tbp->b_bcount != tbp->b_bufsize) ||
		    (tbp->b_bcount != blksize) ||
		    (bytes == blksize) ||
		    ((bp = getpbuf(&cluster_pbuf_freecnt)) == NULL)) {
			totalwritten += tbp->b_bufsize;
			bawrite(tbp);
			start_loffset += blksize;
			bytes -= blksize;
			continue;
		}

		/*
		 * Set up the pbuf.  Track our append point with b_bcount
		 * and b_bufsize.  b_bufsize is not used by the device but
		 * our caller uses it to loop clusters and we use it to
		 * detect a premature EOF on the block device.
		 */
		bp->b_bcount = 0;
		bp->b_bufsize = 0;
		bp->b_xio.xio_npages = 0;
		bp->b_loffset = tbp->b_loffset;
		bp->b_bio2.bio_offset = tbp->b_bio2.bio_offset;

		/*
		 * We are synthesizing a buffer out of vm_page_t's, but
		 * if the block size is not page aligned then the starting
		 * address may not be either.  Inherit the b_data offset
		 * from the original buffer.
		 */
		bp->b_data = (char *)((vm_offset_t)bp->b_data |
		    ((vm_offset_t)tbp->b_data & PAGE_MASK));
		bp->b_flags &= ~B_ERROR;
		bp->b_flags |= B_CLUSTER | B_BNOCLIP |
			(tbp->b_flags & (B_VMIO | B_NEEDCOMMIT));
		bp->b_bio1.bio_caller_info1.cluster_head = NULL;
		bp->b_bio1.bio_caller_info2.cluster_tail = NULL;

		/*
		 * From this location in the file, scan forward to see
		 * if there are buffers with adjacent data that need to
		 * be written as well.
		 */
		for (i = 0; i < bytes; (i += blksize), (start_loffset += blksize)) {
			if (i != 0) { /* If not the first buffer */
				tbp = findblk(vp, start_loffset,
					      FINDBLK_NBLOCK);
				/*
				 * Buffer not found or could not be locked
				 * non-blocking.
				 */
				if (tbp == NULL)
					break;

				/*
				 * If it IS in core, but has different
				 * characteristics, then don't cluster
				 * with it.
				 */
				if ((tbp->b_flags & (B_VMIO | B_CLUSTEROK |
				     B_INVAL | B_DELWRI | B_NEEDCOMMIT))
				    != (B_DELWRI | B_CLUSTEROK |
				     (bp->b_flags & (B_VMIO | B_NEEDCOMMIT))) ||
				    (tbp->b_flags & B_LOCKED) ||
				    (LIST_FIRST(&tbp->b_dep) &&
				     buf_checkwrite(tbp))
				) {
					BUF_UNLOCK(tbp);
					break;
				}

				/*
				 * Check that the combined cluster
				 * would make sense with regard to pages
				 * and would not be too large
				 */
				if ((tbp->b_bcount != blksize) ||
				    ((bp->b_bio2.bio_offset + i) !=
				     tbp->b_bio2.bio_offset) ||
				    ((tbp->b_xio.xio_npages + bp->b_xio.xio_npages) >
				     (maxiosize / PAGE_SIZE))) {
					BUF_UNLOCK(tbp);
					break;
				}
				/*
				 * Ok, it's passed all the tests,
				 * so remove it from the free list
				 * and mark it busy. We will use it.
				 */
				bremfree(tbp);
				KKASSERT(tbp->b_cmd == BUF_CMD_DONE);
			} /* end of code for non-first buffers only */

			/*
			 * If the IO is via the VM then we do some
			 * special VM hackery (yuck).  Since the buffer's
			 * block size may not be page-aligned it is possible
			 * for a page to be shared between two buffers.  We
			 * have to get rid of the duplication when building
			 * the cluster.
			 */
			if (tbp->b_flags & B_VMIO) {
				vm_page_t m;

				if (i != 0) { /* if not first buffer */
					for (j = 0; j < tbp->b_xio.xio_npages; ++j) {
						m = tbp->b_xio.xio_pages[j];
						if (m->flags & PG_BUSY) {
							bqrelse(tbp);
							goto finishcluster;
						}
					}
				}

				for (j = 0; j < tbp->b_xio.xio_npages; ++j) {
					m = tbp->b_xio.xio_pages[j];
					vm_page_io_start(m);
					vm_object_pip_add(m->object, 1);
					if ((bp->b_xio.xio_npages == 0) ||
					    (bp->b_xio.xio_pages[bp->b_xio.xio_npages - 1] != m)) {
						bp->b_xio.xio_pages[bp->b_xio.xio_npages] = m;
						bp->b_xio.xio_npages++;
					}
				}
			}
			bp->b_bcount += blksize;
			bp->b_bufsize += blksize;

			bundirty(tbp);
			tbp->b_flags &= ~B_ERROR;
			tbp->b_cmd = BUF_CMD_WRITE;
			BUF_KERNPROC(tbp);
			cluster_append(&bp->b_bio1, tbp);

			/*
			 * check for latent dependencies to be handled
			 */
			if (LIST_FIRST(&tbp->b_dep) != NULL)
				buf_start(tbp);
		}
	finishcluster:
		pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
			    (vm_page_t *) bp->b_xio.xio_pages, bp->b_xio.xio_npages);
		if (bp->b_bufsize > bp->b_kvasize) {
			panic(
			    "cluster_wbuild: b_bufsize(%d) > b_kvasize(%d)\n",
			    bp->b_bufsize, bp->b_kvasize);
		}
		totalwritten += bp->b_bufsize;
		bp->b_dirtyoff = 0;
		bp->b_dirtyend = bp->b_bufsize;
		bp->b_bio1.bio_done = cluster_callback;
		bp->b_cmd = BUF_CMD_WRITE;

		vfs_busy_pages(vp, bp);
		bp->b_runningbufspace = bp->b_bufsize;
		if (bp->b_runningbufspace) {
			runningbufspace += bp->b_runningbufspace;
			++runningbufcount;
		}
		BUF_KERNPROC(bp);
		vn_strategy(vp, &bp->b_bio1);

		bytes -= i;
	}
	return totalwritten;
}

/*
 * Collect together all the buffers in a cluster.
 * Plus add one additional buffer.
 */
static struct cluster_save *
cluster_collectbufs(struct vnode *vp, struct buf *last_bp, int blksize)
{
	struct cluster_save *buflist;
	struct buf *bp;
	off_t loffset;
	int i, len;

	len = (int)(vp->v_lastw - vp->v_cstart + blksize) / blksize;
	buflist = kmalloc(sizeof(struct buf *) * (len + 1) + sizeof(*buflist),
			  M_SEGMENT, M_WAITOK);
	buflist->bs_nchildren = 0;
	buflist->bs_children = (struct buf **) (buflist + 1);
	for (loffset = vp->v_cstart, i = 0; i < len; (loffset += blksize), i++) {
		(void) bread(vp, loffset, last_bp->b_bcount, &bp);
		buflist->bs_children[i] = bp;
		if (bp->b_bio2.bio_offset == NOOFFSET) {
			VOP_BMAP(bp->b_vp, bp->b_loffset,
				 &bp->b_bio2.bio_offset,
				 NULL, NULL, BUF_CMD_WRITE);
		}
	}
	buflist->bs_children[i] = bp = last_bp;
	if (bp->b_bio2.bio_offset == NOOFFSET) {
		VOP_BMAP(bp->b_vp, bp->b_loffset, &bp->b_bio2.bio_offset,
			 NULL, NULL, BUF_CMD_WRITE);
	}
	buflist->bs_nchildren = i + 1;
	return (buflist);
}

void
cluster_append(struct bio *bio, struct buf *tbp)
{
	tbp->b_cluster_next = NULL;
	if (bio->bio_caller_info1.cluster_head == NULL) {
		bio->bio_caller_info1.cluster_head = tbp;
		bio->bio_caller_info2.cluster_tail = tbp;
	} else {
		bio->bio_caller_info2.cluster_tail->b_cluster_next = tbp;
		bio->bio_caller_info2.cluster_tail = tbp;
	}
}
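
/*
 * Note (editorial): cluster_append() threads the component buffers onto
 * a singly-linked list through b_cluster_next, with the head and tail
 * kept in the cluster bio's caller_info fields.  cluster_callback()
 * walks and empties that list when the cluster I/O completes.
 */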