3 * The Regents of the University of California. All rights reserved.
4 * Modifications/enhancements:
5 * Copyright (c) 1995 John S. Dyson. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. Neither the name of the University nor the names of its contributors
16 * may be used to endorse or promote products derived from this software
17 * without specific prior written permission.
19 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 * @(#)vfs_cluster.c 8.7 (Berkeley) 2/13/94
32 * $FreeBSD: src/sys/kern/vfs_cluster.c,v 1.92.2.9 2001/11/18 07:10:59 dillon Exp $
33 * $DragonFly: src/sys/kern/vfs_cluster.c,v 1.40 2008/07/14 03:09:00 dillon Exp $
36 #include "opt_debug_cluster.h"
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/kernel.h>
43 #include <sys/vnode.h>
44 #include <sys/malloc.h>
45 #include <sys/mount.h>
46 #include <sys/resourcevar.h>
47 #include <sys/vmmeter.h>
49 #include <vm/vm_object.h>
50 #include <vm/vm_page.h>
51 #include <sys/sysctl.h>
54 #include <vm/vm_page2.h>
56 #include <machine/limits.h>
58 #if defined(CLUSTERDEBUG)
59 #include <sys/sysctl.h>
60 static int rcluster = 0;
61 SYSCTL_INT(_debug, OID_AUTO, rcluster, CTLFLAG_RW, &rcluster, 0, "");
64 static MALLOC_DEFINE(M_SEGMENT, "cluster_save", "cluster_save buffer");
66 static struct cluster_save *
67 cluster_collectbufs (struct vnode *vp, struct buf *last_bp,
70 cluster_rbuild (struct vnode *vp, off_t filesize, off_t loffset,
71 off_t doffset, int blksize, int run,
73 static void cluster_callback (struct bio *);
74 static void cluster_setram (struct buf *);
75 static int cluster_wbuild(struct vnode *vp, struct buf **bpp, int blksize,
76 off_t start_loffset, int bytes);
78 static int write_behind = 1;
79 SYSCTL_INT(_vfs, OID_AUTO, write_behind, CTLFLAG_RW, &write_behind, 0,
80 "Cluster write-behind setting");
81 static quad_t write_behind_minfilesize = 10 * 1024 * 1024;
82 SYSCTL_QUAD(_vfs, OID_AUTO, write_behind_minfilesize, CTLFLAG_RW,
83 &write_behind_minfilesize, 0, "Cluster write-behind setting");
84 static int max_readahead = 2 * 1024 * 1024;
85 SYSCTL_INT(_vfs, OID_AUTO, max_readahead, CTLFLAG_RW, &max_readahead, 0,
86 "Limit in bytes for desired cluster read-ahead");
88 extern vm_page_t bogus_page;
90 extern int cluster_pbuf_freecnt;
93 * This replaces bread(), providing a synchronous read of the requested
94 * buffer plus asynchronous read-ahead within the specified bounds.
96 * The caller may pre-populate *bpp if it already has the requested buffer
97 * in-hand, else must set *bpp to NULL. Note that the cluster_read() inline
98 * sets *bpp to NULL and then calls cluster_readx() for compatibility.
100 * filesize - read-ahead @ blksize will not cross this boundary
101 * loffset - loffset for returned *bpp
102 * blksize - blocksize for returned *bpp and read-ahead bps
103 * minreq - minimum (not a hard minimum) in bytes, typically reflects
104 * a higher level uio resid.
105 *	maxreq	- maximum (sequential heuristic) in bytes (highest typ ~2MB)
106 * bpp - return buffer (*bpp) for (loffset,blksize)
109 cluster_readx(struct vnode *vp, off_t filesize, off_t loffset,
110 int blksize, size_t minreq, size_t maxreq, struct buf **bpp)
112 struct buf *bp, *rbp, *reqbp;
123 * Calculate the desired read-ahead in blksize'd blocks (maxra).
124 * To do this we calculate maxreq.
126 * maxreq typically starts out as a sequential heuristic. If the
127 * high level uio/resid is bigger (minreq), we pop maxreq up to
128 * minreq. This represents the case where random I/O is being
129 * performed but userland is issuing big read()'s.
131 * Then we limit maxreq to max_readahead to ensure it is a reasonable
134 * Finally we must ensure that (loffset + maxreq) does not cross the
135 * boundary (filesize) for the current blocksize. If we allowed it
136 * to cross we could end up with buffers past the boundary with the
137 * wrong block size (HAMMER large-data areas use mixed block sizes).
138 * minreq is also absolutely limited to filesize.
142 /* minreq not used beyond this point */
144 if (maxreq > max_readahead) {
145 maxreq = max_readahead;
146 if (maxreq > 16 * 1024 * 1024)
147 maxreq = 16 * 1024 * 1024;
149 if (maxreq < blksize)
151 if (loffset + maxreq > filesize) {
152 if (loffset > filesize)
155 maxreq = filesize - loffset;
158 maxra = (int)(maxreq / blksize);
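/*
 * Worked example (illustrative): with blksize = 16384 and maxreq
 * clamped to the 2MB max_readahead default, maxra comes out to
 * 2097152 / 16384 = 128 blocks of read-ahead.  Each individual
 * cluster I/O built below is further limited by maxrbuild.
 */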
161 * Get the requested block.
166 *bpp = reqbp = bp = getblk(vp, loffset, blksize, 0, 0);
167 origoffset = loffset;
170 * Calculate the maximum cluster size for a single I/O, used
171 * by cluster_rbuild().
173 maxrbuild = vmaxiosize(vp) / blksize;
176 * if it is in the cache, then check to see if the reads have been
177 * sequential. If they have, then try some read-ahead, otherwise
178 * back-off on prospective read-aheads.
180 if (bp->b_flags & B_CACHE) {
182 * Not sequential, do not do any read-ahead
188 * No read-ahead mark, do not do any read-ahead
191 if ((bp->b_flags & B_RAM) == 0)
195 * We hit a read-ahead-mark, figure out how much read-ahead
196 * to do (maxra) and where to start (loffset).
198 * Shortcut the scan. Typically the way this works is that
199 * we've built up all the blocks in between except for the
200 * last in previous iterations, so if the second-to-last
201 * block is present we just skip ahead to it.
203 * This algorithm has O(1) cpu in the steady state no
204 * matter how large maxra is.
206 bp->b_flags &= ~B_RAM;
208 if (findblk(vp, loffset + (maxra - 2) * blksize, FINDBLK_TEST))
213 if (findblk(vp, loffset + i * blksize,
214 FINDBLK_TEST) == NULL) {
221 * We got everything or everything is in the cache, no
228 * Calculate where to start the read-ahead and how much
229 * to do. Generally speaking we want to read-ahead by
230 * (maxra) when we've found a read-ahead mark. We do
231 * not want to reduce maxra here as it will cause
232 * successive read-ahead I/O's to be smaller and smaller.
234 * However, we have to make sure we don't break the
235 * filesize limitation for the clustered operation.
237 loffset += i * blksize;
240 if (loffset >= filesize)
242 if (loffset + maxra * blksize > filesize) {
243 maxreq = filesize - loffset;
244 maxra = (int)(maxreq / blksize);
247 __debugvar off_t firstread = bp->b_loffset;
251 * Set-up synchronous read for bp.
253 bp->b_cmd = BUF_CMD_READ;
254 bp->b_bio1.bio_done = biodone_sync;
255 bp->b_bio1.bio_flags |= BIO_SYNC;
257 KASSERT(firstread != NOOFFSET,
258 ("cluster_read: no buffer offset"));
261 * nblks is our cluster_rbuild request size, limited
262 * primarily by the device.
264 if ((nblks = maxra) > maxrbuild)
270 error = VOP_BMAP(vp, loffset, &doffset,
271 &burstbytes, NULL, BUF_CMD_READ);
273 goto single_block_read;
274 if (nblks > burstbytes / blksize)
275 nblks = burstbytes / blksize;
276 if (doffset == NOOFFSET)
277 goto single_block_read;
279 goto single_block_read;
281 bp = cluster_rbuild(vp, filesize, loffset,
282 doffset, blksize, nblks, bp);
283 loffset += bp->b_bufsize;
284 maxra -= bp->b_bufsize / blksize;
288 * If it isn't in the cache, then get a chunk from
289 * disk if sequential, otherwise just get the block.
298 * If B_CACHE was not set issue bp. bp will either be an
299 * asynchronous cluster buf or a synchronous single-buf.
300 * If it is a single buf it will be the same as reqbp.
302 * NOTE: Once an async cluster buf is issued bp becomes invalid.
305 #if defined(CLUSTERDEBUG)
307 kprintf("S(%012jx,%d,%d)\n",
308 (intmax_t)bp->b_loffset, bp->b_bcount, maxra);
310 if ((bp->b_flags & B_CLUSTER) == 0)
311 vfs_busy_pages(vp, bp);
312 bp->b_flags &= ~(B_ERROR|B_INVAL);
313 vn_strategy(vp, &bp->b_bio1);
320 * If we have been doing sequential I/O, then do some read-ahead.
321 * The code above us should have positioned us at the next likely
324 * Only mess with buffers which we can immediately lock. HAMMER
325 * will do device-readahead irrespective of what the blocks
328 while (error == 0 && maxra > 0) {
333 rbp = getblk(vp, loffset, blksize,
334 GETBLK_SZMATCH|GETBLK_NOWAIT, 0);
337 if ((rbp->b_flags & B_CACHE)) {
343 * An error from the read-ahead bmap has nothing to do
344 * with the caller's original request.
346 tmp_error = VOP_BMAP(vp, loffset, &doffset,
347 &burstbytes, NULL, BUF_CMD_READ);
348 if (tmp_error || doffset == NOOFFSET) {
349 rbp->b_flags |= B_INVAL;
354 if ((nblks = maxra) > maxrbuild)
356 if (nblks > burstbytes / blksize)
357 nblks = burstbytes / blksize;
362 rbp->b_cmd = BUF_CMD_READ;
363 /*rbp->b_flags |= B_AGE*/;
367 rbp = cluster_rbuild(vp, filesize, loffset,
371 rbp->b_bio2.bio_offset = doffset;
374 rbp->b_flags &= ~(B_ERROR|B_INVAL);
376 if ((rbp->b_flags & B_CLUSTER) == 0)
377 vfs_busy_pages(vp, rbp);
379 loffset += rbp->b_bufsize;
380 maxra -= rbp->b_bufsize / blksize;
381 vn_strategy(vp, &rbp->b_bio1);
382 /* rbp invalid now */
386 * Wait for our original buffer to complete its I/O. reqbp will
387 * be NULL if the original buffer was B_CACHE. We are returning
388 * (*bpp) which is the same as reqbp when reqbp != NULL.
392 KKASSERT(reqbp->b_bio1.bio_flags & BIO_SYNC);
393 error = biowait(&reqbp->b_bio1, "clurd");
399 * This replaces breadcb(), providing an asynchronous read of the requested
400 * buffer with a callback, plus an asynchronous read-ahead within the
403 * The callback must check whether BIO_DONE is set in the bio and issue
404 * the bpdone(bp, 0) if it isn't. The callback is responsible for clearing
405 * BIO_DONE and disposing of the I/O (bqrelse()ing it).
407 * filesize - read-ahead @ blksize will not cross this boundary
408 * loffset - loffset for returned *bpp
409 * blksize - blocksize for returned *bpp and read-ahead bps
410 * minreq - minimum (not a hard minimum) in bytes, typically reflects
411 * a higher level uio resid.
412 *	maxreq	- maximum (sequential heuristic) in bytes (highest typ ~2MB)
413 * bpp - return buffer (*bpp) for (loffset,blksize)
416 cluster_readcb(struct vnode *vp, off_t filesize, off_t loffset,
417 int blksize, size_t minreq, size_t maxreq,
418 void (*func)(struct bio *), void *arg)
420 struct buf *bp, *rbp, *reqbp;
428 * Calculate the desired read-ahead in blksize'd blocks (maxra).
429 * To do this we calculate maxreq.
431 * maxreq typically starts out as a sequential heuristic. If the
432 * high level uio/resid is bigger (minreq), we pop maxreq up to
433 * minreq. This represents the case where random I/O is being
434 * performed but userland is issuing big read()'s.
436 * Then we limit maxreq to max_readahead to ensure it is a reasonable
439 * Finally we must ensure that (loffset + maxreq) does not cross the
440 * boundary (filesize) for the current blocksize. If we allowed it
441 * to cross we could end up with buffers past the boundary with the
442 * wrong block size (HAMMER large-data areas use mixed block sizes).
443 * minreq is also absolutely limited to filesize.
447 /* minreq not used beyond this point */
449 if (maxreq > max_readahead) {
450 maxreq = max_readahead;
451 if (maxreq > 16 * 1024 * 1024)
452 maxreq = 16 * 1024 * 1024;
454 if (maxreq < blksize)
456 if (loffset + maxreq > filesize) {
457 if (loffset > filesize)
460 maxreq = filesize - loffset;
463 maxra = (int)(maxreq / blksize);
466 * Get the requested block.
468 reqbp = bp = getblk(vp, loffset, blksize, 0, 0);
469 origoffset = loffset;
472 * Calculate the maximum cluster size for a single I/O, used
473 * by cluster_rbuild().
475 maxrbuild = vmaxiosize(vp) / blksize;
478 * if it is in the cache, then check to see if the reads have been
479 * sequential. If they have, then try some read-ahead, otherwise
480 * back-off on prospective read-aheads.
482 if (bp->b_flags & B_CACHE) {
484 * Setup for func() call whether we do read-ahead or not.
486 bp->b_bio1.bio_caller_info1.ptr = arg;
487 bp->b_bio1.bio_flags |= BIO_DONE;
490 * Not sequential, do not do any read-ahead
496 * No read-ahead mark, do not do any read-ahead
499 if ((bp->b_flags & B_RAM) == 0)
501 bp->b_flags &= ~B_RAM;
504 * We hit a read-ahead-mark, figure out how much read-ahead
505 * to do (maxra) and where to start (loffset).
507 * Shortcut the scan. Typically the way this works is that
508 * we've built up all the blocks in between except for the
509 * last in previous iterations, so if the second-to-last
510 * block is present we just skip ahead to it.
512 * This algorithm has O(1) cpu in the steady state no
513 * matter how large maxra is.
515 if (findblk(vp, loffset + (maxra - 2) * blksize, FINDBLK_TEST))
520 if (findblk(vp, loffset + i * blksize,
521 FINDBLK_TEST) == NULL) {
528 * We got everything or everything is in the cache, no
535 * Calculate where to start the read-ahead and how much
536 * to do. Generally speaking we want to read-ahead by
537 * (maxra) when we've found a read-ahead mark. We do
538 * not want to reduce maxra here as it will cause
539 * successive read-ahead I/O's to be smaller and smaller.
541 * However, we have to make sure we don't break the
542 * filesize limitation for the clustered operation.
544 loffset += i * blksize;
546 /* leave reqbp intact to force function callback */
548 if (loffset >= filesize)
550 if (loffset + maxra * blksize > filesize) {
551 maxreq = filesize - loffset;
552 maxra = (int)(maxreq / blksize);
555 __debugvar off_t firstread = bp->b_loffset;
560 * Set-up asynchronous read for bp.
562 bp->b_flags &= ~(B_ERROR | B_EINTR | B_INVAL);
563 bp->b_cmd = BUF_CMD_READ;
564 bp->b_bio1.bio_done = func;
565 bp->b_bio1.bio_caller_info1.ptr = arg;
567 reqbp = NULL; /* don't func() reqbp, it's running async */
569 KASSERT(firstread != NOOFFSET,
570 ("cluster_read: no buffer offset"));
573 * nblks is our cluster_rbuild request size, limited
574 * primarily by the device.
576 if ((nblks = maxra) > maxrbuild)
582 tmp_error = VOP_BMAP(vp, loffset, &doffset,
583 &burstbytes, NULL, BUF_CMD_READ);
585 goto single_block_read;
586 if (nblks > burstbytes / blksize)
587 nblks = burstbytes / blksize;
588 if (doffset == NOOFFSET)
589 goto single_block_read;
591 goto single_block_read;
593 bp = cluster_rbuild(vp, filesize, loffset,
594 doffset, blksize, nblks, bp);
595 loffset += bp->b_bufsize;
596 maxra -= bp->b_bufsize / blksize;
600 * If it isn't in the cache, then get a chunk from
601 * disk if sequential, otherwise just get the block.
610 * If bp != NULL then B_CACHE was *NOT* set and bp must be issued.
611 * bp will either be an asynchronous cluster buf or an asynchronous single-buf.
614 * NOTE: Once an async cluster buf is issued bp becomes invalid.
617 #if defined(CLUSTERDEBUG)
619 kprintf("S(%012jx,%d,%d)\n",
620 (intmax_t)bp->b_loffset, bp->b_bcount, maxra);
622 if ((bp->b_flags & B_CLUSTER) == 0)
623 vfs_busy_pages(vp, bp);
624 bp->b_flags &= ~(B_ERROR|B_INVAL);
625 vn_strategy(vp, &bp->b_bio1);
631 * If we have been doing sequential I/O, then do some read-ahead.
632 * The code above us should have positioned us at the next likely
635 * Only mess with buffers which we can immediately lock. HAMMER
636 * will do device-readahead irrespective of what the blocks
644 rbp = getblk(vp, loffset, blksize,
645 GETBLK_SZMATCH|GETBLK_NOWAIT, 0);
648 if ((rbp->b_flags & B_CACHE)) {
654 * An error from the read-ahead bmap has nothing to do
655 * with the caller's original request.
657 tmp_error = VOP_BMAP(vp, loffset, &doffset,
658 &burstbytes, NULL, BUF_CMD_READ);
659 if (tmp_error || doffset == NOOFFSET) {
660 rbp->b_flags |= B_INVAL;
665 if ((nblks = maxra) > maxrbuild)
667 if (nblks > burstbytes / blksize)
668 nblks = burstbytes / blksize;
673 rbp->b_cmd = BUF_CMD_READ;
674 /*rbp->b_flags |= B_AGE*/;
678 rbp = cluster_rbuild(vp, filesize, loffset,
682 rbp->b_bio2.bio_offset = doffset;
685 rbp->b_flags &= ~(B_ERROR|B_INVAL);
687 if ((rbp->b_flags & B_CLUSTER) == 0)
688 vfs_busy_pages(vp, rbp);
690 loffset += rbp->b_bufsize;
691 maxra -= rbp->b_bufsize / blksize;
692 vn_strategy(vp, &rbp->b_bio1);
693 /* rbp invalid now */
697 * If reqbp is non-NULL it had B_CACHE set and we issue the
698 * function callback synchronously.
700 * Note that we may start additional asynchronous I/O before doing
701 * the func() callback for the B_CACHE case.
705 func(&reqbp->b_bio1);
709 * If blocks are contiguous on disk, use this to provide clustered
710 * read ahead. We will read as many blocks as possible sequentially
711 * and then parcel them up into logical blocks in the buffer hash table.
713 * This function either returns a cluster buf or it returns fbp. fbp is
714 * already expected to be set up as a synchronous or asynchronous request.
716 * If a cluster buf is returned it will always be async.
719 cluster_rbuild(struct vnode *vp, off_t filesize, off_t loffset, off_t doffset,
720 int blksize, int run, struct buf *fbp)
722 struct buf *bp, *tbp;
725 int maxiosize = vmaxiosize(vp);
730 while (loffset + run * blksize > filesize) {
735 tbp->b_bio2.bio_offset = doffset;
736 if ((tbp->b_flags & B_MALLOC) ||
737 ((tbp->b_flags & B_VMIO) == 0) || (run <= 1)) {
741 bp = trypbuf_kva(&cluster_pbuf_freecnt);
747 * We are synthesizing a buffer out of vm_page_t's, but
748 * if the block size is not page aligned then the starting
749 * address may not be either. Inherit the b_data offset
750 * from the original buffer.
752 bp->b_data = (char *)((vm_offset_t)bp->b_data |
753 ((vm_offset_t)tbp->b_data & PAGE_MASK));
754 bp->b_flags |= B_CLUSTER | B_VMIO;
755 bp->b_cmd = BUF_CMD_READ;
756 bp->b_bio1.bio_done = cluster_callback; /* default to async */
757 bp->b_bio1.bio_caller_info1.cluster_head = NULL;
758 bp->b_bio1.bio_caller_info2.cluster_tail = NULL;
759 bp->b_loffset = loffset;
760 bp->b_bio2.bio_offset = doffset;
761 KASSERT(bp->b_loffset != NOOFFSET,
762 ("cluster_rbuild: no buffer offset"));
766 bp->b_xio.xio_npages = 0;
768 for (boffset = doffset, i = 0; i < run; ++i, boffset += blksize) {
770 if ((bp->b_xio.xio_npages * PAGE_SIZE) +
771 round_page(blksize) > maxiosize) {
776 * Shortcut some checks and try to avoid buffers that
777 * would block in the lock. The same checks have to
778 * be made again after we officially get the buffer.
780 tbp = getblk(vp, loffset + i * blksize, blksize,
781 GETBLK_SZMATCH|GETBLK_NOWAIT, 0);
784 for (j = 0; j < tbp->b_xio.xio_npages; j++) {
785 if (tbp->b_xio.xio_pages[j]->valid)
788 if (j != tbp->b_xio.xio_npages) {
794 * Stop scanning if the buffer is fully valid
795 * (marked B_CACHE), or locked (may be doing a
796 * background write), or if the buffer is not
797 * VMIO backed. The clustering code can only deal
798 * with VMIO-backed buffers.
800 if ((tbp->b_flags & (B_CACHE|B_LOCKED)) ||
801 (tbp->b_flags & B_VMIO) == 0 ||
802 (LIST_FIRST(&tbp->b_dep) != NULL &&
810 * The buffer must be completely invalid in order to
811 * take part in the cluster. If it is partially valid
814 for (j = 0; j < tbp->b_xio.xio_npages; j++) {
815 if (tbp->b_xio.xio_pages[j]->valid)
818 if (j != tbp->b_xio.xio_npages) {
824 * Set a read-ahead mark as appropriate. Always
825 * set the read-ahead mark at (run - 1). It is
826 * unclear why we were also setting it at i == 1.
828 if (/*i == 1 ||*/ i == (run - 1))
832 * Depress the priority of buffers not explicitly
835 /* tbp->b_flags |= B_AGE; */
838 * Set the block number if it isn't set, otherwise
839 * if it is make sure it matches the block number we
842 if (tbp->b_bio2.bio_offset == NOOFFSET) {
843 tbp->b_bio2.bio_offset = boffset;
844 } else if (tbp->b_bio2.bio_offset != boffset) {
851 * The passed-in tbp (i == 0) will already be set up for
852 * async or sync operation. All other tbp's acquire in
853 * our loop are set up for async operation.
855 tbp->b_cmd = BUF_CMD_READ;
857 cluster_append(&bp->b_bio1, tbp);
858 for (j = 0; j < tbp->b_xio.xio_npages; ++j) {
861 m = tbp->b_xio.xio_pages[j];
862 vm_page_busy_wait(m, FALSE, "clurpg");
865 vm_object_pip_add(m->object, 1);
866 if ((bp->b_xio.xio_npages == 0) ||
867 (bp->b_xio.xio_pages[bp->b_xio.xio_npages-1] != m)) {
868 bp->b_xio.xio_pages[bp->b_xio.xio_npages] = m;
869 bp->b_xio.xio_npages++;
871 if ((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL)
872 tbp->b_xio.xio_pages[j] = bogus_page;
875 * XXX shouldn't this be += size for both, like in
878 * Don't inherit tbp->b_bufsize as it may be larger due to
879 * a non-page-aligned size. Instead just aggregate using
882 if (tbp->b_bcount != blksize)
883 kprintf("warning: tbp->b_bcount wrong %d vs %d\n", tbp->b_bcount, blksize);
884 if (tbp->b_bufsize != blksize)
885 kprintf("warning: tbp->b_bufsize wrong %d vs %d\n", tbp->b_bufsize, blksize);
886 bp->b_bcount += blksize;
887 bp->b_bufsize += blksize;
891 * Fully valid pages in the cluster are already good and do not need
892 * to be re-read from disk. Replace the page with bogus_page
894 for (j = 0; j < bp->b_xio.xio_npages; j++) {
895 if ((bp->b_xio.xio_pages[j]->valid & VM_PAGE_BITS_ALL) ==
897 bp->b_xio.xio_pages[j] = bogus_page;
900 if (bp->b_bufsize > bp->b_kvasize) {
901 panic("cluster_rbuild: b_bufsize(%d) > b_kvasize(%d)",
902 bp->b_bufsize, bp->b_kvasize);
904 pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
905 (vm_page_t *)bp->b_xio.xio_pages, bp->b_xio.xio_npages);
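/*
 * The component buffers' pages are now mapped contiguously into the
 * pbuf's KVA, so a single large read can fill all of them.
 * cluster_callback() later hands completion back to each tbp.
 */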
911 * Cleanup after a clustered read or write.
912 * This is complicated by the fact that any of the buffers might have
913 * extra memory (if there were no empty buffer headers at allocbuf time)
914 * that we will need to shift around.
916 * The returned bio is &bp->b_bio1
919 cluster_callback(struct bio *bio)
921 struct buf *bp = bio->bio_buf;
926 * Must propagate errors to all the components. A short read (EOF)
927 * is a critical error.
929 if (bp->b_flags & B_ERROR) {
931 } else if (bp->b_bcount != bp->b_bufsize) {
932 panic("cluster_callback: unexpected EOF on cluster %p!", bio);
935 pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_xio.xio_npages);
937 * Move memory from the large cluster buffer into the component
938 * buffers and mark IO as done on these. Since the memory map
939 * is the same, no actual copying is required.
941 while ((tbp = bio->bio_caller_info1.cluster_head) != NULL) {
942 bio->bio_caller_info1.cluster_head = tbp->b_cluster_next;
944 tbp->b_flags |= B_ERROR | B_IODEBUG;
945 tbp->b_error = error;
947 tbp->b_dirtyoff = tbp->b_dirtyend = 0;
948 tbp->b_flags &= ~(B_ERROR|B_INVAL);
949 tbp->b_flags |= B_IODEBUG;
951 * XXX the bdwrite()/bqrelse() issued during
952 * cluster building clears B_RELBUF (see bqrelse()
953 * comment). If direct I/O was specified, we have
954 * to restore it here to allow the buffer and VM
957 if (tbp->b_flags & B_DIRECT)
958 tbp->b_flags |= B_RELBUF;
960 biodone(&tbp->b_bio1);
962 relpbuf(bp, &cluster_pbuf_freecnt);
966 * Implement modified write build for cluster.
968 * write_behind = 0 write behind disabled
969 * write_behind = 1 write behind normal (default)
970 * write_behind = 2 write behind backed-off
972 * In addition, write_behind is only activated for files that have
973 * grown past a certain size (default 10MB). Otherwise temporary files
974 * wind up generating a lot of unnecessary disk I/O.
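/*
 * Illustrative behavior with the defaults (write_behind = 1,
 * write_behind_minfilesize = 10MB): once a file grows past 10MB,
 * completed clusters behind the current write position are pushed
 * out asynchronously via cluster_wbuild().
 */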
977 cluster_wbuild_wb(struct vnode *vp, int blksize, off_t start_loffset, int len)
981 switch(write_behind) {
983 if (start_loffset < len)
985 start_loffset -= len;
988 if (vp->v_filesize >= write_behind_minfilesize) {
989 r = cluster_wbuild(vp, NULL, blksize,
1001 * Do clustered write for FFS.
1004 * 1. Write is not sequential (write asynchronously)
1005 * Write is sequential:
1006 * 2. beginning of cluster - begin cluster
1007 * 3. middle of a cluster - add to cluster
1008 * 4. end of a cluster - asynchronously write cluster
1011 cluster_write(struct buf *bp, off_t filesize, int blksize, int seqcount)
1015 int maxclen, cursize;
1019 if (vp->v_type == VREG)
1020 async = vp->v_mount->mnt_flag & MNT_ASYNC;
1023 loffset = bp->b_loffset;
1024 KASSERT(bp->b_loffset != NOOFFSET,
1025 ("cluster_write: no buffer offset"));
1027 /* Initialize vnode to beginning of file. */
1029 vp->v_lasta = vp->v_clen = vp->v_cstart = vp->v_lastw = 0;
1031 if (vp->v_clen == 0 || loffset != vp->v_lastw + blksize ||
1032 bp->b_bio2.bio_offset == NOOFFSET ||
1033 (bp->b_bio2.bio_offset != vp->v_lasta + blksize)) {
1034 maxclen = vmaxiosize(vp);
1035 if (vp->v_clen != 0) {
1037 * Next block is not sequential.
1039 * If we are not writing at end of file, the process
1040 * seeked to another point in the file since its last
1041 * write, or we have reached our maximum cluster size,
1042 * then push the previous cluster. Otherwise try
1043 * reallocating to make it sequential.
1045 * Change to algorithm: only push previous cluster if
1046 * it was sequential from the point of view of the
1047 * seqcount heuristic, otherwise leave the buffer
1048 * intact so we can potentially optimize the I/O
1049 * later on in the buf_daemon or update daemon
1052 cursize = vp->v_lastw - vp->v_cstart + blksize;
1053 if (bp->b_loffset + blksize < filesize ||
1054 loffset != vp->v_lastw + blksize || vp->v_clen <= cursize) {
1055 if (!async && seqcount > 0) {
1056 cluster_wbuild_wb(vp, blksize,
1057 vp->v_cstart, cursize);
1060 struct buf **bpp, **endbp;
1061 struct cluster_save *buflist;
1063 buflist = cluster_collectbufs(vp, bp, blksize);
1064 endbp = &buflist->bs_children
1065 [buflist->bs_nchildren - 1];
1066 if (VOP_REALLOCBLKS(vp, buflist)) {
1068 * Failed, push the previous cluster
1069 * if *really* writing sequentially
1070 * in the logical file (seqcount > 1),
1071 * otherwise delay it in the hopes that
1072 * the low level disk driver can
1073 * optimize the write ordering.
1075 for (bpp = buflist->bs_children;
1078 kfree(buflist, M_SEGMENT);
1080 cluster_wbuild_wb(vp,
1081 blksize, vp->v_cstart,
1086 * Succeeded, keep building cluster.
1088 for (bpp = buflist->bs_children;
1089 bpp <= endbp; bpp++)
1091 kfree(buflist, M_SEGMENT);
1092 vp->v_lastw = loffset;
1093 vp->v_lasta = bp->b_bio2.bio_offset;
1099 * Consider beginning a cluster. If at end of file, make
1100 * cluster as large as possible, otherwise find size of
1103 if ((vp->v_type == VREG) &&
1104 bp->b_loffset + blksize < filesize &&
1105 (bp->b_bio2.bio_offset == NOOFFSET) &&
1106 (VOP_BMAP(vp, loffset, &bp->b_bio2.bio_offset, &maxclen, NULL, BUF_CMD_WRITE) ||
1107 bp->b_bio2.bio_offset == NOOFFSET)) {
1110 vp->v_lasta = bp->b_bio2.bio_offset;
1111 vp->v_cstart = loffset + blksize;
1112 vp->v_lastw = loffset;
1115 if (maxclen > blksize)
1116 vp->v_clen = maxclen - blksize;
1119 if (!async && vp->v_clen == 0) { /* I/O not contiguous */
1120 vp->v_cstart = loffset + blksize;
1122 } else { /* Wait for rest of cluster */
1123 vp->v_cstart = loffset;
1126 } else if (loffset == vp->v_cstart + vp->v_clen) {
1128 * At end of cluster, write it out if seqcount tells us we
1129 * are operating sequentially, otherwise let the buf or
1130 * update daemon handle it.
1134 cluster_wbuild_wb(vp, blksize, vp->v_cstart,
1135 vp->v_clen + blksize);
1137 vp->v_cstart = loffset + blksize;
1138 } else if (vm_page_count_severe() &&
1139 bp->b_loffset + blksize < filesize) {
1141 * We are low on memory, get it going NOW. However, do not
1142 * try to push out a partial block at the end of the file
1143 * as this could lead to extremely non-optimal write activity.
1148 * In the middle of a cluster, so just delay the I/O for now.
1152 vp->v_lastw = loffset;
1153 vp->v_lasta = bp->b_bio2.bio_offset;
1157 * This is the clustered version of bawrite(). It works similarly to
1158 * cluster_write() except I/O on the buffer is guaranteed to occur.
1161 cluster_awrite(struct buf *bp)
1166 * Don't bother if it isn't clusterable.
1168 if ((bp->b_flags & B_CLUSTEROK) == 0 ||
1170 (bp->b_vp->v_flag & VOBJBUF) == 0) {
1171 total = bp->b_bufsize;
1176 total = cluster_wbuild(bp->b_vp, &bp, bp->b_bufsize,
1177 bp->b_loffset, vmaxiosize(bp->b_vp));
1185 * This is an awful lot like cluster_rbuild...wish they could be combined.
1186 * The last lbn argument is the current block on which I/O is being
1187 * performed. Check to see that it doesn't fall in the middle of
1188 * the current block (if last_bp == NULL).
1190 * cluster_wbuild() normally does not guarantee anything. If bpp is
1191 * non-NULL and cluster_wbuild() is able to incorporate it into the
1192 * I/O it will set *bpp to NULL, otherwise it will leave it alone and
1193 * the caller must dispose of *bpp.
1196 cluster_wbuild(struct vnode *vp, struct buf **bpp,
1197 int blksize, off_t start_loffset, int bytes)
1199 struct buf *bp, *tbp;
1201 int totalwritten = 0;
1203 int maxiosize = vmaxiosize(vp);
1207 * If the buffer matches the passed locked & removed buffer
1208 * we use the passed buffer (which might not be B_DELWRI).
1210 * Otherwise locate the buffer and determine if it is
1213 if (bpp && (*bpp)->b_loffset == start_loffset) {
1218 tbp = findblk(vp, start_loffset, FINDBLK_NBLOCK);
1220 (tbp->b_flags & (B_LOCKED | B_INVAL | B_DELWRI)) !=
1222 (LIST_FIRST(&tbp->b_dep) && buf_checkwrite(tbp))) {
1225 start_loffset += blksize;
1231 KKASSERT(tbp->b_cmd == BUF_CMD_DONE);
1234 * Extra memory in the buffer, punt on this buffer.
1235 * XXX we could handle this in most cases, but we would
1236 * have to push the extra memory down to after our max
1237 * possible cluster size and then potentially pull it back
1238 * up if the cluster was terminated prematurely--too much
1241 if (((tbp->b_flags & (B_CLUSTEROK|B_MALLOC)) != B_CLUSTEROK) ||
1242 (tbp->b_bcount != tbp->b_bufsize) ||
1243 (tbp->b_bcount != blksize) ||
1244 (bytes == blksize) ||
1245 ((bp = getpbuf_kva(&cluster_pbuf_freecnt)) == NULL)) {
1246 totalwritten += tbp->b_bufsize;
1248 start_loffset += blksize;
1254 * Set up the pbuf. Track our append point with b_bcount
1255 * and b_bufsize. b_bufsize is not used by the device but
1256 * our caller uses it to loop clusters and we use it to
1257 * detect a premature EOF on the block device.
1261 bp->b_xio.xio_npages = 0;
1262 bp->b_loffset = tbp->b_loffset;
1263 bp->b_bio2.bio_offset = tbp->b_bio2.bio_offset;
1266 * We are synthesizing a buffer out of vm_page_t's, but
1267 * if the block size is not page aligned then the starting
1268 * address may not be either. Inherit the b_data offset
1269 * from the original buffer.
1271 bp->b_data = (char *)((vm_offset_t)bp->b_data |
1272 ((vm_offset_t)tbp->b_data & PAGE_MASK));
1273 bp->b_flags &= ~B_ERROR;
1274 bp->b_flags |= B_CLUSTER | B_BNOCLIP |
1275 (tbp->b_flags & (B_VMIO | B_NEEDCOMMIT));
1276 bp->b_bio1.bio_caller_info1.cluster_head = NULL;
1277 bp->b_bio1.bio_caller_info2.cluster_tail = NULL;
1280 * From this location in the file, scan forward to see
1281 * if there are buffers with adjacent data that need to
1282 * be written as well.
1284 * IO *must* be initiated on index 0 at this point
1285 * (particularly when called from cluster_awrite()).
1287 for (i = 0; i < bytes; (i += blksize), (start_loffset += blksize)) {
1295 tbp = findblk(vp, start_loffset,
1298 * Buffer not found or could not be locked
1305 * If it IS in core, but has different
1306 * characteristics, then don't cluster
1309 if ((tbp->b_flags & (B_VMIO | B_CLUSTEROK |
1310 B_INVAL | B_DELWRI | B_NEEDCOMMIT))
1311 != (B_DELWRI | B_CLUSTEROK |
1312 (bp->b_flags & (B_VMIO | B_NEEDCOMMIT))) ||
1313 (tbp->b_flags & B_LOCKED)
1320 * Check that the combined cluster
1321 * would make sense with regard to pages
1322 * and would not be too large
1324 * WARNING! buf_checkwrite() must be the last
1325 * check made. If it returns 0 then
1326 * we must initiate the I/O.
1328 if ((tbp->b_bcount != blksize) ||
1329 ((bp->b_bio2.bio_offset + i) !=
1330 tbp->b_bio2.bio_offset) ||
1331 ((tbp->b_xio.xio_npages + bp->b_xio.xio_npages) >
1332 (maxiosize / PAGE_SIZE)) ||
1333 (LIST_FIRST(&tbp->b_dep) &&
1334 buf_checkwrite(tbp))
1339 if (LIST_FIRST(&tbp->b_dep))
1342 * Ok, it's passed all the tests,
1343 * so remove it from the free list
1344 * and mark it busy. We will use it.
1347 KKASSERT(tbp->b_cmd == BUF_CMD_DONE);
1351 * If the IO is via the VM then we do some
1352 * special VM hackery (yuck). Since the buffer's
1353 * block size may not be page-aligned it is possible
1354 * for a page to be shared between two buffers. We
1355 * have to get rid of the duplication when building
1358 if (tbp->b_flags & B_VMIO) {
1362 * Try to avoid deadlocks with the VM system.
1363 * However, we cannot abort the I/O if
1364 * must_initiate is non-zero.
1366 if (must_initiate == 0) {
1368 j < tbp->b_xio.xio_npages;
1370 m = tbp->b_xio.xio_pages[j];
1371 if (m->flags & PG_BUSY) {
1378 for (j = 0; j < tbp->b_xio.xio_npages; ++j) {
1379 m = tbp->b_xio.xio_pages[j];
1380 vm_page_busy_wait(m, FALSE, "clurpg");
1381 vm_page_io_start(m);
1383 vm_object_pip_add(m->object, 1);
1384 if ((bp->b_xio.xio_npages == 0) ||
1385 (bp->b_xio.xio_pages[bp->b_xio.xio_npages - 1] != m)) {
1386 bp->b_xio.xio_pages[bp->b_xio.xio_npages] = m;
1387 bp->b_xio.xio_npages++;
1391 bp->b_bcount += blksize;
1392 bp->b_bufsize += blksize;
1395 tbp->b_flags &= ~B_ERROR;
1396 tbp->b_cmd = BUF_CMD_WRITE;
1398 cluster_append(&bp->b_bio1, tbp);
1401 * check for latent dependencies to be handled
1403 if (LIST_FIRST(&tbp->b_dep) != NULL)
1407 pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
1408 (vm_page_t *)bp->b_xio.xio_pages,
1409 bp->b_xio.xio_npages);
1410 if (bp->b_bufsize > bp->b_kvasize) {
1411 panic("cluster_wbuild: b_bufsize(%d) "
1412 "> b_kvasize(%d)\n",
1413 bp->b_bufsize, bp->b_kvasize);
1415 totalwritten += bp->b_bufsize;
1417 bp->b_dirtyend = bp->b_bufsize;
1418 bp->b_bio1.bio_done = cluster_callback;
1419 bp->b_cmd = BUF_CMD_WRITE;
1421 vfs_busy_pages(vp, bp);
1422 bsetrunningbufspace(bp, bp->b_bufsize);
1424 vn_strategy(vp, &bp->b_bio1);
1428 return totalwritten;
1432 * Collect together all the buffers in a cluster, plus add one
1433 * additional buffer passed-in.
1435 * Only pre-existing buffers whose block size matches blksize are collected.
1436 * (this is primarily because HAMMER1 uses varying block sizes and we don't
1437 * want to override its choices).
1439 static struct cluster_save *
1440 cluster_collectbufs(struct vnode *vp, struct buf *last_bp, int blksize)
1442 struct cluster_save *buflist;
1449 len = (int)(vp->v_lastw - vp->v_cstart + blksize) / blksize;
1450 buflist = kmalloc(sizeof(struct buf *) * (len + 1) + sizeof(*buflist),
1451 M_SEGMENT, M_WAITOK);
1452 buflist->bs_nchildren = 0;
1453 buflist->bs_children = (struct buf **) (buflist + 1);
1454 for (loffset = vp->v_cstart, i = 0, j = 0;
1456 (loffset += blksize), i++) {
1457 bp = getcacheblk(vp, loffset,
1458 last_bp->b_bcount, GETBLK_SZMATCH);
1459 buflist->bs_children[i] = bp;
1462 } else if (bp->b_bio2.bio_offset == NOOFFSET) {
1463 VOP_BMAP(bp->b_vp, bp->b_loffset,
1464 &bp->b_bio2.bio_offset,
1465 NULL, NULL, BUF_CMD_WRITE);
1472 for (k = 0; k < j; ++k) {
1473 if (buflist->bs_children[k]) {
1474 bqrelse(buflist->bs_children[k]);
1475 buflist->bs_children[k] = NULL;
1480 bcopy(buflist->bs_children + j,
1481 buflist->bs_children + 0,
1482 sizeof(buflist->bs_children[0]) * (i - j));
1486 buflist->bs_children[i] = bp = last_bp;
1487 if (bp->b_bio2.bio_offset == NOOFFSET) {
1488 VOP_BMAP(bp->b_vp, bp->b_loffset, &bp->b_bio2.bio_offset,
1489 NULL, NULL, BUF_CMD_WRITE);
1491 buflist->bs_nchildren = i + 1;
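/*
 * Link tbp onto the cluster bio's singly-linked list of component
 * buffers (cluster_head / cluster_tail), preserving FIFO order so
 * cluster_callback() can complete them in sequence.
 */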
1496 cluster_append(struct bio *bio, struct buf *tbp)
1498 tbp->b_cluster_next = NULL;
1499 if (bio->bio_caller_info1.cluster_head == NULL) {
1500 bio->bio_caller_info1.cluster_head = tbp;
1501 bio->bio_caller_info2.cluster_tail = tbp;
1503 bio->bio_caller_info2.cluster_tail->b_cluster_next = tbp;
1504 bio->bio_caller_info2.cluster_tail = tbp;
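/*
 * Mark a buffer as a read-ahead mark: set B_RAM on the buf and PG_RAM
 * on its first VM page, if it has one.  B_RAM is what the B_CACHE path
 * in cluster_readx()/cluster_readcb() tests to decide whether to issue
 * further read-ahead.
 */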
1510 cluster_setram (struct buf *bp)
1512 bp->b_flags |= B_RAM;
1513 if (bp->b_xio.xio_npages)
1514 vm_page_flag_set(bp->b_xio.xio_pages[0], PG_RAM);