/*
 * Copyright (c) 1993
 *	The Regents of the University of California.  All rights reserved.
 * Modifications/enhancements:
 * 	Copyright (c) 1995 John S. Dyson.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)vfs_cluster.c	8.7 (Berkeley) 2/13/94
 * $FreeBSD: src/sys/kern/vfs_cluster.c,v 1.92.2.9 2001/11/18 07:10:59 dillon Exp $
 * $DragonFly: src/sys/kern/vfs_cluster.c,v 1.40 2008/07/14 03:09:00 dillon Exp $
 */
#include "opt_debug_cluster.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/resourcevar.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>

#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_page2.h>

#include <machine/limits.h>

#if defined(CLUSTERDEBUG)
#include <sys/sysctl.h>
static int rcluster = 0;
SYSCTL_INT(_debug, OID_AUTO, rcluster, CTLFLAG_RW, &rcluster, 0, "");
#endif
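/*
 * Editor's note (illustrative, not in the original source): when the
 * kernel is built with the CLUSTERDEBUG option, the read-clustering
 * paths below emit kprintf() traces that can be toggled at runtime:
 *
 *	sysctl debug.rcluster=1
 */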
static MALLOC_DEFINE(M_SEGMENT, "cluster_save", "cluster_save buffer");

static struct cluster_save *
	cluster_collectbufs (struct vnode *vp, struct buf *last_bp,
			int blksize);
static struct buf *
	cluster_rbuild (struct vnode *vp, off_t filesize, off_t loffset,
			off_t doffset, int blksize, int run,
			struct buf *fbp);
static void cluster_callback (struct bio *);
static void cluster_setram (struct buf *);
static int cluster_wbuild(struct vnode *vp, struct buf **bpp, int blksize,
			off_t start_loffset, int bytes);

static int write_behind = 1;
SYSCTL_INT(_vfs, OID_AUTO, write_behind, CTLFLAG_RW, &write_behind, 0,
    "Cluster write-behind setting");
static int max_readahead = 2 * 1024 * 1024;
SYSCTL_INT(_vfs, OID_AUTO, max_readahead, CTLFLAG_RW, &max_readahead, 0,
    "Limit in bytes for desired cluster read-ahead");
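/*
 * Editor's note (illustrative, not in the original source): both knobs
 * above are runtime sysctls, e.g.
 *
 *	sysctl vfs.write_behind=1
 *	sysctl vfs.max_readahead=2097152
 *
 * cluster_readx() additionally hard-caps the effective read-ahead at
 * 16MB regardless of vfs.max_readahead.
 */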
extern vm_page_t bogus_page;

extern int cluster_pbuf_freecnt;
/*
 * This replaces bread.
 *
 *	filesize	- read-ahead @ blksize will not cross this boundary
 *	loffset		- loffset for returned *bpp
 *	blksize		- blocksize for returned *bpp and read-ahead bps
 *	minreq		- minimum (not a hard minimum) in bytes, typically
 *			  reflects a higher level uio resid.
 *	maxreq		- maximum (sequential heuristic) in bytes
 *			  (highest typically ~2MB)
 *	bpp		- return buffer (*bpp) for (loffset,blksize)
 */
int
cluster_readx(struct vnode *vp, off_t filesize, off_t loffset,
	      int blksize, size_t minreq, size_t maxreq, struct buf **bpp)
{
	struct buf *bp, *rbp, *reqbp;
	/*
	 * Calculate the desired read-ahead in blksize'd blocks (maxra).
	 * To do this we calculate maxreq.
	 *
	 * maxreq typically starts out as a sequential heuristic.  If the
	 * high level uio/resid is bigger (minreq), we pop maxreq up to
	 * minreq.  This represents the case where random I/O is being
	 * performed but the userland is issuing big read()'s.
	 *
	 * Then we limit maxreq to max_readahead to ensure it is a reasonable
	 * value.
	 *
	 * Finally we must ensure that (loffset + maxreq) does not cross the
	 * boundary (filesize) for the current blocksize.  If we allowed it
	 * to cross we could end up with buffers past the boundary with the
	 * wrong block size (HAMMER large-data areas use mixed block sizes).
	 * minreq is also absolutely limited to filesize.
	 */
	if (maxreq < minreq)
		maxreq = minreq;
	/* minreq not used beyond this point */
	if (maxreq > max_readahead) {
		maxreq = max_readahead;
		if (maxreq > 16 * 1024 * 1024)
			maxreq = 16 * 1024 * 1024;
	}
	if (maxreq < blksize)
		maxreq = blksize;
	if (loffset + maxreq > filesize) {
		if (loffset > filesize)
			maxreq = 0;
		else
			maxreq = filesize - loffset;
	}

	maxra = (int)(maxreq / blksize);
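	/*
	 * Editor's sketch (illustrative, not in the original source): the
	 * clamping above, extracted as a standalone helper with a worked
	 * example.  The helper name is hypothetical and the block is not
	 * compiled.
	 */
#if 0
	static int
	example_calc_maxra(off_t loffset, off_t filesize, size_t maxreq,
			   size_t max_readahead, int blksize)
	{
		if (maxreq > max_readahead) {
			maxreq = max_readahead;
			if (maxreq > 16 * 1024 * 1024)
				maxreq = 16 * 1024 * 1024;
		}
		if (maxreq < (size_t)blksize)
			maxreq = blksize;
		if (loffset > filesize)
			maxreq = 0;
		else if (loffset + (off_t)maxreq > filesize)
			maxreq = filesize - loffset;

		/*
		 * e.g. blksize 16384, maxreq 4MB, max_readahead 2MB,
		 * loffset 0, filesize 1MB -> maxreq clamps to 1MB and
		 * the function returns 64 read-ahead blocks.
		 */
		return ((int)(maxreq / blksize));
	}
#endif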
	/*
	 * Get the requested block.
	 */
	*bpp = reqbp = bp = getblk(vp, loffset, blksize, 0, 0);
	origoffset = loffset;

	/*
	 * Calculate the maximum cluster size for a single I/O, used
	 * by cluster_rbuild().
	 */
	maxrbuild = vmaxiosize(vp) / blksize;
	/*
	 * if it is in the cache, then check to see if the reads have been
	 * sequential.  If they have, then try some read-ahead, otherwise
	 * back-off on prospective read-aheads.
	 */
	if (bp->b_flags & B_CACHE) {
		/*
		 * Not sequential, do not do any read-ahead
		 */
		if (maxra <= 1)
			return 0;

		/*
		 * No read-ahead mark, do not do any read-ahead
		 */
		if ((bp->b_flags & B_RAM) == 0)
			return 0;

		/*
		 * We hit a read-ahead-mark, figure out how much read-ahead
		 * to do (maxra) and where to start (loffset).
		 *
		 * Shortcut the scan.  Typically the way this works is that
		 * we've built up all the blocks in between except for the
		 * last in previous iterations, so if the second-to-last
		 * block is present we just skip ahead to it.
		 *
		 * This algorithm has O(1) cpu in the steady state no
		 * matter how large maxra is.
		 */
		bp->b_flags &= ~B_RAM;

		if (findblk(vp, loffset + (maxra - 2) * blksize, FINDBLK_TEST))
			i = maxra - 1;
		else
			i = 1;
		while (i < maxra) {
			if (findblk(vp, loffset + i * blksize,
				    FINDBLK_TEST) == NULL) {
				break;
			}
			++i;
		}
		/*
		 * We got everything or everything is in the cache, no
		 * point continuing.
		 */
		if (i >= maxra)
			return 0;

		/*
		 * Calculate where to start the read-ahead and how much
		 * to do.  Generally speaking we want to read-ahead by
		 * (maxra) when we've found a read-ahead mark.  We do
		 * not want to reduce maxra here as it will cause
		 * successive read-ahead I/O's to be smaller and smaller.
		 *
		 * However, we have to make sure we don't break the
		 * filesize limitation for the clustered operation.
		 */
		loffset += i * blksize;
		reqbp = bp = NULL;

		if (loffset >= filesize)
			return 0;
		if (loffset + maxra * blksize > filesize) {
			maxreq = filesize - loffset;
			maxra = (int)(maxreq / blksize);
		}
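		/*
		 * Editor's worked example (illustrative, not in the
		 * original source): with blksize 16384, filesize 1048576
		 * and loffset 983040, the clamp above yields maxreq 65536
		 * and maxra 4, so read-ahead stops exactly at the 1MB
		 * boundary instead of building wrong-sized buffers past it.
		 */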
	} else {
		__debugvar off_t firstread = bp->b_loffset;

		/*
		 * Set-up synchronous read for bp.
		 */
		bp->b_cmd = BUF_CMD_READ;
		bp->b_bio1.bio_done = biodone_sync;
		bp->b_bio1.bio_flags |= BIO_SYNC;

		KASSERT(firstread != NOOFFSET,
			("cluster_read: no buffer offset"));
		/*
		 * nblks is our cluster_rbuild request size, limited
		 * primarily by the device.
		 */
		if ((nblks = maxra) > maxrbuild)
			nblks = maxrbuild;

		if (nblks > 1) {
			int burstbytes;

			error = VOP_BMAP(vp, loffset, &doffset,
					 &burstbytes, NULL, BUF_CMD_READ);
			if (error)
				goto single_block_read;
			if (nblks > burstbytes / blksize)
				nblks = burstbytes / blksize;
			if (doffset == NOOFFSET)
				goto single_block_read;
			if (nblks <= 1)
				goto single_block_read;

			bp = cluster_rbuild(vp, filesize, loffset,
					    doffset, blksize, nblks, bp);
			loffset += bp->b_bufsize;
			maxra -= bp->b_bufsize / blksize;
		} else {
single_block_read:
			/*
			 * If it isn't in the cache, then get a chunk from
			 * disk if sequential, otherwise just get the block.
			 */
			cluster_setram(bp);
			loffset += blksize;
		}
	}

	/*
	 * If B_CACHE was not set issue bp.  bp will either be an
	 * asynchronous cluster buf or a synchronous single-buf.
	 * If it is a single buf it will be the same as reqbp.
	 *
	 * NOTE: Once an async cluster buf is issued bp becomes invalid.
	 */
	if (bp) {
#if defined(CLUSTERDEBUG)
		if (rcluster)
			kprintf("S(%012jx,%d,%d)\n",
			    (intmax_t)bp->b_loffset, bp->b_bcount, maxra);
#endif
		if ((bp->b_flags & B_CLUSTER) == 0)
			vfs_busy_pages(vp, bp);
		bp->b_flags &= ~(B_ERROR|B_INVAL);
		vn_strategy(vp, &bp->b_bio1);
		/* bp invalid now */
	}
	/*
	 * If we have been doing sequential I/O, then do some read-ahead.
	 * The code above us should have positioned us at the next likely
	 * offset.
	 *
	 * Only mess with buffers which we can immediately lock.  HAMMER
	 * will do device-readahead irrespective of what the blocks
	 * represent.
	 */
	while (error == 0 && maxra > 0) {
		int burstbytes;
		int tmp_error;
		int nblks;

		rbp = getblk(vp, loffset, blksize,
			     GETBLK_SZMATCH|GETBLK_NOWAIT, 0);
		if (rbp == NULL)
			break;
		if ((rbp->b_flags & B_CACHE)) {
			bqrelse(rbp);
			break;
		}

		/*
		 * An error from the read-ahead bmap has nothing to do
		 * with the caller's original request.
		 */
		tmp_error = VOP_BMAP(vp, loffset, &doffset,
				     &burstbytes, NULL, BUF_CMD_READ);
		if (tmp_error || doffset == NOOFFSET) {
			rbp->b_flags |= B_INVAL;
			brelse(rbp);
			break;
		}
		if ((nblks = maxra) > maxrbuild)
			nblks = maxrbuild;
		if (nblks > burstbytes / blksize)
			nblks = burstbytes / blksize;
		rbp->b_cmd = BUF_CMD_READ;
		/*rbp->b_flags |= B_AGE*/;

		if (nblks > 1) {
			rbp = cluster_rbuild(vp, filesize, loffset,
					     doffset, blksize, nblks, rbp);
		} else {
			rbp->b_bio2.bio_offset = doffset;
		}

#if defined(CLUSTERDEBUG)
		if (rcluster) {
			if (bp) {
				kprintf("A+(%012jx,%d,%jd) "
					"doff=%012jx minr=%zd ra=%d\n",
					(intmax_t)loffset, rbp->b_bcount,
					(intmax_t)(loffset - origoffset),
					(intmax_t)doffset, minreq, maxra);
			} else {
				kprintf("A-(%012jx,%d,%jd) "
					"doff=%012jx minr=%zd ra=%d\n",
					(intmax_t)rbp->b_loffset, rbp->b_bcount,
					(intmax_t)(loffset - origoffset),
					(intmax_t)doffset, minreq, maxra);
			}
		}
#endif
		rbp->b_flags &= ~(B_ERROR|B_INVAL);

		if ((rbp->b_flags & B_CLUSTER) == 0)
			vfs_busy_pages(vp, rbp);
		loffset += rbp->b_bufsize;
		maxra -= rbp->b_bufsize / blksize;
		vn_strategy(vp, &rbp->b_bio1);
		/* rbp invalid now */
	}
	/*
	 * Wait for our original buffer to complete its I/O.  reqbp will
	 * be NULL if the original buffer was B_CACHE.  We are returning
	 * (*bpp) which is the same as reqbp when reqbp != NULL.
	 */
	if (reqbp) {
		KKASSERT(reqbp->b_bio1.bio_flags & BIO_SYNC);
		error = biowait(&reqbp->b_bio1, "clurd");
	}
	return (error);
}
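#if 0
/*
 * Editor's sketch (illustrative, not in the original source): a
 * hypothetical caller sizing minreq from its uio and maxreq from a
 * sequential-access heuristic, per the parameter descriptions above.
 * 'uio' and 'seqcount' are assumed to exist in the caller.
 */
	error = cluster_readx(vp, filesize, loffset, blksize,
			      (size_t)uio->uio_resid,		/* minreq */
			      (size_t)seqcount * MAXBSIZE,	/* maxreq */
			      &bp);
#endif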
/*
 * If blocks are contiguous on disk, use this to provide clustered
 * read ahead.  We will read as many blocks as possible sequentially
 * and then parcel them up into logical blocks in the buffer hash table.
 *
 * This function either returns a cluster buf or it returns fbp.  fbp is
 * already expected to be set up as a synchronous or asynchronous request.
 *
 * If a cluster buf is returned it will always be async.
 */
static struct buf *
cluster_rbuild(struct vnode *vp, off_t filesize, off_t loffset, off_t doffset,
	       int blksize, int run, struct buf *fbp)
{
	struct buf *bp, *tbp;
	off_t boffset;
	int i, j;
	int maxiosize = vmaxiosize(vp);

	/*
	 * avoid a division
	 */
	while (loffset + run * blksize > filesize) {
		--run;
	}

	tbp = fbp;
	tbp->b_bio2.bio_offset = doffset;
	if ((tbp->b_flags & B_MALLOC) ||
	    ((tbp->b_flags & B_VMIO) == 0) || (run <= 1)) {
		return tbp;
	}

	bp = trypbuf_kva(&cluster_pbuf_freecnt);
	if (bp == NULL)
		return tbp;
	/*
	 * We are synthesizing a buffer out of vm_page_t's, but
	 * if the block size is not page aligned then the starting
	 * address may not be either.  Inherit the b_data offset
	 * from the original buffer.
	 */
	bp->b_data = (char *)((vm_offset_t)bp->b_data |
			      ((vm_offset_t)tbp->b_data & PAGE_MASK));
	bp->b_flags |= B_CLUSTER | B_VMIO;
	bp->b_cmd = BUF_CMD_READ;
	bp->b_bio1.bio_done = cluster_callback;		/* default to async */
	bp->b_bio1.bio_caller_info1.cluster_head = NULL;
	bp->b_bio1.bio_caller_info2.cluster_tail = NULL;
	bp->b_loffset = loffset;
	bp->b_bio2.bio_offset = doffset;
	KASSERT(bp->b_loffset != NOOFFSET,
		("cluster_rbuild: no buffer offset"));

	bp->b_bcount = 0;
	bp->b_bufsize = 0;
	bp->b_xio.xio_npages = 0;
	for (boffset = doffset, i = 0; i < run; ++i, boffset += blksize) {
		if (i) {
			if ((bp->b_xio.xio_npages * PAGE_SIZE) +
			    round_page(blksize) > maxiosize) {
				break;
			}

			/*
			 * Shortcut some checks and try to avoid buffers that
			 * would block in the lock.  The same checks have to
			 * be made again after we officially get the buffer.
			 */
			tbp = getblk(vp, loffset + i * blksize, blksize,
				     GETBLK_SZMATCH|GETBLK_NOWAIT, 0);
			if (tbp == NULL)
				break;
			for (j = 0; j < tbp->b_xio.xio_npages; j++) {
				if (tbp->b_xio.xio_pages[j]->valid)
					break;
			}
			if (j != tbp->b_xio.xio_npages) {
				bqrelse(tbp);
				break;
			}
			/*
			 * Stop scanning if the buffer is fully valid
			 * (marked B_CACHE), or locked (may be doing a
			 * background write), or if the buffer is not
			 * VMIO backed.  The clustering code can only deal
			 * with VMIO-backed buffers.
			 */
			if ((tbp->b_flags & (B_CACHE|B_LOCKED)) ||
			    (tbp->b_flags & B_VMIO) == 0 ||
			    (LIST_FIRST(&tbp->b_dep) != NULL &&
			     buf_checkread(tbp))
			) {
				bqrelse(tbp);
				break;
			}

			/*
			 * The buffer must be completely invalid in order to
			 * take part in the cluster.  If it is partially valid
			 * then we stop.
			 */
			for (j = 0; j < tbp->b_xio.xio_npages; j++) {
				if (tbp->b_xio.xio_pages[j]->valid)
					break;
			}
			if (j != tbp->b_xio.xio_npages) {
				bqrelse(tbp);
				break;
			}
			/*
			 * Set a read-ahead mark as appropriate.  Always
			 * set the read-ahead mark at (run - 1).  It is
			 * unclear why we were also setting it at i == 1.
			 */
			if (/*i == 1 ||*/ i == (run - 1))
				cluster_setram(tbp);

			/*
			 * Depress the priority of buffers not explicitly
			 * requested.
			 */
			/* tbp->b_flags |= B_AGE; */

			/*
			 * Set the block number if it isn't set, otherwise
			 * if it is make sure it matches the block number we
			 * expect.
			 */
			if (tbp->b_bio2.bio_offset == NOOFFSET) {
				tbp->b_bio2.bio_offset = boffset;
			} else if (tbp->b_bio2.bio_offset != boffset) {
				bqrelse(tbp);
				break;
			}
		}
		/*
		 * The passed-in tbp (i == 0) will already be set up for
		 * async or sync operation.  All other tbp's acquired in
		 * our loop are set up for async operation.
		 */
		tbp->b_cmd = BUF_CMD_READ;
		cluster_append(&bp->b_bio1, tbp);
		for (j = 0; j < tbp->b_xio.xio_npages; ++j) {
			vm_page_t m;

			m = tbp->b_xio.xio_pages[j];
			vm_page_busy_wait(m, FALSE, "clurpg");
			vm_object_pip_add(m->object, 1);
			if ((bp->b_xio.xio_npages == 0) ||
			    (bp->b_xio.xio_pages[bp->b_xio.xio_npages-1] != m)) {
				bp->b_xio.xio_pages[bp->b_xio.xio_npages] = m;
				bp->b_xio.xio_npages++;
			}
			if ((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL)
				tbp->b_xio.xio_pages[j] = bogus_page;
		}
		/*
		 * XXX shouldn't this be += size for both, like in
		 * cluster_wbuild()?
		 *
		 * Don't inherit tbp->b_bufsize as it may be larger due to
		 * a non-page-aligned size.  Instead just aggregate using
		 * 'size'.
		 */
		if (tbp->b_bcount != blksize)
			kprintf("warning: tbp->b_bcount wrong %d vs %d\n",
				tbp->b_bcount, blksize);
		if (tbp->b_bufsize != blksize)
			kprintf("warning: tbp->b_bufsize wrong %d vs %d\n",
				tbp->b_bufsize, blksize);
		bp->b_bcount += blksize;
		bp->b_bufsize += blksize;
	}
	/*
	 * Fully valid pages in the cluster are already good and do not need
	 * to be re-read from disk.  Replace the page with bogus_page
	 * instead.
	 */
	for (j = 0; j < bp->b_xio.xio_npages; j++) {
		if ((bp->b_xio.xio_pages[j]->valid & VM_PAGE_BITS_ALL) ==
		    VM_PAGE_BITS_ALL) {
			bp->b_xio.xio_pages[j] = bogus_page;
		}
	}
	if (bp->b_bufsize > bp->b_kvasize) {
		panic("cluster_rbuild: b_bufsize(%d) > b_kvasize(%d)",
		      bp->b_bufsize, bp->b_kvasize);
	}
	pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
		    (vm_page_t *)bp->b_xio.xio_pages, bp->b_xio.xio_npages);
	return (bp);
}
/*
 * Cleanup after a clustered read or write.
 * This is complicated by the fact that any of the buffers might have
 * extra memory (if there were no empty buffer headers at allocbuf time)
 * that we will need to shift around.
 *
 * The returned bio is &bp->b_bio1
 */
static void
cluster_callback(struct bio *bio)
{
	struct buf *bp = bio->bio_buf;
	struct buf *tbp;
	int error = 0;

	/*
	 * Must propagate errors to all the components.  A short read (EOF)
	 * is a critical error.
	 */
	if (bp->b_flags & B_ERROR) {
		error = bp->b_error;
	} else if (bp->b_bcount != bp->b_bufsize) {
		panic("cluster_callback: unexpected EOF on cluster %p!", bio);
	}

	pmap_qremove(trunc_page((vm_offset_t) bp->b_data),
		     bp->b_xio.xio_npages);
	/*
	 * Move memory from the large cluster buffer into the component
	 * buffers and mark IO as done on these.  Since the memory map
	 * is the same, no actual copying is required.
	 */
	while ((tbp = bio->bio_caller_info1.cluster_head) != NULL) {
		bio->bio_caller_info1.cluster_head = tbp->b_cluster_next;
		if (error) {
			tbp->b_flags |= B_ERROR | B_IODEBUG;
			tbp->b_error = error;
		} else {
			tbp->b_dirtyoff = tbp->b_dirtyend = 0;
			tbp->b_flags &= ~(B_ERROR|B_INVAL);
			tbp->b_flags |= B_IODEBUG;
			/*
			 * XXX the bdwrite()/bqrelse() issued during
			 * cluster building clears B_RELBUF (see bqrelse()
			 * comment).  If direct I/O was specified, we have
			 * to restore it here to allow the buffer and VM
			 * to be freed.
			 */
			if (tbp->b_flags & B_DIRECT)
				tbp->b_flags |= B_RELBUF;
		}
		biodone(&tbp->b_bio1);
	}
	relpbuf(bp, &cluster_pbuf_freecnt);
}
/*
 * Implement modified write build for cluster.
 *
 *	write_behind = 0	write behind disabled
 *	write_behind = 1	write behind normal (default)
 *	write_behind = 2	write behind backed-off
 */
static void
cluster_wbuild_wb(struct vnode *vp, int blksize, off_t start_loffset, int len)
{
	int r;

	switch(write_behind) {
	case 2:
		if (start_loffset < len)
			break;
		start_loffset -= len;
		/* fall through */
	case 1:
		r = cluster_wbuild(vp, NULL, blksize, start_loffset, len);
		break;
	default:
		break;
	}
}
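/*
 * Editor's worked example (illustrative, not in the original source):
 * with write_behind=2 the window is shifted back by 'len' before the
 * flush, so the most recently written cluster stays dirty in the cache
 * and the one before it is pushed out.  E.g. blksize 16384, len 65536,
 * start_loffset 131072 -> cluster_wbuild() is invoked at offset 65536.
 */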
/*
 * Do clustered write for FFS.
 *
 * Three cases:
 *	1. Write is not sequential (write asynchronously)
 *	Write is sequential:
 *	2. beginning of cluster - begin cluster
 *	3. middle of a cluster - add to cluster
 *	4. end of a cluster - asynchronously write cluster
 */
void
cluster_write(struct buf *bp, off_t filesize, int blksize, int seqcount)
{
	struct vnode *vp;
	off_t loffset;
	int maxclen, cursize;
	int async;

	vp = bp->b_vp;
	if (vp->v_type == VREG)
		async = vp->v_mount->mnt_flag & MNT_ASYNC;
	else
		async = 0;
	loffset = bp->b_loffset;
	KASSERT(bp->b_loffset != NOOFFSET,
		("cluster_write: no buffer offset"));

	/* Initialize vnode to beginning of file. */
	if (loffset == 0)
		vp->v_lasta = vp->v_clen = vp->v_cstart = vp->v_lastw = 0;

	if (vp->v_clen == 0 || loffset != vp->v_lastw + blksize ||
	    bp->b_bio2.bio_offset == NOOFFSET ||
	    (bp->b_bio2.bio_offset != vp->v_lasta + blksize)) {
		maxclen = vmaxiosize(vp);
		if (vp->v_clen != 0) {
			/*
			 * Next block is not sequential.
			 *
			 * If we are not writing at end of file, the process
			 * seeked to another point in the file since its last
			 * write, or we have reached our maximum cluster size,
			 * then push the previous cluster.  Otherwise try
			 * reallocating to make it sequential.
			 *
			 * Change to algorithm: only push previous cluster if
			 * it was sequential from the point of view of the
			 * seqcount heuristic, otherwise leave the buffer
			 * intact so we can potentially optimize the I/O
			 * later on in the buf_daemon or update daemon
			 * flush.
			 */
			cursize = vp->v_lastw - vp->v_cstart + blksize;
			if (bp->b_loffset + blksize < filesize ||
			    loffset != vp->v_lastw + blksize ||
			    vp->v_clen <= cursize) {
				if (!async && seqcount > 0) {
					cluster_wbuild_wb(vp, blksize,
							  vp->v_cstart,
							  cursize);
				}
			} else {
				struct buf **bpp, **endbp;
				struct cluster_save *buflist;

				buflist = cluster_collectbufs(vp, bp, blksize);
				endbp = &buflist->bs_children
				    [buflist->bs_nchildren - 1];
				if (VOP_REALLOCBLKS(vp, buflist)) {
					/*
					 * Failed, push the previous cluster
					 * if *really* writing sequentially
					 * in the logical file (seqcount > 1),
					 * otherwise delay it in the hopes that
					 * the low level disk driver can
					 * optimize the write ordering.
					 */
					for (bpp = buflist->bs_children;
					     bpp <= endbp; bpp++)
						brelse(*bpp);
					kfree(buflist, M_SEGMENT);
					if (seqcount > 1) {
						cluster_wbuild_wb(vp,
						    blksize, vp->v_cstart,
						    cursize);
					}
				} else {
					/*
					 * Succeeded, keep building cluster.
					 */
					for (bpp = buflist->bs_children;
					     bpp <= endbp; bpp++)
						bdwrite(*bpp);
					kfree(buflist, M_SEGMENT);
					vp->v_lastw = loffset;
					vp->v_lasta = bp->b_bio2.bio_offset;
					return;
				}
			}
		}
		/*
		 * Consider beginning a cluster.  If at end of file, make
		 * cluster as large as possible, otherwise find size of
		 * existing cluster.
		 */
		if ((vp->v_type == VREG) &&
		    bp->b_loffset + blksize < filesize &&
		    (bp->b_bio2.bio_offset == NOOFFSET) &&
		    (VOP_BMAP(vp, loffset, &bp->b_bio2.bio_offset,
			      &maxclen, NULL, BUF_CMD_WRITE) ||
		     bp->b_bio2.bio_offset == NOOFFSET)) {
			bawrite(bp);
			vp->v_clen = 0;
			vp->v_lasta = bp->b_bio2.bio_offset;
			vp->v_cstart = loffset + blksize;
			vp->v_lastw = loffset;
			return;
		}
		if (maxclen > blksize)
			vp->v_clen = maxclen - blksize;
		else
			vp->v_clen = 0;
		if (!async && vp->v_clen == 0) { /* I/O not contiguous */
			vp->v_cstart = loffset + blksize;
			bawrite(bp);
		} else {	/* Wait for rest of cluster */
			vp->v_cstart = loffset;
			bdwrite(bp);
		}
	} else if (loffset == vp->v_cstart + vp->v_clen) {
		/*
		 * At end of cluster, write it out if seqcount tells us we
		 * are operating sequentially, otherwise let the buf or
		 * update daemon handle it.
		 */
		bdwrite(bp);
		if (seqcount > 1)
			cluster_wbuild_wb(vp, blksize, vp->v_cstart,
					  vp->v_clen + blksize);
		vp->v_clen = 0;
		vp->v_cstart = loffset + blksize;
	} else if (vm_page_count_severe()) {
		/*
		 * We are low on memory, get it going NOW
		 */
		bawrite(bp);
	} else {
		/*
		 * In the middle of a cluster, so just delay the I/O for now.
		 */
		bdwrite(bp);
	}
	vp->v_lastw = loffset;
	vp->v_lasta = bp->b_bio2.bio_offset;
}
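/*
 * Editor's worked example (illustrative, not in the original source):
 * with blksize 16384 and fully sequential writes, the first write at
 * offset 0 seeds v_cstart/v_lastw and VOP_BMAP() sizes v_clen; each
 * following write at v_lastw + blksize falls into the middle-of-cluster
 * case and is simply bdwrite()n; once loffset reaches v_cstart + v_clen
 * the whole run is pushed via cluster_wbuild_wb() and a new cluster
 * begins at loffset + blksize.
 */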
/*
 * This is the clustered version of bawrite().  It works similarly to
 * cluster_write() except I/O on the buffer is guaranteed to occur.
 */
int
cluster_awrite(struct buf *bp)
{
	int total;

	/*
	 * Don't bother if it isn't clusterable.
	 */
	if ((bp->b_flags & B_CLUSTEROK) == 0 ||
	    bp->b_vp == NULL ||
	    (bp->b_vp->v_flag & VOBJBUF) == 0) {
		total = bp->b_bufsize;
		bawrite(bp);
		return (total);
	}

	total = cluster_wbuild(bp->b_vp, &bp, bp->b_bufsize,
			       bp->b_loffset, vmaxiosize(bp->b_vp));
	if (bp)
		bawrite(bp);

	return (total);
}
/*
 * This is an awful lot like cluster_rbuild...wish they could be combined.
 * The last lbn argument is the current block on which I/O is being
 * performed.  Check to see that it doesn't fall in the middle of
 * the current block (if last_bp == NULL).
 *
 * cluster_wbuild() normally does not guarantee anything.  If bpp is
 * non-NULL and cluster_wbuild() is able to incorporate it into the
 * I/O it will set *bpp to NULL, otherwise it will leave it alone and
 * the caller must dispose of *bpp.
 */
static int
cluster_wbuild(struct vnode *vp, struct buf **bpp,
	       int blksize, off_t start_loffset, int bytes)
{
	struct buf *bp, *tbp;
	int i, j;
	int totalwritten = 0;
	int must_initiate;
	int maxiosize = vmaxiosize(vp);

	while (bytes > 0) {
		/*
		 * If the buffer matches the passed locked & removed buffer
		 * we use the passed buffer (which might not be B_DELWRI).
		 *
		 * Otherwise locate the buffer and determine if it is
		 * eligible for clustering.
		 */
		if (bpp && (*bpp)->b_loffset == start_loffset) {
			tbp = *bpp;
			*bpp = NULL;
			bpp = NULL;
		} else {
			tbp = findblk(vp, start_loffset, FINDBLK_NBLOCK);
			if (tbp == NULL ||
			    (tbp->b_flags & (B_LOCKED | B_INVAL | B_DELWRI)) !=
			     B_DELWRI ||
			    (LIST_FIRST(&tbp->b_dep) && buf_checkwrite(tbp))) {
				if (tbp)
					BUF_UNLOCK(tbp);
				start_loffset += blksize;
				bytes -= blksize;
				continue;
			}
		}
		KKASSERT(tbp->b_cmd == BUF_CMD_DONE);
		/*
		 * Extra memory in the buffer, punt on this buffer.
		 * XXX we could handle this in most cases, but we would
		 * have to push the extra memory down to after our max
		 * possible cluster size and then potentially pull it back
		 * up if the cluster was terminated prematurely--too much
		 * hassle.
		 */
		if (((tbp->b_flags & (B_CLUSTEROK|B_MALLOC)) != B_CLUSTEROK) ||
		    (tbp->b_bcount != tbp->b_bufsize) ||
		    (tbp->b_bcount != blksize) ||
		    (bytes == blksize) ||
		    ((bp = getpbuf_kva(&cluster_pbuf_freecnt)) == NULL)) {
			totalwritten += tbp->b_bufsize;
			bawrite(tbp);
			start_loffset += blksize;
			bytes -= blksize;
			continue;
		}
		/*
		 * Set up the pbuf.  Track our append point with b_bcount
		 * and b_bufsize.  b_bufsize is not used by the device but
		 * our caller uses it to loop clusters and we use it to
		 * detect a premature EOF on the block device.
		 */
		bp->b_bcount = 0;
		bp->b_bufsize = 0;
		bp->b_xio.xio_npages = 0;
		bp->b_loffset = tbp->b_loffset;
		bp->b_bio2.bio_offset = tbp->b_bio2.bio_offset;

		/*
		 * We are synthesizing a buffer out of vm_page_t's, but
		 * if the block size is not page aligned then the starting
		 * address may not be either.  Inherit the b_data offset
		 * from the original buffer.
		 */
		bp->b_data = (char *)((vm_offset_t)bp->b_data |
				      ((vm_offset_t)tbp->b_data & PAGE_MASK));
		bp->b_flags &= ~B_ERROR;
		bp->b_flags |= B_CLUSTER | B_BNOCLIP |
			       (tbp->b_flags & (B_VMIO | B_NEEDCOMMIT));
		bp->b_bio1.bio_caller_info1.cluster_head = NULL;
		bp->b_bio1.bio_caller_info2.cluster_tail = NULL;
		/*
		 * From this location in the file, scan forward to see
		 * if there are buffers with adjacent data that need to
		 * be written as well.
		 *
		 * IO *must* be initiated on index 0 at this point
		 * (particularly when called from cluster_awrite()).
		 */
		for (i = 0; i < bytes; (i += blksize), (start_loffset += blksize)) {
			if (i == 0) {
				must_initiate = 1;
			} else {
				/*
				 * Not first buffer.
				 */
				must_initiate = 0;
				tbp = findblk(vp, start_loffset,
					      FINDBLK_NBLOCK);
				/*
				 * Buffer not found or could not be locked
				 * non-blocking.
				 */
				if (tbp == NULL)
					break;

				/*
				 * If it IS in core, but has different
				 * characteristics, then don't cluster
				 * with it.
				 */
				if ((tbp->b_flags & (B_VMIO | B_CLUSTEROK |
				     B_INVAL | B_DELWRI | B_NEEDCOMMIT))
				    != (B_DELWRI | B_CLUSTEROK |
				     (bp->b_flags & (B_VMIO | B_NEEDCOMMIT))) ||
				    (tbp->b_flags & B_LOCKED)
				) {
					BUF_UNLOCK(tbp);
					break;
				}
				/*
				 * Check that the combined cluster
				 * would make sense with regard to pages
				 * and would not be too large
				 *
				 * WARNING! buf_checkwrite() must be the last
				 *	    check made.  If it returns 0 then
				 *	    we must initiate the I/O.
				 */
				if ((tbp->b_bcount != blksize) ||
				    ((bp->b_bio2.bio_offset + i) !=
				     tbp->b_bio2.bio_offset) ||
				    ((tbp->b_xio.xio_npages + bp->b_xio.xio_npages) >
				     (maxiosize / PAGE_SIZE)) ||
				    (LIST_FIRST(&tbp->b_dep) &&
				     buf_checkwrite(tbp))
				) {
					BUF_UNLOCK(tbp);
					break;
				}
				if (LIST_FIRST(&tbp->b_dep))
					must_initiate = 1;

				/*
				 * Ok, it's passed all the tests,
				 * so remove it from the free list
				 * and mark it busy.  We will use it.
				 */
				KKASSERT(tbp->b_cmd == BUF_CMD_DONE);
			}
			/*
			 * If the IO is via the VM then we do some
			 * special VM hackery (yuck).  Since the buffer's
			 * block size may not be page-aligned it is possible
			 * for a page to be shared between two buffers.  We
			 * have to get rid of the duplication when building
			 * the cluster.
			 */
			if (tbp->b_flags & B_VMIO) {
				vm_page_t m;

				/*
				 * Try to avoid deadlocks with the VM system.
				 * However, we cannot abort the I/O if
				 * must_initiate is non-zero.
				 */
				if (must_initiate == 0) {
					for (j = 0;
					     j < tbp->b_xio.xio_npages;
					     ++j) {
						m = tbp->b_xio.xio_pages[j];
						if (m->flags & PG_BUSY) {
							bqrelse(tbp);
							goto finishcluster;
						}
					}
				}

				for (j = 0; j < tbp->b_xio.xio_npages; ++j) {
					m = tbp->b_xio.xio_pages[j];
					vm_page_busy_wait(m, FALSE, "clurpg");
					vm_page_io_start(m);
					vm_object_pip_add(m->object, 1);
					if ((bp->b_xio.xio_npages == 0) ||
					    (bp->b_xio.xio_pages[bp->b_xio.xio_npages - 1] != m)) {
						bp->b_xio.xio_pages[bp->b_xio.xio_npages] = m;
						bp->b_xio.xio_npages++;
					}
				}
			}
			bp->b_bcount += blksize;
			bp->b_bufsize += blksize;

			tbp->b_flags &= ~B_ERROR;
			tbp->b_cmd = BUF_CMD_WRITE;
			cluster_append(&bp->b_bio1, tbp);

			/*
			 * check for latent dependencies to be handled
			 */
			if (LIST_FIRST(&tbp->b_dep) != NULL)
				buf_start(tbp);
		}
	finishcluster:
		pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
			    (vm_page_t *)bp->b_xio.xio_pages,
			    bp->b_xio.xio_npages);
		if (bp->b_bufsize > bp->b_kvasize) {
			panic("cluster_wbuild: b_bufsize(%d) "
			      "> b_kvasize(%d)\n",
			      bp->b_bufsize, bp->b_kvasize);
		}
		totalwritten += bp->b_bufsize;
		bp->b_dirtyoff = 0;
		bp->b_dirtyend = bp->b_bufsize;
		bp->b_bio1.bio_done = cluster_callback;
		bp->b_cmd = BUF_CMD_WRITE;

		vfs_busy_pages(vp, bp);
		bsetrunningbufspace(bp, bp->b_bufsize);
		vn_strategy(vp, &bp->b_bio1);

		bytes -= i;
	}
	return totalwritten;
}
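#if 0
/*
 * Editor's sketch (illustrative, not in the original source): a
 * hypothetical caller honoring the *bpp contract documented above.
 * If cluster_wbuild() did not incorporate the passed buffer, the
 * caller still owns it and must dispose of it, as cluster_awrite()
 * does.
 */
	total = cluster_wbuild(vp, &bp, blksize, bp->b_loffset, bytes);
	if (bp != NULL)
		bawrite(bp);	/* not incorporated; caller disposes */
#endif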
/*
 * Collect together all the buffers in a cluster.
 * Plus add one additional buffer.
 */
static struct cluster_save *
cluster_collectbufs(struct vnode *vp, struct buf *last_bp, int blksize)
{
	struct cluster_save *buflist;
	struct buf *bp;
	off_t loffset;
	int i, len;

	len = (int)(vp->v_lastw - vp->v_cstart + blksize) / blksize;
	buflist = kmalloc(sizeof(struct buf *) * (len + 1) + sizeof(*buflist),
			  M_SEGMENT, M_WAITOK);
	buflist->bs_nchildren = 0;
	buflist->bs_children = (struct buf **)(buflist + 1);
	for (loffset = vp->v_cstart, i = 0; i < len; (loffset += blksize), i++) {
		(void) bread(vp, loffset, last_bp->b_bcount, &bp);
		buflist->bs_children[i] = bp;
		if (bp->b_bio2.bio_offset == NOOFFSET) {
			VOP_BMAP(bp->b_vp, bp->b_loffset,
				 &bp->b_bio2.bio_offset,
				 NULL, NULL, BUF_CMD_WRITE);
		}
	}
	buflist->bs_children[i] = bp = last_bp;
	if (bp->b_bio2.bio_offset == NOOFFSET) {
		VOP_BMAP(bp->b_vp, bp->b_loffset, &bp->b_bio2.bio_offset,
			 NULL, NULL, BUF_CMD_WRITE);
	}
	buflist->bs_nchildren = i + 1;
	return (buflist);
}
static void
cluster_append(struct bio *bio, struct buf *tbp)
{
	tbp->b_cluster_next = NULL;
	if (bio->bio_caller_info1.cluster_head == NULL) {
		bio->bio_caller_info1.cluster_head = tbp;
		bio->bio_caller_info2.cluster_tail = tbp;
	} else {
		bio->bio_caller_info2.cluster_tail->b_cluster_next = tbp;
		bio->bio_caller_info2.cluster_tail = tbp;
	}
}
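/*
 * Editor's note (illustrative, not in the original source): keeping
 * both cluster_head and cluster_tail makes each append O(1); the chain
 * is consumed from the head by cluster_callback(), one component
 * buffer per iteration via b_cluster_next.
 */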
static void
cluster_setram(struct buf *bp)
{
	bp->b_flags |= B_RAM;
	if (bp->b_xio.xio_npages)
		vm_page_flag_set(bp->b_xio.xio_pages[0], PG_RAM);
}