/*
 * Copyright (c) 1993
 *	The Regents of the University of California.  All rights reserved.
 * Modifications/enhancements:
 * 	Copyright (c) 1995 John S. Dyson.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_cluster.c	8.7 (Berkeley) 2/13/94
 * $FreeBSD: src/sys/kern/vfs_cluster.c,v 1.92.2.9 2001/11/18 07:10:59 dillon Exp $
 * $DragonFly: src/sys/kern/vfs_cluster.c,v 1.40 2008/07/14 03:09:00 dillon Exp $
 */
#include "opt_debug_cluster.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/resourcevar.h>
#include <sys/vmmeter.h>

#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <sys/sysctl.h>

#include <sys/buf2.h>
#include <vm/vm_page2.h>
#if defined(CLUSTERDEBUG)
#include <sys/sysctl.h>
static int	rcluster = 0;
SYSCTL_INT(_debug, OID_AUTO, rcluster, CTLFLAG_RW, &rcluster, 0, "");
#endif

static MALLOC_DEFINE(M_SEGMENT, "cluster_save", "cluster_save buffer");
static struct cluster_save *
	cluster_collectbufs (struct vnode *vp, struct buf *last_bp,
			     int blksize);
static struct buf *
	cluster_rbuild (struct vnode *vp, off_t filesize, off_t loffset,
			off_t doffset, int blksize, int run,
			struct buf *fbp);
static void cluster_callback (struct bio *);

static int write_behind = 1;
SYSCTL_INT(_vfs, OID_AUTO, write_behind, CTLFLAG_RW, &write_behind, 0, "");

extern vm_page_t bogus_page;

extern int cluster_pbuf_freecnt;
/*
 * Maximum number of blocks for read-ahead.
 */

/*
 * This replaces bread.
 */
int
cluster_read(struct vnode *vp, off_t filesize, off_t loffset,
	     int blksize, int totread, int seqcount, struct buf **bpp)
{
	struct buf *bp, *rbp, *reqbp, *tbp;
	off_t origoffset;
	off_t doffset;
	int error = 0;
	int burstbytes;
	int i;
	int maxra, racluster;
	/*
	 * Try to limit the amount of read-ahead by a few
	 * ad-hoc parameters.  This needs work!!!
	 */
	racluster = vmaxiosize(vp) / blksize;
	maxra = 2 * racluster + (totread / blksize);
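	/*
	 * Worked example (assumed figures): with a 64KB maximum I/O size
	 * and 8KB blocks, racluster = 8; a 32KB request (4 blocks) then
	 * gives maxra = 2 * 8 + 4 = 20 blocks of candidate read-ahead.
	 */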
	/*
	 * Get the requested block.
	 */
	*bpp = reqbp = bp = getblk(vp, loffset, blksize, 0, 0);
	origoffset = loffset;
	/*
	 * If it is in the cache, then check to see if the reads have been
	 * sequential.  If they have, then try some read-ahead, otherwise
	 * back-off on prospective read-aheads.
	 */
	if (bp->b_flags & B_CACHE) {
		if (!seqcount) {
			return 0;
		} else if ((bp->b_flags & B_RAM) == 0) {
			return 0;
		}
		bp->b_flags &= ~B_RAM;

		/*
		 * Set read-ahead-mark only if we can passively lock
		 * the buffer.  Note that with these flags the bp
		 * could very well exist even though NULL is returned.
		 */
		for (i = 1; i < maxra; i++) {
			tbp = findblk(vp, loffset + i * blksize,
				      FINDBLK_NBLOCK);
			if (tbp == NULL)
				break;
			if (((i % racluster) == (racluster - 1)) ||
			    (i == (maxra - 1))) {
				tbp->b_flags |= B_RAM;
			}
			BUF_UNLOCK(tbp);
		}
		if (i >= maxra)
			return 0;
		loffset += i * blksize;
		reqbp = bp = NULL;
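		/*
		 * Note: the cached case falls through with bp/reqbp
		 * cleared (no device I/O is needed for the caller's
		 * buffer) and loffset advanced past the last read-ahead-
		 * marked block, so the read-ahead stage below starts
		 * there.
		 */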
	} else {
		off_t firstread = bp->b_loffset;
		int nblks;

		/*
		 * Set-up synchronous read for bp.
		 */
		bp->b_cmd = BUF_CMD_READ;
		bp->b_bio1.bio_done = biodone_sync;
		bp->b_bio1.bio_flags |= BIO_SYNC;

		KASSERT(firstread != NOOFFSET,
			("cluster_read: no buffer offset"));
		if (firstread + totread > filesize)
			totread = (int)(filesize - firstread);
		nblks = totread / blksize;
		if (nblks > 1) {
			if (nblks > racluster)
				nblks = racluster;

			error = VOP_BMAP(vp, loffset, &doffset,
					 &burstbytes, NULL, BUF_CMD_READ);
			if (error)
				goto single_block_read;
			if (doffset == NOOFFSET)
				goto single_block_read;
			if (burstbytes < blksize * 2)
				goto single_block_read;
			if (nblks > burstbytes / blksize)
				nblks = burstbytes / blksize;

			bp = cluster_rbuild(vp, filesize, loffset,
					    doffset, blksize, nblks, bp);
			loffset += bp->b_bufsize;
		} else {
single_block_read:
			/*
			 * If it isn't in the cache, then get a chunk from
			 * disk if sequential, otherwise just get the block.
			 */
			bp->b_flags |= B_RAM;
			loffset += blksize;
		}
	}
	/*
	 * If B_CACHE was not set issue bp.  bp will either be an
	 * asynchronous cluster buf or a synchronous single-buf.
	 * If it is a single buf it will be the same as reqbp.
	 *
	 * NOTE: Once an async cluster buf is issued bp becomes invalid.
	 */
	if (bp) {
#if defined(CLUSTERDEBUG)
		if (rcluster)
			kprintf("S(%lld,%d,%d) ",
				bp->b_loffset, bp->b_bcount, seqcount);
#endif
		if ((bp->b_flags & B_CLUSTER) == 0)
			vfs_busy_pages(vp, bp);
		bp->b_flags &= ~(B_ERROR|B_INVAL);
		vn_strategy(vp, &bp->b_bio1);
		/* bp invalid now */
	}
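	/*
	 * At this point bp was either the caller's synchronous buffer
	 * (BIO_SYNC, waited on via biowait() at the bottom) or an async
	 * cluster buf whose completion runs cluster_callback(); in the
	 * async case it must no longer be touched.
	 */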
	/*
	 * If we have been doing sequential I/O, then do some read-ahead.
	 *
	 * Only mess with buffers which we can immediately lock.  HAMMER
	 * will do device-readahead irrespective of what the blocks
	 * represent.
	 */
	while (error == 0 && seqcount > 0 &&
	       loffset < origoffset + seqcount * blksize &&
	       loffset + blksize <= filesize) {
		int ntoread;
		int nblksread;
		int tmp_error;

		rbp = getblk(vp, loffset, blksize,
			     GETBLK_SZMATCH|GETBLK_NOWAIT, 0);
		if (rbp == NULL)
			break;
		if ((rbp->b_flags & B_CACHE)) {
			bqrelse(rbp);
			break;
		}

		/*
		 * An error from the read-ahead bmap has nothing to do
		 * with the caller's original request.
		 */
		tmp_error = VOP_BMAP(vp, loffset, &doffset,
				     &burstbytes, NULL, BUF_CMD_READ);
		if (tmp_error || doffset == NOOFFSET) {
			rbp->b_flags |= B_INVAL;
			brelse(rbp);
			break;
		}

		ntoread = burstbytes / blksize;
		nblksread = (totread + blksize - 1) / blksize;
		if (seqcount < nblksread)
			seqcount = nblksread;
		if (ntoread > seqcount)
			ntoread = seqcount;

		rbp->b_cmd = BUF_CMD_READ;
		rbp->b_flags |= B_RAM/* | B_AGE*/;

		if (ntoread > 1) {
			rbp = cluster_rbuild(vp, filesize, loffset,
					     doffset, blksize,
					     ntoread, rbp);
		} else {
			rbp->b_bio2.bio_offset = doffset;
		}
#if defined(CLUSTERDEBUG)
		if (rcluster) {
			if (bp) {
				kprintf("A+(%lld,%d,%lld,%d) ",
					rbp->b_loffset, rbp->b_bcount,
					rbp->b_loffset - origoffset,
					seqcount);
			} else {
				kprintf("A(%lld,%d,%lld,%d) ",
					rbp->b_loffset, rbp->b_bcount,
					rbp->b_loffset - origoffset,
					seqcount);
			}
		}
#endif
		rbp->b_flags &= ~(B_ERROR|B_INVAL);

		if ((rbp->b_flags & B_CLUSTER) == 0)
			vfs_busy_pages(vp, rbp);
		BUF_KERNPROC(rbp);
		loffset += rbp->b_bufsize;
		vn_strategy(vp, &rbp->b_bio1);
		/* rbp invalid now */
	}
	/*
	 * Wait for our original buffer to complete its I/O.  reqbp will
	 * be NULL if the original buffer was B_CACHE.  We are returning
	 * (*bpp) which is the same as reqbp when reqbp != NULL.
	 */
	if (reqbp) {
		KKASSERT(reqbp->b_bio1.bio_flags & BIO_SYNC);
		error = biowait(&reqbp->b_bio1, "clurd");
	}
	return (error);
}
/*
 * If blocks are contiguous on disk, use this to provide clustered
 * read ahead.  We will read as many blocks as possible sequentially
 * and then parcel them up into logical blocks in the buffer hash table.
 *
 * This function either returns a cluster buf or it returns fbp.  fbp is
 * already expected to be set up as a synchronous or asynchronous request.
 *
 * If a cluster buf is returned it will always be async.
 */
static struct buf *
cluster_rbuild(struct vnode *vp, off_t filesize, off_t loffset, off_t doffset,
	       int blksize, int run, struct buf *fbp)
{
	struct buf *bp, *tbp;
	off_t boffset;
	int i, j;
	int maxiosize = vmaxiosize(vp);
	while (loffset + run * blksize > filesize) {
		--run;
	}

	tbp = fbp;
	tbp->b_bio2.bio_offset = doffset;
	if ((tbp->b_flags & B_MALLOC) ||
	    ((tbp->b_flags & B_VMIO) == 0) || (run <= 1)) {
		return tbp;
	}

	bp = trypbuf(&cluster_pbuf_freecnt);
	if (bp == NULL)
		return tbp;
	/*
	 * We are synthesizing a buffer out of vm_page_t's, but
	 * if the block size is not page aligned then the starting
	 * address may not be either.  Inherit the b_data offset
	 * from the original buffer.
	 */
	bp->b_data = (char *)((vm_offset_t)bp->b_data |
			      ((vm_offset_t)tbp->b_data & PAGE_MASK));
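	/*
	 * Illustration (hypothetical sizes): with 2KB blocks a component
	 * buffer may begin 2KB into its first page; OR-ing that intra-page
	 * offset into the pbuf's page-aligned KVA makes b_data point at
	 * the same byte within the mapped page.
	 */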
	bp->b_flags |= B_CLUSTER | B_VMIO;
	bp->b_cmd = BUF_CMD_READ;
	bp->b_bio1.bio_done = cluster_callback;		/* default to async */
	bp->b_bio1.bio_caller_info1.cluster_head = NULL;
	bp->b_bio1.bio_caller_info2.cluster_tail = NULL;
	bp->b_loffset = loffset;
	bp->b_bio2.bio_offset = doffset;
	KASSERT(bp->b_loffset != NOOFFSET,
		("cluster_rbuild: no buffer offset"));

	bp->b_bcount = 0;
	bp->b_bufsize = 0;
	bp->b_xio.xio_npages = 0;
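	/*
	 * Walk the candidate run.  Each component buffer is picked up
	 * with a non-blocking getblk() and its VM pages are merged into
	 * the cluster pbuf; the run is terminated early the moment a
	 * component fails any of the tests below.
	 */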
	for (boffset = doffset, i = 0; i < run; ++i, boffset += blksize) {
		if (i) {
			if ((bp->b_xio.xio_npages * PAGE_SIZE) +
			    round_page(blksize) > maxiosize) {
				break;
			}

			/*
			 * Shortcut some checks and try to avoid buffers that
			 * would block in the lock.  The same checks have to
			 * be made again after we officially get the buffer.
			 */
			tbp = getblk(vp, loffset + i * blksize, blksize,
				     GETBLK_SZMATCH|GETBLK_NOWAIT, 0);
			if (tbp == NULL)
				break;
			for (j = 0; j < tbp->b_xio.xio_npages; j++) {
				if (tbp->b_xio.xio_pages[j]->valid)
					break;
			}
			if (j != tbp->b_xio.xio_npages) {
				bqrelse(tbp);
				break;
			}
			/*
			 * Stop scanning if the buffer is fully valid
			 * (marked B_CACHE), or locked (may be doing a
			 * background write), or if the buffer is not
			 * VMIO backed.  The clustering code can only deal
			 * with VMIO-backed buffers.
			 */
			if ((tbp->b_flags & (B_CACHE|B_LOCKED)) ||
			    (tbp->b_flags & B_VMIO) == 0 ||
			    (LIST_FIRST(&tbp->b_dep) != NULL &&
			     buf_checkread(tbp))) {
				bqrelse(tbp);
				break;
			}

			/*
			 * The buffer must be completely invalid in order to
			 * take part in the cluster.  If it is partially valid
			 * then we stop.
			 */
			for (j = 0; j < tbp->b_xio.xio_npages; j++) {
				if (tbp->b_xio.xio_pages[j]->valid)
					break;
			}
			if (j != tbp->b_xio.xio_npages) {
				bqrelse(tbp);
				break;
			}
			/*
			 * Set a read-ahead mark as appropriate
			 */
			if (i == 1 || i == (run - 1))
				tbp->b_flags |= B_RAM;

			/*
			 * Depress the priority of buffers not explicitly
			 * requested.
			 */
			/* tbp->b_flags |= B_AGE; */

			/*
			 * Set the block number if it isn't set, otherwise
			 * if it is make sure it matches the block number we
			 * expect.
			 */
			if (tbp->b_bio2.bio_offset == NOOFFSET) {
				tbp->b_bio2.bio_offset = boffset;
			} else if (tbp->b_bio2.bio_offset != boffset) {
				brelse(tbp);
				break;
			}
		}
		/*
		 * The passed-in tbp (i == 0) will already be set up for
		 * async or sync operation.  All other tbp's acquired in
		 * our loop are set up for async operation.
		 */
		tbp->b_cmd = BUF_CMD_READ;
		BUF_KERNPROC(tbp);
		cluster_append(&bp->b_bio1, tbp);
		for (j = 0; j < tbp->b_xio.xio_npages; ++j) {
			vm_page_t m;

			m = tbp->b_xio.xio_pages[j];
			vm_page_io_start(m);
			vm_object_pip_add(m->object, 1);
			if ((bp->b_xio.xio_npages == 0) ||
			    (bp->b_xio.xio_pages[bp->b_xio.xio_npages-1] != m)) {
				bp->b_xio.xio_pages[bp->b_xio.xio_npages] = m;
				bp->b_xio.xio_npages++;
			}
			if ((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL)
				tbp->b_xio.xio_pages[j] = bogus_page;
		}
		/*
		 * XXX shouldn't this be += size for both, like in
		 * cluster_wbuild()?
		 *
		 * Don't inherit tbp->b_bufsize as it may be larger due to
		 * a non-page-aligned size.  Instead just aggregate using
		 * 'blksize'.
		 */
		if (tbp->b_bcount != blksize)
			kprintf("warning: tbp->b_bcount wrong %d vs %d\n",
				tbp->b_bcount, blksize);
		if (tbp->b_bufsize != blksize)
			kprintf("warning: tbp->b_bufsize wrong %d vs %d\n",
				tbp->b_bufsize, blksize);
		bp->b_bcount += blksize;
		bp->b_bufsize += blksize;
	}
	/*
	 * Fully valid pages in the cluster are already good and do not need
	 * to be re-read from disk.  Replace the page with bogus_page.
	 */
	for (j = 0; j < bp->b_xio.xio_npages; j++) {
		if ((bp->b_xio.xio_pages[j]->valid & VM_PAGE_BITS_ALL) ==
		    VM_PAGE_BITS_ALL) {
			bp->b_xio.xio_pages[j] = bogus_page;
		}
	}
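	/*
	 * (bogus_page is a single throwaway placeholder page; DMA aimed
	 * at it is simply discarded, which protects the already-valid
	 * resident pages from being overwritten by the device.)
	 */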
	if (bp->b_bufsize > bp->b_kvasize) {
		panic("cluster_rbuild: b_bufsize(%d) > b_kvasize(%d)",
		      bp->b_bufsize, bp->b_kvasize);
	}
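	/*
	 * Map the component pages contiguously into the pbuf's KVA so
	 * that a single device I/O below transfers data for every
	 * underlying buffer at once.
	 */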
	pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
		    (vm_page_t *)bp->b_xio.xio_pages, bp->b_xio.xio_npages);
	return (bp);
}
/*
 * Cleanup after a clustered read or write.
 * This is complicated by the fact that any of the buffers might have
 * extra memory (if there were no empty buffer headers at allocbuf time)
 * that we will need to shift around.
 *
 * The returned bio is &bp->b_bio1
 */
static void
cluster_callback(struct bio *bio)
{
	struct buf *bp = bio->bio_buf;
	struct buf *tbp;
	int error = 0;
	/*
	 * Must propagate errors to all the components.  A short read (EOF)
	 * is a critical error.
	 */
	if (bp->b_flags & B_ERROR) {
		error = bp->b_error;
	} else if (bp->b_bcount != bp->b_bufsize) {
		panic("cluster_callback: unexpected EOF on cluster %p!", bio);
	}

	pmap_qremove(trunc_page((vm_offset_t)bp->b_data),
		     bp->b_xio.xio_npages);
	/*
	 * Move memory from the large cluster buffer into the component
	 * buffers and mark IO as done on these.  Since the memory map
	 * is the same, no actual copying is required.
	 */
	while ((tbp = bio->bio_caller_info1.cluster_head) != NULL) {
		bio->bio_caller_info1.cluster_head = tbp->b_cluster_next;
		if (error) {
			tbp->b_flags |= B_ERROR;
			tbp->b_error = error;
		} else {
			tbp->b_dirtyoff = tbp->b_dirtyend = 0;
			tbp->b_flags &= ~(B_ERROR|B_INVAL);

			/*
			 * XXX the bdwrite()/bqrelse() issued during
			 * cluster building clears B_RELBUF (see bqrelse()
			 * comment).  If direct I/O was specified, we have
			 * to restore it here to allow the buffer and VM
			 * to be freed.
			 */
			if (tbp->b_flags & B_DIRECT)
				tbp->b_flags |= B_RELBUF;
		}
		biodone(&tbp->b_bio1);
	}
	relpbuf(bp, &cluster_pbuf_freecnt);
}
/*
 * Implement modified write build for cluster.
 *
 *	write_behind = 0	write behind disabled
 *	write_behind = 1	write behind normal (default)
 *	write_behind = 2	write behind backed-off
 */
static __inline int
cluster_wbuild_wb(struct vnode *vp, int blksize, off_t start_loffset, int len)
{
	int r = 0;

	switch(write_behind) {
	case 2:
		if (start_loffset < len)
			break;
		start_loffset -= len;
		/* fall through */
	case 1:
		r = cluster_wbuild(vp, blksize, start_loffset, len);
		/* fall through */
	default:
		break;
	}
	return(r);
}
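/*
 * Example of the backed-off mode (illustrative numbers): with
 * write_behind == 2 and a 64KB cluster, a request starting at offset
 * 1MB is shifted back one cluster length and issued for 1MB - 64KB;
 * requests that start below one cluster length are suppressed.
 */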
/*
 * Do clustered write for FFS.
 *
 * 1. Write is not sequential (write asynchronously)
 * Write is sequential:
 * 2. beginning of cluster - begin cluster
 * 3. middle of a cluster - add to cluster
 * 4. end of a cluster - asynchronously write cluster
 */
void
cluster_write(struct buf *bp, off_t filesize, int blksize, int seqcount)
{
	struct vnode *vp;
	off_t loffset;
	int maxclen, cursize;
	int async;

	vp = bp->b_vp;
	if (vp->v_type == VREG)
		async = vp->v_mount->mnt_flag & MNT_ASYNC;
	else
		async = 0;
	loffset = bp->b_loffset;
	KASSERT(bp->b_loffset != NOOFFSET,
		("cluster_write: no buffer offset"));

	/* Initialize vnode to beginning of file. */
	if (loffset == 0)
		vp->v_lasta = vp->v_clen = vp->v_cstart = vp->v_lastw = 0;

	if (vp->v_clen == 0 || loffset != vp->v_lastw + blksize ||
	    bp->b_bio2.bio_offset == NOOFFSET ||
	    (bp->b_bio2.bio_offset != vp->v_lasta + blksize)) {
		maxclen = vmaxiosize(vp);
		if (vp->v_clen != 0) {
			/*
			 * Next block is not sequential.
			 *
			 * If we are not writing at end of file, the process
			 * seeked to another point in the file since its last
			 * write, or we have reached our maximum cluster size,
			 * then push the previous cluster.  Otherwise try
			 * reallocating to make it sequential.
			 *
			 * Change to algorithm: only push previous cluster if
			 * it was sequential from the point of view of the
			 * seqcount heuristic, otherwise leave the buffer
			 * intact so we can potentially optimize the I/O
			 * later on in the buf_daemon or update daemon
			 * flush.
			 */
			cursize = vp->v_lastw - vp->v_cstart + blksize;
			if (bp->b_loffset + blksize != filesize ||
			    loffset != vp->v_lastw + blksize ||
			    vp->v_clen <= cursize) {
				if (!async && seqcount > 0) {
					cluster_wbuild_wb(vp, blksize,
						vp->v_cstart, cursize);
				}
			} else {
				struct buf **bpp, **endbp;
				struct cluster_save *buflist;

				buflist = cluster_collectbufs(vp, bp, blksize);
				endbp = &buflist->bs_children
				    [buflist->bs_nchildren - 1];
				if (VOP_REALLOCBLKS(vp, buflist)) {
					/*
					 * Failed, push the previous cluster
					 * if *really* writing sequentially
					 * in the logical file (seqcount > 1),
					 * otherwise delay it in the hopes that
					 * the low level disk driver can
					 * optimize the write ordering.
					 */
					for (bpp = buflist->bs_children;
					     bpp < endbp; bpp++)
						brelse(*bpp);
					kfree(buflist, M_SEGMENT);
					if (seqcount > 1) {
						cluster_wbuild_wb(vp,
						    blksize, vp->v_cstart,
						    cursize);
					}
				} else {
					/*
					 * Succeeded, keep building cluster.
					 */
					for (bpp = buflist->bs_children;
					     bpp <= endbp; bpp++)
						bdwrite(*bpp);
					kfree(buflist, M_SEGMENT);
					vp->v_lastw = loffset;
					vp->v_lasta = bp->b_bio2.bio_offset;
					return;
				}
			}
		}
		/*
		 * Consider beginning a cluster.  If at end of file, make
		 * cluster as large as possible, otherwise find size of
		 * existing cluster.
		 */
		if ((vp->v_type == VREG) &&
		    bp->b_loffset + blksize != filesize &&
		    (bp->b_bio2.bio_offset == NOOFFSET) &&
		    (VOP_BMAP(vp, loffset, &bp->b_bio2.bio_offset,
			      &maxclen, NULL, BUF_CMD_WRITE) ||
		     bp->b_bio2.bio_offset == NOOFFSET)) {
			bdwrite(bp);
			vp->v_clen = 0;
			vp->v_lasta = bp->b_bio2.bio_offset;
			vp->v_cstart = loffset + blksize;
			vp->v_lastw = loffset;
			return;
		}
		if (maxclen > blksize)
			vp->v_clen = maxclen - blksize;
		else
			vp->v_clen = 0;
		if (!async && vp->v_clen == 0) {	/* I/O not contiguous */
			vp->v_cstart = loffset + blksize;
			bawrite(bp);
		} else {	/* Wait for rest of cluster */
			vp->v_cstart = loffset;
			bdwrite(bp);
		}
	} else if (loffset == vp->v_cstart + vp->v_clen) {
		/*
		 * At end of cluster, write it out if seqcount tells us we
		 * are operating sequentially, otherwise let the buf or
		 * update daemon handle it.
		 */
		bdwrite(bp);
		if (seqcount > 1)
			cluster_wbuild_wb(vp, blksize, vp->v_cstart,
					  vp->v_clen + blksize);
		vp->v_clen = 0;
		vp->v_cstart = loffset + blksize;
	} else if (vm_page_count_severe()) {
		/*
		 * We are low on memory, get it going NOW
		 */
		bawrite(bp);
	} else {
		/*
		 * In the middle of a cluster, so just delay the I/O for now.
		 */
		bdwrite(bp);
	}
	vp->v_lastw = loffset;
	vp->v_lasta = bp->b_bio2.bio_offset;
}
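/*
 * (Cluster-state summary: v_cstart is the logical start of the cluster
 * being accumulated, v_clen bounds its extent, and v_lastw/v_lasta
 * record the last logical/physical offsets written; the sequentiality
 * tests at the top of cluster_write() compare each incoming buffer
 * against these fields.)
 */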
/*
 * This is an awful lot like cluster_rbuild...wish they could be combined.
 * The last lbn argument is the current block on which I/O is being
 * performed.  Check to see that it doesn't fall in the middle of
 * the current block (if last_bp == NULL).
 */
int
cluster_wbuild(struct vnode *vp, int blksize, off_t start_loffset, int bytes)
{
	struct buf *bp, *tbp;
	int i, j;
	int totalwritten = 0;
	int maxiosize = vmaxiosize(vp);

	while (bytes > 0) {
		/*
		 * If the buffer is not delayed-write (i.e. dirty), or it
		 * is delayed-write but either locked or inval, it cannot
		 * partake in the clustered write.
		 */
		tbp = findblk(vp, start_loffset, FINDBLK_NBLOCK);
		if (tbp == NULL ||
		    (tbp->b_flags & (B_LOCKED | B_INVAL | B_DELWRI)) !=
		     B_DELWRI ||
		    (LIST_FIRST(&tbp->b_dep) && buf_checkwrite(tbp))) {
			if (tbp)
				BUF_UNLOCK(tbp);
			start_loffset += blksize;
			bytes -= blksize;
			continue;
		}
		bremfree(tbp);
		KKASSERT(tbp->b_cmd == BUF_CMD_DONE);
		/*
		 * Extra memory in the buffer, punt on this buffer.
		 * XXX we could handle this in most cases, but we would
		 * have to push the extra memory down to after our max
		 * possible cluster size and then potentially pull it back
		 * up if the cluster was terminated prematurely--too much
		 * hassle.
		 */
		if (((tbp->b_flags & (B_CLUSTEROK|B_MALLOC)) != B_CLUSTEROK) ||
		    (tbp->b_bcount != tbp->b_bufsize) ||
		    (tbp->b_bcount != blksize) ||
		    (bytes == blksize) ||
		    ((bp = getpbuf(&cluster_pbuf_freecnt)) == NULL)) {
			totalwritten += tbp->b_bufsize;
			bawrite(tbp);
			start_loffset += blksize;
			bytes -= blksize;
			continue;
		}
		/*
		 * Set up the pbuf.  Track our append point with b_bcount
		 * and b_bufsize.  b_bufsize is not used by the device but
		 * our caller uses it to loop clusters and we use it to
		 * detect a premature EOF on the block device.
		 */
		bp->b_bcount = 0;
		bp->b_bufsize = 0;
		bp->b_xio.xio_npages = 0;
		bp->b_loffset = tbp->b_loffset;
		bp->b_bio2.bio_offset = tbp->b_bio2.bio_offset;
		/*
		 * We are synthesizing a buffer out of vm_page_t's, but
		 * if the block size is not page aligned then the starting
		 * address may not be either.  Inherit the b_data offset
		 * from the original buffer.
		 */
		bp->b_data = (char *)((vm_offset_t)bp->b_data |
				      ((vm_offset_t)tbp->b_data & PAGE_MASK));
		bp->b_flags &= ~B_ERROR;
		bp->b_flags |= B_CLUSTER | B_BNOCLIP |
			       (tbp->b_flags & (B_VMIO | B_NEEDCOMMIT));
		bp->b_bio1.bio_caller_info1.cluster_head = NULL;
		bp->b_bio1.bio_caller_info2.cluster_tail = NULL;

		/*
		 * From this location in the file, scan forward to see
		 * if there are buffers with adjacent data that need to
		 * be written as well.
		 */
		for (i = 0; i < bytes; (i += blksize), (start_loffset += blksize)) {
			if (i != 0) { /* If not the first buffer */
				tbp = findblk(vp, start_loffset,
					      FINDBLK_NBLOCK);
				/*
				 * Buffer not found or could not be locked
				 * non-blocking.
				 */
				if (tbp == NULL)
					break;

				/*
				 * If it IS in core, but has different
				 * characteristics, then don't cluster
				 * with it.
				 */
				if ((tbp->b_flags & (B_VMIO | B_CLUSTEROK |
				     B_INVAL | B_DELWRI | B_NEEDCOMMIT))
				    != (B_DELWRI | B_CLUSTEROK |
				     (bp->b_flags & (B_VMIO | B_NEEDCOMMIT))) ||
				    (tbp->b_flags & B_LOCKED) ||
				    (LIST_FIRST(&tbp->b_dep) &&
				     buf_checkwrite(tbp))) {
					BUF_UNLOCK(tbp);
					break;
				}
				/*
				 * Check that the combined cluster
				 * would make sense with regard to pages
				 * and would not be too large
				 */
				if ((tbp->b_bcount != blksize) ||
				    ((bp->b_bio2.bio_offset + i) !=
				     tbp->b_bio2.bio_offset) ||
				    ((tbp->b_xio.xio_npages + bp->b_xio.xio_npages) >
				     (maxiosize / PAGE_SIZE))) {
					BUF_UNLOCK(tbp);
					break;
				}

				/*
				 * Ok, it's passed all the tests,
				 * so remove it from the free list
				 * and mark it busy.  We will use it.
				 */
				bremfree(tbp);
				KKASSERT(tbp->b_cmd == BUF_CMD_DONE);
			} /* end of code for non-first buffers only */
			/*
			 * If the IO is via the VM then we do some
			 * special VM hackery (yuck).  Since the buffer's
			 * block size may not be page-aligned it is possible
			 * for a page to be shared between two buffers.  We
			 * have to get rid of the duplication when building
			 * the cluster.
			 */
			if (tbp->b_flags & B_VMIO) {
				vm_page_t m;

				if (i != 0) { /* if not first buffer */
					for (j = 0; j < tbp->b_xio.xio_npages; ++j) {
						m = tbp->b_xio.xio_pages[j];
						if (m->flags & PG_BUSY) {
							bqrelse(tbp);
							goto finishcluster;
						}
					}
				}
				for (j = 0; j < tbp->b_xio.xio_npages; ++j) {
					m = tbp->b_xio.xio_pages[j];
					vm_page_io_start(m);
					vm_object_pip_add(m->object, 1);
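					/*
					 * Adjacent buffers with non-page-
					 * aligned sizes can share a page;
					 * only append the page if it isn't
					 * already the last one in the pbuf's
					 * array.
					 */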
					if ((bp->b_xio.xio_npages == 0) ||
					    (bp->b_xio.xio_pages[bp->b_xio.xio_npages - 1] != m)) {
						bp->b_xio.xio_pages[bp->b_xio.xio_npages] = m;
						bp->b_xio.xio_npages++;
					}
				}
			}
			bp->b_bcount += blksize;
			bp->b_bufsize += blksize;
			bundirty(tbp);
			tbp->b_flags &= ~B_ERROR;
			tbp->b_cmd = BUF_CMD_WRITE;
			BUF_KERNPROC(tbp);
			cluster_append(&bp->b_bio1, tbp);

			/*
			 * check for latent dependencies to be handled
			 * with the buf_start() call.
			 */
			if (LIST_FIRST(&tbp->b_dep) != NULL)
				buf_start(tbp);
		}
	finishcluster:
		pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
			    (vm_page_t *)bp->b_xio.xio_pages,
			    bp->b_xio.xio_npages);
		if (bp->b_bufsize > bp->b_kvasize) {
			panic(
			    "cluster_wbuild: b_bufsize(%d) > b_kvasize(%d)\n",
			    bp->b_bufsize, bp->b_kvasize);
		}
		totalwritten += bp->b_bufsize;
		bp->b_dirtyoff = 0;
		bp->b_dirtyend = bp->b_bufsize;
		bp->b_bio1.bio_done = cluster_callback;
		bp->b_cmd = BUF_CMD_WRITE;

		vfs_busy_pages(vp, bp);
		bp->b_runningbufspace = bp->b_bufsize;
		if (bp->b_runningbufspace) {
			runningbufspace += bp->b_runningbufspace;
		}
		BUF_KERNPROC(bp);
		vn_strategy(vp, &bp->b_bio1);

		bytes -= i;
	}
	return totalwritten;
}
/*
 * Collect together all the buffers in a cluster.
 * Plus add one additional buffer.
 */
static struct cluster_save *
cluster_collectbufs(struct vnode *vp, struct buf *last_bp, int blksize)
{
	struct cluster_save *buflist;
	struct buf *bp;
	off_t loffset;
	int i, len;

	len = (int)(vp->v_lastw - vp->v_cstart + blksize) / blksize;
	buflist = kmalloc(sizeof(struct buf *) * (len + 1) + sizeof(*buflist),
			  M_SEGMENT, M_WAITOK);
	buflist->bs_nchildren = 0;
	buflist->bs_children = (struct buf **)(buflist + 1);
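	/*
	 * (Layout note: the header and the child-pointer array come from
	 * a single allocation; bs_children points just past the
	 * cluster_save header and has room for the existing cluster's
	 * blocks plus one extra slot for last_bp.)
	 */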
	for (loffset = vp->v_cstart, i = 0; i < len;
	     (loffset += blksize), i++) {
		(void) bread(vp, loffset, last_bp->b_bcount, &bp);
		buflist->bs_children[i] = bp;
		if (bp->b_bio2.bio_offset == NOOFFSET) {
			VOP_BMAP(bp->b_vp, bp->b_loffset,
				 &bp->b_bio2.bio_offset,
				 NULL, NULL, BUF_CMD_WRITE);
		}
	}
	buflist->bs_children[i] = bp = last_bp;
	if (bp->b_bio2.bio_offset == NOOFFSET) {
		VOP_BMAP(bp->b_vp, bp->b_loffset, &bp->b_bio2.bio_offset,
			 NULL, NULL, BUF_CMD_WRITE);
	}
	buflist->bs_nchildren = i + 1;
	return (buflist);
}
void
cluster_append(struct bio *bio, struct buf *tbp)
{
	tbp->b_cluster_next = NULL;
	if (bio->bio_caller_info1.cluster_head == NULL) {
		bio->bio_caller_info1.cluster_head = tbp;
		bio->bio_caller_info2.cluster_tail = tbp;
	} else {
		bio->bio_caller_info2.cluster_tail->b_cluster_next = tbp;
		bio->bio_caller_info2.cluster_tail = tbp;
	}
}
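/*
 * (The cluster list is a singly-linked chain through b_cluster_next;
 * keeping both head and tail in the bio's caller_info fields makes each
 * append O(1), while cluster_callback() consumes the chain from the
 * head.)
 */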