/*-
 * Copyright (c) 1993
 *      The Regents of the University of California.  All rights reserved.
 * Modifications/enhancements:
 *      Copyright (c) 1995 John S. Dyson.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)vfs_cluster.c       8.7 (Berkeley) 2/13/94
 * $FreeBSD: src/sys/kern/vfs_cluster.c,v 1.92.2.9 2001/11/18 07:10:59 dillon Exp $
 * $DragonFly: src/sys/kern/vfs_cluster.c,v 1.40 2008/07/14 03:09:00 dillon Exp $
 */

#include "opt_debug_cluster.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/resourcevar.h>
#include <sys/vmmeter.h>
#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <sys/sysctl.h>

#include <sys/buf2.h>
#include <vm/vm_page2.h>

#include <machine/limits.h>

#if defined(CLUSTERDEBUG)
static int      rcluster = 0;
SYSCTL_INT(_debug, OID_AUTO, rcluster, CTLFLAG_RW, &rcluster, 0, "");
#endif

static MALLOC_DEFINE(M_SEGMENT, "cluster_save", "cluster_save buffer");

static struct cluster_save *
        cluster_collectbufs (struct vnode *vp, struct buf *last_bp,
                            int blksize);
static struct buf *
        cluster_rbuild (struct vnode *vp, off_t filesize, off_t loffset,
                            off_t doffset, int blksize, int run,
                            struct buf *fbp);
static void cluster_callback (struct bio *);
static void cluster_setram (struct buf *);

static int write_behind = 1;
SYSCTL_INT(_vfs, OID_AUTO, write_behind, CTLFLAG_RW, &write_behind, 0,
    "Cluster write-behind setting");
static int max_readahead = 2 * 1024 * 1024;
SYSCTL_INT(_vfs, OID_AUTO, max_readahead, CTLFLAG_RW, &max_readahead, 0,
    "Limit in bytes for desired cluster read-ahead");

extern vm_page_t        bogus_page;

extern int cluster_pbuf_freecnt;

/*
 * This replaces bread.
 *
 * filesize     - read-ahead @ blksize will not cross this boundary
 * loffset      - loffset for returned *bpp
 * blksize      - blocksize for returned *bpp and read-ahead bps
 * minreq       - minimum (not a hard minimum) in bytes, typically reflects
 *                a higher level uio resid.
 * maxreq       - maximum (sequential heuristic) in bytes (highest typically
 *                ~2MB)
 * bpp          - return buffer (*bpp) for (loffset,blksize)
 */
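/*
 * Illustrative sketch only (hypothetical caller, not part of this file):
 * a filesystem read path might issue
 *
 *      struct buf *bp = NULL;
 *      error = cluster_readx(vp, filesize, loffset, blksize,
 *                            uio->uio_resid, seq_bytes, &bp);
 *
 * where seq_bytes is a sequential-access heuristic maintained by the
 * caller (the name is an assumption for illustration).
 */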
int
cluster_readx(struct vnode *vp, off_t filesize, off_t loffset,
             int blksize, size_t minreq, size_t maxreq, struct buf **bpp)
{
        struct buf *bp, *rbp, *reqbp;
        off_t origoffset;
        off_t doffset;
        int error;
        int i;
        int maxra;
        int maxrbuild;

        error = 0;

        /*
         * Calculate the desired read-ahead in blksize'd blocks (maxra).
         * To do this we calculate maxreq.
         *
         * maxreq typically starts out as a sequential heuristic.  If the
         * high level uio/resid is bigger (minreq), we pop maxreq up to
         * minreq.  This represents the case where random I/O is being
         * performed by userland issuing big read()'s.
         *
         * Then we limit maxreq to max_readahead to ensure it is a reasonable
         * value.
         *
         * Finally we must ensure that (loffset + maxreq) does not cross the
         * boundary (filesize) for the current blocksize.  If we allowed it
         * to cross we could end up with buffers past the boundary with the
         * wrong block size (HAMMER large-data areas use mixed block sizes).
         * minreq is also absolutely limited to filesize.
         */
        if (maxreq < minreq)
                maxreq = minreq;
        /* minreq not used beyond this point */

        if (maxreq > max_readahead) {
                maxreq = max_readahead;
                if (maxreq > 16 * 1024 * 1024)
                        maxreq = 16 * 1024 * 1024;
        }
        if (maxreq < blksize)
                maxreq = blksize;
        if (loffset + maxreq > filesize) {
                if (loffset > filesize)
                        maxreq = 0;
                else
                        maxreq = filesize - loffset;
        }

        maxra = (int)(maxreq / blksize);
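        /*
         * Worked example (illustrative numbers only): with blksize = 16384
         * and the default max_readahead of 2MB, a large sequential read
         * winds up with maxreq = 2097152 and thus maxra = 128 blocks of
         * read-ahead, truncated further if (loffset + maxreq) would cross
         * filesize.
         */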

        /*
         * Get the requested block.
         */
        if (*bpp)
                reqbp = bp = *bpp;
        else
                *bpp = reqbp = bp = getblk(vp, loffset, blksize, 0, 0);
        origoffset = loffset;

        /*
         * Calculate the maximum cluster size for a single I/O, used
         * by cluster_rbuild().
         */
        maxrbuild = vmaxiosize(vp) / blksize;

        /*
         * if it is in the cache, then check to see if the reads have been
         * sequential.  If they have, then try some read-ahead, otherwise
         * back-off on prospective read-aheads.
         */
        if (bp->b_flags & B_CACHE) {
                /*
                 * Not sequential, do not do any read-ahead
                 */
                if (maxra <= 1)
                        return 0;

                /*
                 * No read-ahead mark, do not do any read-ahead
                 * yet.
                 */
                if ((bp->b_flags & B_RAM) == 0)
                        return 0;

                /*
                 * We hit a read-ahead-mark, figure out how much read-ahead
                 * to do (maxra) and where to start (loffset).
                 *
                 * Shortcut the scan.  Typically the way this works is that
                 * we've built up all the blocks in between except for the
                 * last in previous iterations, so if the second-to-last
                 * block is present we just skip ahead to it.
                 *
                 * This algorithm has O(1) cpu in the steady state no
                 * matter how large maxra is.
                 */
                bp->b_flags &= ~B_RAM;

                if (findblk(vp, loffset + (maxra - 2) * blksize, FINDBLK_TEST))
                        i = maxra - 1;
                else
                        i = 1;
                while (i < maxra) {
                        if (findblk(vp, loffset + i * blksize,
                                    FINDBLK_TEST) == NULL) {
                                break;
                        }
                        ++i;
                }
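                /*
                 * e.g. (illustrative): with maxra = 128, if the block at
                 * index 126 is already resident the scan starts at
                 * i = 127, so steady-state sequential reads only probe a
                 * couple of buffers per call.
                 */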

                /*
                 * We got everything or everything is in the cache, no
                 * point continuing.
                 */
                if (i >= maxra)
                        return 0;

                /*
                 * Calculate where to start the read-ahead and how much
                 * to do.  Generally speaking we want to read-ahead by
                 * (maxra) when we've found a read-ahead mark.  We do
                 * not want to reduce maxra here as it will cause
                 * successive read-ahead I/O's to be smaller and smaller.
                 */
                loffset += i * blksize;
                reqbp = bp = NULL;
        } else {
                __debugvar off_t firstread = bp->b_loffset;
                int nblks;

                /*
                 * Set-up synchronous read for bp.
                 */
                bp->b_cmd = BUF_CMD_READ;
                bp->b_bio1.bio_done = biodone_sync;
                bp->b_bio1.bio_flags |= BIO_SYNC;

                KASSERT(firstread != NOOFFSET,
                        ("cluster_read: no buffer offset"));

                /*
                 * nblks is our cluster_rbuild request size, limited
                 * primarily by the device.
                 */
                if ((nblks = maxra) > maxrbuild)
                        nblks = maxrbuild;

                if (nblks > 1) {
                        int burstbytes;

                        error = VOP_BMAP(vp, loffset, &doffset,
                                         &burstbytes, NULL, BUF_CMD_READ);
                        if (error)
                                goto single_block_read;
                        if (nblks > burstbytes / blksize)
                                nblks = burstbytes / blksize;
                        if (doffset == NOOFFSET)
                                goto single_block_read;
                        if (nblks <= 1)
                                goto single_block_read;

                        bp = cluster_rbuild(vp, filesize, loffset,
                                            doffset, blksize, nblks, bp);
                        loffset += bp->b_bufsize;
                        maxra -= bp->b_bufsize / blksize;
                } else {
single_block_read:
                        /*
                         * If it isn't in the cache, then get a chunk from
                         * disk if sequential, otherwise just get the block.
                         */
                        cluster_setram(bp);
                        loffset += blksize;
                        --maxra;
                }
        }

        /*
         * If B_CACHE was not set issue bp.  bp will either be an
         * asynchronous cluster buf or a synchronous single-buf.
         * If it is a single buf it will be the same as reqbp.
         *
         * NOTE: Once an async cluster buf is issued bp becomes invalid.
         */
        if (bp) {
#if defined(CLUSTERDEBUG)
                if (rcluster)
                        kprintf("S(%012jx,%d,%d)\n",
                            (intmax_t)bp->b_loffset, bp->b_bcount, maxra);
#endif
                if ((bp->b_flags & B_CLUSTER) == 0)
                        vfs_busy_pages(vp, bp);
                bp->b_flags &= ~(B_ERROR|B_INVAL);
                vn_strategy(vp, &bp->b_bio1);
                error = 0;
                /* bp invalid now */
        }

        /*
         * If we have been doing sequential I/O, then do some read-ahead.
         * The code above us should have positioned us at the next likely
         * offset.
         *
         * Only mess with buffers which we can immediately lock.  HAMMER
         * will do device-readahead irrespective of what the blocks
         * represent.
         */
        while (error == 0 && maxra > 0) {
                int burstbytes;
                int tmp_error;
                int nblks;

                rbp = getblk(vp, loffset, blksize,
                             GETBLK_SZMATCH|GETBLK_NOWAIT, 0);
                if (rbp == NULL)
                        goto no_read_ahead;
                if ((rbp->b_flags & B_CACHE)) {
                        bqrelse(rbp);
                        goto no_read_ahead;
                }

                /*
                 * An error from the read-ahead bmap has nothing to do
                 * with the caller's original request.
                 */
                tmp_error = VOP_BMAP(vp, loffset, &doffset,
                                     &burstbytes, NULL, BUF_CMD_READ);
                if (tmp_error || doffset == NOOFFSET) {
                        rbp->b_flags |= B_INVAL;
                        brelse(rbp);
                        rbp = NULL;
                        goto no_read_ahead;
                }
                if ((nblks = maxra) > maxrbuild)
                        nblks = maxrbuild;
                if (nblks > burstbytes / blksize)
                        nblks = burstbytes / blksize;

                /*
                 * rbp: async read
                 */
                rbp->b_cmd = BUF_CMD_READ;
                /*rbp->b_flags |= B_AGE*/;
                cluster_setram(rbp);

                if (nblks > 1) {
                        rbp = cluster_rbuild(vp, filesize, loffset,
                                             doffset, blksize,
                                             nblks, rbp);
                } else {
                        rbp->b_bio2.bio_offset = doffset;
                }

#if defined(CLUSTERDEBUG)
                if (rcluster) {
                        if (bp) {
                                kprintf("A+(%012jx,%d,%jd) "
                                        "doff=%012jx minr=%zd ra=%d\n",
                                    (intmax_t)loffset, rbp->b_bcount,
                                    (intmax_t)(loffset - origoffset),
                                    (intmax_t)doffset, minreq, maxra);
                        } else {
                                kprintf("A-(%012jx,%d,%jd) "
                                        "doff=%012jx minr=%zd ra=%d\n",
                                    (intmax_t)rbp->b_loffset, rbp->b_bcount,
                                    (intmax_t)(loffset - origoffset),
                                    (intmax_t)doffset, minreq, maxra);
                        }
                }
#endif
                rbp->b_flags &= ~(B_ERROR|B_INVAL);

                if ((rbp->b_flags & B_CLUSTER) == 0)
                        vfs_busy_pages(vp, rbp);
                BUF_KERNPROC(rbp);
                loffset += rbp->b_bufsize;
                maxra -= rbp->b_bufsize / blksize;
                vn_strategy(vp, &rbp->b_bio1);
                /* rbp invalid now */
        }

        /*
         * Wait for our original buffer to complete its I/O.  reqbp will
         * be NULL if the original buffer was B_CACHE.  We are returning
         * (*bpp) which is the same as reqbp when reqbp != NULL.
         */
no_read_ahead:
        if (reqbp) {
                KKASSERT(reqbp->b_bio1.bio_flags & BIO_SYNC);
                error = biowait(&reqbp->b_bio1, "clurd");
        }
        return (error);
}

/*
 * If blocks are contiguous on disk, use this to provide clustered
 * read ahead.  We will read as many blocks as possible sequentially
 * and then parcel them up into logical blocks in the buffer hash table.
 *
 * This function either returns a cluster buf or it returns fbp.  fbp is
 * already expected to be set up as a synchronous or asynchronous request.
 *
 * If a cluster buf is returned it will always be async.
 */
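/*
 * Worked example (illustrative numbers only): with blksize = 16384 and
 * run = 8, the synthesized cluster buf covers 131072 bytes, backed by
 * 32 pages (assuming 4KB pages) borrowed from the component buffers,
 * and is issued as a single device I/O.
 */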
static struct buf *
cluster_rbuild(struct vnode *vp, off_t filesize, off_t loffset, off_t doffset,
               int blksize, int run, struct buf *fbp)
{
        struct buf *bp, *tbp;
        off_t boffset;
        int i, j;
        int maxiosize = vmaxiosize(vp);

        /*
         * avoid a division
         */
        while (loffset + run * blksize > filesize) {
                --run;
        }

        tbp = fbp;
        tbp->b_bio2.bio_offset = doffset;
        if ((tbp->b_flags & B_MALLOC) ||
            ((tbp->b_flags & B_VMIO) == 0) || (run <= 1)) {
                return tbp;
        }

        bp = trypbuf_kva(&cluster_pbuf_freecnt);
        if (bp == NULL) {
                return tbp;
        }

        /*
         * We are synthesizing a buffer out of vm_page_t's, but
         * if the block size is not page aligned then the starting
         * address may not be either.  Inherit the b_data offset
         * from the original buffer.
         */
        bp->b_data = (char *)((vm_offset_t)bp->b_data |
            ((vm_offset_t)tbp->b_data & PAGE_MASK));
        bp->b_flags |= B_CLUSTER | B_VMIO;
        bp->b_cmd = BUF_CMD_READ;
        bp->b_bio1.bio_done = cluster_callback;         /* default to async */
        bp->b_bio1.bio_caller_info1.cluster_head = NULL;
        bp->b_bio1.bio_caller_info2.cluster_tail = NULL;
        bp->b_loffset = loffset;
        bp->b_bio2.bio_offset = doffset;
        KASSERT(bp->b_loffset != NOOFFSET,
                ("cluster_rbuild: no buffer offset"));

        bp->b_bcount = 0;
        bp->b_bufsize = 0;
        bp->b_xio.xio_npages = 0;

        for (boffset = doffset, i = 0; i < run; ++i, boffset += blksize) {
                if (i) {
                        if ((bp->b_xio.xio_npages * PAGE_SIZE) +
                            round_page(blksize) > maxiosize) {
                                break;
                        }

                        /*
                         * Shortcut some checks and try to avoid buffers that
                         * would block in the lock.  The same checks have to
                         * be made again after we officially get the buffer.
                         */
                        tbp = getblk(vp, loffset + i * blksize, blksize,
                                     GETBLK_SZMATCH|GETBLK_NOWAIT, 0);
                        if (tbp == NULL)
                                break;
                        for (j = 0; j < tbp->b_xio.xio_npages; j++) {
                                if (tbp->b_xio.xio_pages[j]->valid)
                                        break;
                        }
                        if (j != tbp->b_xio.xio_npages) {
                                bqrelse(tbp);
                                break;
                        }

                        /*
                         * Stop scanning if the buffer is fully valid
                         * (marked B_CACHE), or locked (may be doing a
                         * background write), or if the buffer is not
                         * VMIO backed.  The clustering code can only deal
                         * with VMIO-backed buffers.
                         */
                        if ((tbp->b_flags & (B_CACHE|B_LOCKED)) ||
                            (tbp->b_flags & B_VMIO) == 0 ||
                            (LIST_FIRST(&tbp->b_dep) != NULL &&
                             buf_checkread(tbp))
                        ) {
                                bqrelse(tbp);
                                break;
                        }

                        /*
                         * The buffer must be completely invalid in order to
                         * take part in the cluster.  If it is partially valid
                         * then we stop.
                         */
                        for (j = 0; j < tbp->b_xio.xio_npages; j++) {
                                if (tbp->b_xio.xio_pages[j]->valid)
                                        break;
                        }
                        if (j != tbp->b_xio.xio_npages) {
                                bqrelse(tbp);
                                break;
                        }

                        /*
                         * Set a read-ahead mark as appropriate.  Always
                         * set the read-ahead mark at (run - 1).  It is
                         * unclear why we were also setting it at i == 1.
                         */
                        if (/*i == 1 ||*/ i == (run - 1))
                                cluster_setram(tbp);

                        /*
                         * Depress the priority of buffers not explicitly
                         * requested.
                         */
                        /* tbp->b_flags |= B_AGE; */

                        /*
                         * Set the block number if it isn't set, otherwise
                         * if it is make sure it matches the block number we
                         * expect.
                         */
                        if (tbp->b_bio2.bio_offset == NOOFFSET) {
                                tbp->b_bio2.bio_offset = boffset;
                        } else if (tbp->b_bio2.bio_offset != boffset) {
                                brelse(tbp);
                                break;
                        }
                }

                /*
                 * The passed-in tbp (i == 0) will already be set up for
                 * async or sync operation.  All other tbp's acquired in
                 * our loop are set up for async operation.
                 */
                tbp->b_cmd = BUF_CMD_READ;
                BUF_KERNPROC(tbp);
                cluster_append(&bp->b_bio1, tbp);
                for (j = 0; j < tbp->b_xio.xio_npages; ++j) {
                        vm_page_t m;

                        m = tbp->b_xio.xio_pages[j];
                        vm_page_busy_wait(m, FALSE, "clurpg");
                        vm_page_io_start(m);
                        vm_page_wakeup(m);
                        vm_object_pip_add(m->object, 1);
                        if ((bp->b_xio.xio_npages == 0) ||
                                (bp->b_xio.xio_pages[bp->b_xio.xio_npages-1] != m)) {
                                bp->b_xio.xio_pages[bp->b_xio.xio_npages] = m;
                                bp->b_xio.xio_npages++;
                        }
                        if ((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL)
                                tbp->b_xio.xio_pages[j] = bogus_page;
                }
                /*
                 * XXX shouldn't this be += blksize for both, like in
                 * cluster_wbuild()?
                 *
                 * Don't inherit tbp->b_bufsize as it may be larger due to
                 * a non-page-aligned size.  Instead just aggregate using
                 * blksize.
                 */
                if (tbp->b_bcount != blksize)
                    kprintf("warning: tbp->b_bcount wrong %d vs %d\n", tbp->b_bcount, blksize);
                if (tbp->b_bufsize != blksize)
                    kprintf("warning: tbp->b_bufsize wrong %d vs %d\n", tbp->b_bufsize, blksize);
                bp->b_bcount += blksize;
                bp->b_bufsize += blksize;
        }

        /*
         * Fully valid pages in the cluster are already good and do not need
         * to be re-read from disk.  Replace the page with bogus_page
         */
        for (j = 0; j < bp->b_xio.xio_npages; j++) {
                if ((bp->b_xio.xio_pages[j]->valid & VM_PAGE_BITS_ALL) ==
                    VM_PAGE_BITS_ALL) {
                        bp->b_xio.xio_pages[j] = bogus_page;
                }
        }
        if (bp->b_bufsize > bp->b_kvasize) {
                panic("cluster_rbuild: b_bufsize(%d) > b_kvasize(%d)",
                    bp->b_bufsize, bp->b_kvasize);
        }
        pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
                (vm_page_t *)bp->b_xio.xio_pages, bp->b_xio.xio_npages);
        BUF_KERNPROC(bp);
        return (bp);
}

/*
 * Cleanup after a clustered read or write.
 * This is complicated by the fact that any of the buffers might have
 * extra memory (if there were no empty buffer headers at allocbuf time)
 * that we will need to shift around.
 *
 * The returned bio is &bp->b_bio1
 */
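/*
 * Note (illustrative): no data copying occurs here.  The component
 * buffers' pages are the very pages that were mapped into the cluster
 * pbuf, so completion consists of unmapping the pbuf KVA and running
 * biodone() on each buffer chained via cluster_head.
 */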
void
cluster_callback(struct bio *bio)
{
        struct buf *bp = bio->bio_buf;
        struct buf *tbp;
        int error = 0;

        /*
         * Must propagate errors to all the components.  A short read (EOF)
         * is a critical error.
         */
        if (bp->b_flags & B_ERROR) {
                error = bp->b_error;
        } else if (bp->b_bcount != bp->b_bufsize) {
                panic("cluster_callback: unexpected EOF on cluster %p!", bio);
        }

        pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_xio.xio_npages);
        /*
         * Move memory from the large cluster buffer into the component
         * buffers and mark IO as done on these.  Since the memory map
         * is the same, no actual copying is required.
         */
        while ((tbp = bio->bio_caller_info1.cluster_head) != NULL) {
                bio->bio_caller_info1.cluster_head = tbp->b_cluster_next;
                if (error) {
                        tbp->b_flags |= B_ERROR | B_IODEBUG;
                        tbp->b_error = error;
                } else {
                        tbp->b_dirtyoff = tbp->b_dirtyend = 0;
                        tbp->b_flags &= ~(B_ERROR|B_INVAL);
                        tbp->b_flags |= B_IODEBUG;
                        /*
                         * XXX the bdwrite()/bqrelse() issued during
                         * cluster building clears B_RELBUF (see bqrelse()
                         * comment).  If direct I/O was specified, we have
                         * to restore it here to allow the buffer and VM
                         * to be freed.
                         */
                        if (tbp->b_flags & B_DIRECT)
                                tbp->b_flags |= B_RELBUF;
                }
                biodone(&tbp->b_bio1);
        }
        relpbuf(bp, &cluster_pbuf_freecnt);
}

/*
 *      cluster_wbuild_wb:
 *
 *      Implement modified write build for cluster.
 *
 *              write_behind = 0        write behind disabled
 *              write_behind = 1        write behind normal (default)
 *              write_behind = 2        write behind backed-off
 */

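/*
 * e.g. (illustrative): with write_behind = 2 and len = 65536, a call at
 * start_loffset = 1MB first backs off by len, so the 64KB ending at 1MB
 * is what actually gets flushed; with write_behind = 0 nothing is
 * written and 0 is returned.
 */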
static __inline int
cluster_wbuild_wb(struct vnode *vp, int blksize, off_t start_loffset, int len)
{
        int r = 0;

        switch(write_behind) {
        case 2:
                if (start_loffset < len)
                        break;
                start_loffset -= len;
                /* fall through */
        case 1:
                r = cluster_wbuild(vp, blksize, start_loffset, len);
                /* fall through */
        default:
                /* fall through */
                break;
        }
        return(r);
}

/*
 * Do clustered write for FFS.
 *
 * Four cases:
 *      1. Write is not sequential (write asynchronously)
 *      Write is sequential:
 *      2.      beginning of cluster - begin cluster
 *      3.      middle of a cluster - add to cluster
 *      4.      end of a cluster - asynchronously write cluster
 */
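/*
 * Illustrative walk-through (hypothetical numbers): with blksize = 16384,
 * sequential writes at loffsets 0, 16384, 32768, ... are delayed via
 * bdwrite() while v_lastw advances from v_cstart.  Once loffset reaches
 * v_cstart + v_clen the whole run is pushed as one clustered I/O by
 * cluster_wbuild_wb().
 */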
void
cluster_write(struct buf *bp, off_t filesize, int blksize, int seqcount)
{
        struct vnode *vp;
        off_t loffset;
        int maxclen, cursize;
        int async;

        vp = bp->b_vp;
        if (vp->v_type == VREG)
                async = vp->v_mount->mnt_flag & MNT_ASYNC;
        else
                async = 0;
        loffset = bp->b_loffset;
        KASSERT(bp->b_loffset != NOOFFSET,
                ("cluster_write: no buffer offset"));

        /* Initialize vnode to beginning of file. */
        if (loffset == 0)
                vp->v_lasta = vp->v_clen = vp->v_cstart = vp->v_lastw = 0;

        if (vp->v_clen == 0 || loffset != vp->v_lastw + blksize ||
            bp->b_bio2.bio_offset == NOOFFSET ||
            (bp->b_bio2.bio_offset != vp->v_lasta + blksize)) {
                maxclen = vmaxiosize(vp);
                if (vp->v_clen != 0) {
                        /*
                         * Next block is not sequential.
                         *
                         * If we are not writing at the end of file, the
                         * process has seeked to another point in the file
                         * since its last write, or we have reached our
                         * maximum cluster size, then push the previous
                         * cluster.  Otherwise try reallocating to make it
                         * sequential.
                         *
                         * Change to the algorithm: only push the previous
                         * cluster if it was sequential from the point of
                         * view of the seqcount heuristic, otherwise leave
                         * the buffer intact so we can potentially optimize
                         * the I/O later on in the buf_daemon or update
                         * daemon flush.
                         */
                        cursize = vp->v_lastw - vp->v_cstart + blksize;
                        if (bp->b_loffset + blksize != filesize ||
                            loffset != vp->v_lastw + blksize || vp->v_clen <= cursize) {
                                if (!async && seqcount > 0) {
                                        cluster_wbuild_wb(vp, blksize,
                                                vp->v_cstart, cursize);
                                }
                        } else {
                                struct buf **bpp, **endbp;
                                struct cluster_save *buflist;

                                buflist = cluster_collectbufs(vp, bp, blksize);
                                endbp = &buflist->bs_children
                                    [buflist->bs_nchildren - 1];
                                if (VOP_REALLOCBLKS(vp, buflist)) {
                                        /*
                                         * Failed, push the previous cluster
                                         * if *really* writing sequentially
                                         * in the logical file (seqcount > 1),
                                         * otherwise delay it in the hopes that
                                         * the low level disk driver can
                                         * optimize the write ordering.
                                         */
                                        for (bpp = buflist->bs_children;
                                             bpp < endbp; bpp++)
                                                brelse(*bpp);
                                        kfree(buflist, M_SEGMENT);
                                        if (seqcount > 1) {
                                                cluster_wbuild_wb(vp,
                                                    blksize, vp->v_cstart,
                                                    cursize);
                                        }
                                } else {
                                        /*
                                         * Succeeded, keep building cluster.
                                         */
                                        for (bpp = buflist->bs_children;
                                             bpp <= endbp; bpp++)
                                                bdwrite(*bpp);
                                        kfree(buflist, M_SEGMENT);
                                        vp->v_lastw = loffset;
                                        vp->v_lasta = bp->b_bio2.bio_offset;
                                        return;
                                }
                        }
                }
                /*
                 * Consider beginning a cluster. If at end of file, make
                 * cluster as large as possible, otherwise find size of
                 * existing cluster.
                 */
                if ((vp->v_type == VREG) &&
                    bp->b_loffset + blksize != filesize &&
                    (bp->b_bio2.bio_offset == NOOFFSET) &&
                    (VOP_BMAP(vp, loffset, &bp->b_bio2.bio_offset, &maxclen, NULL, BUF_CMD_WRITE) ||
                     bp->b_bio2.bio_offset == NOOFFSET)) {
                        bawrite(bp);
                        vp->v_clen = 0;
                        vp->v_lasta = bp->b_bio2.bio_offset;
                        vp->v_cstart = loffset + blksize;
                        vp->v_lastw = loffset;
                        return;
                }
                if (maxclen > blksize)
                        vp->v_clen = maxclen - blksize;
                else
                        vp->v_clen = 0;
                if (!async && vp->v_clen == 0) { /* I/O not contiguous */
                        vp->v_cstart = loffset + blksize;
                        bawrite(bp);
                } else {        /* Wait for rest of cluster */
                        vp->v_cstart = loffset;
                        bdwrite(bp);
                }
        } else if (loffset == vp->v_cstart + vp->v_clen) {
                /*
                 * At end of cluster, write it out if seqcount tells us we
                 * are operating sequentially, otherwise let the buf or
                 * update daemon handle it.
                 */
                bdwrite(bp);
                if (seqcount > 1)
                        cluster_wbuild_wb(vp, blksize, vp->v_cstart,
                                          vp->v_clen + blksize);
                vp->v_clen = 0;
                vp->v_cstart = loffset + blksize;
        } else if (vm_page_count_severe()) {
                /*
                 * We are low on memory, get it going NOW
                 */
                bawrite(bp);
        } else {
                /*
                 * In the middle of a cluster, so just delay the I/O for now.
                 */
                bdwrite(bp);
        }
        vp->v_lastw = loffset;
        vp->v_lasta = bp->b_bio2.bio_offset;
}


/*
 * This is an awful lot like cluster_rbuild...wish they could be combined.
 *
 * Scan the range [start_loffset, start_loffset + bytes) for dirty
 * delayed-write buffers and push them out in clustered I/Os of up to
 * vmaxiosize(vp) bytes each.
 */
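/*
 * e.g. (illustrative, assuming vmaxiosize(vp) permits): eight contiguous
 * dirty 16KB delayed-write buffers starting at start_loffset can be
 * collected into a single 128KB pbuf and written with one vn_strategy()
 * call.
 */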
int
cluster_wbuild(struct vnode *vp, int blksize, off_t start_loffset, int bytes)
{
        struct buf *bp, *tbp;
        int i, j;
        int totalwritten = 0;
        int maxiosize = vmaxiosize(vp);

        while (bytes > 0) {
                /*
                 * If the buffer is not delayed-write (i.e. dirty), or it
                 * is delayed-write but either locked or inval, it cannot
                 * partake in the clustered write.
                 */
                tbp = findblk(vp, start_loffset, FINDBLK_NBLOCK);
                if (tbp == NULL ||
                    (tbp->b_flags & (B_LOCKED | B_INVAL | B_DELWRI)) != B_DELWRI ||
                    (LIST_FIRST(&tbp->b_dep) && buf_checkwrite(tbp))) {
                        if (tbp)
                                BUF_UNLOCK(tbp);
                        start_loffset += blksize;
                        bytes -= blksize;
                        continue;
                }
                bremfree(tbp);
                KKASSERT(tbp->b_cmd == BUF_CMD_DONE);

                /*
                 * Extra memory in the buffer, punt on this buffer.
                 * XXX we could handle this in most cases, but we would
                 * have to push the extra memory down to after our max
                 * possible cluster size and then potentially pull it back
                 * up if the cluster was terminated prematurely--too much
                 * hassle.
                 */
                if (((tbp->b_flags & (B_CLUSTEROK|B_MALLOC)) != B_CLUSTEROK) ||
                    (tbp->b_bcount != tbp->b_bufsize) ||
                    (tbp->b_bcount != blksize) ||
                    (bytes == blksize) ||
                    ((bp = getpbuf_kva(&cluster_pbuf_freecnt)) == NULL)) {
                        totalwritten += tbp->b_bufsize;
                        bawrite(tbp);
                        start_loffset += blksize;
                        bytes -= blksize;
                        continue;
                }

                /*
                 * Set up the pbuf.  Track our append point with b_bcount
                 * and b_bufsize.  b_bufsize is not used by the device but
                 * our caller uses it to loop clusters and we use it to
                 * detect a premature EOF on the block device.
                 */
                bp->b_bcount = 0;
                bp->b_bufsize = 0;
                bp->b_xio.xio_npages = 0;
                bp->b_loffset = tbp->b_loffset;
                bp->b_bio2.bio_offset = tbp->b_bio2.bio_offset;

                /*
                 * We are synthesizing a buffer out of vm_page_t's, but
                 * if the block size is not page aligned then the starting
                 * address may not be either.  Inherit the b_data offset
                 * from the original buffer.
                 */
                bp->b_data = (char *)((vm_offset_t)bp->b_data |
                    ((vm_offset_t)tbp->b_data & PAGE_MASK));
                bp->b_flags &= ~B_ERROR;
                bp->b_flags |= B_CLUSTER | B_BNOCLIP |
                        (tbp->b_flags & (B_VMIO | B_NEEDCOMMIT));
                bp->b_bio1.bio_caller_info1.cluster_head = NULL;
                bp->b_bio1.bio_caller_info2.cluster_tail = NULL;

                /*
                 * From this location in the file, scan forward to see
                 * if there are buffers with adjacent data that need to
                 * be written as well.
                 */
                for (i = 0; i < bytes; (i += blksize), (start_loffset += blksize)) {
                        if (i != 0) { /* If not the first buffer */
                                tbp = findblk(vp, start_loffset,
                                              FINDBLK_NBLOCK);
                                /*
                                 * Buffer not found or could not be locked
                                 * non-blocking.
                                 */
                                if (tbp == NULL)
                                        break;

                                /*
                                 * If it IS in core, but has different
                                 * characteristics, then don't cluster
                                 * with it.
                                 */
                                if ((tbp->b_flags & (B_VMIO | B_CLUSTEROK |
                                     B_INVAL | B_DELWRI | B_NEEDCOMMIT))
                                    != (B_DELWRI | B_CLUSTEROK |
                                     (bp->b_flags & (B_VMIO | B_NEEDCOMMIT))) ||
                                    (tbp->b_flags & B_LOCKED) ||
                                    (LIST_FIRST(&tbp->b_dep) &&
                                     buf_checkwrite(tbp))
                                ) {
                                        BUF_UNLOCK(tbp);
                                        break;
                                }

                                /*
                                 * Check that the combined cluster
                                 * would make sense with regard to pages
                                 * and would not be too large
                                 */
                                if ((tbp->b_bcount != blksize) ||
                                  ((bp->b_bio2.bio_offset + i) !=
                                    tbp->b_bio2.bio_offset) ||
                                  ((tbp->b_xio.xio_npages + bp->b_xio.xio_npages) >
                                    (maxiosize / PAGE_SIZE))) {
                                        BUF_UNLOCK(tbp);
                                        break;
                                }
                                /*
                                 * Ok, it's passed all the tests,
                                 * so remove it from the free list
                                 * and mark it busy. We will use it.
                                 */
                                bremfree(tbp);
                                KKASSERT(tbp->b_cmd == BUF_CMD_DONE);
                        } /* end of code for non-first buffers only */

                        /*
                         * If the IO is via the VM then we do some
                         * special VM hackery (yuck).  Since the buffer's
                         * block size may not be page-aligned it is possible
                         * for a page to be shared between two buffers.  We
                         * have to get rid of the duplication when building
                         * the cluster.
                         */
                        if (tbp->b_flags & B_VMIO) {
                                vm_page_t m;

                                if (i != 0) { /* if not first buffer */
                                        for (j = 0; j < tbp->b_xio.xio_npages; ++j) {
                                                m = tbp->b_xio.xio_pages[j];
                                                if (m->flags & PG_BUSY) {
                                                        bqrelse(tbp);
                                                        goto finishcluster;
                                                }
                                        }
                                }

                                for (j = 0; j < tbp->b_xio.xio_npages; ++j) {
                                        m = tbp->b_xio.xio_pages[j];
                                        vm_page_busy_wait(m, FALSE, "clurpg");
                                        vm_page_io_start(m);
                                        vm_page_wakeup(m);
                                        vm_object_pip_add(m->object, 1);
                                        if ((bp->b_xio.xio_npages == 0) ||
                                          (bp->b_xio.xio_pages[bp->b_xio.xio_npages - 1] != m)) {
                                                bp->b_xio.xio_pages[bp->b_xio.xio_npages] = m;
                                                bp->b_xio.xio_npages++;
                                        }
                                }
                        }
                        bp->b_bcount += blksize;
                        bp->b_bufsize += blksize;

                        bundirty(tbp);
                        tbp->b_flags &= ~B_ERROR;
                        tbp->b_cmd = BUF_CMD_WRITE;
                        BUF_KERNPROC(tbp);
                        cluster_append(&bp->b_bio1, tbp);

                        /*
                         * check for latent dependencies to be handled
                         */
                        if (LIST_FIRST(&tbp->b_dep) != NULL)
                                buf_start(tbp);
                }
        finishcluster:
                pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
                        (vm_page_t *) bp->b_xio.xio_pages, bp->b_xio.xio_npages);
                if (bp->b_bufsize > bp->b_kvasize) {
                        panic(
                            "cluster_wbuild: b_bufsize(%d) > b_kvasize(%d)\n",
                            bp->b_bufsize, bp->b_kvasize);
                }
                totalwritten += bp->b_bufsize;
                bp->b_dirtyoff = 0;
                bp->b_dirtyend = bp->b_bufsize;
                bp->b_bio1.bio_done = cluster_callback;
                bp->b_cmd = BUF_CMD_WRITE;

                vfs_busy_pages(vp, bp);
                bsetrunningbufspace(bp, bp->b_bufsize);
                BUF_KERNPROC(bp);
                vn_strategy(vp, &bp->b_bio1);

                bytes -= i;
        }
        return totalwritten;
}

/*
 * Collect together all the buffers in a cluster, plus add one
 * additional buffer passed in by the caller (last_bp).
 */
static struct cluster_save *
cluster_collectbufs(struct vnode *vp, struct buf *last_bp, int blksize)
{
        struct cluster_save *buflist;
        struct buf *bp;
        off_t loffset;
        int i, len;

        len = (int)(vp->v_lastw - vp->v_cstart + blksize) / blksize;
        buflist = kmalloc(sizeof(struct buf *) * (len + 1) + sizeof(*buflist),
                         M_SEGMENT, M_WAITOK);
        buflist->bs_nchildren = 0;
        buflist->bs_children = (struct buf **) (buflist + 1);
        for (loffset = vp->v_cstart, i = 0; i < len; (loffset += blksize), i++) {
                (void) bread(vp, loffset, last_bp->b_bcount, &bp);
                buflist->bs_children[i] = bp;
                if (bp->b_bio2.bio_offset == NOOFFSET) {
                        VOP_BMAP(bp->b_vp, bp->b_loffset,
                                 &bp->b_bio2.bio_offset,
                                 NULL, NULL, BUF_CMD_WRITE);
                }
        }
        buflist->bs_children[i] = bp = last_bp;
        if (bp->b_bio2.bio_offset == NOOFFSET) {
                VOP_BMAP(bp->b_vp, bp->b_loffset, &bp->b_bio2.bio_offset,
                         NULL, NULL, BUF_CMD_WRITE);
        }
        buflist->bs_nchildren = i + 1;
        return (buflist);
}

void
cluster_append(struct bio *bio, struct buf *tbp)
{
        tbp->b_cluster_next = NULL;
        if (bio->bio_caller_info1.cluster_head == NULL) {
                bio->bio_caller_info1.cluster_head = tbp;
                bio->bio_caller_info2.cluster_tail = tbp;
        } else {
                bio->bio_caller_info2.cluster_tail->b_cluster_next = tbp;
                bio->bio_caller_info2.cluster_tail = tbp;
        }
}

static
void
cluster_setram(struct buf *bp)
{
        bp->b_flags |= B_RAM;
        if (bp->b_xio.xio_npages)
                vm_page_flag_set(bp->b_xio.xio_pages[0], PG_RAM);
}