kernel - Cluster fixes + Enable clustering for HAMMER1
[dragonfly.git] / sys / kern / vfs_cluster.c
1 /*-
2  * Copyright (c) 1993
3  *      The Regents of the University of California.  All rights reserved.
4  * Modifications/enhancements:
5  *      Copyright (c) 1995 John S. Dyson.  All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. All advertising materials mentioning features or use of this software
16  *    must display the following acknowledgement:
17  *      This product includes software developed by the University of
18  *      California, Berkeley and its contributors.
19  * 4. Neither the name of the University nor the names of its contributors
20  *    may be used to endorse or promote products derived from this software
21  *    without specific prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
24  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
27  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33  * SUCH DAMAGE.
34  *
35  *      @(#)vfs_cluster.c       8.7 (Berkeley) 2/13/94
36  * $FreeBSD: src/sys/kern/vfs_cluster.c,v 1.92.2.9 2001/11/18 07:10:59 dillon Exp $
37  * $DragonFly: src/sys/kern/vfs_cluster.c,v 1.40 2008/07/14 03:09:00 dillon Exp $
38  */
39
40 #include "opt_debug_cluster.h"
41
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/kernel.h>
45 #include <sys/proc.h>
46 #include <sys/buf.h>
47 #include <sys/vnode.h>
48 #include <sys/malloc.h>
49 #include <sys/mount.h>
50 #include <sys/resourcevar.h>
51 #include <sys/vmmeter.h>
52 #include <vm/vm.h>
53 #include <vm/vm_object.h>
54 #include <vm/vm_page.h>
55 #include <sys/sysctl.h>
56
57 #include <sys/buf2.h>
58 #include <vm/vm_page2.h>
59
60 #include <machine/limits.h>
61
62 #if defined(CLUSTERDEBUG)
63 #include <sys/sysctl.h>
64 static int      rcluster = 0;
65 SYSCTL_INT(_debug, OID_AUTO, rcluster, CTLFLAG_RW, &rcluster, 0, "");
66 #endif
67
68 static MALLOC_DEFINE(M_SEGMENT, "cluster_save", "cluster_save buffer");
69
70 static struct cluster_save *
71         cluster_collectbufs (struct vnode *vp, struct buf *last_bp,
72                             int blksize);
73 static struct buf *
74         cluster_rbuild (struct vnode *vp, off_t filesize, off_t loffset,
75                             off_t doffset, int blksize, int run, 
76                             struct buf *fbp);
77 static void cluster_callback (struct bio *);
78 static void cluster_setram (struct buf *);
79 static int cluster_wbuild(struct vnode *vp, struct buf **bpp, int blksize,
80                             off_t start_loffset, int bytes);
81
82 static int write_behind = 1;
83 SYSCTL_INT(_vfs, OID_AUTO, write_behind, CTLFLAG_RW, &write_behind, 0,
84     "Cluster write-behind setting");
85 static int max_readahead = 2 * 1024 * 1024;
86 SYSCTL_INT(_vfs, OID_AUTO, max_readahead, CTLFLAG_RW, &max_readahead, 0,
87     "Limit in bytes for desired cluster read-ahead");
88
89 extern vm_page_t        bogus_page;
90
91 extern int cluster_pbuf_freecnt;
92
93 /*
94  * This replaces bread.
95  *
96  * filesize     - read-ahead @ blksize will not cross this boundary
97  * loffset      - loffset for returned *bpp
98  * blksize      - blocksize for returned *bpp and read-ahead bps
99  * minreq       - minimum (not a hard minimum) in bytes, typically reflects
100  *                a higher level uio resid.
101  * maxreq       - maximum (sequential heuristic) in bytes (highest typical ~2MB)
102  * bpp          - return buffer (*bpp) for (loffset,blksize)
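 *
 * Example (a sketch only; the caller-side names are hypothetical):
 * a filesystem read path that formerly used bread() might issue
 *
 *      struct buf *bp = NULL;
 *      error = cluster_readx(vp, filesize, loffset, blksize,
 *                            resid, seqcount * MAXBSIZE, &bp);
 *
 * where 'resid' is the remaining uio byte count (minreq) and the
 * maxreq heuristic scales with the detected sequential behavior.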
103  */
104 int
105 cluster_readx(struct vnode *vp, off_t filesize, off_t loffset,
106              int blksize, size_t minreq, size_t maxreq, struct buf **bpp)
107 {
108         struct buf *bp, *rbp, *reqbp;
109         off_t origoffset;
110         off_t doffset;
111         int error;
112         int i;
113         int maxra;
114         int maxrbuild;
115
116         error = 0;
117
118         /*
119          * Calculate the desired read-ahead in blksize'd blocks (maxra).
120          * To do this we calculate maxreq.
121          *
122          * maxreq typically starts out as a sequential heuristic.  If the
123          * high level uio/resid is bigger (minreq), we pop maxreq up to
124          * minreq.  This represents the case where random I/O is being
125          * performed by userland issuing big read()'s.
126          *
127          * Then we limit maxreq to max_readahead to ensure it is a reasonable
128          * value.
129          *
130          * Finally we must ensure that (loffset + maxreq) does not cross the
131          * boundary (filesize) for the current blocksize.  If we allowed it
132          * to cross we could end up with buffers past the boundary with the
133          * wrong block size (HAMMER large-data areas use mixed block sizes).
134          * minreq is also absolutely limited to filesize.
135          */
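        /*
         * Illustrative numbers only (not taken from any particular
         * filesystem): with blksize = 16K, a sequential heuristic of
         * maxreq = 512K and a uio resid of minreq = 1M, maxreq is
         * first popped up to 1M, survives the default 2M
         * vfs.max_readahead clamp, and yields maxra = 1M / 16K = 64
         * read-ahead blocks, subject to the filesize boundary checks
         * below.
         */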
136         if (maxreq < minreq)
137                 maxreq = minreq;
138         /* minreq not used beyond this point, except for debug output */
139
140         if (maxreq > max_readahead) {
141                 maxreq = max_readahead;
142                 if (maxreq > 16 * 1024 * 1024)
143                         maxreq = 16 * 1024 * 1024;
144         }
145         if (maxreq < blksize)
146                 maxreq = blksize;
147         if (loffset + maxreq > filesize) {
148                 if (loffset > filesize)
149                         maxreq = 0;
150                 else
151                         maxreq = filesize - loffset;
152         }
153
154         maxra = (int)(maxreq / blksize);
155
156         /*
157          * Get the requested block.
158          */
159         if (*bpp)
160                 reqbp = bp = *bpp;
161         else
162                 *bpp = reqbp = bp = getblk(vp, loffset, blksize, 0, 0);
163         origoffset = loffset;
164
165         /*
166          * Calculate the maximum cluster size for a single I/O, used
167          * by cluster_rbuild().
168          */
169         maxrbuild = vmaxiosize(vp) / blksize;
170
171         /*
172          * if it is in the cache, then check to see if the reads have been
173          * sequential.  If they have, then try some read-ahead, otherwise
174          * back-off on prospective read-aheads.
175          */
176         if (bp->b_flags & B_CACHE) {
177                 /*
178                  * Not sequential, do not do any read-ahead
179                  */
180                 if (maxra <= 1)
181                         return 0;
182
183                 /*
184                  * No read-ahead mark, do not do any read-ahead
185                  * yet.
186                  */
187                 if ((bp->b_flags & B_RAM) == 0)
188                         return 0;
189
190                 /*
191                  * We hit a read-ahead-mark, figure out how much read-ahead
192                  * to do (maxra) and where to start (loffset).
193                  *
194                  * Shortcut the scan.  Typically the way this works is that
195                  * we've built up all the blocks in between except for the
196                  * last in previous iterations, so if the second-to-last
197                  * block is present we just skip ahead to it.
198                  *
199                  * This algorithm has O(1) cpu in the steady state no
200                  * matter how large maxra is.
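                 *
                 * e.g. (illustrative): with maxra = 8 and the blocks
                 * in between already present from prior iterations,
                 * the findblk() probe at index maxra - 2 = 6 hits and
                 * the scan starts at i = 7 instead of walking every
                 * block from i = 1.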
201                  */
202                 bp->b_flags &= ~B_RAM;
203
204                 if (findblk(vp, loffset + (maxra - 2) * blksize, FINDBLK_TEST))
205                         i = maxra - 1;
206                 else
207                         i = 1;
208                 while (i < maxra) {
209                         if (findblk(vp, loffset + i * blksize,
210                                     FINDBLK_TEST) == NULL) {
211                                 break;
212                         }
213                         ++i;
214                 }
215
216                 /*
217                  * We got everything or everything is in the cache, no
218                  * point continuing.
219                  */
220                 if (i >= maxra)
221                         return 0;
222                 maxra -= i;
223                 loffset += i * blksize;
224                 reqbp = bp = NULL;
225         } else {
226                 __debugvar off_t firstread = bp->b_loffset;
227                 int nblks;
228
229                 /*
230                  * Set-up synchronous read for bp.
231                  */
232                 bp->b_cmd = BUF_CMD_READ;
233                 bp->b_bio1.bio_done = biodone_sync;
234                 bp->b_bio1.bio_flags |= BIO_SYNC;
235
236                 KASSERT(firstread != NOOFFSET, 
237                         ("cluster_read: no buffer offset"));
238
239                 /*
240                  * nblks is our cluster_rbuild request size, limited
241                  * primarily by the device.
242                  */
243                 if ((nblks = maxra) > maxrbuild)
244                         nblks = maxrbuild;
245
246                 if (nblks > 1) {
247                         int burstbytes;
248
249                         error = VOP_BMAP(vp, loffset, &doffset,
250                                          &burstbytes, NULL, BUF_CMD_READ);
251                         if (error)
252                                 goto single_block_read;
253                         if (nblks > burstbytes / blksize)
254                                 nblks = burstbytes / blksize;
255                         if (doffset == NOOFFSET)
256                                 goto single_block_read;
257                         if (nblks <= 1)
258                                 goto single_block_read;
259
260                         bp = cluster_rbuild(vp, filesize, loffset,
261                                             doffset, blksize, nblks, bp);
262                         loffset += bp->b_bufsize;
263                         maxra -= bp->b_bufsize / blksize;
264                 } else {
265 single_block_read:
266                         /*
267                          * If it isn't in the cache, then get a chunk from
268                          * disk if sequential, otherwise just get the block.
269                          */
270                         cluster_setram(bp);
271                         loffset += blksize;
272                         --maxra;
273                 }
274         }
275
276         /*
277          * If B_CACHE was not set issue bp.  bp will either be an
278          * asynchronous cluster buf or a synchronous single-buf.
279          * If it is a single buf it will be the same as reqbp.
280          *
281          * NOTE: Once an async cluster buf is issued bp becomes invalid.
282          */
283         if (bp) {
284 #if defined(CLUSTERDEBUG)
285                 if (rcluster)
286                         kprintf("S(%012jx,%d,%d)\n",
287                             (intmax_t)bp->b_loffset, bp->b_bcount, maxra);
288 #endif
289                 if ((bp->b_flags & B_CLUSTER) == 0)
290                         vfs_busy_pages(vp, bp);
291                 bp->b_flags &= ~(B_ERROR|B_INVAL);
292                 vn_strategy(vp, &bp->b_bio1);
293                 error = 0;
294                 /* bp invalid now */
295         }
296
297         /*
298          * If we have been doing sequential I/O, then do some read-ahead.
299          * The code above us should have positioned us at the next likely
300          * offset.
301          *
302          * Only mess with buffers which we can immediately lock.  HAMMER
303          * will do device-readahead irrespective of what the blocks
304          * represent.
305          */
306         while (error == 0 && maxra > 0) {
307                 int burstbytes;
308                 int tmp_error;
309                 int nblks;
310
311                 rbp = getblk(vp, loffset, blksize,
312                              GETBLK_SZMATCH|GETBLK_NOWAIT, 0);
313                 if (rbp == NULL)
314                         goto no_read_ahead;
315                 if ((rbp->b_flags & B_CACHE)) {
316                         bqrelse(rbp);
317                         goto no_read_ahead;
318                 }
319
320                 /*
321                  * An error from the read-ahead bmap has nothing to do
322                  * with the caller's original request.
323                  */
324                 tmp_error = VOP_BMAP(vp, loffset, &doffset,
325                                      &burstbytes, NULL, BUF_CMD_READ);
326                 if (tmp_error || doffset == NOOFFSET) {
327                         rbp->b_flags |= B_INVAL;
328                         brelse(rbp);
329                         rbp = NULL;
330                         goto no_read_ahead;
331                 }
332                 if ((nblks = maxra) > maxrbuild)
333                         nblks = maxrbuild;
334                 if (nblks > burstbytes / blksize)
335                         nblks = burstbytes / blksize;
336
337                 /*
338                  * rbp: async read
339                  */
340                 rbp->b_cmd = BUF_CMD_READ;
341                 /* rbp->b_flags |= B_AGE; */
342                 cluster_setram(rbp);
343
344                 if (nblks > 1) {
345                         rbp = cluster_rbuild(vp, filesize, loffset,
346                                              doffset, blksize, 
347                                              nblks, rbp);
348                 } else {
349                         rbp->b_bio2.bio_offset = doffset;
350                 }
351
352 #if defined(CLUSTERDEBUG)
353                 if (rcluster) {
354                         if (bp) {
355                                 kprintf("A+(%012jx,%d,%jd) "
356                                         "doff=%012jx minr=%zd ra=%d\n",
357                                     (intmax_t)loffset, rbp->b_bcount,
358                                     (intmax_t)(loffset - origoffset),
359                                     (intmax_t)doffset, minreq, maxra);
360                         } else {
361                                 kprintf("A-(%012jx,%d,%jd) "
362                                         "doff=%012jx minr=%zd ra=%d\n",
363                                     (intmax_t)rbp->b_loffset, rbp->b_bcount,
364                                     (intmax_t)(loffset - origoffset),
365                                     (intmax_t)doffset, minreq, maxra);
366                         }
367                 }
368 #endif
369                 rbp->b_flags &= ~(B_ERROR|B_INVAL);
370
371                 if ((rbp->b_flags & B_CLUSTER) == 0)
372                         vfs_busy_pages(vp, rbp);
373                 BUF_KERNPROC(rbp);
374                 loffset += rbp->b_bufsize;
375                 maxra -= rbp->b_bufsize / blksize;
376                 vn_strategy(vp, &rbp->b_bio1);
377                 /* rbp invalid now */
378         }
379
380         /*
381          * Wait for our original buffer to complete its I/O.  reqbp will
382          * be NULL if the original buffer was B_CACHE.  We are returning
383          * (*bpp) which is the same as reqbp when reqbp != NULL.
384          */
385 no_read_ahead:
386         if (reqbp) {
387                 KKASSERT(reqbp->b_bio1.bio_flags & BIO_SYNC);
388                 error = biowait(&reqbp->b_bio1, "clurd");
389         }
390         return (error);
391 }
392
393 /*
394  * If blocks are contiguous on disk, use this to provide clustered
395  * read ahead.  We will read as many blocks as possible sequentially
396  * and then parcel them up into logical blocks in the buffer hash table.
397  *
398  * This function either returns a cluster buf or it returns fbp.  fbp is
399  * already expected to be set up as a synchronous or asynchronous request.
400  *
401  * If a cluster buf is returned it will always be async.
402  */
403 static struct buf *
404 cluster_rbuild(struct vnode *vp, off_t filesize, off_t loffset, off_t doffset,
405                int blksize, int run, struct buf *fbp)
406 {
407         struct buf *bp, *tbp;
408         off_t boffset;
409         int i, j;
410         int maxiosize = vmaxiosize(vp);
411
412         /*
413          * avoid a division
414          */
415         while (loffset + run * blksize > filesize) {
416                 --run;
417         }
418
419         tbp = fbp;
420         tbp->b_bio2.bio_offset = doffset;
421         if ((tbp->b_flags & B_MALLOC) ||
422             ((tbp->b_flags & B_VMIO) == 0) || (run <= 1)) {
423                 return tbp;
424         }
425
426         bp = trypbuf_kva(&cluster_pbuf_freecnt);
427         if (bp == NULL) {
428                 return tbp;
429         }
430
431         /*
432          * We are synthesizing a buffer out of vm_page_t's, but
433          * if the block size is not page aligned then the starting
434          * address may not be either.  Inherit the b_data offset
435          * from the original buffer.
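         *
         * e.g. (illustrative): with a 6K block size on 4K pages the
         * second buffer of a file begins 2K into a page, so
         * (tbp->b_data & PAGE_MASK) == 0x800 and the synthesized
         * pbuf must carry the same intra-page offset in b_data.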
436          */
437         bp->b_data = (char *)((vm_offset_t)bp->b_data |
438             ((vm_offset_t)tbp->b_data & PAGE_MASK));
439         bp->b_flags |= B_CLUSTER | B_VMIO;
440         bp->b_cmd = BUF_CMD_READ;
441         bp->b_bio1.bio_done = cluster_callback;         /* default to async */
442         bp->b_bio1.bio_caller_info1.cluster_head = NULL;
443         bp->b_bio1.bio_caller_info2.cluster_tail = NULL;
444         bp->b_loffset = loffset;
445         bp->b_bio2.bio_offset = doffset;
446         KASSERT(bp->b_loffset != NOOFFSET,
447                 ("cluster_rbuild: no buffer offset"));
448
449         bp->b_bcount = 0;
450         bp->b_bufsize = 0;
451         bp->b_xio.xio_npages = 0;
452
453         for (boffset = doffset, i = 0; i < run; ++i, boffset += blksize) {
454                 if (i) {
455                         if ((bp->b_xio.xio_npages * PAGE_SIZE) +
456                             round_page(blksize) > maxiosize) {
457                                 break;
458                         }
459
460                         /*
461                          * Shortcut some checks and try to avoid buffers that
462                          * would block in the lock.  The same checks have to
463                          * be made again after we officially get the buffer.
464                          */
465                         tbp = getblk(vp, loffset + i * blksize, blksize,
466                                      GETBLK_SZMATCH|GETBLK_NOWAIT, 0);
467                         if (tbp == NULL)
468                                 break;
469                         for (j = 0; j < tbp->b_xio.xio_npages; j++) {
470                                 if (tbp->b_xio.xio_pages[j]->valid)
471                                         break;
472                         }
473                         if (j != tbp->b_xio.xio_npages) {
474                                 bqrelse(tbp);
475                                 break;
476                         }
477
478                         /*
479                          * Stop scanning if the buffer is fully valid
480                          * (marked B_CACHE), or locked (may be doing a
481                          * background write), or if the buffer is not
482                          * VMIO backed.  The clustering code can only deal
483                          * with VMIO-backed buffers.
484                          */
485                         if ((tbp->b_flags & (B_CACHE|B_LOCKED)) ||
486                             (tbp->b_flags & B_VMIO) == 0 ||
487                             (LIST_FIRST(&tbp->b_dep) != NULL &&
488                              buf_checkread(tbp))
489                         ) {
490                                 bqrelse(tbp);
491                                 break;
492                         }
493
494                         /*
495                          * The buffer must be completely invalid in order to
496                          * take part in the cluster.  If it is partially valid
497                          * then we stop.
498                          */
499                         for (j = 0; j < tbp->b_xio.xio_npages; j++) {
500                                 if (tbp->b_xio.xio_pages[j]->valid)
501                                         break;
502                         }
503                         if (j != tbp->b_xio.xio_npages) {
504                                 bqrelse(tbp);
505                                 break;
506                         }
507
508                         /*
509                          * Set a read-ahead mark as appropriate
510                          */
511                         if (i == 1 || i == (run - 1))
512                                 cluster_setram(tbp);
513
514                         /*
515                          * Depress the priority of buffers not explicitly
516                          * requested.
517                          */
518                         /* tbp->b_flags |= B_AGE; */
519
520                         /*
521                          * Set the block number if it isn't set, otherwise
522                          * if it is make sure it matches the block number we
523                          * expect.
524                          */
525                         if (tbp->b_bio2.bio_offset == NOOFFSET) {
526                                 tbp->b_bio2.bio_offset = boffset;
527                         } else if (tbp->b_bio2.bio_offset != boffset) {
528                                 brelse(tbp);
529                                 break;
530                         }
531                 }
532
533                 /*
534                  * The passed-in tbp (i == 0) will already be set up for
535                  * async or sync operation.  All other tbp's acquire in
536                  * our loop are set up for async operation.
537                  */
538                 tbp->b_cmd = BUF_CMD_READ;
539                 BUF_KERNPROC(tbp);
540                 cluster_append(&bp->b_bio1, tbp);
541                 for (j = 0; j < tbp->b_xio.xio_npages; ++j) {
542                         vm_page_t m;
543
544                         m = tbp->b_xio.xio_pages[j];
545                         vm_page_busy_wait(m, FALSE, "clurpg");
546                         vm_page_io_start(m);
547                         vm_page_wakeup(m);
548                         vm_object_pip_add(m->object, 1);
549                         if ((bp->b_xio.xio_npages == 0) ||
550                                 (bp->b_xio.xio_pages[bp->b_xio.xio_npages-1] != m)) {
551                                 bp->b_xio.xio_pages[bp->b_xio.xio_npages] = m;
552                                 bp->b_xio.xio_npages++;
553                         }
554                         if ((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL)
555                                 tbp->b_xio.xio_pages[j] = bogus_page;
556                 }
557                 /*
558                  * Aggregate b_bcount and b_bufsize using blksize for
559                  * both, as cluster_wbuild() does.
560                  *
561                  * Don't inherit tbp->b_bufsize as it may be larger due to
562                  * a non-page-aligned size.  Instead just aggregate using
563                  * 'blksize'.
564                  */
565                 if (tbp->b_bcount != blksize)
566                     kprintf("warning: tbp->b_bcount wrong %d vs %d\n", tbp->b_bcount, blksize);
567                 if (tbp->b_bufsize != blksize)
568                     kprintf("warning: tbp->b_bufsize wrong %d vs %d\n", tbp->b_bufsize, blksize);
569                 bp->b_bcount += blksize;
570                 bp->b_bufsize += blksize;
571         }
572
573         /*
574          * Fully valid pages in the cluster are already good and do not need
575          * to be re-read from disk.  Replace the page with bogus_page
576          */
577         for (j = 0; j < bp->b_xio.xio_npages; j++) {
578                 if ((bp->b_xio.xio_pages[j]->valid & VM_PAGE_BITS_ALL) ==
579                     VM_PAGE_BITS_ALL) {
580                         bp->b_xio.xio_pages[j] = bogus_page;
581                 }
582         }
583         if (bp->b_bufsize > bp->b_kvasize) {
584                 panic("cluster_rbuild: b_bufsize(%d) > b_kvasize(%d)",
585                     bp->b_bufsize, bp->b_kvasize);
586         }
587         pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
588                 (vm_page_t *)bp->b_xio.xio_pages, bp->b_xio.xio_npages);
589         BUF_KERNPROC(bp);
590         return (bp);
591 }
592
593 /*
594  * Cleanup after a clustered read or write.
595  * This is complicated by the fact that any of the buffers might have
596  * extra memory (if there were no empty buffer headers at allocbuf time)
597  * that we will need to shift around.
598  *
599  * The returned bio is &bp->b_bio1
600  */
601 void
602 cluster_callback(struct bio *bio)
603 {
604         struct buf *bp = bio->bio_buf;
605         struct buf *tbp;
606         int error = 0;
607
608         /*
609          * Must propagate errors to all the components.  A short read (EOF)
610          * is a critical error.
611          */
612         if (bp->b_flags & B_ERROR) {
613                 error = bp->b_error;
614         } else if (bp->b_bcount != bp->b_bufsize) {
615                 panic("cluster_callback: unexpected EOF on cluster %p!", bio);
616         }
617
618         pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_xio.xio_npages);
619         /*
620          * Move memory from the large cluster buffer into the component
621          * buffers and mark IO as done on these.  Since the memory map
622          * is the same, no actual copying is required.
623          */
624         while ((tbp = bio->bio_caller_info1.cluster_head) != NULL) {
625                 bio->bio_caller_info1.cluster_head = tbp->b_cluster_next;
626                 if (error) {
627                         tbp->b_flags |= B_ERROR | B_IODEBUG;
628                         tbp->b_error = error;
629                 } else {
630                         tbp->b_dirtyoff = tbp->b_dirtyend = 0;
631                         tbp->b_flags &= ~(B_ERROR|B_INVAL);
632                         tbp->b_flags |= B_IODEBUG;
633                         /*
634                          * XXX the bdwrite()/bqrelse() issued during
635                          * cluster building clears B_RELBUF (see bqrelse()
636                          * comment).  If direct I/O was specified, we have
637                          * to restore it here to allow the buffer and VM
638                          * to be freed.
639                          */
640                         if (tbp->b_flags & B_DIRECT)
641                                 tbp->b_flags |= B_RELBUF;
642                 }
643                 biodone(&tbp->b_bio1);
644         }
645         relpbuf(bp, &cluster_pbuf_freecnt);
646 }
647
648 /*
649  *      cluster_wbuild_wb:
650  *
651  *      Implement modified write build (write-behind) for clusters.
652  *
653  *              write_behind = 0        write behind disabled
654  *              write_behind = 1        write behind normal (default)
655  *              write_behind = 2        write behind backed-off
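 *
 *      The mode can be changed at runtime through the vfs.write_behind
 *      sysctl defined above, e.g. from userland:
 *
 *              sysctl vfs.write_behind=2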
656  */
657
658 static __inline int
659 cluster_wbuild_wb(struct vnode *vp, int blksize, off_t start_loffset, int len)
660 {
661         int r = 0;
662
663         switch(write_behind) {
664         case 2:
665                 if (start_loffset < len)
666                         break;
667                 start_loffset -= len;
668                 /* fall through */
669         case 1:
670                 r = cluster_wbuild(vp, NULL, blksize, start_loffset, len);
671                 /* fall through */
672         default:
673                 /* fall through */
674                 break;
675         }
676         return(r);
677 }
678
679 /*
680  * Do clustered write for FFS.
681  *
682  * Four cases:
683  *      1. Write is not sequential (write asynchronously)
684  *      Write is sequential:
685  *      2.      beginning of cluster - begin cluster
686  *      3.      middle of a cluster - add to cluster
687  *      4.      end of a cluster - asynchronously write cluster
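 *
 * Example (a sketch; 'ip' and its i_size field are hypothetical
 * caller-side names): a filesystem write path might hand each
 * logically dirty buffer to the clustering heuristic as
 *
 *      cluster_write(bp, (off_t)ip->i_size, blksize, seqcount);
 *
 * and let the cases above choose between bdwrite(), bawrite() and a
 * clustered flush.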
688  */
689 void
690 cluster_write(struct buf *bp, off_t filesize, int blksize, int seqcount)
691 {
692         struct vnode *vp;
693         off_t loffset;
694         int maxclen, cursize;
695         int async;
696
697         vp = bp->b_vp;
698         if (vp->v_type == VREG)
699                 async = vp->v_mount->mnt_flag & MNT_ASYNC;
700         else
701                 async = 0;
702         loffset = bp->b_loffset;
703         KASSERT(bp->b_loffset != NOOFFSET, 
704                 ("cluster_write: no buffer offset"));
705
706         /* Initialize vnode to beginning of file. */
707         if (loffset == 0)
708                 vp->v_lasta = vp->v_clen = vp->v_cstart = vp->v_lastw = 0;
709
710         if (vp->v_clen == 0 || loffset != vp->v_lastw + blksize ||
711             bp->b_bio2.bio_offset == NOOFFSET ||
712             (bp->b_bio2.bio_offset != vp->v_lasta + blksize)) {
713                 maxclen = vmaxiosize(vp);
714                 if (vp->v_clen != 0) {
715                         /*
716                          * Next block is not sequential.
717                          *
718                          * If we are not writing at end of file, the process
719                          * seeked to another point in the file since its last
720                          * write, or we have reached our maximum cluster size,
721                          * then push the previous cluster. Otherwise try
722                          * reallocating to make it sequential.
723                          *
724                          * Change to algorithm: only push previous cluster if
725                          * it was sequential from the point of view of the
726                          * seqcount heuristic, otherwise leave the buffer 
727                          * intact so we can potentially optimize the I/O
728                          * later on in the buf_daemon or update daemon
729                          * flush.
730                          */
731                         cursize = vp->v_lastw - vp->v_cstart + blksize;
732                         if (bp->b_loffset + blksize < filesize ||
733                             loffset != vp->v_lastw + blksize || vp->v_clen <= cursize) {
734                                 if (!async && seqcount > 0) {
735                                         cluster_wbuild_wb(vp, blksize,
736                                                 vp->v_cstart, cursize);
737                                 }
738                         } else {
739                                 struct buf **bpp, **endbp;
740                                 struct cluster_save *buflist;
741
742                                 buflist = cluster_collectbufs(vp, bp, blksize);
743                                 endbp = &buflist->bs_children
744                                     [buflist->bs_nchildren - 1];
745                                 if (VOP_REALLOCBLKS(vp, buflist)) {
746                                         /*
747                                          * Failed, push the previous cluster
748                                          * if *really* writing sequentially
749                                          * in the logical file (seqcount > 1),
750                                          * otherwise delay it in the hopes that
751                                          * the low level disk driver can
752                                          * optimize the write ordering.
753                                          */
754                                         for (bpp = buflist->bs_children;
755                                              bpp < endbp; bpp++)
756                                                 brelse(*bpp);
757                                         kfree(buflist, M_SEGMENT);
758                                         if (seqcount > 1) {
759                                                 cluster_wbuild_wb(vp, 
760                                                     blksize, vp->v_cstart, 
761                                                     cursize);
762                                         }
763                                 } else {
764                                         /*
765                                          * Succeeded, keep building cluster.
766                                          */
767                                         for (bpp = buflist->bs_children;
768                                              bpp <= endbp; bpp++)
769                                                 bdwrite(*bpp);
770                                         kfree(buflist, M_SEGMENT);
771                                         vp->v_lastw = loffset;
772                                         vp->v_lasta = bp->b_bio2.bio_offset;
773                                         return;
774                                 }
775                         }
776                 }
777                 /*
778                  * Consider beginning a cluster. If at end of file, make
779                  * cluster as large as possible, otherwise find size of
780                  * existing cluster.
781                  */
782                 if ((vp->v_type == VREG) &&
783                     bp->b_loffset + blksize < filesize &&
784                     (bp->b_bio2.bio_offset == NOOFFSET) &&
785                     (VOP_BMAP(vp, loffset, &bp->b_bio2.bio_offset, &maxclen, NULL, BUF_CMD_WRITE) ||
786                      bp->b_bio2.bio_offset == NOOFFSET)) {
787                         bawrite(bp);
788                         vp->v_clen = 0;
789                         vp->v_lasta = bp->b_bio2.bio_offset;
790                         vp->v_cstart = loffset + blksize;
791                         vp->v_lastw = loffset;
792                         return;
793                 }
794                 if (maxclen > blksize)
795                         vp->v_clen = maxclen - blksize;
796                 else
797                         vp->v_clen = 0;
798                 if (!async && vp->v_clen == 0) { /* I/O not contiguous */
799                         vp->v_cstart = loffset + blksize;
800                         bawrite(bp);
801                 } else {        /* Wait for rest of cluster */
802                         vp->v_cstart = loffset;
803                         bdwrite(bp);
804                 }
805         } else if (loffset == vp->v_cstart + vp->v_clen) {
806                 /*
807                  * At end of cluster, write it out if seqcount tells us we
808                  * are operating sequentially, otherwise let the buf or
809                  * update daemon handle it.
810                  */
811                 bdwrite(bp);
812                 if (seqcount > 1)
813                         cluster_wbuild_wb(vp, blksize, vp->v_cstart,
814                                           vp->v_clen + blksize);
815                 vp->v_clen = 0;
816                 vp->v_cstart = loffset + blksize;
817         } else if (vm_page_count_severe()) {
818                 /*
819                  * We are low on memory, get it going NOW
820                  */
821                 bawrite(bp);
822         } else {
823                 /*
824                  * In the middle of a cluster, so just delay the I/O for now.
825                  */
826                 bdwrite(bp);
827         }
828         vp->v_lastw = loffset;
829         vp->v_lasta = bp->b_bio2.bio_offset;
830 }
831
832 /*
833  * This is the clustered version of bawrite().  It works similarly to
834  * cluster_write() except I/O on the buffer is guaranteed to occur.
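 *
 * Returns the number of bytes of write I/O initiated, so a caller
 * doing its own accounting can, e.g. (illustrative),
 * 'total += cluster_awrite(bp);' while walking its dirty buffers.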
835  */
836 int
837 cluster_awrite(struct buf *bp)
838 {
839         int total;
840
841         /*
842          * Don't bother if it isn't clusterable.
843          */
844         if ((bp->b_flags & B_CLUSTEROK) == 0 ||
845             bp->b_vp == NULL ||
846             (bp->b_vp->v_flag & VOBJBUF) == 0) {
847                 total = bp->b_bufsize;
848                 bawrite(bp);
849                 return (total);
850         }
851
852         total = cluster_wbuild(bp->b_vp, &bp, bp->b_bufsize,
853                                bp->b_loffset, vmaxiosize(bp->b_vp));
854         if (bp)
855                 bawrite(bp);
856
857         return total;
858 }
859
860 /*
861  * This is an awful lot like cluster_rbuild...wish they could be combined.
862  * Scan forward from start_loffset for up to 'bytes' worth of dirty,
863  * compatible buffers and issue them to the device as one or more
864  * clustered writes.
865  *
866  * cluster_wbuild() normally does not guarantee anything.  If bpp is
867  * non-NULL and cluster_wbuild() is able to incorporate it into the
868  * I/O it will set *bpp to NULL, otherwise it will leave it alone and
869  * the caller must dispose of *bpp.
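 *
 * e.g. cluster_awrite() passes &bp and afterwards bawrite()s the
 * buffer itself only if cluster_wbuild() declined to consume it
 * (*bpp still non-NULL).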
870  */
871 static int
872 cluster_wbuild(struct vnode *vp, struct buf **bpp,
873                int blksize, off_t start_loffset, int bytes)
874 {
875         struct buf *bp, *tbp;
876         int i, j;
877         int totalwritten = 0;
878         int must_initiate;
879         int maxiosize = vmaxiosize(vp);
880
881         while (bytes > 0) {
882                 /*
883                  * If the buffer matches the passed locked & removed buffer
884                  * we use the passed buffer (which might not be B_DELWRI).
885                  *
886                  * Otherwise locate the buffer and determine if it is
887                  * compatible.
888                  */
889                 if (bpp && (*bpp)->b_loffset == start_loffset) {
890                         tbp = *bpp;
891                         *bpp = NULL;
892                         bpp = NULL;
893                 } else {
894                         tbp = findblk(vp, start_loffset, FINDBLK_NBLOCK);
895                         if (tbp == NULL ||
896                             (tbp->b_flags & (B_LOCKED | B_INVAL | B_DELWRI)) !=
897                              B_DELWRI ||
898                             (LIST_FIRST(&tbp->b_dep) && buf_checkwrite(tbp))) {
899                                 if (tbp)
900                                         BUF_UNLOCK(tbp);
901                                 start_loffset += blksize;
902                                 bytes -= blksize;
903                                 continue;
904                         }
905                         bremfree(tbp);
906                 }
907                 KKASSERT(tbp->b_cmd == BUF_CMD_DONE);
908
909                 /*
910                  * Extra memory in the buffer, punt on this buffer.
911                  * XXX we could handle this in most cases, but we would
912                  * have to push the extra memory down to after our max
913                  * possible cluster size and then potentially pull it back
914                  * up if the cluster was terminated prematurely--too much
915                  * hassle.
916                  */
917                 if (((tbp->b_flags & (B_CLUSTEROK|B_MALLOC)) != B_CLUSTEROK) ||
918                     (tbp->b_bcount != tbp->b_bufsize) ||
919                     (tbp->b_bcount != blksize) ||
920                     (bytes == blksize) ||
921                     ((bp = getpbuf_kva(&cluster_pbuf_freecnt)) == NULL)) {
922                         totalwritten += tbp->b_bufsize;
923                         bawrite(tbp);
924                         start_loffset += blksize;
925                         bytes -= blksize;
926                         continue;
927                 }
928
929                 /*
930                  * Set up the pbuf.  Track our append point with b_bcount
931                  * and b_bufsize.  b_bufsize is not used by the device but
932                  * our caller uses it to loop clusters and we use it to
933                  * detect a premature EOF on the block device.
934                  */
935                 bp->b_bcount = 0;
936                 bp->b_bufsize = 0;
937                 bp->b_xio.xio_npages = 0;
938                 bp->b_loffset = tbp->b_loffset;
939                 bp->b_bio2.bio_offset = tbp->b_bio2.bio_offset;
940
941                 /*
942                  * We are synthesizing a buffer out of vm_page_t's, but
943                  * if the block size is not page aligned then the starting
944                  * address may not be either.  Inherit the b_data offset
945                  * from the original buffer.
946                  */
947                 bp->b_data = (char *)((vm_offset_t)bp->b_data |
948                     ((vm_offset_t)tbp->b_data & PAGE_MASK));
949                 bp->b_flags &= ~B_ERROR;
950                 bp->b_flags |= B_CLUSTER | B_BNOCLIP |
951                         (tbp->b_flags & (B_VMIO | B_NEEDCOMMIT));
952                 bp->b_bio1.bio_caller_info1.cluster_head = NULL;
953                 bp->b_bio1.bio_caller_info2.cluster_tail = NULL;
954
955                 /*
956                  * From this location in the file, scan forward to see
957                  * if there are buffers with adjacent data that need to
958                  * be written as well.
959                  *
960                  * IO *must* be initiated on index 0 at this point
961                  * (particularly when called from cluster_awrite()).
962                  */
963                 for (i = 0; i < bytes; (i += blksize), (start_loffset += blksize)) {
964                         if (i == 0) {
965                                 must_initiate = 1;
966                         } else {
967                                 /*
968                                  * Not first buffer.
969                                  */
970                                 must_initiate = 0;
971                                 tbp = findblk(vp, start_loffset,
972                                               FINDBLK_NBLOCK);
973                                 /*
974                                  * Buffer not found or could not be locked
975                                  * non-blocking.
976                                  */
977                                 if (tbp == NULL)
978                                         break;
979
980                                 /*
981                                  * If it IS in core, but has different
982                                  * characteristics, then don't cluster
983                                  * with it.
984                                  */
985                                 if ((tbp->b_flags & (B_VMIO | B_CLUSTEROK |
986                                      B_INVAL | B_DELWRI | B_NEEDCOMMIT))
987                                     != (B_DELWRI | B_CLUSTEROK |
988                                      (bp->b_flags & (B_VMIO | B_NEEDCOMMIT))) ||
989                                     (tbp->b_flags & B_LOCKED)
990                                 ) {
991                                         BUF_UNLOCK(tbp);
992                                         break;
993                                 }
994
995                                 /*
996                                  * Check that the combined cluster
997                                  * would make sense with regard to pages
998                                  * and would not be too large
999                                  *
1000                                  * WARNING! buf_checkwrite() must be the last
1001                                  *          check made.  If it returns 0 then
1002                                  *          we must initiate the I/O.
1003                                  */
1004                                 if ((tbp->b_bcount != blksize) ||
1005                                   ((bp->b_bio2.bio_offset + i) !=
1006                                     tbp->b_bio2.bio_offset) ||
1007                                   ((tbp->b_xio.xio_npages + bp->b_xio.xio_npages) >
1008                                     (maxiosize / PAGE_SIZE)) ||
1009                                   (LIST_FIRST(&tbp->b_dep) &&
1010                                    buf_checkwrite(tbp))
1011                                 ) {
1012                                         BUF_UNLOCK(tbp);
1013                                         break;
1014                                 }
1015                                 if (LIST_FIRST(&tbp->b_dep))
1016                                         must_initiate = 1;
1017                                 /*
1018                                  * Ok, it's passed all the tests,
1019                                  * so remove it from the free list
1020                                  * and mark it busy. We will use it.
1021                                  */
1022                                 bremfree(tbp);
1023                                 KKASSERT(tbp->b_cmd == BUF_CMD_DONE);
1024                         }
1025
1026                         /*
1027                          * If the IO is via the VM then we do some
1028                          * special VM hackery (yuck).  Since the buffer's
1029                          * block size may not be page-aligned it is possible
1030                          * for a page to be shared between two buffers.  We
1031                          * have to get rid of the duplication when building
1032                          * the cluster.
1033                          */
1034                         if (tbp->b_flags & B_VMIO) {
1035                                 vm_page_t m;
1036
1037                                 /*
1038                                  * Try to avoid deadlocks with the VM system.
1039                                  * However, we cannot abort the I/O if
1040                                  * must_initiate is non-zero.
1041                                  */
1042                                 if (must_initiate == 0) {
1043                                         for (j = 0;
1044                                              j < tbp->b_xio.xio_npages;
1045                                              ++j) {
1046                                                 m = tbp->b_xio.xio_pages[j];
1047                                                 if (m->flags & PG_BUSY) {
1048                                                         bqrelse(tbp);
1049                                                         goto finishcluster;
1050                                                 }
1051                                         }
1052                                 }
1053                                         
1054                                 for (j = 0; j < tbp->b_xio.xio_npages; ++j) {
1055                                         m = tbp->b_xio.xio_pages[j];
1056                                         vm_page_busy_wait(m, FALSE, "clurpg");
1057                                         vm_page_io_start(m);
1058                                         vm_page_wakeup(m);
1059                                         vm_object_pip_add(m->object, 1);
1060                                         if ((bp->b_xio.xio_npages == 0) ||
1061                                           (bp->b_xio.xio_pages[bp->b_xio.xio_npages - 1] != m)) {
1062                                                 bp->b_xio.xio_pages[bp->b_xio.xio_npages] = m;
1063                                                 bp->b_xio.xio_npages++;
1064                                         }
1065                                 }
1066                         }
1067                         bp->b_bcount += blksize;
1068                         bp->b_bufsize += blksize;
1069
1070                         bundirty(tbp);
1071                         tbp->b_flags &= ~B_ERROR;
1072                         tbp->b_cmd = BUF_CMD_WRITE;
1073                         BUF_KERNPROC(tbp);
1074                         cluster_append(&bp->b_bio1, tbp);
1075
1076                         /*
1077                          * check for latent dependencies to be handled 
1078                          */
1079                         if (LIST_FIRST(&tbp->b_dep) != NULL)
1080                                 buf_start(tbp);
1081                 }
1082         finishcluster:
1083                 pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
1084                             (vm_page_t *)bp->b_xio.xio_pages,
1085                             bp->b_xio.xio_npages);
1086                 if (bp->b_bufsize > bp->b_kvasize) {
1087                         panic("cluster_wbuild: b_bufsize(%d) "
1088                               "> b_kvasize(%d)\n",
1089                               bp->b_bufsize, bp->b_kvasize);
1090                 }
1091                 totalwritten += bp->b_bufsize;
1092                 bp->b_dirtyoff = 0;
1093                 bp->b_dirtyend = bp->b_bufsize;
1094                 bp->b_bio1.bio_done = cluster_callback;
1095                 bp->b_cmd = BUF_CMD_WRITE;
1096
1097                 vfs_busy_pages(vp, bp);
1098                 bsetrunningbufspace(bp, bp->b_bufsize);
1099                 BUF_KERNPROC(bp);
1100                 vn_strategy(vp, &bp->b_bio1);
1101
1102                 bytes -= i;
1103         }
1104         return totalwritten;
1105 }
1106
1107 /*
1108  * Collect together all the buffers in a cluster, plus one
1109  * additional buffer (last_bp) supplied by the caller.
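 *
 * The cluster_save header and its bs_children pointer array are
 * allocated as a single block; bs_children simply points just past
 * the header (see the kmalloc() below).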
1110  */
1111 static struct cluster_save *
1112 cluster_collectbufs(struct vnode *vp, struct buf *last_bp, int blksize)
1113 {
1114         struct cluster_save *buflist;
1115         struct buf *bp;
1116         off_t loffset;
1117         int i, len;
1118
1119         len = (int)(vp->v_lastw - vp->v_cstart + blksize) / blksize;
1120         buflist = kmalloc(sizeof(struct buf *) * (len + 1) + sizeof(*buflist),
1121                          M_SEGMENT, M_WAITOK);
1122         buflist->bs_nchildren = 0;
1123         buflist->bs_children = (struct buf **) (buflist + 1);
1124         for (loffset = vp->v_cstart, i = 0; i < len; (loffset += blksize), i++) {
1125                 (void) bread(vp, loffset, last_bp->b_bcount, &bp);
1126                 buflist->bs_children[i] = bp;
1127                 if (bp->b_bio2.bio_offset == NOOFFSET) {
1128                         VOP_BMAP(bp->b_vp, bp->b_loffset,
1129                                  &bp->b_bio2.bio_offset,
1130                                  NULL, NULL, BUF_CMD_WRITE);
1131                 }
1132         }
1133         buflist->bs_children[i] = bp = last_bp;
1134         if (bp->b_bio2.bio_offset == NOOFFSET) {
1135                 VOP_BMAP(bp->b_vp, bp->b_loffset, &bp->b_bio2.bio_offset,
1136                          NULL, NULL, BUF_CMD_WRITE);
1137         }
1138         buflist->bs_nchildren = i + 1;
1139         return (buflist);
1140 }
1141
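/*
 * Append tbp to the cluster list anchored in the cluster pbuf's bio
 * (list head in bio_caller_info1, tail in bio_caller_info2).  The
 * list is consumed by cluster_callback() when the I/O completes.
 */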
1142 void
1143 cluster_append(struct bio *bio, struct buf *tbp)
1144 {
1145         tbp->b_cluster_next = NULL;
1146         if (bio->bio_caller_info1.cluster_head == NULL) {
1147                 bio->bio_caller_info1.cluster_head = tbp;
1148                 bio->bio_caller_info2.cluster_tail = tbp;
1149         } else {
1150                 bio->bio_caller_info2.cluster_tail->b_cluster_next = tbp;
1151                 bio->bio_caller_info2.cluster_tail = tbp;
1152         }
1153 }
1154
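/*
 * Set a read-ahead mark: B_RAM on the buffer and PG_RAM on its first
 * backing page (if any), allowing the mark to be rediscovered from
 * the VM page when the buffer itself is recycled.
 */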
1155 static
1156 void
1157 cluster_setram (struct buf *bp)
1158 {
1159         bp->b_flags |= B_RAM;
1160         if (bp->b_xio.xio_npages)
1161                 vm_page_flag_set(bp->b_xio.xio_pages[0], PG_RAM);
1162 }