/*-
 * Copyright (c) 1993
 *      The Regents of the University of California.  All rights reserved.
 * Modifications/enhancements:
 *      Copyright (c) 1995 John S. Dyson.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)vfs_cluster.c       8.7 (Berkeley) 2/13/94
 * $FreeBSD: src/sys/kern/vfs_cluster.c,v 1.92.2.9 2001/11/18 07:10:59 dillon Exp $
 * $DragonFly: src/sys/kern/vfs_cluster.c,v 1.40 2008/07/14 03:09:00 dillon Exp $
 */

#include "opt_debug_cluster.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/resourcevar.h>
#include <sys/vmmeter.h>
#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <sys/sysctl.h>

#include <sys/buf2.h>
#include <vm/vm_page2.h>

#include <machine/limits.h>
#if defined(CLUSTERDEBUG)
static int      rcluster = 0;
SYSCTL_INT(_debug, OID_AUTO, rcluster, CTLFLAG_RW, &rcluster, 0, "");
#endif

static MALLOC_DEFINE(M_SEGMENT, "cluster_save", "cluster_save buffer");

static struct cluster_save *
        cluster_collectbufs (struct vnode *vp, struct buf *last_bp,
                            int blksize);
static struct buf *
        cluster_rbuild (struct vnode *vp, off_t filesize, off_t loffset,
                            off_t doffset, int blksize, int run,
                            struct buf *fbp);
static void cluster_callback (struct bio *);
static void cluster_setram (struct buf *);
static int cluster_wbuild(struct vnode *vp, struct buf **bpp, int blksize,
                            off_t start_loffset, int bytes);

static int write_behind = 1;
SYSCTL_INT(_vfs, OID_AUTO, write_behind, CTLFLAG_RW, &write_behind, 0,
    "Cluster write-behind setting");
static int max_readahead = 2 * 1024 * 1024;
SYSCTL_INT(_vfs, OID_AUTO, max_readahead, CTLFLAG_RW, &max_readahead, 0,
    "Limit in bytes for desired cluster read-ahead");

extern vm_page_t        bogus_page;

extern int cluster_pbuf_freecnt;

/*
 * This replaces bread.
 *
 * filesize     - read-ahead @ blksize will not cross this boundary
 * loffset      - loffset for returned *bpp
 * blksize      - blocksize for returned *bpp and read-ahead bps
 * minreq       - minimum (not a hard minimum) in bytes, typically reflects
 *                a higher level uio resid.
 * maxreq       - maximum (sequential heuristic) in bytes (highest typ. ~2MB)
 * bpp          - return buffer (*bpp) for (loffset,blksize)
 */
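/*
 * Illustrative caller sketch (hypothetical; the names ip, i_size and
 * lblktooff() are assumptions for illustration, not part of this file):
 * a filesystem read path might drive this as
 *
 *      struct buf *bp = NULL;
 *      error = cluster_readx(vp, ip->i_size, lblktooff(fs, lbn), blksize,
 *                            uio->uio_resid, seqcount * MAXBSIZE, &bp);
 *
 * where the file size bounds the read-ahead and the sequential heuristic
 * scales maxreq.
 */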
int
cluster_readx(struct vnode *vp, off_t filesize, off_t loffset,
             int blksize, size_t minreq, size_t maxreq, struct buf **bpp)
{
        struct buf *bp, *rbp, *reqbp;
        off_t origoffset;
        off_t doffset;
        int error;
        int i;
        int maxra;
        int maxrbuild;

        error = 0;

        /*
         * Calculate the desired read-ahead in blksize'd blocks (maxra).
         * To do this we calculate maxreq.
         *
         * maxreq typically starts out as a sequential heuristic.  If the
         * high level uio/resid is bigger (minreq), we pop maxreq up to
         * minreq.  This represents the case where userland is performing
         * random I/O via big read()'s.
         *
         * Then we limit maxreq to max_readahead to ensure it is a reasonable
         * value.
         *
         * Finally we must ensure that (loffset + maxreq) does not cross the
         * boundary (filesize) for the current blocksize.  If we allowed it
         * to cross we could end up with buffers past the boundary with the
         * wrong block size (HAMMER large-data areas use mixed block sizes).
         * minreq is also absolutely limited to filesize.
         */
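        /*
         * Worked example (illustrative numbers only): with blksize = 16384,
         * a uio resid of 65536 and a sequential heuristic of 4MB, the 2MB
         * max_readahead default clips maxreq to 2097152, giving
         * maxra = 2097152 / 16384 = 128 blocks, subject to the filesize
         * clipping below.
         */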
        if (maxreq < minreq)
                maxreq = minreq;
        /* minreq not used beyond this point */

        if (maxreq > max_readahead) {
                maxreq = max_readahead;
                if (maxreq > 16 * 1024 * 1024)
                        maxreq = 16 * 1024 * 1024;
        }
        if (maxreq < blksize)
                maxreq = blksize;
        if (loffset + maxreq > filesize) {
                if (loffset > filesize)
                        maxreq = 0;
                else
                        maxreq = filesize - loffset;
        }

        maxra = (int)(maxreq / blksize);

        /*
         * Get the requested block.
         */
        if (*bpp)
                reqbp = bp = *bpp;
        else
                *bpp = reqbp = bp = getblk(vp, loffset, blksize, 0, 0);
        origoffset = loffset;

        /*
         * Calculate the maximum cluster size for a single I/O, used
         * by cluster_rbuild().
         */
        maxrbuild = vmaxiosize(vp) / blksize;

        /*
         * if it is in the cache, then check to see if the reads have been
         * sequential.  If they have, then try some read-ahead, otherwise
         * back-off on prospective read-aheads.
         */
        if (bp->b_flags & B_CACHE) {
                /*
                 * Not sequential, do not do any read-ahead
                 */
                if (maxra <= 1)
                        return 0;

                /*
                 * No read-ahead mark, do not do any read-ahead
                 * yet.
                 */
                if ((bp->b_flags & B_RAM) == 0)
                        return 0;

                /*
                 * We hit a read-ahead-mark, figure out how much read-ahead
                 * to do (maxra) and where to start (loffset).
                 *
                 * Shortcut the scan.  Typically the way this works is that
                 * we've built up all the blocks in between except for the
                 * last in previous iterations, so if the second-to-last
                 * block is present we just skip ahead to it.
                 *
                 * This algorithm has O(1) cpu in the steady state no
                 * matter how large maxra is.
                 */
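                /*
                 * Example of the shortcut (illustrative): with maxra = 16,
                 * the steady state leaves only the last block missing, so
                 * the probe at (maxra - 2) hits and the scan below starts
                 * at i = 15 and terminates after a single findblk() rather
                 * than walking all 16 slots.
                 */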
                bp->b_flags &= ~B_RAM;

                if (findblk(vp, loffset + (maxra - 2) * blksize, FINDBLK_TEST))
                        i = maxra - 1;
                else
                        i = 1;
                while (i < maxra) {
                        if (findblk(vp, loffset + i * blksize,
                                    FINDBLK_TEST) == NULL) {
                                break;
                        }
                        ++i;
                }

                /*
                 * We got everything or everything is in the cache, no
                 * point continuing.
                 */
                if (i >= maxra)
                        return 0;

                /*
                 * Calculate where to start the read-ahead and how much
                 * to do.  Generally speaking we want to read-ahead by
                 * (maxra) when we've found a read-ahead mark.  We do
                 * not want to reduce maxra here as it will cause
                 * successive read-ahead I/O's to be smaller and smaller.
                 *
                 * However, we have to make sure we don't break the
                 * filesize limitation for the clustered operation.
                 */
                loffset += i * blksize;
                reqbp = bp = NULL;

                if (loffset >= filesize)
                        return 0;
                if (loffset + maxra * blksize > filesize) {
                        maxreq = filesize - loffset;
                        maxra = (int)(maxreq / blksize);
                }
        } else {
                __debugvar off_t firstread = bp->b_loffset;
                int nblks;

                /*
                 * Set-up synchronous read for bp.
                 */
                bp->b_cmd = BUF_CMD_READ;
                bp->b_bio1.bio_done = biodone_sync;
                bp->b_bio1.bio_flags |= BIO_SYNC;

                KASSERT(firstread != NOOFFSET,
                        ("cluster_read: no buffer offset"));

                /*
                 * nblks is our cluster_rbuild request size, limited
                 * primarily by the device.
                 */
                if ((nblks = maxra) > maxrbuild)
                        nblks = maxrbuild;

                if (nblks > 1) {
                        int burstbytes;

                        error = VOP_BMAP(vp, loffset, &doffset,
                                         &burstbytes, NULL, BUF_CMD_READ);
                        if (error)
                                goto single_block_read;
                        if (nblks > burstbytes / blksize)
                                nblks = burstbytes / blksize;
                        if (doffset == NOOFFSET)
                                goto single_block_read;
                        if (nblks <= 1)
                                goto single_block_read;

                        bp = cluster_rbuild(vp, filesize, loffset,
                                            doffset, blksize, nblks, bp);
                        loffset += bp->b_bufsize;
                        maxra -= bp->b_bufsize / blksize;
                } else {
single_block_read:
                        /*
                         * If it isn't in the cache, then get a chunk from
                         * disk if sequential, otherwise just get the block.
                         */
                        cluster_setram(bp);
                        loffset += blksize;
                        --maxra;
                }
        }

        /*
         * If B_CACHE was not set issue bp.  bp will either be an
         * asynchronous cluster buf or a synchronous single-buf.
         * If it is a single buf it will be the same as reqbp.
         *
         * NOTE: Once an async cluster buf is issued bp becomes invalid.
         */
        if (bp) {
#if defined(CLUSTERDEBUG)
                if (rcluster)
                        kprintf("S(%012jx,%d,%d)\n",
                            (intmax_t)bp->b_loffset, bp->b_bcount, maxra);
#endif
                if ((bp->b_flags & B_CLUSTER) == 0)
                        vfs_busy_pages(vp, bp);
                bp->b_flags &= ~(B_ERROR|B_INVAL);
                vn_strategy(vp, &bp->b_bio1);
                error = 0;
                /* bp invalid now */
        }

        /*
         * If we have been doing sequential I/O, then do some read-ahead.
         * The code above us should have positioned us at the next likely
         * offset.
         *
         * Only mess with buffers which we can immediately lock.  HAMMER
         * will do device-readahead irrespective of what the blocks
         * represent.
         */
        while (error == 0 && maxra > 0) {
                int burstbytes;
                int tmp_error;
                int nblks;

                rbp = getblk(vp, loffset, blksize,
                             GETBLK_SZMATCH|GETBLK_NOWAIT, 0);
                if (rbp == NULL)
                        goto no_read_ahead;
                if ((rbp->b_flags & B_CACHE)) {
                        bqrelse(rbp);
                        goto no_read_ahead;
                }

                /*
                 * An error from the read-ahead bmap has nothing to do
                 * with the caller's original request.
                 */
                tmp_error = VOP_BMAP(vp, loffset, &doffset,
                                     &burstbytes, NULL, BUF_CMD_READ);
                if (tmp_error || doffset == NOOFFSET) {
                        rbp->b_flags |= B_INVAL;
                        brelse(rbp);
                        rbp = NULL;
                        goto no_read_ahead;
                }
                if ((nblks = maxra) > maxrbuild)
                        nblks = maxrbuild;
                if (nblks > burstbytes / blksize)
                        nblks = burstbytes / blksize;

                /*
                 * rbp: async read
                 */
                rbp->b_cmd = BUF_CMD_READ;
                /*rbp->b_flags |= B_AGE*/;
                cluster_setram(rbp);

                if (nblks > 1) {
                        rbp = cluster_rbuild(vp, filesize, loffset,
                                             doffset, blksize,
                                             nblks, rbp);
                } else {
                        rbp->b_bio2.bio_offset = doffset;
                }

#if defined(CLUSTERDEBUG)
                if (rcluster) {
                        if (bp) {
                                kprintf("A+(%012jx,%d,%jd) "
                                        "doff=%012jx minr=%zd ra=%d\n",
                                    (intmax_t)loffset, rbp->b_bcount,
                                    (intmax_t)(loffset - origoffset),
                                    (intmax_t)doffset, minreq, maxra);
                        } else {
                                kprintf("A-(%012jx,%d,%jd) "
                                        "doff=%012jx minr=%zd ra=%d\n",
                                    (intmax_t)rbp->b_loffset, rbp->b_bcount,
                                    (intmax_t)(loffset - origoffset),
                                    (intmax_t)doffset, minreq, maxra);
                        }
                }
#endif
                rbp->b_flags &= ~(B_ERROR|B_INVAL);

                if ((rbp->b_flags & B_CLUSTER) == 0)
                        vfs_busy_pages(vp, rbp);
                BUF_KERNPROC(rbp);
                loffset += rbp->b_bufsize;
                maxra -= rbp->b_bufsize / blksize;
                vn_strategy(vp, &rbp->b_bio1);
                /* rbp invalid now */
        }

        /*
         * Wait for our original buffer to complete its I/O.  reqbp will
         * be NULL if the original buffer was B_CACHE.  We are returning
         * (*bpp) which is the same as reqbp when reqbp != NULL.
         */
no_read_ahead:
        if (reqbp) {
                KKASSERT(reqbp->b_bio1.bio_flags & BIO_SYNC);
                error = biowait(&reqbp->b_bio1, "clurd");
        }
        return (error);
}

/*
 * If blocks are contiguous on disk, use this to provide clustered
 * read ahead.  We will read as many blocks as possible sequentially
 * and then parcel them up into logical blocks in the buffer hash table.
 *
 * This function either returns a cluster buf or it returns fbp.  fbp is
 * already expected to be set up as a synchronous or asynchronous request.
 *
 * If a cluster buf is returned it will always be async.
 */
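/*
 * Illustrative layout (assumed numbers): with blksize = 16384, run = 8 and
 * 4K pages, the synthesized cluster buf spans 128KB backed by 32 pages
 * borrowed from the component bufs.  Pages that are already fully valid
 * are swapped for bogus_page below, so the device transfer for those
 * pages lands in a throwaway page instead of clobbering valid cached data.
 */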
static struct buf *
cluster_rbuild(struct vnode *vp, off_t filesize, off_t loffset, off_t doffset,
               int blksize, int run, struct buf *fbp)
{
        struct buf *bp, *tbp;
        off_t boffset;
        int i, j;
        int maxiosize = vmaxiosize(vp);

        /*
         * avoid a division
         */
        while (loffset + run * blksize > filesize) {
                --run;
        }

        tbp = fbp;
        tbp->b_bio2.bio_offset = doffset;
        if ((tbp->b_flags & B_MALLOC) ||
            ((tbp->b_flags & B_VMIO) == 0) || (run <= 1)) {
                return tbp;
        }

        bp = trypbuf_kva(&cluster_pbuf_freecnt);
        if (bp == NULL) {
                return tbp;
        }

        /*
         * We are synthesizing a buffer out of vm_page_t's, but
         * if the block size is not page aligned then the starting
         * address may not be either.  Inherit the b_data offset
         * from the original buffer.
         */
        bp->b_data = (char *)((vm_offset_t)bp->b_data |
            ((vm_offset_t)tbp->b_data & PAGE_MASK));
        bp->b_flags |= B_CLUSTER | B_VMIO;
        bp->b_cmd = BUF_CMD_READ;
        bp->b_bio1.bio_done = cluster_callback;         /* default to async */
        bp->b_bio1.bio_caller_info1.cluster_head = NULL;
        bp->b_bio1.bio_caller_info2.cluster_tail = NULL;
        bp->b_loffset = loffset;
        bp->b_bio2.bio_offset = doffset;
        KASSERT(bp->b_loffset != NOOFFSET,
                ("cluster_rbuild: no buffer offset"));

        bp->b_bcount = 0;
        bp->b_bufsize = 0;
        bp->b_xio.xio_npages = 0;

        for (boffset = doffset, i = 0; i < run; ++i, boffset += blksize) {
                if (i) {
                        if ((bp->b_xio.xio_npages * PAGE_SIZE) +
                            round_page(blksize) > maxiosize) {
                                break;
                        }

                        /*
                         * Shortcut some checks and try to avoid buffers that
                         * would block in the lock.  The same checks have to
                         * be made again after we officially get the buffer.
                         */
                        tbp = getblk(vp, loffset + i * blksize, blksize,
                                     GETBLK_SZMATCH|GETBLK_NOWAIT, 0);
                        if (tbp == NULL)
                                break;
                        for (j = 0; j < tbp->b_xio.xio_npages; j++) {
                                if (tbp->b_xio.xio_pages[j]->valid)
                                        break;
                        }
                        if (j != tbp->b_xio.xio_npages) {
                                bqrelse(tbp);
                                break;
                        }

                        /*
                         * Stop scanning if the buffer is fully valid
                         * (marked B_CACHE), or locked (may be doing a
                         * background write), or if the buffer is not
                         * VMIO backed.  The clustering code can only deal
                         * with VMIO-backed buffers.
                         */
                        if ((tbp->b_flags & (B_CACHE|B_LOCKED)) ||
                            (tbp->b_flags & B_VMIO) == 0 ||
                            (LIST_FIRST(&tbp->b_dep) != NULL &&
                             buf_checkread(tbp))
                        ) {
                                bqrelse(tbp);
                                break;
                        }

                        /*
                         * The buffer must be completely invalid in order to
                         * take part in the cluster.  If it is partially valid
                         * then we stop.
                         */
                        for (j = 0; j < tbp->b_xio.xio_npages; j++) {
                                if (tbp->b_xio.xio_pages[j]->valid)
                                        break;
                        }
                        if (j != tbp->b_xio.xio_npages) {
                                bqrelse(tbp);
                                break;
                        }

                        /*
                         * Set a read-ahead mark as appropriate.  Always
                         * set the read-ahead mark at (run - 1).  It is
                         * unclear why we were also setting it at i == 1.
                         */
                        if (/*i == 1 ||*/ i == (run - 1))
                                cluster_setram(tbp);

                        /*
                         * Depress the priority of buffers not explicitly
                         * requested.
                         */
                        /* tbp->b_flags |= B_AGE; */

                        /*
                         * Set the block number if it isn't set, otherwise
                         * if it is make sure it matches the block number we
                         * expect.
                         */
                        if (tbp->b_bio2.bio_offset == NOOFFSET) {
                                tbp->b_bio2.bio_offset = boffset;
                        } else if (tbp->b_bio2.bio_offset != boffset) {
                                brelse(tbp);
                                break;
                        }
                }

                /*
                 * The passed-in tbp (i == 0) will already be set up for
                 * async or sync operation.  All other tbp's acquired in
                 * our loop are set up for async operation.
                 */
                tbp->b_cmd = BUF_CMD_READ;
                BUF_KERNPROC(tbp);
                cluster_append(&bp->b_bio1, tbp);
                for (j = 0; j < tbp->b_xio.xio_npages; ++j) {
                        vm_page_t m;

                        m = tbp->b_xio.xio_pages[j];
                        vm_page_busy_wait(m, FALSE, "clurpg");
                        vm_page_io_start(m);
                        vm_page_wakeup(m);
                        vm_object_pip_add(m->object, 1);
                        if ((bp->b_xio.xio_npages == 0) ||
                                (bp->b_xio.xio_pages[bp->b_xio.xio_npages-1] != m)) {
                                bp->b_xio.xio_pages[bp->b_xio.xio_npages] = m;
                                bp->b_xio.xio_npages++;
                        }
                        if ((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL)
                                tbp->b_xio.xio_pages[j] = bogus_page;
                }
                /*
                 * XXX shouldn't this be += size for both, like in
                 * cluster_wbuild()?
                 *
                 * Don't inherit tbp->b_bufsize as it may be larger due to
                 * a non-page-aligned size.  Instead just aggregate using
                 * 'size'.
                 */
                if (tbp->b_bcount != blksize)
                    kprintf("warning: tbp->b_bcount wrong %d vs %d\n", tbp->b_bcount, blksize);
                if (tbp->b_bufsize != blksize)
                    kprintf("warning: tbp->b_bufsize wrong %d vs %d\n", tbp->b_bufsize, blksize);
                bp->b_bcount += blksize;
                bp->b_bufsize += blksize;
        }

        /*
         * Fully valid pages in the cluster are already good and do not need
         * to be re-read from disk.  Replace the page with bogus_page.
         */
        for (j = 0; j < bp->b_xio.xio_npages; j++) {
                if ((bp->b_xio.xio_pages[j]->valid & VM_PAGE_BITS_ALL) ==
                    VM_PAGE_BITS_ALL) {
                        bp->b_xio.xio_pages[j] = bogus_page;
                }
        }
        if (bp->b_bufsize > bp->b_kvasize) {
                panic("cluster_rbuild: b_bufsize(%d) > b_kvasize(%d)",
                    bp->b_bufsize, bp->b_kvasize);
        }
        pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
                (vm_page_t *)bp->b_xio.xio_pages, bp->b_xio.xio_npages);
        BUF_KERNPROC(bp);
        return (bp);
}

/*
 * Cleanup after a clustered read or write.
 * This is complicated by the fact that any of the buffers might have
 * extra memory (if there were no empty buffer headers at allocbuf time)
 * that we will need to shift around.
 *
 * The returned bio is &bp->b_bio1
 */
void
cluster_callback(struct bio *bio)
{
        struct buf *bp = bio->bio_buf;
        struct buf *tbp;
        int error = 0;

        /*
         * Must propagate errors to all the components.  A short read (EOF)
         * is a critical error.
         */
        if (bp->b_flags & B_ERROR) {
                error = bp->b_error;
        } else if (bp->b_bcount != bp->b_bufsize) {
                panic("cluster_callback: unexpected EOF on cluster %p!", bio);
        }

        pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_xio.xio_npages);
        /*
         * Move memory from the large cluster buffer into the component
         * buffers and mark IO as done on these.  Since the memory map
         * is the same, no actual copying is required.
         */
        while ((tbp = bio->bio_caller_info1.cluster_head) != NULL) {
                bio->bio_caller_info1.cluster_head = tbp->b_cluster_next;
                if (error) {
                        tbp->b_flags |= B_ERROR | B_IODEBUG;
                        tbp->b_error = error;
                } else {
                        tbp->b_dirtyoff = tbp->b_dirtyend = 0;
                        tbp->b_flags &= ~(B_ERROR|B_INVAL);
                        tbp->b_flags |= B_IODEBUG;
                        /*
                         * XXX the bdwrite()/bqrelse() issued during
                         * cluster building clears B_RELBUF (see bqrelse()
                         * comment).  If direct I/O was specified, we have
                         * to restore it here to allow the buffer and VM
                         * to be freed.
                         */
                        if (tbp->b_flags & B_DIRECT)
                                tbp->b_flags |= B_RELBUF;
                }
                biodone(&tbp->b_bio1);
        }
        relpbuf(bp, &cluster_pbuf_freecnt);
}

/*
 *      cluster_wbuild_wb:
 *
 *      Implement modified write build for cluster.
 *
 *              write_behind = 0        write behind disabled
 *              write_behind = 1        write behind normal (default)
 *              write_behind = 2        write behind backed-off
 */
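
/*
 * Example (illustrative numbers): with write_behind = 2, a request for
 * len = 65536 at start_loffset = 262144 is backed off one window and
 * issued at 196608, leaving the most recently dirtied range for a later,
 * potentially larger, cluster.  Requests with start_loffset < len are
 * skipped entirely.
 */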

static __inline int
cluster_wbuild_wb(struct vnode *vp, int blksize, off_t start_loffset, int len)
{
        int r = 0;

        switch(write_behind) {
        case 2:
                if (start_loffset < len)
                        break;
                start_loffset -= len;
                /* fall through */
        case 1:
                r = cluster_wbuild(vp, NULL, blksize, start_loffset, len);
                break;
        default:
                break;
        }
        return(r);
}

/*
 * Do clustered write for FFS.
 *
 * Four cases:
 *      1. Write is not sequential (write asynchronously)
 *      Write is sequential:
 *      2.      beginning of cluster - begin cluster
 *      3.      middle of a cluster - add to cluster
 *      4.      end of a cluster - asynchronously write cluster
 */
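/*
 * Illustrative walk-through (assumed numbers): for blksize = 8192 and
 * back-to-back writes at offsets 0, 8192, 16384, ..., the first write
 * opens a cluster (v_cstart = 0), the middle writes are simply
 * bdwrite()n into it, and once loffset reaches v_cstart + v_clen the
 * whole run is pushed out via cluster_wbuild_wb() and the window state
 * is reset for the next run.
 */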
void
cluster_write(struct buf *bp, off_t filesize, int blksize, int seqcount)
{
        struct vnode *vp;
        off_t loffset;
        int maxclen, cursize;
        int async;

        vp = bp->b_vp;
        if (vp->v_type == VREG)
                async = vp->v_mount->mnt_flag & MNT_ASYNC;
        else
                async = 0;
        loffset = bp->b_loffset;
        KASSERT(bp->b_loffset != NOOFFSET,
                ("cluster_write: no buffer offset"));

        /* Initialize vnode to beginning of file. */
        if (loffset == 0)
                vp->v_lasta = vp->v_clen = vp->v_cstart = vp->v_lastw = 0;

        if (vp->v_clen == 0 || loffset != vp->v_lastw + blksize ||
            bp->b_bio2.bio_offset == NOOFFSET ||
            (bp->b_bio2.bio_offset != vp->v_lasta + blksize)) {
                maxclen = vmaxiosize(vp);
                if (vp->v_clen != 0) {
                        /*
                         * Next block is not sequential.
                         *
                         * If we are not writing at end of file, the process
                         * seeked to another point in the file since its last
                         * write, or we have reached our maximum cluster size,
                         * then push the previous cluster. Otherwise try
                         * reallocating to make it sequential.
                         *
                         * Change to algorithm: only push previous cluster if
                         * it was sequential from the point of view of the
                         * seqcount heuristic, otherwise leave the buffer
                         * intact so we can potentially optimize the I/O
                         * later on in the buf_daemon or update daemon
                         * flush.
                         */
                        cursize = vp->v_lastw - vp->v_cstart + blksize;
                        if (bp->b_loffset + blksize < filesize ||
                            loffset != vp->v_lastw + blksize || vp->v_clen <= cursize) {
                                if (!async && seqcount > 0) {
                                        cluster_wbuild_wb(vp, blksize,
                                                vp->v_cstart, cursize);
                                }
                        } else {
                                struct buf **bpp, **endbp;
                                struct cluster_save *buflist;

                                buflist = cluster_collectbufs(vp, bp, blksize);
                                endbp = &buflist->bs_children
                                    [buflist->bs_nchildren - 1];
                                if (VOP_REALLOCBLKS(vp, buflist)) {
                                        /*
                                         * Failed, push the previous cluster
                                         * if *really* writing sequentially
                                         * in the logical file (seqcount > 1),
                                         * otherwise delay it in the hopes that
                                         * the low level disk driver can
                                         * optimize the write ordering.
                                         */
                                        for (bpp = buflist->bs_children;
                                             bpp < endbp; bpp++)
                                                brelse(*bpp);
                                        kfree(buflist, M_SEGMENT);
                                        if (seqcount > 1) {
                                                cluster_wbuild_wb(vp,
                                                    blksize, vp->v_cstart,
                                                    cursize);
                                        }
                                } else {
                                        /*
                                         * Succeeded, keep building cluster.
                                         */
                                        for (bpp = buflist->bs_children;
                                             bpp <= endbp; bpp++)
                                                bdwrite(*bpp);
                                        kfree(buflist, M_SEGMENT);
                                        vp->v_lastw = loffset;
                                        vp->v_lasta = bp->b_bio2.bio_offset;
                                        return;
                                }
                        }
                }
                /*
                 * Consider beginning a cluster. If at end of file, make
                 * cluster as large as possible, otherwise find size of
                 * existing cluster.
                 */
                if ((vp->v_type == VREG) &&
                    bp->b_loffset + blksize < filesize &&
                    (bp->b_bio2.bio_offset == NOOFFSET) &&
                    (VOP_BMAP(vp, loffset, &bp->b_bio2.bio_offset, &maxclen, NULL, BUF_CMD_WRITE) ||
                     bp->b_bio2.bio_offset == NOOFFSET)) {
                        bawrite(bp);
                        vp->v_clen = 0;
                        vp->v_lasta = bp->b_bio2.bio_offset;
                        vp->v_cstart = loffset + blksize;
                        vp->v_lastw = loffset;
                        return;
                }
                if (maxclen > blksize)
                        vp->v_clen = maxclen - blksize;
                else
                        vp->v_clen = 0;
                if (!async && vp->v_clen == 0) { /* I/O not contiguous */
                        vp->v_cstart = loffset + blksize;
                        bawrite(bp);
                } else {        /* Wait for rest of cluster */
                        vp->v_cstart = loffset;
                        bdwrite(bp);
                }
        } else if (loffset == vp->v_cstart + vp->v_clen) {
                /*
                 * At end of cluster, write it out if seqcount tells us we
                 * are operating sequentially, otherwise let the buf or
                 * update daemon handle it.
                 */
                bdwrite(bp);
                if (seqcount > 1)
                        cluster_wbuild_wb(vp, blksize, vp->v_cstart,
                                          vp->v_clen + blksize);
                vp->v_clen = 0;
                vp->v_cstart = loffset + blksize;
        } else if (vm_page_count_severe()) {
                /*
                 * We are low on memory, get it going NOW
                 */
                bawrite(bp);
        } else {
                /*
                 * In the middle of a cluster, so just delay the I/O for now.
                 */
                bdwrite(bp);
        }
        vp->v_lastw = loffset;
        vp->v_lasta = bp->b_bio2.bio_offset;
}

/*
 * This is the clustered version of bawrite().  It works similarly to
 * cluster_write() except I/O on the buffer is guaranteed to occur.
 */
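/*
 * Illustrative use (hypothetical caller): a flusher that must push a
 * dirty, clusterable buf right now can do
 *
 *      bytes = cluster_awrite(bp);
 *
 * after which bp has been consumed, either folded into a cluster or
 * written via the bawrite() fallback.  The return value is the total
 * number of bytes of write I/O initiated.
 */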
int
cluster_awrite(struct buf *bp)
{
        int total;

        /*
         * Don't bother if it isn't clusterable.
         */
        if ((bp->b_flags & B_CLUSTEROK) == 0 ||
            bp->b_vp == NULL ||
            (bp->b_vp->v_flag & VOBJBUF) == 0) {
                total = bp->b_bufsize;
                bawrite(bp);
                return (total);
        }

        total = cluster_wbuild(bp->b_vp, &bp, bp->b_bufsize,
                               bp->b_loffset, vmaxiosize(bp->b_vp));
        if (bp)
                bawrite(bp);

        return total;
}

/*
 * This is an awful lot like cluster_rbuild...wish they could be combined.
 * Scan the range [start_loffset, start_loffset + bytes) for dirty,
 * clusterable buffers and write them out in device-contiguous clusters.
 *
 * cluster_wbuild() normally does not guarantee anything.  If bpp is
 * non-NULL and cluster_wbuild() is able to incorporate it into the
 * I/O it will set *bpp to NULL, otherwise it will leave it alone and
 * the caller must dispose of *bpp.
 */
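/*
 * Example of the *bpp contract (illustrative): cluster_awrite() above
 * passes &bp with a size-limited byte count; if the scan absorbs bp
 * into a cluster, *bpp is set to NULL and the caller must not touch it
 * again, otherwise the caller still owns the buffer and finishes it
 * with bawrite().
 */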
static int
cluster_wbuild(struct vnode *vp, struct buf **bpp,
               int blksize, off_t start_loffset, int bytes)
{
        struct buf *bp, *tbp;
        int i, j;
        int totalwritten = 0;
        int must_initiate;
        int maxiosize = vmaxiosize(vp);

        while (bytes > 0) {
                /*
                 * If the buffer matches the passed locked & removed buffer
                 * we use the passed buffer (which might not be B_DELWRI).
                 *
                 * Otherwise locate the buffer and determine if it is
                 * compatible.
                 */
                if (bpp && (*bpp)->b_loffset == start_loffset) {
                        tbp = *bpp;
                        *bpp = NULL;
                        bpp = NULL;
                } else {
                        tbp = findblk(vp, start_loffset, FINDBLK_NBLOCK);
                        if (tbp == NULL ||
                            (tbp->b_flags & (B_LOCKED | B_INVAL | B_DELWRI)) !=
                             B_DELWRI ||
                            (LIST_FIRST(&tbp->b_dep) && buf_checkwrite(tbp))) {
                                if (tbp)
                                        BUF_UNLOCK(tbp);
                                start_loffset += blksize;
                                bytes -= blksize;
                                continue;
                        }
                        bremfree(tbp);
                }
                KKASSERT(tbp->b_cmd == BUF_CMD_DONE);

                /*
                 * Extra memory in the buffer, punt on this buffer.
                 * XXX we could handle this in most cases, but we would
                 * have to push the extra memory down to after our max
                 * possible cluster size and then potentially pull it back
                 * up if the cluster was terminated prematurely--too much
                 * hassle.
                 */
                if (((tbp->b_flags & (B_CLUSTEROK|B_MALLOC)) != B_CLUSTEROK) ||
                    (tbp->b_bcount != tbp->b_bufsize) ||
                    (tbp->b_bcount != blksize) ||
                    (bytes == blksize) ||
                    ((bp = getpbuf_kva(&cluster_pbuf_freecnt)) == NULL)) {
                        totalwritten += tbp->b_bufsize;
                        bawrite(tbp);
                        start_loffset += blksize;
                        bytes -= blksize;
                        continue;
                }

                /*
                 * Set up the pbuf.  Track our append point with b_bcount
                 * and b_bufsize.  b_bufsize is not used by the device but
                 * our caller uses it to loop clusters and we use it to
                 * detect a premature EOF on the block device.
                 */
                bp->b_bcount = 0;
                bp->b_bufsize = 0;
                bp->b_xio.xio_npages = 0;
                bp->b_loffset = tbp->b_loffset;
                bp->b_bio2.bio_offset = tbp->b_bio2.bio_offset;

                /*
                 * We are synthesizing a buffer out of vm_page_t's, but
                 * if the block size is not page aligned then the starting
                 * address may not be either.  Inherit the b_data offset
                 * from the original buffer.
                 */
                bp->b_data = (char *)((vm_offset_t)bp->b_data |
                    ((vm_offset_t)tbp->b_data & PAGE_MASK));
                bp->b_flags &= ~B_ERROR;
                bp->b_flags |= B_CLUSTER | B_BNOCLIP |
                        (tbp->b_flags & (B_VMIO | B_NEEDCOMMIT));
                bp->b_bio1.bio_caller_info1.cluster_head = NULL;
                bp->b_bio1.bio_caller_info2.cluster_tail = NULL;

                /*
                 * From this location in the file, scan forward to see
                 * if there are buffers with adjacent data that need to
                 * be written as well.
                 *
                 * IO *must* be initiated on index 0 at this point
                 * (particularly when called from cluster_awrite()).
                 */
                for (i = 0; i < bytes; (i += blksize), (start_loffset += blksize)) {
                        if (i == 0) {
                                must_initiate = 1;
                        } else {
                                /*
                                 * Not first buffer.
                                 */
                                must_initiate = 0;
                                tbp = findblk(vp, start_loffset,
                                              FINDBLK_NBLOCK);
                                /*
                                 * Buffer not found or could not be locked
                                 * non-blocking.
                                 */
                                if (tbp == NULL)
                                        break;

                                /*
                                 * If it IS in core, but has different
                                 * characteristics, then don't cluster
                                 * with it.
                                 */
                                if ((tbp->b_flags & (B_VMIO | B_CLUSTEROK |
                                     B_INVAL | B_DELWRI | B_NEEDCOMMIT))
                                    != (B_DELWRI | B_CLUSTEROK |
                                     (bp->b_flags & (B_VMIO | B_NEEDCOMMIT))) ||
                                    (tbp->b_flags & B_LOCKED)
                                ) {
                                        BUF_UNLOCK(tbp);
                                        break;
                                }

                                /*
                                 * Check that the combined cluster
                                 * would make sense with regard to pages
                                 * and would not be too large
                                 *
                                 * WARNING! buf_checkwrite() must be the last
                                 *          check made.  If it returns 0 then
                                 *          we must initiate the I/O.
                                 */
                                if ((tbp->b_bcount != blksize) ||
                                  ((bp->b_bio2.bio_offset + i) !=
                                    tbp->b_bio2.bio_offset) ||
                                  ((tbp->b_xio.xio_npages + bp->b_xio.xio_npages) >
                                    (maxiosize / PAGE_SIZE)) ||
                                  (LIST_FIRST(&tbp->b_dep) &&
                                   buf_checkwrite(tbp))
                                ) {
                                        BUF_UNLOCK(tbp);
                                        break;
                                }
                                if (LIST_FIRST(&tbp->b_dep))
                                        must_initiate = 1;
                                /*
                                 * Ok, it's passed all the tests,
                                 * so remove it from the free list
                                 * and mark it busy. We will use it.
                                 */
                                bremfree(tbp);
                                KKASSERT(tbp->b_cmd == BUF_CMD_DONE);
                        }

                        /*
                         * If the IO is via the VM then we do some
                         * special VM hackery (yuck).  Since the buffer's
                         * block size may not be page-aligned it is possible
                         * for a page to be shared between two buffers.  We
                         * have to get rid of the duplication when building
                         * the cluster.
                         */
                        if (tbp->b_flags & B_VMIO) {
                                vm_page_t m;

                                /*
                                 * Try to avoid deadlocks with the VM system.
                                 * However, we cannot abort the I/O if
                                 * must_initiate is non-zero.
                                 */
                                if (must_initiate == 0) {
                                        for (j = 0;
                                             j < tbp->b_xio.xio_npages;
                                             ++j) {
                                                m = tbp->b_xio.xio_pages[j];
                                                if (m->flags & PG_BUSY) {
                                                        bqrelse(tbp);
                                                        goto finishcluster;
                                                }
                                        }
                                }

                                for (j = 0; j < tbp->b_xio.xio_npages; ++j) {
                                        m = tbp->b_xio.xio_pages[j];
                                        vm_page_busy_wait(m, FALSE, "clurpg");
                                        vm_page_io_start(m);
                                        vm_page_wakeup(m);
                                        vm_object_pip_add(m->object, 1);
                                        if ((bp->b_xio.xio_npages == 0) ||
                                          (bp->b_xio.xio_pages[bp->b_xio.xio_npages - 1] != m)) {
                                                bp->b_xio.xio_pages[bp->b_xio.xio_npages] = m;
                                                bp->b_xio.xio_npages++;
                                        }
                                }
                        }
                        bp->b_bcount += blksize;
                        bp->b_bufsize += blksize;

                        bundirty(tbp);
                        tbp->b_flags &= ~B_ERROR;
                        tbp->b_cmd = BUF_CMD_WRITE;
                        BUF_KERNPROC(tbp);
                        cluster_append(&bp->b_bio1, tbp);

                        /*
                         * check for latent dependencies to be handled
                         */
                        if (LIST_FIRST(&tbp->b_dep) != NULL)
                                buf_start(tbp);
                }
        finishcluster:
                pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
                            (vm_page_t *)bp->b_xio.xio_pages,
                            bp->b_xio.xio_npages);
                if (bp->b_bufsize > bp->b_kvasize) {
                        panic("cluster_wbuild: b_bufsize(%d) "
                              "> b_kvasize(%d)\n",
                              bp->b_bufsize, bp->b_kvasize);
                }
                totalwritten += bp->b_bufsize;
                bp->b_dirtyoff = 0;
                bp->b_dirtyend = bp->b_bufsize;
                bp->b_bio1.bio_done = cluster_callback;
                bp->b_cmd = BUF_CMD_WRITE;

                vfs_busy_pages(vp, bp);
                bsetrunningbufspace(bp, bp->b_bufsize);
                BUF_KERNPROC(bp);
                vn_strategy(vp, &bp->b_bio1);

                bytes -= i;
        }
        return totalwritten;
}

/*
 * Collect together all the buffers in a cluster.
 * Plus add one additional buffer.
 */
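/*
 * Worked example (illustrative numbers): with blksize = 8192,
 * v_cstart = 0 and v_lastw = 24576, len = (24576 - 0 + 8192) / 8192 = 4,
 * so four buffers are bread() back in and last_bp is appended, giving
 * bs_nchildren = 5.
 */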
static struct cluster_save *
cluster_collectbufs(struct vnode *vp, struct buf *last_bp, int blksize)
{
        struct cluster_save *buflist;
        struct buf *bp;
        off_t loffset;
        int i, len;

        len = (int)(vp->v_lastw - vp->v_cstart + blksize) / blksize;
        buflist = kmalloc(sizeof(struct buf *) * (len + 1) + sizeof(*buflist),
                         M_SEGMENT, M_WAITOK);
        buflist->bs_nchildren = 0;
        buflist->bs_children = (struct buf **) (buflist + 1);
        for (loffset = vp->v_cstart, i = 0; i < len; (loffset += blksize), i++) {
                (void) bread(vp, loffset, last_bp->b_bcount, &bp);
                buflist->bs_children[i] = bp;
                if (bp->b_bio2.bio_offset == NOOFFSET) {
                        VOP_BMAP(bp->b_vp, bp->b_loffset,
                                 &bp->b_bio2.bio_offset,
                                 NULL, NULL, BUF_CMD_WRITE);
                }
        }
        buflist->bs_children[i] = bp = last_bp;
        if (bp->b_bio2.bio_offset == NOOFFSET) {
                VOP_BMAP(bp->b_vp, bp->b_loffset, &bp->b_bio2.bio_offset,
                         NULL, NULL, BUF_CMD_WRITE);
        }
        buflist->bs_nchildren = i + 1;
        return (buflist);
}

void
cluster_append(struct bio *bio, struct buf *tbp)
{
        tbp->b_cluster_next = NULL;
        if (bio->bio_caller_info1.cluster_head == NULL) {
                bio->bio_caller_info1.cluster_head = tbp;
                bio->bio_caller_info2.cluster_tail = tbp;
        } else {
                bio->bio_caller_info2.cluster_tail->b_cluster_next = tbp;
                bio->bio_caller_info2.cluster_tail = tbp;
        }
}

static
void
cluster_setram (struct buf *bp)
{
        bp->b_flags |= B_RAM;
        if (bp->b_xio.xio_npages)
                vm_page_flag_set(bp->b_xio.xio_pages[0], PG_RAM);
}