1 /*-
2  * Copyright (c) 1993
3  *      The Regents of the University of California.  All rights reserved.
4  * Modifications/enhancements:
5  *      Copyright (c) 1995 John S. Dyson.  All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. All advertising materials mentioning features or use of this software
16  *    must display the following acknowledgement:
17  *      This product includes software developed by the University of
18  *      California, Berkeley and its contributors.
19  * 4. Neither the name of the University nor the names of its contributors
20  *    may be used to endorse or promote products derived from this software
21  *    without specific prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
24  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
27  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33  * SUCH DAMAGE.
34  *
35  *      @(#)vfs_cluster.c       8.7 (Berkeley) 2/13/94
36  * $FreeBSD: src/sys/kern/vfs_cluster.c,v 1.92.2.9 2001/11/18 07:10:59 dillon Exp $
37  * $DragonFly: src/sys/kern/vfs_cluster.c,v 1.40 2008/07/14 03:09:00 dillon Exp $
38  */
39
40 #include "opt_debug_cluster.h"
41
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/kernel.h>
45 #include <sys/proc.h>
46 #include <sys/buf.h>
47 #include <sys/vnode.h>
48 #include <sys/malloc.h>
49 #include <sys/mount.h>
50 #include <sys/resourcevar.h>
51 #include <sys/vmmeter.h>
52 #include <vm/vm.h>
53 #include <vm/vm_object.h>
54 #include <vm/vm_page.h>
55 #include <sys/sysctl.h>
56 #include <sys/buf2.h>
57 #include <vm/vm_page2.h>
58
59 #include <machine/limits.h>
60
61 #if defined(CLUSTERDEBUG)
62 #include <sys/sysctl.h>
63 static int      rcluster= 0;
64 SYSCTL_INT(_debug, OID_AUTO, rcluster, CTLFLAG_RW, &rcluster, 0, "");
65 #endif
66
67 static MALLOC_DEFINE(M_SEGMENT, "cluster_save", "cluster_save buffer");
68
69 static struct cluster_save *
70         cluster_collectbufs (struct vnode *vp, struct buf *last_bp,
71                             int blksize);
72 static struct buf *
73         cluster_rbuild (struct vnode *vp, off_t filesize, off_t loffset,
74                             off_t doffset, int blksize, int run, 
75                             struct buf *fbp);
76 static void cluster_callback (struct bio *);
77 static void cluster_setram (struct buf *);
78
79 static int write_behind = 1;
80 SYSCTL_INT(_vfs, OID_AUTO, write_behind, CTLFLAG_RW, &write_behind, 0, "");
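/*
 * vfs.max_readahead caps, in bytes, the amount of read-ahead that
 * cluster_read() will issue for a sequential stream (maxreq is clamped
 * to this value below).
 */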
81 static int max_readahead = 2 * 1024 * 1024;
82 SYSCTL_INT(_vfs, OID_AUTO, max_readahead, CTLFLAG_RW, &max_readahead, 0, "");
83
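/*
 * bogus_page is substituted into a cluster's page array for pages that
 * are already fully valid, so the device I/O does not clobber them.  The
 * real pages are reinstalled by the normal buffer completion path
 * (vfs_unbusy_pages) when the component buffers finish.
 */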
84 extern vm_page_t        bogus_page;
85
86 extern int cluster_pbuf_freecnt;
87
88 /*
89  * This replaces bread.
90  *
91  * filesize     - read-ahead @ blksize will not cross this boundary
92  * loffset      - loffset for returned *bpp
93  * blksize      - blocksize for returned *bpp and read-ahead bps
94  * minreq       - minimum (not a hard minimum) in bytes, typically reflects
95  *                a higher level uio resid.
96  * maxreq       - maximum (sequential heuristic) in bytes (typically ~2MB max)
97  * bpp          - return buffer (*bpp) for (loffset,blksize)
98  */
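/*
 * Illustrative call (hypothetical, not taken from this file): a
 * filesystem read path might issue
 *
 *      error = cluster_read(vp, file_size, base_loffset, blksize,
 *                           uio->uio_resid, seqcount * MAXBSIZE, &bp);
 *
 * where base_loffset is the uio offset rounded down to a blksize
 * boundary and seqcount reflects the VFS sequential-access heuristic.
 */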
99 int
100 cluster_read(struct vnode *vp, off_t filesize, off_t loffset, 
101              int blksize, size_t minreq, size_t maxreq, struct buf **bpp)
102 {
103         struct buf *bp, *rbp, *reqbp;
104         off_t origoffset;
105         off_t doffset;
106         int error;
107         int i;
108         int maxra;
109         int maxrbuild;
110
111         error = 0;
112
113         /*
114          * Calculate the desired read-ahead in blksize'd blocks (maxra).
115          * To do this we calculate maxreq.
116          *
117          * maxreq typically starts out as a sequential heuristic.  If the
118          * high level uio/resid is bigger (minreq), we pop maxreq up to
119  * minreq.  This covers the case where userland is performing random
120  * I/O using large read()'s.
121          *
122          * Then we limit maxreq to max_readahead to ensure it is a reasonable
123          * value.
124          *
125          * Finally we must ensure that (loffset + maxreq) does not cross the
126          * boundary (filesize) for the current blocksize.  If we allowed it
127          * to cross we could end up with buffers past the boundary with the
128          * wrong block size (HAMMER large-data areas use mixed block sizes).
129          * minreq is also absolutely limited to filesize.
130          */
131         if (maxreq < minreq)
132                 maxreq = minreq;
133         /* minreq not used beyond this point */
134
135         if (maxreq > max_readahead) {
136                 maxreq = max_readahead;
137                 if (maxreq > 16 * 1024 * 1024)
138                         maxreq = 16 * 1024 * 1024;
139         }
140         if (maxreq < blksize)
141                 maxreq = blksize;
142         if (loffset + maxreq > filesize) {
143                 if (loffset > filesize)
144                         maxreq = 0;
145                 else
146                         maxreq = filesize - loffset;
147         }
148
149         maxra = (int)(maxreq / blksize);
150
151         /*
152          * Get the requested block.
153          */
154         *bpp = reqbp = bp = getblk(vp, loffset, blksize, 0, 0);
155         origoffset = loffset;
156
157         /*
158          * Calculate the maximum cluster size for a single I/O, used
159          * by cluster_rbuild().
160          */
161         maxrbuild = vmaxiosize(vp) / blksize;
162
163         /*
164          * if it is in the cache, then check to see if the reads have been
165          * sequential.  If they have, then try some read-ahead, otherwise
166          * back-off on prospective read-aheads.
167          */
168         if (bp->b_flags & B_CACHE) {
169                 /*
170                  * Not sequential, do not do any read-ahead
171                  */
172                 if (maxra <= 1)
173                         return 0;
174
175                 /*
176                  * No read-ahead mark, do not do any read-ahead
177                  * yet.
178                  */
179                 if ((bp->b_flags & B_RAM) == 0)
180                         return 0;
181
182                 /*
183                  * We hit a read-ahead-mark, figure out how much read-ahead
184                  * to do (maxra) and where to start (loffset).
185                  *
186                  * Shortcut the scan.  Typically the way this works is that
187  * we've built up all the blocks in between except for the
188                  * last in previous iterations, so if the second-to-last
189                  * block is present we just skip ahead to it.
190                  *
191                  * This algorithm has O(1) cpu in the steady state no
192                  * matter how large maxra is.
193                  */
194                 bp->b_flags &= ~B_RAM;
195
196                 if (findblk(vp, loffset + (maxra - 2) * blksize, FINDBLK_TEST))
197                         i = maxra - 1;
198                 else
199                         i = 1;
200                 while (i < maxra) {
201                         if (findblk(vp, loffset + i * blksize,
202                                     FINDBLK_TEST) == NULL) {
203                                 break;
204                         }
205                         ++i;
206                 }
207
208                 /*
209                  * We got everything or everything is in the cache, no
210                  * point continuing.
211                  */
212                 if (i >= maxra)
213                         return 0;
214                 maxra -= i;
215                 loffset += i * blksize;
216                 reqbp = bp = NULL;
217         } else {
218                 __debugvar off_t firstread = bp->b_loffset;
219                 int nblks;
220
221                 /*
222                  * Set-up synchronous read for bp.
223                  */
224                 bp->b_cmd = BUF_CMD_READ;
225                 bp->b_bio1.bio_done = biodone_sync;
226                 bp->b_bio1.bio_flags |= BIO_SYNC;
227
228                 KASSERT(firstread != NOOFFSET, 
229                         ("cluster_read: no buffer offset"));
230
231                 /*
232                  * nblks is our cluster_rbuild request size, limited
233                  * primarily by the device.
234                  */
235                 if ((nblks = maxra) > maxrbuild)
236                         nblks = maxrbuild;
237
238                 if (nblks > 1) {
239                         int burstbytes;
240
241                         error = VOP_BMAP(vp, loffset, &doffset,
242                                          &burstbytes, NULL, BUF_CMD_READ);
243                         if (error)
244                                 goto single_block_read;
245                         if (nblks > burstbytes / blksize)
246                                 nblks = burstbytes / blksize;
247                         if (doffset == NOOFFSET)
248                                 goto single_block_read;
249                         if (nblks <= 1)
250                                 goto single_block_read;
251
252                         bp = cluster_rbuild(vp, filesize, loffset,
253                                             doffset, blksize, nblks, bp);
254                         loffset += bp->b_bufsize;
255                         maxra -= bp->b_bufsize / blksize;
256                 } else {
257 single_block_read:
258                         /*
259                          * If it isn't in the cache, then get a chunk from
260                          * disk if sequential, otherwise just get the block.
261                          */
262                         cluster_setram(bp);
263                         loffset += blksize;
264                         --maxra;
265                 }
266         }
267
268         /*
269          * If B_CACHE was not set issue bp.  bp will either be an
270          * asynchronous cluster buf or a synchronous single-buf.
271          * If it is a single buf it will be the same as reqbp.
272          *
273          * NOTE: Once an async cluster buf is issued bp becomes invalid.
274          */
275         if (bp) {
276 #if defined(CLUSTERDEBUG)
277                 if (rcluster)
278                         kprintf("S(%012jx,%d,%d)\n",
279                             (intmax_t)bp->b_loffset, bp->b_bcount, maxra);
280 #endif
281                 if ((bp->b_flags & B_CLUSTER) == 0)
282                         vfs_busy_pages(vp, bp);
283                 bp->b_flags &= ~(B_ERROR|B_INVAL);
284                 vn_strategy(vp, &bp->b_bio1);
285                 error = 0;
286                 /* bp invalid now */
287         }
288
289         /*
290          * If we have been doing sequential I/O, then do some read-ahead.
291          * The code above us should have positioned us at the next likely
292          * offset.
293          *
294          * Only mess with buffers which we can immediately lock.  HAMMER
295          * will do device-readahead irrespective of what the blocks
296          * represent.
297          */
298         while (error == 0 && maxra > 0) {
299                 int burstbytes;
300                 int tmp_error;
301                 int nblks;
302
303                 rbp = getblk(vp, loffset, blksize,
304                              GETBLK_SZMATCH|GETBLK_NOWAIT, 0);
305                 if (rbp == NULL)
306                         goto no_read_ahead;
307                 if ((rbp->b_flags & B_CACHE)) {
308                         bqrelse(rbp);
309                         goto no_read_ahead;
310                 }
311
312                 /*
313                  * An error from the read-ahead bmap has nothing to do
314                  * with the caller's original request.
315                  */
316                 tmp_error = VOP_BMAP(vp, loffset, &doffset,
317                                      &burstbytes, NULL, BUF_CMD_READ);
318                 if (tmp_error || doffset == NOOFFSET) {
319                         rbp->b_flags |= B_INVAL;
320                         brelse(rbp);
321                         rbp = NULL;
322                         goto no_read_ahead;
323                 }
324                 if ((nblks = maxra) > maxrbuild)
325                         nblks = maxrbuild;
326                 if (nblks > burstbytes / blksize)
327                         nblks = burstbytes / blksize;
328
329                 /*
330                  * rbp: async read
331                  */
332                 rbp->b_cmd = BUF_CMD_READ;
333                 /* rbp->b_flags |= B_AGE; */
334                 cluster_setram(rbp);
335
336                 if (nblks > 1) {
337                         rbp = cluster_rbuild(vp, filesize, loffset,
338                                              doffset, blksize, 
339                                              nblks, rbp);
340                 } else {
341                         rbp->b_bio2.bio_offset = doffset;
342                 }
343
344 #if defined(CLUSTERDEBUG)
345                 if (rcluster) {
346                         if (bp) {
347                                 kprintf("A+(%012jx,%d,%jd) "
348                                         "doff=%012jx minr=%zd ra=%d\n",
349                                     (intmax_t)loffset, rbp->b_bcount,
350                                     (intmax_t)(loffset - origoffset),
351                                     (intmax_t)doffset, minreq, maxra);
352                         } else {
353                                 kprintf("A-(%012jx,%d,%jd) "
354                                         "doff=%012jx minr=%zd ra=%d\n",
355                                     (intmax_t)rbp->b_loffset, rbp->b_bcount,
356                                     (intmax_t)(loffset - origoffset),
357                                     (intmax_t)doffset, minreq, maxra);
358                         }
359                 }
360 #endif
361                 rbp->b_flags &= ~(B_ERROR|B_INVAL);
362
363                 if ((rbp->b_flags & B_CLUSTER) == 0)
364                         vfs_busy_pages(vp, rbp);
365                 BUF_KERNPROC(rbp);
366                 loffset += rbp->b_bufsize;
367                 maxra -= rbp->b_bufsize / blksize;
368                 vn_strategy(vp, &rbp->b_bio1);
369                 /* rbp invalid now */
370         }
371
372         /*
373          * Wait for our original buffer to complete its I/O.  reqbp will
374          * be NULL if the original buffer was B_CACHE.  We are returning
375          * (*bpp) which is the same as reqbp when reqbp != NULL.
376          */
377 no_read_ahead:
378         if (reqbp) {
379                 KKASSERT(reqbp->b_bio1.bio_flags & BIO_SYNC);
380                 error = biowait(&reqbp->b_bio1, "clurd");
381         }
382         return (error);
383 }
384
385 /*
386  * If blocks are contiguous on disk, use this to provide clustered
387  * read ahead.  We will read as many blocks as possible sequentially
388  * and then parcel them up into logical blocks in the buffer hash table.
389  *
390  * This function either returns a cluster buf or it returns fbp.  fbp is
391  * already expected to be set up as a synchronous or asynchronous request.
392  *
393  * If a cluster buf is returned it will always be async.
394  */
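/*
 * Implementation note: the cluster buf is a pbuf whose page array is
 * assembled from the component buffers' pages.  pmap_qenter() maps the
 * pages contiguously into the pbuf's KVA so the entire run can be read
 * with a single device I/O, and cluster_callback() later distributes
 * the completion status back to the component bufs.
 */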
395 static struct buf *
396 cluster_rbuild(struct vnode *vp, off_t filesize, off_t loffset, off_t doffset,
397                int blksize, int run, struct buf *fbp)
398 {
399         struct buf *bp, *tbp;
400         off_t boffset;
401         int i, j;
402         int maxiosize = vmaxiosize(vp);
403
404         /*
405          * avoid a division
406          */
407         while (loffset + run * blksize > filesize) {
408                 --run;
409         }
410
411         tbp = fbp;
412         tbp->b_bio2.bio_offset = doffset;
413         if ((tbp->b_flags & B_MALLOC) ||
414             ((tbp->b_flags & B_VMIO) == 0) || (run <= 1)) {
415                 return tbp;
416         }
417
418         bp = trypbuf_kva(&cluster_pbuf_freecnt);
419         if (bp == NULL) {
420                 return tbp;
421         }
422
423         /*
424          * We are synthesizing a buffer out of vm_page_t's, but
425          * if the block size is not page aligned then the starting
426          * address may not be either.  Inherit the b_data offset
427          * from the original buffer.
428          */
429         bp->b_data = (char *)((vm_offset_t)bp->b_data |
430             ((vm_offset_t)tbp->b_data & PAGE_MASK));
431         bp->b_flags |= B_CLUSTER | B_VMIO;
432         bp->b_cmd = BUF_CMD_READ;
433         bp->b_bio1.bio_done = cluster_callback;         /* default to async */
434         bp->b_bio1.bio_caller_info1.cluster_head = NULL;
435         bp->b_bio1.bio_caller_info2.cluster_tail = NULL;
436         bp->b_loffset = loffset;
437         bp->b_bio2.bio_offset = doffset;
438         KASSERT(bp->b_loffset != NOOFFSET,
439                 ("cluster_rbuild: no buffer offset"));
440
441         bp->b_bcount = 0;
442         bp->b_bufsize = 0;
443         bp->b_xio.xio_npages = 0;
444
445         for (boffset = doffset, i = 0; i < run; ++i, boffset += blksize) {
446                 if (i) {
447                         if ((bp->b_xio.xio_npages * PAGE_SIZE) +
448                             round_page(blksize) > maxiosize) {
449                                 break;
450                         }
451
452                         /*
453                          * Shortcut some checks and try to avoid buffers that
454                          * would block in the lock.  The same checks have to
455                          * be made again after we officially get the buffer.
456                          */
457                         tbp = getblk(vp, loffset + i * blksize, blksize,
458                                      GETBLK_SZMATCH|GETBLK_NOWAIT, 0);
459                         if (tbp == NULL)
460                                 break;
461                         for (j = 0; j < tbp->b_xio.xio_npages; j++) {
462                                 if (tbp->b_xio.xio_pages[j]->valid)
463                                         break;
464                         }
465                         if (j != tbp->b_xio.xio_npages) {
466                                 bqrelse(tbp);
467                                 break;
468                         }
469
470                         /*
471                          * Stop scanning if the buffer is fully valid
472                          * (marked B_CACHE), or locked (may be doing a
473                          * background write), or if the buffer is not
474                          * VMIO backed.  The clustering code can only deal
475                          * with VMIO-backed buffers.
476                          */
477                         if ((tbp->b_flags & (B_CACHE|B_LOCKED)) ||
478                             (tbp->b_flags & B_VMIO) == 0 ||
479                             (LIST_FIRST(&tbp->b_dep) != NULL &&
480                              buf_checkread(tbp))
481                         ) {
482                                 bqrelse(tbp);
483                                 break;
484                         }
485
486                         /*
487                          * The buffer must be completely invalid in order to
488                          * take part in the cluster.  If it is partially valid
489                          * then we stop.
490                          */
491                         for (j = 0; j < tbp->b_xio.xio_npages; j++) {
492                                 if (tbp->b_xio.xio_pages[j]->valid)
493                                         break;
494                         }
495                         if (j != tbp->b_xio.xio_npages) {
496                                 bqrelse(tbp);
497                                 break;
498                         }
499
500                         /*
501                          * Set a read-ahead mark as appropriate
502                          */
503                         if (i == 1 || i == (run - 1))
504                                 cluster_setram(tbp);
505
506                         /*
507                          * Depress the priority of buffers not explicitly
508                          * requested.
509                          */
510                         /* tbp->b_flags |= B_AGE; */
511
512                         /*
513                          * Set the block number if it isn't set, otherwise
514                          * if it is make sure it matches the block number we
515                          * expect.
516                          */
517                         if (tbp->b_bio2.bio_offset == NOOFFSET) {
518                                 tbp->b_bio2.bio_offset = boffset;
519                         } else if (tbp->b_bio2.bio_offset != boffset) {
520                                 brelse(tbp);
521                                 break;
522                         }
523                 }
524
525                 /*
526                  * The passed-in tbp (i == 0) will already be set up for
527                  * async or sync operation.  All other tbp's acquired in
528                  * our loop are set up for async operation.
529                  */
530                 tbp->b_cmd = BUF_CMD_READ;
531                 BUF_KERNPROC(tbp);
532                 cluster_append(&bp->b_bio1, tbp);
533                 for (j = 0; j < tbp->b_xio.xio_npages; ++j) {
534                         vm_page_t m;
535                         m = tbp->b_xio.xio_pages[j];
536                         vm_page_io_start(m);
537                         vm_object_pip_add(m->object, 1);
538                         if ((bp->b_xio.xio_npages == 0) ||
539                                 (bp->b_xio.xio_pages[bp->b_xio.xio_npages-1] != m)) {
540                                 bp->b_xio.xio_pages[bp->b_xio.xio_npages] = m;
541                                 bp->b_xio.xio_npages++;
542                         }
543                         if ((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL)
544                                 tbp->b_xio.xio_pages[j] = bogus_page;
545                 }
546                 /*
547                  * Aggregate b_bcount and b_bufsize using blksize, as
548                  * cluster_wbuild() does.
549                  *
550                  * Don't inherit tbp->b_bufsize as it may be larger due to
551                  * a non-page-aligned size; blksize is what we actually
552                  * cover here.
553                  */
554                 if (tbp->b_bcount != blksize)
555                     kprintf("warning: tbp->b_bcount wrong %d vs %d\n", tbp->b_bcount, blksize);
556                 if (tbp->b_bufsize != blksize)
557                     kprintf("warning: tbp->b_bufsize wrong %d vs %d\n", tbp->b_bufsize, blksize);
558                 bp->b_bcount += blksize;
559                 bp->b_bufsize += blksize;
560         }
561
562         /*
563          * Fully valid pages in the cluster are already good and do not need
564          * to be re-read from disk.  Replace the page with bogus_page
565          */
566         for (j = 0; j < bp->b_xio.xio_npages; j++) {
567                 if ((bp->b_xio.xio_pages[j]->valid & VM_PAGE_BITS_ALL) ==
568                     VM_PAGE_BITS_ALL) {
569                         bp->b_xio.xio_pages[j] = bogus_page;
570                 }
571         }
572         if (bp->b_bufsize > bp->b_kvasize) {
573                 panic("cluster_rbuild: b_bufsize(%d) > b_kvasize(%d)",
574                     bp->b_bufsize, bp->b_kvasize);
575         }
576         pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
577                 (vm_page_t *)bp->b_xio.xio_pages, bp->b_xio.xio_npages);
578         BUF_KERNPROC(bp);
579         return (bp);
580 }
581
582 /*
583  * Cleanup after a clustered read or write.
584  * This is complicated by the fact that any of the buffers might have
585  * extra memory (if there were no empty buffer headers at allocbuf time)
586  * that we will need to shift around.
587  *
588  * The returned bio is &bp->b_bio1
589  */
590 void
591 cluster_callback(struct bio *bio)
592 {
593         struct buf *bp = bio->bio_buf;
594         struct buf *tbp;
595         int error = 0;
596
597         /*
598          * Must propagate errors to all the components.  A short read (EOF)
599          * is a critical error.
600          */
601         if (bp->b_flags & B_ERROR) {
602                 error = bp->b_error;
603         } else if (bp->b_bcount != bp->b_bufsize) {
604                 panic("cluster_callback: unexpected EOF on cluster %p!", bio);
605         }
606
607         pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_xio.xio_npages);
608         /*
609          * Move memory from the large cluster buffer into the component
610          * buffers and mark IO as done on these.  Since the memory map
611          * is the same, no actual copying is required.
612          */
613         while ((tbp = bio->bio_caller_info1.cluster_head) != NULL) {
614                 bio->bio_caller_info1.cluster_head = tbp->b_cluster_next;
615                 if (error) {
616                         tbp->b_flags |= B_ERROR | B_IODEBUG;
617                         tbp->b_error = error;
618                 } else {
619                         tbp->b_dirtyoff = tbp->b_dirtyend = 0;
620                         tbp->b_flags &= ~(B_ERROR|B_INVAL);
621                         tbp->b_flags |= B_IODEBUG;
622                         /*
623                          * XXX the bdwrite()/bqrelse() issued during
624                          * cluster building clears B_RELBUF (see bqrelse()
625                          * comment).  If direct I/O was specified, we have
626                          * to restore it here to allow the buffer and VM
627                          * to be freed.
628                          */
629                         if (tbp->b_flags & B_DIRECT)
630                                 tbp->b_flags |= B_RELBUF;
631                 }
632                 biodone(&tbp->b_bio1);
633         }
634         relpbuf(bp, &cluster_pbuf_freecnt);
635 }
636
637 /*
638  *      cluster_wbuild_wb:
639  *
640  *      Implement modified write build for cluster.
641  *
642  *              write_behind = 0        write behind disabled
643  *              write_behind = 1        write behind normal (default)
644  *              write_behind = 2        write behind backed-off
645  */
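/*
 * With write_behind == 2 the flush trails the caller by one cluster
 * length: the cluster starting at (start_loffset - len) is written
 * instead, and nothing is written until at least one full cluster
 * lies behind the requested offset.
 */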
646
647 static __inline int
648 cluster_wbuild_wb(struct vnode *vp, int blksize, off_t start_loffset, int len)
649 {
650         int r = 0;
651
652         switch(write_behind) {
653         case 2:
654                 if (start_loffset < len)
655                         break;
656                 start_loffset -= len;
657                 /* fall through */
658         case 1:
659                 r = cluster_wbuild(vp, blksize, start_loffset, len);
660                 /* fall through */
661         default:
662                 break;
664         }
665         return(r);
666 }
667
668 /*
669  * Do clustered write for FFS.
670  *
671  * Four cases:
672  *      1. Write is not sequential (write asynchronously)
673  *      Write is sequential:
674  *      2.      beginning of cluster - begin cluster
675  *      3.      middle of a cluster - add to cluster
676  *      4.      end of a cluster - asynchronously write cluster
677  */
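/*
 * Illustrative call (hypothetical): a filesystem write path would use
 * this in place of a plain bdwrite()/bawrite() on a full logical block,
 * roughly as
 *
 *      cluster_write(bp, new_file_size, blksize, seqcount);
 */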
678 void
679 cluster_write(struct buf *bp, off_t filesize, int blksize, int seqcount)
680 {
681         struct vnode *vp;
682         off_t loffset;
683         int maxclen, cursize;
684         int async;
685
686         vp = bp->b_vp;
687         if (vp->v_type == VREG)
688                 async = vp->v_mount->mnt_flag & MNT_ASYNC;
689         else
690                 async = 0;
691         loffset = bp->b_loffset;
692         KASSERT(bp->b_loffset != NOOFFSET, 
693                 ("cluster_write: no buffer offset"));
694
695         /* Initialize vnode to beginning of file. */
696         if (loffset == 0)
697                 vp->v_lasta = vp->v_clen = vp->v_cstart = vp->v_lastw = 0;
698
699         if (vp->v_clen == 0 || loffset != vp->v_lastw + blksize ||
700             bp->b_bio2.bio_offset == NOOFFSET ||
701             (bp->b_bio2.bio_offset != vp->v_lasta + blksize)) {
702                 maxclen = vmaxiosize(vp);
703                 if (vp->v_clen != 0) {
704                         /*
705                          * Next block is not sequential.
706                          *
707                          * If we are not writing at end of file, the process
708                          * seeked to another point in the file since its last
709                          * write, or we have reached our maximum cluster size,
710                          * then push the previous cluster. Otherwise try
711                          * reallocating to make it sequential.
712                          *
713                          * Change to algorithm: only push previous cluster if
714                          * it was sequential from the point of view of the
715                          * seqcount heuristic, otherwise leave the buffer 
716                          * intact so we can potentially optimize the I/O
717                          * later on in the buf_daemon or update daemon
718                          * flush.
719                          */
720                         cursize = vp->v_lastw - vp->v_cstart + blksize;
721                         if (bp->b_loffset + blksize != filesize ||
722                             loffset != vp->v_lastw + blksize || vp->v_clen <= cursize) {
723                                 if (!async && seqcount > 0) {
724                                         cluster_wbuild_wb(vp, blksize,
725                                                 vp->v_cstart, cursize);
726                                 }
727                         } else {
728                                 struct buf **bpp, **endbp;
729                                 struct cluster_save *buflist;
730
731                                 buflist = cluster_collectbufs(vp, bp, blksize);
732                                 endbp = &buflist->bs_children
733                                     [buflist->bs_nchildren - 1];
734                                 if (VOP_REALLOCBLKS(vp, buflist)) {
735                                         /*
736                                          * Failed, push the previous cluster
737                                          * if *really* writing sequentially
738                                          * in the logical file (seqcount > 1),
739                                          * otherwise delay it in the hopes that
740                                          * the low level disk driver can
741                                          * optimize the write ordering.
742                                          */
743                                         for (bpp = buflist->bs_children;
744                                              bpp < endbp; bpp++)
745                                                 brelse(*bpp);
746                                         kfree(buflist, M_SEGMENT);
747                                         if (seqcount > 1) {
748                                                 cluster_wbuild_wb(vp, 
749                                                     blksize, vp->v_cstart, 
750                                                     cursize);
751                                         }
752                                 } else {
753                                         /*
754                                          * Succeeded, keep building cluster.
755                                          */
756                                         for (bpp = buflist->bs_children;
757                                              bpp <= endbp; bpp++)
758                                                 bdwrite(*bpp);
759                                         kfree(buflist, M_SEGMENT);
760                                         vp->v_lastw = loffset;
761                                         vp->v_lasta = bp->b_bio2.bio_offset;
762                                         return;
763                                 }
764                         }
765                 }
766                 /*
767                  * Consider beginning a cluster. If at end of file, make
768                  * cluster as large as possible, otherwise find size of
769                  * existing cluster.
770                  */
771                 if ((vp->v_type == VREG) &&
772                     bp->b_loffset + blksize != filesize &&
773                     (bp->b_bio2.bio_offset == NOOFFSET) &&
774                     (VOP_BMAP(vp, loffset, &bp->b_bio2.bio_offset, &maxclen, NULL, BUF_CMD_WRITE) ||
775                      bp->b_bio2.bio_offset == NOOFFSET)) {
776                         bawrite(bp);
777                         vp->v_clen = 0;
778                         vp->v_lasta = bp->b_bio2.bio_offset;
779                         vp->v_cstart = loffset + blksize;
780                         vp->v_lastw = loffset;
781                         return;
782                 }
783                 if (maxclen > blksize)
784                         vp->v_clen = maxclen - blksize;
785                 else
786                         vp->v_clen = 0;
787                 if (!async && vp->v_clen == 0) { /* I/O not contiguous */
788                         vp->v_cstart = loffset + blksize;
789                         bawrite(bp);
790                 } else {        /* Wait for rest of cluster */
791                         vp->v_cstart = loffset;
792                         bdwrite(bp);
793                 }
794         } else if (loffset == vp->v_cstart + vp->v_clen) {
795                 /*
796                  * At end of cluster, write it out if seqcount tells us we
797                  * are operating sequentially, otherwise let the buf or
798                  * update daemon handle it.
799                  */
800                 bdwrite(bp);
801                 if (seqcount > 1)
802                         cluster_wbuild_wb(vp, blksize, vp->v_cstart,
803                                           vp->v_clen + blksize);
804                 vp->v_clen = 0;
805                 vp->v_cstart = loffset + blksize;
806         } else if (vm_page_count_severe()) {
807                 /*
808                  * We are low on memory, get it going NOW
809                  */
810                 bawrite(bp);
811         } else {
812                 /*
813                  * In the middle of a cluster, so just delay the I/O for now.
814                  */
815                 bdwrite(bp);
816         }
817         vp->v_lastw = loffset;
818         vp->v_lasta = bp->b_bio2.bio_offset;
819 }
820
821
822 /*
823  * This is an awful lot like cluster_rbuild...wish they could be combined.
824  * Write out any delayed-write buffers found in the byte range
825  * [start_loffset, start_loffset + bytes), combining contiguous
826  * clusterable buffers into single larger writes.
827  */
828 int
829 cluster_wbuild(struct vnode *vp, int blksize, off_t start_loffset, int bytes)
830 {
831         struct buf *bp, *tbp;
832         int i, j;
833         int totalwritten = 0;
834         int maxiosize = vmaxiosize(vp);
835
836         while (bytes > 0) {
837                 /*
838                  * If the buffer is not delayed-write (i.e. dirty), or it 
839                  * is delayed-write but either locked or inval, it cannot 
840                  * partake in the clustered write.
841                  */
842                 tbp = findblk(vp, start_loffset, FINDBLK_NBLOCK);
843                 if (tbp == NULL ||
844                     (tbp->b_flags & (B_LOCKED | B_INVAL | B_DELWRI)) != B_DELWRI ||
845                     (LIST_FIRST(&tbp->b_dep) && buf_checkwrite(tbp))) {
846                         if (tbp)
847                                 BUF_UNLOCK(tbp);
848                         start_loffset += blksize;
849                         bytes -= blksize;
850                         continue;
851                 }
852                 bremfree(tbp);
853                 KKASSERT(tbp->b_cmd == BUF_CMD_DONE);
854
855                 /*
856                  * Extra memory in the buffer, punt on this buffer.
857                  * XXX we could handle this in most cases, but we would
858                  * have to push the extra memory down to after our max
859                  * possible cluster size and then potentially pull it back
860                  * up if the cluster was terminated prematurely--too much
861                  * hassle.
862                  */
863                 if (((tbp->b_flags & (B_CLUSTEROK|B_MALLOC)) != B_CLUSTEROK) ||
864                     (tbp->b_bcount != tbp->b_bufsize) ||
865                     (tbp->b_bcount != blksize) ||
866                     (bytes == blksize) ||
867                     ((bp = getpbuf_kva(&cluster_pbuf_freecnt)) == NULL)) {
868                         totalwritten += tbp->b_bufsize;
869                         bawrite(tbp);
870                         start_loffset += blksize;
871                         bytes -= blksize;
872                         continue;
873                 }
874
875                 /*
876                  * Set up the pbuf.  Track our append point with b_bcount
877                  * and b_bufsize.  b_bufsize is not used by the device but
878                  * our caller uses it to loop clusters and we use it to
879                  * detect a premature EOF on the block device.
880                  */
881                 bp->b_bcount = 0;
882                 bp->b_bufsize = 0;
883                 bp->b_xio.xio_npages = 0;
884                 bp->b_loffset = tbp->b_loffset;
885                 bp->b_bio2.bio_offset = tbp->b_bio2.bio_offset;
886
887                 /*
888                  * We are synthesizing a buffer out of vm_page_t's, but
889                  * if the block size is not page aligned then the starting
890                  * address may not be either.  Inherit the b_data offset
891                  * from the original buffer.
892                  */
893                 bp->b_data = (char *)((vm_offset_t)bp->b_data |
894                     ((vm_offset_t)tbp->b_data & PAGE_MASK));
895                 bp->b_flags &= ~B_ERROR;
896                 bp->b_flags |= B_CLUSTER | B_BNOCLIP |
897                         (tbp->b_flags & (B_VMIO | B_NEEDCOMMIT));
898                 bp->b_bio1.bio_caller_info1.cluster_head = NULL;
899                 bp->b_bio1.bio_caller_info2.cluster_tail = NULL;
900
901                 /*
902                  * From this location in the file, scan forward to see
903                  * if there are buffers with adjacent data that need to
904                  * be written as well.
905                  */
906                 for (i = 0; i < bytes; (i += blksize), (start_loffset += blksize)) {
907                         if (i != 0) { /* If not the first buffer */
908                                 tbp = findblk(vp, start_loffset,
909                                               FINDBLK_NBLOCK);
910                                 /*
911                                  * Buffer not found or could not be locked
912                                  * non-blocking.
913                                  */
914                                 if (tbp == NULL)
915                                         break;
916
917                                 /*
918                                  * If it IS in core, but has different
919                                  * characteristics, then don't cluster
920                                  * with it.
921                                  */
922                                 if ((tbp->b_flags & (B_VMIO | B_CLUSTEROK |
923                                      B_INVAL | B_DELWRI | B_NEEDCOMMIT))
924                                     != (B_DELWRI | B_CLUSTEROK |
925                                      (bp->b_flags & (B_VMIO | B_NEEDCOMMIT))) ||
926                                     (tbp->b_flags & B_LOCKED) ||
927                                     (LIST_FIRST(&tbp->b_dep) &&
928                                      buf_checkwrite(tbp))
929                                 ) {
930                                         BUF_UNLOCK(tbp);
931                                         break;
932                                 }
933
934                                 /*
935                                  * Check that the combined cluster
936                                  * would make sense with regard to pages
937                                  * and would not be too large
938                                  */
939                                 if ((tbp->b_bcount != blksize) ||
940                                   ((bp->b_bio2.bio_offset + i) !=
941                                     tbp->b_bio2.bio_offset) ||
942                                   ((tbp->b_xio.xio_npages + bp->b_xio.xio_npages) >
943                                     (maxiosize / PAGE_SIZE))) {
944                                         BUF_UNLOCK(tbp);
945                                         break;
946                                 }
947                                 /*
948                                  * Ok, it's passed all the tests,
949                                  * so remove it from the free list
950                                  * and mark it busy. We will use it.
951                                  */
952                                 bremfree(tbp);
953                                 KKASSERT(tbp->b_cmd == BUF_CMD_DONE);
954                         } /* end of code for non-first buffers only */
955
956                         /*
957                          * If the IO is via the VM then we do some
958                          * special VM hackery (yuck).  Since the buffer's
959                          * block size may not be page-aligned it is possible
960                          * for a page to be shared between two buffers.  We
961                          * have to get rid of the duplication when building
962                          * the cluster.
963                          */
964                         if (tbp->b_flags & B_VMIO) {
965                                 vm_page_t m;
966
967                                 if (i != 0) { /* if not first buffer */
968                                         for (j = 0; j < tbp->b_xio.xio_npages; ++j) {
969                                                 m = tbp->b_xio.xio_pages[j];
970                                                 if (m->flags & PG_BUSY) {
971                                                         bqrelse(tbp);
972                                                         goto finishcluster;
973                                                 }
974                                         }
975                                 }
976                                         
977                                 for (j = 0; j < tbp->b_xio.xio_npages; ++j) {
978                                         m = tbp->b_xio.xio_pages[j];
979                                         vm_page_io_start(m);
980                                         vm_object_pip_add(m->object, 1);
981                                         if ((bp->b_xio.xio_npages == 0) ||
982                                           (bp->b_xio.xio_pages[bp->b_xio.xio_npages - 1] != m)) {
983                                                 bp->b_xio.xio_pages[bp->b_xio.xio_npages] = m;
984                                                 bp->b_xio.xio_npages++;
985                                         }
986                                 }
987                         }
988                         bp->b_bcount += blksize;
989                         bp->b_bufsize += blksize;
990
991                         bundirty(tbp);
992                         tbp->b_flags &= ~B_ERROR;
993                         tbp->b_cmd = BUF_CMD_WRITE;
994                         BUF_KERNPROC(tbp);
995                         cluster_append(&bp->b_bio1, tbp);
996
997                         /*
998                          * check for latent dependencies to be handled 
999                          */
1000                         if (LIST_FIRST(&tbp->b_dep) != NULL)
1001                                 buf_start(tbp);
1002                 }
1003         finishcluster:
1004                 pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
1005                         (vm_page_t *) bp->b_xio.xio_pages, bp->b_xio.xio_npages);
1006                 if (bp->b_bufsize > bp->b_kvasize) {
1007                         panic(
1008                             "cluster_wbuild: b_bufsize(%d) > b_kvasize(%d)\n",
1009                             bp->b_bufsize, bp->b_kvasize);
1010                 }
1011                 totalwritten += bp->b_bufsize;
1012                 bp->b_dirtyoff = 0;
1013                 bp->b_dirtyend = bp->b_bufsize;
1014                 bp->b_bio1.bio_done = cluster_callback;
1015                 bp->b_cmd = BUF_CMD_WRITE;
1016
1017                 vfs_busy_pages(vp, bp);
1018                 bsetrunningbufspace(bp, bp->b_bufsize);
1019                 BUF_KERNPROC(bp);
1020                 vn_strategy(vp, &bp->b_bio1);
1021
1022                 bytes -= i;
1023         }
1024         return totalwritten;
1025 }
1026
1027 /*
1028  * Collect together all the delayed-write buffers in the current cluster
1029  * (from v_cstart through v_lastw), plus the additional buffer last_bp.
1030  */
1031 static struct cluster_save *
1032 cluster_collectbufs(struct vnode *vp, struct buf *last_bp, int blksize)
1033 {
1034         struct cluster_save *buflist;
1035         struct buf *bp;
1036         off_t loffset;
1037         int i, len;
1038
1039         len = (int)(vp->v_lastw - vp->v_cstart + blksize) / blksize;
1040         buflist = kmalloc(sizeof(struct buf *) * (len + 1) + sizeof(*buflist),
1041                          M_SEGMENT, M_WAITOK);
1042         buflist->bs_nchildren = 0;
1043         buflist->bs_children = (struct buf **) (buflist + 1);
1044         for (loffset = vp->v_cstart, i = 0; i < len; (loffset += blksize), i++) {
1045                 (void) bread(vp, loffset, last_bp->b_bcount, &bp);
1046                 buflist->bs_children[i] = bp;
1047                 if (bp->b_bio2.bio_offset == NOOFFSET) {
1048                         VOP_BMAP(bp->b_vp, bp->b_loffset,
1049                                  &bp->b_bio2.bio_offset,
1050                                  NULL, NULL, BUF_CMD_WRITE);
1051                 }
1052         }
1053         buflist->bs_children[i] = bp = last_bp;
1054         if (bp->b_bio2.bio_offset == NOOFFSET) {
1055                 VOP_BMAP(bp->b_vp, bp->b_loffset, &bp->b_bio2.bio_offset,
1056                          NULL, NULL, BUF_CMD_WRITE);
1057         }
1058         buflist->bs_nchildren = i + 1;
1059         return (buflist);
1060 }
1061
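/*
 * Append tbp to the cluster buf's singly-linked list of component
 * buffers (cluster_head/cluster_tail).  cluster_callback() walks this
 * list on completion.
 */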
1062 void
1063 cluster_append(struct bio *bio, struct buf *tbp)
1064 {
1065         tbp->b_cluster_next = NULL;
1066         if (bio->bio_caller_info1.cluster_head == NULL) {
1067                 bio->bio_caller_info1.cluster_head = tbp;
1068                 bio->bio_caller_info2.cluster_tail = tbp;
1069         } else {
1070                 bio->bio_caller_info2.cluster_tail->b_cluster_next = tbp;
1071                 bio->bio_caller_info2.cluster_tail = tbp;
1072         }
1073 }
1074
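/*
 * Mark bp as a read-ahead mark: set B_RAM on the buffer and PG_RAM on
 * its first page.  cluster_read() uses the mark on a later cache hit
 * to decide when to issue more read-ahead.
 */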
1075 static
1076 void
1077 cluster_setram (struct buf *bp)
1078 {
1079         bp->b_flags |= B_RAM;
1080         if (bp->b_xio.xio_npages)
1081                 vm_page_flag_set(bp->b_xio.xio_pages[0], PG_RAM);
1082 }