/*-
 * Copyright (c) 1993
 *      The Regents of the University of California.  All rights reserved.
 * Modifications/enhancements:
 *      Copyright (c) 1995 John S. Dyson.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)vfs_cluster.c       8.7 (Berkeley) 2/13/94
 * $FreeBSD: src/sys/kern/vfs_cluster.c,v 1.92.2.9 2001/11/18 07:10:59 dillon Exp $
 * $DragonFly: src/sys/kern/vfs_cluster.c,v 1.40 2008/07/14 03:09:00 dillon Exp $
 */

#include "opt_debug_cluster.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/resourcevar.h>
#include <sys/vmmeter.h>
#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <sys/sysctl.h>
#include <sys/buf2.h>
#include <vm/vm_page2.h>

#include <machine/limits.h>

#if defined(CLUSTERDEBUG)
#include <sys/sysctl.h>
static int      rcluster = 0;
SYSCTL_INT(_debug, OID_AUTO, rcluster, CTLFLAG_RW, &rcluster, 0, "");
#endif

static MALLOC_DEFINE(M_SEGMENT, "cluster_save", "cluster_save buffer");

static struct cluster_save *
        cluster_collectbufs (struct vnode *vp, struct buf *last_bp,
                            int blksize);
static struct buf *
        cluster_rbuild (struct vnode *vp, off_t filesize, off_t loffset,
                            off_t doffset, int blksize, int run,
                            struct buf *fbp);
static void cluster_callback (struct bio *);
static void cluster_setram (struct buf *);

static int write_behind = 1;
SYSCTL_INT(_vfs, OID_AUTO, write_behind, CTLFLAG_RW, &write_behind, 0, "");

extern vm_page_t        bogus_page;

extern int cluster_pbuf_freecnt;

/*
 * Maximum number of blocks for read-ahead.
 */
#define MAXRA 32

/*
 * This replaces bread, performing clustered read-ahead when the access
 * pattern appears to be sequential.
 */
int
cluster_read(struct vnode *vp, off_t filesize, off_t loffset,
             int blksize, size_t resid, int seqcount, struct buf **bpp)
{
        struct buf *bp, *rbp, *reqbp;
        off_t origoffset;
        off_t doffset;
        int error;
        int i;
        int maxra, racluster;
        int totread;

        error = 0;
        totread = (resid > INT_MAX) ? INT_MAX : (int)resid;

        /*
         * racluster - calculate maximum cluster IO size (limited by
         *             backing block device).
         *
         * Try to limit the amount of read-ahead by a few ad-hoc parameters.
         * This needs work!!!
         *
         * NOTE!  The BMAP operations may involve synchronous I/O so we
         *        really want several cluster IOs in progress to absorb
         *        the time lag.
         */
        racluster = vmaxiosize(vp) / blksize;
        maxra = 2 * racluster + (totread / blksize);
        if (maxra > MAXRA)
                maxra = MAXRA;
        if (maxra > nbuf / 8)
                maxra = nbuf / 8;

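        /*
         * Example with hypothetical numbers: a 64KB device maximum IO
         * size and 8KB blocks give racluster = 8; a 32KB residual then
         * yields maxra = 2 * 8 + 4 = 20, subject to the MAXRA and
         * nbuf / 8 caps applied above.
         */
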
        /*
         * Get the requested block.
         */
        *bpp = reqbp = bp = getblk(vp, loffset, blksize, 0, 0);
        origoffset = loffset;

        /*
         * If it is in the cache, then check to see if the reads have been
         * sequential.  If they have, then try some read-ahead, otherwise
         * back off on prospective read-aheads.
         */
        if (bp->b_flags & B_CACHE) {
                /*
                 * Not sequential, do not do any read-ahead
                 */
                if (seqcount == 0 || maxra == 0)
                        return 0;

                /*
                 * No read-ahead mark, do not do any read-ahead
                 * yet.
                 */
                if ((bp->b_flags & B_RAM) == 0)
                        return 0;

                /*
                 * We hit a read-ahead-mark, figure out how much read-ahead
                 * to do (maxra) and where to start (loffset).
                 *
                 * Shortcut the scan.  Typically the way this works is that
                 * we've built up all the blocks in between except for the
                 * last in previous iterations, so if the second-to-last
                 * block is present we just skip ahead to it.
                 *
                 * This algorithm has O(1) cpu in the steady state no
                 * matter how large maxra is.
                 */
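                /*
                 * For example (hypothetical numbers): with maxra = 8 and
                 * blocks 1-6 already present from previous passes, the
                 * findblk() probe of block (maxra - 2) = 6 below succeeds
                 * and the scan starts at i = 7 instead of re-walking
                 * blocks 1-6.
                 */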
                bp->b_flags &= ~B_RAM;

                if (findblk(vp, loffset + (maxra - 2) * blksize, FINDBLK_TEST))
                        i = maxra - 1;
                else
                        i = 1;
                while (i < maxra) {
                        if (findblk(vp, loffset + i * blksize,
                                    FINDBLK_TEST) == NULL) {
                                break;
                        }
                        ++i;
                }
                if (i >= maxra)
                        return 0;
                maxra -= i;
                loffset += i * blksize;
                reqbp = bp = NULL;
        } else {
                off_t firstread = bp->b_loffset;
                int nblks;

                /*
                 * Set up synchronous read for bp.
                 */
                bp->b_cmd = BUF_CMD_READ;
                bp->b_bio1.bio_done = biodone_sync;
                bp->b_bio1.bio_flags |= BIO_SYNC;

                KASSERT(firstread != NOOFFSET,
                        ("cluster_read: no buffer offset"));
                if (firstread + totread > filesize)
                        totread = (int)(filesize - firstread);
                nblks = totread / blksize;
                if (nblks) {
                        int burstbytes;

                        if (nblks > racluster)
                                nblks = racluster;

                        error = VOP_BMAP(vp, loffset, &doffset,
                                         &burstbytes, NULL, BUF_CMD_READ);
                        if (error)
                                goto single_block_read;
                        if (doffset == NOOFFSET)
                                goto single_block_read;
                        if (burstbytes < blksize * 2)
                                goto single_block_read;
                        if (nblks > burstbytes / blksize)
                                nblks = burstbytes / blksize;

                        bp = cluster_rbuild(vp, filesize, loffset,
                                            doffset, blksize, nblks, bp);
                        loffset += bp->b_bufsize;
                        maxra -= (bp->b_bufsize - blksize) / blksize;
                } else {
single_block_read:
                        /*
                         * If it isn't in the cache, then get a chunk from
                         * disk if sequential, otherwise just get the block.
                         */
                        cluster_setram(bp);
                        loffset += blksize;
                }
        }

        /*
         * If B_CACHE was not set issue bp.  bp will either be an
         * asynchronous cluster buf or a synchronous single-buf.
         * If it is a single buf it will be the same as reqbp.
         *
         * NOTE: Once an async cluster buf is issued bp becomes invalid.
         */
        if (bp) {
#if defined(CLUSTERDEBUG)
                if (rcluster)
                        kprintf("S(%lld,%d,%d) ",
                            bp->b_loffset, bp->b_bcount, seqcount);
#endif
                if ((bp->b_flags & B_CLUSTER) == 0)
                        vfs_busy_pages(vp, bp);
                bp->b_flags &= ~(B_ERROR|B_INVAL);
                vn_strategy(vp, &bp->b_bio1);
                error = 0;
                /* bp invalid now */
        }

        /*
         * If we have been doing sequential I/O, then do some read-ahead.
         * The code above us should have positioned us at the next likely
         * offset.
         *
         * Only mess with buffers which we can immediately lock.  HAMMER
         * will do device-readahead irrespective of what the blocks
         * represent.
         */
        while (!error && seqcount && maxra > 0 &&
               loffset + blksize <= filesize) {
                int nblksread;
                int ntoread;
                int burstbytes;
                int tmp_error;

                rbp = getblk(vp, loffset, blksize,
                             GETBLK_SZMATCH|GETBLK_NOWAIT, 0);
                if (rbp == NULL)
                        goto no_read_ahead;
                if ((rbp->b_flags & B_CACHE)) {
                        bqrelse(rbp);
                        goto no_read_ahead;
                }

                /*
                 * An error from the read-ahead bmap has nothing to do
                 * with the caller's original request.
                 */
                tmp_error = VOP_BMAP(vp, loffset, &doffset,
                                     &burstbytes, NULL, BUF_CMD_READ);
                if (tmp_error || doffset == NOOFFSET) {
                        rbp->b_flags |= B_INVAL;
                        brelse(rbp);
                        rbp = NULL;
                        goto no_read_ahead;
                }
                ntoread = burstbytes / blksize;
                nblksread = (totread + blksize - 1) / blksize;
                if (seqcount < nblksread)
                        seqcount = nblksread;
                if (ntoread > seqcount)
                        ntoread = seqcount;

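                /*
                 * Example (hypothetical numbers): a 64KB burst with 8KB
                 * blocks allows ntoread = 8, but if the adjusted seqcount
                 * only justifies 4 blocks, ntoread is clamped to 4 above.
                 */
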
                /*
                 * rbp: async read
                 */
                rbp->b_cmd = BUF_CMD_READ;
                /*rbp->b_flags |= B_AGE*/;
                cluster_setram(rbp);

                if (burstbytes) {
                        rbp = cluster_rbuild(vp, filesize, loffset,
                                             doffset, blksize,
                                             ntoread, rbp);
                } else {
                        rbp->b_bio2.bio_offset = doffset;
                }
#if defined(CLUSTERDEBUG)
                if (rcluster) {
                        if (bp)
                                kprintf("A+(%lld,%d,%lld,%d) ",
                                    rbp->b_loffset, rbp->b_bcount,
                                    rbp->b_loffset - origoffset,
                                    seqcount);
                        else
                                kprintf("A(%lld,%d,%lld,%d) ",
                                    rbp->b_loffset, rbp->b_bcount,
                                    rbp->b_loffset - origoffset,
                                    seqcount);
                }
#endif
                rbp->b_flags &= ~(B_ERROR|B_INVAL);

                if ((rbp->b_flags & B_CLUSTER) == 0)
                        vfs_busy_pages(vp, rbp);
                BUF_KERNPROC(rbp);
                loffset += rbp->b_bufsize;
                maxra -= rbp->b_bufsize / blksize;
                vn_strategy(vp, &rbp->b_bio1);
                /* rbp invalid now */
        }

        /*
         * Wait for our original buffer to complete its I/O.  reqbp will
         * be NULL if the original buffer was B_CACHE.  We are returning
         * (*bpp) which is the same as reqbp when reqbp != NULL.
         */
no_read_ahead:
        if (reqbp) {
                KKASSERT(reqbp->b_bio1.bio_flags & BIO_SYNC);
                error = biowait(&reqbp->b_bio1, "clurd");
        }
        return (error);
}

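/*
 * A typical cluster_read() call from a filesystem read path looks like
 * the following sketch (the inode pointer 'ip' and the surrounding
 * uio/seqcount plumbing are hypothetical, not from any particular VFS):
 *
 *      error = cluster_read(vp, ip->i_size, loffset, blksize,
 *                           uio->uio_resid, seqcount, &bp);
 *
 * On return *bpp holds the requested buffer with its synchronous I/O
 * complete; any read-ahead issued on its behalf finishes asynchronously.
 */
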
/*
 * If blocks are contiguous on disk, use this to provide clustered
 * read ahead.  We will read as many blocks as possible sequentially
 * and then parcel them up into logical blocks in the buffer hash table.
 *
 * This function either returns a cluster buf or it returns fbp.  fbp is
 * already expected to be set up as a synchronous or asynchronous request.
 *
 * If a cluster buf is returned it will always be async.
 */
static struct buf *
cluster_rbuild(struct vnode *vp, off_t filesize, off_t loffset, off_t doffset,
               int blksize, int run, struct buf *fbp)
{
        struct buf *bp, *tbp;
        off_t boffset;
        int i, j;
        int maxiosize = vmaxiosize(vp);

        /*
         * avoid a division
         */
        while (loffset + run * blksize > filesize) {
                --run;
        }

        tbp = fbp;
        tbp->b_bio2.bio_offset = doffset;
        if ((tbp->b_flags & B_MALLOC) ||
            ((tbp->b_flags & B_VMIO) == 0) || (run <= 1)) {
                return tbp;
        }

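        /*
         * Grab a pbuf to synthesize the cluster into.  If the pbuf pool
         * is exhausted, fall back to issuing just the single buffer that
         * has already been set up.
         */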
        bp = trypbuf(&cluster_pbuf_freecnt);
        if (bp == NULL) {
                return tbp;
        }

        /*
         * We are synthesizing a buffer out of vm_page_t's, but
         * if the block size is not page aligned then the starting
         * address may not be either.  Inherit the b_data offset
         * from the original buffer.
         */
        bp->b_data = (char *)((vm_offset_t)bp->b_data |
            ((vm_offset_t)tbp->b_data & PAGE_MASK));
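        /*
         * E.g. (hypothetical numbers): with a 6KB block size the second
         * block begins 0x800 bytes into a page, so tbp->b_data carries a
         * 0x800 page offset which must be OR'd into bp->b_data to keep
         * the synthesized mapping congruent with the underlying pages.
         */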
        bp->b_flags |= B_CLUSTER | B_VMIO;
        bp->b_cmd = BUF_CMD_READ;
        bp->b_bio1.bio_done = cluster_callback;         /* default to async */
        bp->b_bio1.bio_caller_info1.cluster_head = NULL;
        bp->b_bio1.bio_caller_info2.cluster_tail = NULL;
        bp->b_loffset = loffset;
        bp->b_bio2.bio_offset = doffset;
        KASSERT(bp->b_loffset != NOOFFSET,
                ("cluster_rbuild: no buffer offset"));

        bp->b_bcount = 0;
        bp->b_bufsize = 0;
        bp->b_xio.xio_npages = 0;

        for (boffset = doffset, i = 0; i < run; ++i, boffset += blksize) {
                if (i) {
                        if ((bp->b_xio.xio_npages * PAGE_SIZE) +
                            round_page(blksize) > maxiosize) {
                                break;
                        }

                        /*
                         * Shortcut some checks and try to avoid buffers that
                         * would block in the lock.  The same checks have to
                         * be made again after we officially get the buffer.
                         */
                        tbp = getblk(vp, loffset + i * blksize, blksize,
                                     GETBLK_SZMATCH|GETBLK_NOWAIT, 0);
                        if (tbp == NULL)
                                break;
                        for (j = 0; j < tbp->b_xio.xio_npages; j++) {
                                if (tbp->b_xio.xio_pages[j]->valid)
                                        break;
                        }
                        if (j != tbp->b_xio.xio_npages) {
                                bqrelse(tbp);
                                break;
                        }

                        /*
                         * Stop scanning if the buffer is fully valid
                         * (marked B_CACHE), or locked (may be doing a
                         * background write), or if the buffer is not
                         * VMIO backed.  The clustering code can only deal
                         * with VMIO-backed buffers.
                         */
                        if ((tbp->b_flags & (B_CACHE|B_LOCKED)) ||
                            (tbp->b_flags & B_VMIO) == 0 ||
                            (LIST_FIRST(&tbp->b_dep) != NULL &&
                             buf_checkread(tbp))
                        ) {
                                bqrelse(tbp);
                                break;
                        }

                        /*
                         * The buffer must be completely invalid in order to
                         * take part in the cluster.  If it is partially valid
                         * then we stop.
                         */
                        for (j = 0; j < tbp->b_xio.xio_npages; j++) {
                                if (tbp->b_xio.xio_pages[j]->valid)
                                        break;
                        }
                        if (j != tbp->b_xio.xio_npages) {
                                bqrelse(tbp);
                                break;
                        }

                        /*
                         * Set a read-ahead mark as appropriate
                         */
                        if (i == 1 || i == (run - 1))
                                cluster_setram(tbp);

                        /*
                         * Depress the priority of buffers not explicitly
                         * requested.
                         */
                        /* tbp->b_flags |= B_AGE; */

                        /*
                         * Set the block number if it isn't set, otherwise
                         * if it is make sure it matches the block number we
                         * expect.
                         */
                        if (tbp->b_bio2.bio_offset == NOOFFSET) {
                                tbp->b_bio2.bio_offset = boffset;
                        } else if (tbp->b_bio2.bio_offset != boffset) {
                                brelse(tbp);
                                break;
                        }
                }

                /*
                 * The passed-in tbp (i == 0) will already be set up for
                 * async or sync operation.  All other tbp's acquired in
                 * our loop are set up for async operation.
                 */
                tbp->b_cmd = BUF_CMD_READ;
                BUF_KERNPROC(tbp);
                cluster_append(&bp->b_bio1, tbp);
                for (j = 0; j < tbp->b_xio.xio_npages; ++j) {
                        vm_page_t m;

                        m = tbp->b_xio.xio_pages[j];
                        vm_page_io_start(m);
                        vm_object_pip_add(m->object, 1);
                        if ((bp->b_xio.xio_npages == 0) ||
                            (bp->b_xio.xio_pages[bp->b_xio.xio_npages-1] != m)) {
                                bp->b_xio.xio_pages[bp->b_xio.xio_npages] = m;
                                bp->b_xio.xio_npages++;
                        }
                        if ((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL)
                                tbp->b_xio.xio_pages[j] = bogus_page;
                }
                /*
                 * XXX shouldn't this be += blksize for both, like in
                 * cluster_wbuild()?
                 *
                 * Don't inherit tbp->b_bufsize as it may be larger due to
                 * a non-page-aligned size.  Instead just aggregate using
                 * 'blksize'.
                 */
                if (tbp->b_bcount != blksize) {
                        kprintf("warning: tbp->b_bcount wrong %d vs %d\n",
                                tbp->b_bcount, blksize);
                }
                if (tbp->b_bufsize != blksize) {
                        kprintf("warning: tbp->b_bufsize wrong %d vs %d\n",
                                tbp->b_bufsize, blksize);
                }
                bp->b_bcount += blksize;
                bp->b_bufsize += blksize;
        }

        /*
         * Fully valid pages in the cluster are already good and do not need
         * to be re-read from disk.  Replace the page with bogus_page
         */
        for (j = 0; j < bp->b_xio.xio_npages; j++) {
                if ((bp->b_xio.xio_pages[j]->valid & VM_PAGE_BITS_ALL) ==
                    VM_PAGE_BITS_ALL) {
                        bp->b_xio.xio_pages[j] = bogus_page;
                }
        }
        if (bp->b_bufsize > bp->b_kvasize) {
                panic("cluster_rbuild: b_bufsize(%d) > b_kvasize(%d)",
                    bp->b_bufsize, bp->b_kvasize);
        }
        pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
                (vm_page_t *)bp->b_xio.xio_pages, bp->b_xio.xio_npages);
        BUF_KERNPROC(bp);
        return (bp);
}

/*
 * Cleanup after a clustered read or write.
 * This is complicated by the fact that any of the buffers might have
 * extra memory (if there were no empty buffer headers at allocbuf time)
 * that we will need to shift around.
 *
 * The returned bio is &bp->b_bio1
 */
void
cluster_callback(struct bio *bio)
{
        struct buf *bp = bio->bio_buf;
        struct buf *tbp;
        int error = 0;

        /*
         * Must propagate errors to all the components.  A short read (EOF)
         * is a critical error.
         */
        if (bp->b_flags & B_ERROR) {
                error = bp->b_error;
        } else if (bp->b_bcount != bp->b_bufsize) {
                panic("cluster_callback: unexpected EOF on cluster %p!", bio);
        }

        pmap_qremove(trunc_page((vm_offset_t) bp->b_data),
                     bp->b_xio.xio_npages);

        /*
         * Move memory from the large cluster buffer into the component
         * buffers and mark IO as done on these.  Since the memory map
         * is the same, no actual copying is required.
         */
        while ((tbp = bio->bio_caller_info1.cluster_head) != NULL) {
                bio->bio_caller_info1.cluster_head = tbp->b_cluster_next;
                if (error) {
                        tbp->b_flags |= B_ERROR;
                        tbp->b_error = error;
                } else {
                        tbp->b_dirtyoff = tbp->b_dirtyend = 0;
                        tbp->b_flags &= ~(B_ERROR|B_INVAL);
                        /*
                         * XXX the bdwrite()/bqrelse() issued during
                         * cluster building clears B_RELBUF (see bqrelse()
                         * comment).  If direct I/O was specified, we have
                         * to restore it here to allow the buffer and VM
                         * to be freed.
                         */
                        if (tbp->b_flags & B_DIRECT)
                                tbp->b_flags |= B_RELBUF;
                }
                biodone(&tbp->b_bio1);
        }
        relpbuf(bp, &cluster_pbuf_freecnt);
}

/*
 *      cluster_wbuild_wb:
 *
 *      Implement modified write build for cluster.
 *
 *              write_behind = 0        write behind disabled
 *              write_behind = 1        write behind normal (default)
 *              write_behind = 2        write behind backed-off
 */
static __inline int
cluster_wbuild_wb(struct vnode *vp, int blksize, off_t start_loffset, int len)
{
        int r = 0;

        switch (write_behind) {
        case 2:
                if (start_loffset < len)
                        break;
                start_loffset -= len;
                /* fall through */
        case 1:
                r = cluster_wbuild(vp, blksize, start_loffset, len);
                /* fall through */
        default:
                break;
        }
        return(r);
}

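/*
 * The policy above is tunable at run-time, e.g. (from userland):
 *
 *      sysctl vfs.write_behind=2
 *
 * Mode 2 backs the write-behind window off by one cluster length so the
 * flush trails the current write position instead of tracking it.
 */
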
/*
 * Do clustered write for FFS.
 *
 * Four cases:
 *      1. Write is not sequential (write asynchronously)
 *      Write is sequential:
 *      2.      beginning of cluster - begin cluster
 *      3.      middle of a cluster - add to cluster
 *      4.      end of a cluster - asynchronously write cluster
 */
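/*
 * For example (illustrative numbers only): sequential 8KB writes with a
 * 64KB maximum cluster take case 2 at the first block (begin a cluster),
 * case 3 for the next six blocks (bdwrite() into the cluster), and case 4
 * at the eighth block, where the full 64KB cluster is pushed out via
 * cluster_wbuild_wb().
 */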
void
cluster_write(struct buf *bp, off_t filesize, int blksize, int seqcount)
{
        struct vnode *vp;
        off_t loffset;
        int maxclen, cursize;
        int async;

        vp = bp->b_vp;
        if (vp->v_type == VREG)
                async = vp->v_mount->mnt_flag & MNT_ASYNC;
        else
                async = 0;
        loffset = bp->b_loffset;
        KASSERT(bp->b_loffset != NOOFFSET,
                ("cluster_write: no buffer offset"));

        /* Initialize vnode to beginning of file. */
        if (loffset == 0)
                vp->v_lasta = vp->v_clen = vp->v_cstart = vp->v_lastw = 0;

        if (vp->v_clen == 0 || loffset != vp->v_lastw + blksize ||
            bp->b_bio2.bio_offset == NOOFFSET ||
            (bp->b_bio2.bio_offset != vp->v_lasta + blksize)) {
                maxclen = vmaxiosize(vp);
                if (vp->v_clen != 0) {
                        /*
                         * Next block is not sequential.
                         *
                         * If we are not writing at end of file, the process
                         * seeked to another point in the file since its last
                         * write, or we have reached our maximum cluster size,
                         * then push the previous cluster. Otherwise try
                         * reallocating to make it sequential.
                         *
                         * Change to algorithm: only push previous cluster if
                         * it was sequential from the point of view of the
                         * seqcount heuristic, otherwise leave the buffer
                         * intact so we can potentially optimize the I/O
                         * later on in the buf_daemon or update daemon
                         * flush.
                         */
                        cursize = vp->v_lastw - vp->v_cstart + blksize;
                        if (bp->b_loffset + blksize != filesize ||
                            loffset != vp->v_lastw + blksize ||
                            vp->v_clen <= cursize) {
                                if (!async && seqcount > 0) {
                                        cluster_wbuild_wb(vp, blksize,
                                                vp->v_cstart, cursize);
                                }
                        } else {
                                struct buf **bpp, **endbp;
                                struct cluster_save *buflist;

                                buflist = cluster_collectbufs(vp, bp, blksize);
                                endbp = &buflist->bs_children
                                    [buflist->bs_nchildren - 1];
                                if (VOP_REALLOCBLKS(vp, buflist)) {
                                        /*
                                         * Failed, push the previous cluster
                                         * if *really* writing sequentially
                                         * in the logical file (seqcount > 1),
                                         * otherwise delay it in the hopes that
                                         * the low level disk driver can
                                         * optimize the write ordering.
                                         */
                                        for (bpp = buflist->bs_children;
                                             bpp < endbp; bpp++)
                                                brelse(*bpp);
                                        kfree(buflist, M_SEGMENT);
                                        if (seqcount > 1) {
                                                cluster_wbuild_wb(vp,
                                                    blksize, vp->v_cstart,
                                                    cursize);
                                        }
                                } else {
                                        /*
                                         * Succeeded, keep building cluster.
                                         */
                                        for (bpp = buflist->bs_children;
                                             bpp <= endbp; bpp++)
                                                bdwrite(*bpp);
                                        kfree(buflist, M_SEGMENT);
                                        vp->v_lastw = loffset;
                                        vp->v_lasta = bp->b_bio2.bio_offset;
                                        return;
                                }
                        }
                }
                /*
                 * Consider beginning a cluster. If at end of file, make
                 * cluster as large as possible, otherwise find size of
                 * existing cluster.
                 */
                if ((vp->v_type == VREG) &&
                    bp->b_loffset + blksize != filesize &&
                    (bp->b_bio2.bio_offset == NOOFFSET) &&
                    (VOP_BMAP(vp, loffset, &bp->b_bio2.bio_offset,
                              &maxclen, NULL, BUF_CMD_WRITE) ||
                     bp->b_bio2.bio_offset == NOOFFSET)) {
                        bawrite(bp);
                        vp->v_clen = 0;
                        vp->v_lasta = bp->b_bio2.bio_offset;
                        vp->v_cstart = loffset + blksize;
                        vp->v_lastw = loffset;
                        return;
                }
                if (maxclen > blksize)
                        vp->v_clen = maxclen - blksize;
                else
                        vp->v_clen = 0;
                if (!async && vp->v_clen == 0) { /* I/O not contiguous */
                        vp->v_cstart = loffset + blksize;
                        bawrite(bp);
                } else {        /* Wait for rest of cluster */
                        vp->v_cstart = loffset;
                        bdwrite(bp);
                }
        } else if (loffset == vp->v_cstart + vp->v_clen) {
                /*
                 * At end of cluster, write it out if seqcount tells us we
                 * are operating sequentially, otherwise let the buf or
                 * update daemon handle it.
                 */
                bdwrite(bp);
                if (seqcount > 1)
                        cluster_wbuild_wb(vp, blksize, vp->v_cstart,
                                          vp->v_clen + blksize);
                vp->v_clen = 0;
                vp->v_cstart = loffset + blksize;
        } else if (vm_page_count_severe()) {
                /*
                 * We are low on memory, get it going NOW
                 */
                bawrite(bp);
        } else {
                /*
                 * In the middle of a cluster, so just delay the I/O for now.
                 */
                bdwrite(bp);
        }
        vp->v_lastw = loffset;
        vp->v_lasta = bp->b_bio2.bio_offset;
}

/*
 * This is an awful lot like cluster_rbuild...wish they could be combined.
 * Scan forward from start_loffset for up to 'bytes' bytes, collecting
 * contiguous delayed-write buffers into larger clustered writes.
 */
int
cluster_wbuild(struct vnode *vp, int blksize, off_t start_loffset, int bytes)
{
        struct buf *bp, *tbp;
        int i, j;
        int totalwritten = 0;
        int maxiosize = vmaxiosize(vp);

        while (bytes > 0) {
                /*
                 * If the buffer is not delayed-write (i.e. dirty), or it
                 * is delayed-write but either locked or inval, it cannot
                 * partake in the clustered write.
                 */
                tbp = findblk(vp, start_loffset, FINDBLK_NBLOCK);
                if (tbp == NULL ||
                    (tbp->b_flags & (B_LOCKED | B_INVAL | B_DELWRI)) !=
                     B_DELWRI ||
                    (LIST_FIRST(&tbp->b_dep) && buf_checkwrite(tbp))) {
                        if (tbp)
                                BUF_UNLOCK(tbp);
                        start_loffset += blksize;
                        bytes -= blksize;
                        continue;
                }
                bremfree(tbp);
                KKASSERT(tbp->b_cmd == BUF_CMD_DONE);

                /*
                 * Extra memory in the buffer, punt on this buffer.
                 * XXX we could handle this in most cases, but we would
                 * have to push the extra memory down to after our max
                 * possible cluster size and then potentially pull it back
                 * up if the cluster was terminated prematurely--too much
                 * hassle.
                 */
                if (((tbp->b_flags & (B_CLUSTEROK|B_MALLOC)) != B_CLUSTEROK) ||
                    (tbp->b_bcount != tbp->b_bufsize) ||
                    (tbp->b_bcount != blksize) ||
                    (bytes == blksize) ||
                    ((bp = getpbuf(&cluster_pbuf_freecnt)) == NULL)) {
                        totalwritten += tbp->b_bufsize;
                        bawrite(tbp);
                        start_loffset += blksize;
                        bytes -= blksize;
                        continue;
                }

                /*
                 * Set up the pbuf.  Track our append point with b_bcount
                 * and b_bufsize.  b_bufsize is not used by the device but
                 * our caller uses it to loop clusters and we use it to
                 * detect a premature EOF on the block device.
                 */
                bp->b_bcount = 0;
                bp->b_bufsize = 0;
                bp->b_xio.xio_npages = 0;
                bp->b_loffset = tbp->b_loffset;
                bp->b_bio2.bio_offset = tbp->b_bio2.bio_offset;

                /*
                 * We are synthesizing a buffer out of vm_page_t's, but
                 * if the block size is not page aligned then the starting
                 * address may not be either.  Inherit the b_data offset
                 * from the original buffer.
                 */
                bp->b_data = (char *)((vm_offset_t)bp->b_data |
                    ((vm_offset_t)tbp->b_data & PAGE_MASK));
                bp->b_flags &= ~B_ERROR;
                bp->b_flags |= B_CLUSTER | B_BNOCLIP |
                        (tbp->b_flags & (B_VMIO | B_NEEDCOMMIT));
                bp->b_bio1.bio_caller_info1.cluster_head = NULL;
                bp->b_bio1.bio_caller_info2.cluster_tail = NULL;

                /*
                 * From this location in the file, scan forward to see
                 * if there are buffers with adjacent data that need to
                 * be written as well.
                 */
                for (i = 0; i < bytes; (i += blksize), (start_loffset += blksize)) {
                        if (i != 0) { /* If not the first buffer */
                                tbp = findblk(vp, start_loffset,
                                              FINDBLK_NBLOCK);
                                /*
                                 * Buffer not found or could not be locked
                                 * non-blocking.
                                 */
                                if (tbp == NULL)
                                        break;

                                /*
                                 * If it IS in core, but has different
                                 * characteristics, then don't cluster
                                 * with it.
                                 */
                                if ((tbp->b_flags & (B_VMIO | B_CLUSTEROK |
                                     B_INVAL | B_DELWRI | B_NEEDCOMMIT))
                                    != (B_DELWRI | B_CLUSTEROK |
                                     (bp->b_flags & (B_VMIO | B_NEEDCOMMIT))) ||
                                    (tbp->b_flags & B_LOCKED) ||
                                    (LIST_FIRST(&tbp->b_dep) &&
                                     buf_checkwrite(tbp))
                                ) {
                                        BUF_UNLOCK(tbp);
                                        break;
                                }

                                /*
                                 * Check that the combined cluster
                                 * would make sense with regard to pages
                                 * and would not be too large
                                 */
                                if ((tbp->b_bcount != blksize) ||
                                  ((bp->b_bio2.bio_offset + i) !=
                                    tbp->b_bio2.bio_offset) ||
                                  ((tbp->b_xio.xio_npages + bp->b_xio.xio_npages) >
                                    (maxiosize / PAGE_SIZE))) {
                                        BUF_UNLOCK(tbp);
                                        break;
                                }
                                /*
                                 * Ok, it's passed all the tests,
                                 * so remove it from the free list
                                 * and mark it busy. We will use it.
                                 */
                                bremfree(tbp);
                                KKASSERT(tbp->b_cmd == BUF_CMD_DONE);
                        } /* end of code for non-first buffers only */

                        /*
                         * If the IO is via the VM then we do some
                         * special VM hackery (yuck).  Since the buffer's
                         * block size may not be page-aligned it is possible
                         * for a page to be shared between two buffers.  We
                         * have to get rid of the duplication when building
                         * the cluster.
                         */
                        if (tbp->b_flags & B_VMIO) {
                                vm_page_t m;

                                if (i != 0) { /* if not first buffer */
                                        for (j = 0; j < tbp->b_xio.xio_npages; ++j) {
                                                m = tbp->b_xio.xio_pages[j];
                                                if (m->flags & PG_BUSY) {
                                                        bqrelse(tbp);
                                                        goto finishcluster;
                                                }
                                        }
                                }

                                for (j = 0; j < tbp->b_xio.xio_npages; ++j) {
                                        m = tbp->b_xio.xio_pages[j];
                                        vm_page_io_start(m);
                                        vm_object_pip_add(m->object, 1);
                                        if ((bp->b_xio.xio_npages == 0) ||
                                            (bp->b_xio.xio_pages[bp->b_xio.xio_npages - 1] != m)) {
                                                bp->b_xio.xio_pages[bp->b_xio.xio_npages] = m;
                                                bp->b_xio.xio_npages++;
                                        }
                                }
                        }
                        bp->b_bcount += blksize;
                        bp->b_bufsize += blksize;

                        bundirty(tbp);
                        tbp->b_flags &= ~B_ERROR;
                        tbp->b_cmd = BUF_CMD_WRITE;
                        BUF_KERNPROC(tbp);
                        cluster_append(&bp->b_bio1, tbp);

                        /*
                         * check for latent dependencies to be handled
                         */
                        if (LIST_FIRST(&tbp->b_dep) != NULL)
                                buf_start(tbp);
                }
        finishcluster:
                pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
                        (vm_page_t *) bp->b_xio.xio_pages, bp->b_xio.xio_npages);
                if (bp->b_bufsize > bp->b_kvasize) {
                        panic("cluster_wbuild: b_bufsize(%d) > b_kvasize(%d)",
                              bp->b_bufsize, bp->b_kvasize);
                }
                totalwritten += bp->b_bufsize;
                bp->b_dirtyoff = 0;
                bp->b_dirtyend = bp->b_bufsize;
                bp->b_bio1.bio_done = cluster_callback;
                bp->b_cmd = BUF_CMD_WRITE;

                vfs_busy_pages(vp, bp);
                bp->b_runningbufspace = bp->b_bufsize;
                if (bp->b_runningbufspace) {
                        runningbufspace += bp->b_runningbufspace;
                        ++runningbufcount;
                }
                BUF_KERNPROC(bp);
                vn_strategy(vp, &bp->b_bio1);

                bytes -= i;
        }
        return totalwritten;
}

/*
 * Collect together all the buffers in a cluster, plus the one
 * additional buffer passed in.
 */
static struct cluster_save *
cluster_collectbufs(struct vnode *vp, struct buf *last_bp, int blksize)
{
        struct cluster_save *buflist;
        struct buf *bp;
        off_t loffset;
        int i, len;

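        /*
         * E.g. (hypothetical numbers): with 8KB blocks, v_cstart at
         * offset 0 and v_lastw at 48KB, len = (48KB + 8KB) / 8KB = 7
         * buffers already in the cluster, with one extra slot allocated
         * below for last_bp.
         */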
        len = (int)(vp->v_lastw - vp->v_cstart + blksize) / blksize;
        buflist = kmalloc(sizeof(struct buf *) * (len + 1) + sizeof(*buflist),
                          M_SEGMENT, M_WAITOK);
        buflist->bs_nchildren = 0;
        buflist->bs_children = (struct buf **) (buflist + 1);
        for (loffset = vp->v_cstart, i = 0; i < len; (loffset += blksize), i++) {
                (void) bread(vp, loffset, last_bp->b_bcount, &bp);
                buflist->bs_children[i] = bp;
                if (bp->b_bio2.bio_offset == NOOFFSET) {
                        VOP_BMAP(bp->b_vp, bp->b_loffset,
                                 &bp->b_bio2.bio_offset,
                                 NULL, NULL, BUF_CMD_WRITE);
                }
        }
        buflist->bs_children[i] = bp = last_bp;
        if (bp->b_bio2.bio_offset == NOOFFSET) {
                VOP_BMAP(bp->b_vp, bp->b_loffset, &bp->b_bio2.bio_offset,
                         NULL, NULL, BUF_CMD_WRITE);
        }
        buflist->bs_nchildren = i + 1;
        return (buflist);
}

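/*
 * Append tbp to the cluster list anchored in the given bio.  The list is
 * singly linked through b_cluster_next, with head and tail pointers kept
 * in the bio's caller_info fields.
 */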
void
cluster_append(struct bio *bio, struct buf *tbp)
{
        tbp->b_cluster_next = NULL;
        if (bio->bio_caller_info1.cluster_head == NULL) {
                bio->bio_caller_info1.cluster_head = tbp;
                bio->bio_caller_info2.cluster_tail = tbp;
        } else {
                bio->bio_caller_info2.cluster_tail->b_cluster_next = tbp;
                bio->bio_caller_info2.cluster_tail = tbp;
        }
}

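/*
 * Set a read-ahead mark on the buffer: B_RAM on the buffer itself and,
 * if it has VM pages, PG_RAM on its first page.
 */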
static void
cluster_setram(struct buf *bp)
{
        bp->b_flags |= B_RAM;
        if (bp->b_xio.xio_npages)
                vm_page_flag_set(bp->b_xio.xio_pages[0], PG_RAM);
}