AMD64 - Refactor uio_resid and size_t assumptions.
[dragonfly.git] / sys / kern / vfs_cluster.c
/*-
 * Copyright (c) 1993
 *	The Regents of the University of California.  All rights reserved.
 * Modifications/enhancements:
 * 	Copyright (c) 1995 John S. Dyson.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_cluster.c	8.7 (Berkeley) 2/13/94
 * $FreeBSD: src/sys/kern/vfs_cluster.c,v 1.92.2.9 2001/11/18 07:10:59 dillon Exp $
 * $DragonFly: src/sys/kern/vfs_cluster.c,v 1.40 2008/07/14 03:09:00 dillon Exp $
 */

#include "opt_debug_cluster.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/resourcevar.h>
#include <sys/vmmeter.h>
#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <sys/sysctl.h>
#include <sys/buf2.h>
#include <vm/vm_page2.h>

#include <machine/limits.h>

#if defined(CLUSTERDEBUG)
#include <sys/sysctl.h>
static int	rcluster = 0;
SYSCTL_INT(_debug, OID_AUTO, rcluster, CTLFLAG_RW, &rcluster, 0, "");
#endif

static MALLOC_DEFINE(M_SEGMENT, "cluster_save", "cluster_save buffer");

static struct cluster_save *
	cluster_collectbufs (struct vnode *vp, struct buf *last_bp,
			    int blksize);
static struct buf *
	cluster_rbuild (struct vnode *vp, off_t filesize, off_t loffset,
			    off_t doffset, int blksize, int run,
			    struct buf *fbp);
static void cluster_callback (struct bio *);


static int write_behind = 1;
SYSCTL_INT(_vfs, OID_AUTO, write_behind, CTLFLAG_RW, &write_behind, 0, "");

extern vm_page_t	bogus_page;

extern int cluster_pbuf_freecnt;

/*
 * Maximum number of blocks for read-ahead.
 */
#define MAXRA 32

/*
 * This replaces bread.  It reads the requested block and, when the
 * access pattern looks sequential, issues clustered read-ahead.
 */
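/*
 * Illustrative call site (hypothetical filesystem names, not from this
 * file): a VOP_READ path with logical block size 'bsize' might do
 *
 *	error = cluster_read(vp, ip->i_size, loffset, bsize,
 *			     uio->uio_resid, seqcount, &bp);
 *
 * 'resid' only bounds the read-ahead sizing; exactly one block of
 * 'bsize' bytes at 'loffset' is returned in *bpp.
 */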
int
cluster_read(struct vnode *vp, off_t filesize, off_t loffset,
	     int blksize, size_t resid, int seqcount, struct buf **bpp)
{
	struct buf *bp, *rbp, *reqbp;
	off_t origoffset;
	off_t doffset;
	int error;
	int i;
	int maxra, racluster;
	int totread;

	error = 0;
	totread = (resid > INT_MAX) ? INT_MAX : (int)resid;

	/*
	 * Try to limit the amount of read-ahead by a few
	 * ad-hoc parameters.  This needs work!!!
	 */
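	/*
	 * (Sketch of the sizing, from the code below: 'racluster' is how
	 * many blocks fit in one maximal device I/O, and 'maxra' allows
	 * roughly two such clusters plus whatever the caller's residual
	 * count spans, clamped to MAXRA and to 1/8 of the buffer cache.)
	 */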
	racluster = vmaxiosize(vp) / blksize;
	maxra = 2 * racluster + (totread / blksize);
	if (maxra > MAXRA)
		maxra = MAXRA;
	if (maxra > nbuf/8)
		maxra = nbuf/8;

	/*
	 * Get the requested block.
	 */
	*bpp = reqbp = bp = getblk(vp, loffset, blksize, 0, 0);
	origoffset = loffset;

	/*
	 * if it is in the cache, then check to see if the reads have been
	 * sequential.  If they have, then try some read-ahead, otherwise
	 * back-off on prospective read-aheads.
	 */
	if (bp->b_flags & B_CACHE) {
		if (!seqcount) {
			return 0;
		} else if ((bp->b_flags & B_RAM) == 0) {
			return 0;
		} else {
			struct buf *tbp;
			bp->b_flags &= ~B_RAM;
			/*
			 * Set the read-ahead mark only if we can passively
			 * lock the buffer.  Note that with these flags the
			 * bp could very well exist even though NULL is
			 * returned.
			 */
			for (i = 1; i < maxra; i++) {
				tbp = findblk(vp, loffset + i * blksize,
					      FINDBLK_NBLOCK);
				if (tbp == NULL)
					break;
				if (((i % racluster) == (racluster - 1)) ||
				    (i == (maxra - 1))) {
					tbp->b_flags |= B_RAM;
				}
				BUF_UNLOCK(tbp);
			}
			if (i >= maxra)
				return 0;
			loffset += i * blksize;
		}
		reqbp = bp = NULL;
	} else {
		off_t firstread = bp->b_loffset;
		int nblks;

		/*
		 * Set-up synchronous read for bp.
		 */
		bp->b_cmd = BUF_CMD_READ;
		bp->b_bio1.bio_done = biodone_sync;
		bp->b_bio1.bio_flags |= BIO_SYNC;

		KASSERT(firstread != NOOFFSET,
			("cluster_read: no buffer offset"));
		if (firstread + totread > filesize)
			totread = (int)(filesize - firstread);
		nblks = totread / blksize;
		if (nblks) {
			int burstbytes;

			if (nblks > racluster)
				nblks = racluster;

			error = VOP_BMAP(vp, loffset, &doffset,
					 &burstbytes, NULL, BUF_CMD_READ);
			if (error)
				goto single_block_read;
			if (doffset == NOOFFSET)
				goto single_block_read;
			if (burstbytes < blksize * 2)
				goto single_block_read;
			if (nblks > burstbytes / blksize)
				nblks = burstbytes / blksize;

			bp = cluster_rbuild(vp, filesize, loffset,
					    doffset, blksize, nblks, bp);
			loffset += bp->b_bufsize;
		} else {
single_block_read:
			/*
			 * if it isn't in the cache, then get a chunk from
			 * disk if sequential, otherwise just get the block.
			 */
			bp->b_flags |= B_RAM;
			loffset += blksize;
		}
	}

	/*
	 * If B_CACHE was not set issue bp.  bp will either be an
	 * asynchronous cluster buf or a synchronous single-buf.
	 * If it is a single buf it will be the same as reqbp.
	 *
	 * NOTE: Once an async cluster buf is issued bp becomes invalid.
	 */
	if (bp) {
#if defined(CLUSTERDEBUG)
		if (rcluster)
			kprintf("S(%lld,%d,%d) ",
			    (long long)bp->b_loffset, bp->b_bcount, seqcount);
#endif
		if ((bp->b_flags & B_CLUSTER) == 0)
			vfs_busy_pages(vp, bp);
		bp->b_flags &= ~(B_ERROR|B_INVAL);
		vn_strategy(vp, &bp->b_bio1);
		error = 0;
		/* bp invalid now */
	}

	/*
	 * If we have been doing sequential I/O, then do some read-ahead.
	 *
	 * Only mess with buffers which we can immediately lock.  HAMMER
	 * will do device-readahead irrespective of what the blocks
	 * represent.
	 */
	rbp = NULL;
	if (!error &&
	    seqcount &&
	    loffset < origoffset + seqcount * blksize &&
	    loffset + blksize <= filesize
	) {
		int nblksread;
		int ntoread;
		int burstbytes;
		int tmp_error;

		rbp = getblk(vp, loffset, blksize,
			     GETBLK_SZMATCH|GETBLK_NOWAIT, 0);
		if (rbp == NULL)
			goto no_read_ahead;
		if ((rbp->b_flags & B_CACHE)) {
			bqrelse(rbp);
			goto no_read_ahead;
		}

		/*
		 * An error from the read-ahead bmap has nothing to do
		 * with the caller's original request.
		 */
		tmp_error = VOP_BMAP(vp, loffset, &doffset,
				     &burstbytes, NULL, BUF_CMD_READ);
		if (tmp_error || doffset == NOOFFSET) {
			rbp->b_flags |= B_INVAL;
			brelse(rbp);
			rbp = NULL;
			goto no_read_ahead;
		}
		ntoread = burstbytes / blksize;
		nblksread = (totread + blksize - 1) / blksize;
		if (seqcount < nblksread)
			seqcount = nblksread;
		if (ntoread > seqcount)
			ntoread = seqcount;

		/*
		 * rbp: async read
		 */
		rbp->b_cmd = BUF_CMD_READ;
		rbp->b_flags |= B_RAM/* | B_AGE*/;

		if (burstbytes) {
			rbp = cluster_rbuild(vp, filesize, loffset,
					     doffset, blksize,
					     ntoread, rbp);
		} else {
			rbp->b_bio2.bio_offset = doffset;
		}
#if defined(CLUSTERDEBUG)
		if (rcluster) {
			if (bp)
				kprintf("A+(%lld,%d,%lld,%d) ",
				    (long long)rbp->b_loffset, rbp->b_bcount,
				    (long long)(rbp->b_loffset - origoffset),
				    seqcount);
			else
				kprintf("A(%lld,%d,%lld,%d) ",
				    (long long)rbp->b_loffset, rbp->b_bcount,
				    (long long)(rbp->b_loffset - origoffset),
				    seqcount);
		}
#endif
		rbp->b_flags &= ~(B_ERROR|B_INVAL);

		if ((rbp->b_flags & B_CLUSTER) == 0)
			vfs_busy_pages(vp, rbp);
		BUF_KERNPROC(rbp);
		vn_strategy(vp, &rbp->b_bio1);
		/* rbp invalid now */
	}

	/*
	 * Wait for our original buffer to complete its I/O.  reqbp will
	 * be NULL if the original buffer was B_CACHE.  We are returning
	 * (*bpp) which is the same as reqbp when reqbp != NULL.
	 */
no_read_ahead:
	if (reqbp) {
		KKASSERT(reqbp->b_bio1.bio_flags & BIO_SYNC);
		error = biowait(&reqbp->b_bio1, "clurd");
	}
	return (error);
}

/*
 * If blocks are contiguous on disk, use this to provide clustered
 * read ahead.  We will read as many blocks as possible sequentially
 * and then parcel them up into logical blocks in the buffer hash table.
 *
 * This function either returns a cluster buf or it returns fbp.  fbp is
 * already expected to be set up as a synchronous or asynchronous request.
 *
 * If a cluster buf is returned it will always be async.
 */
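/*
 * (Mechanism sketch, from the code below: a pbuf is borrowed to act as
 * the cluster head, the component bufs are chained through b_cluster_next
 * via cluster_append(), and their vm_page_t's are collected into the
 * pbuf's b_xio so a single device I/O covers the whole run.  Pages that
 * are already fully valid are replaced with bogus_page so the transfer
 * cannot clobber cached data.)
 */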
static struct buf *
cluster_rbuild(struct vnode *vp, off_t filesize, off_t loffset, off_t doffset,
	       int blksize, int run, struct buf *fbp)
{
	struct buf *bp, *tbp;
	off_t boffset;
	int i, j;
	int maxiosize = vmaxiosize(vp);

	/*
	 * avoid a division
	 */
	while (loffset + run * blksize > filesize) {
		--run;
	}

	tbp = fbp;
	tbp->b_bio2.bio_offset = doffset;
	if ((tbp->b_flags & B_MALLOC) ||
	    ((tbp->b_flags & B_VMIO) == 0) || (run <= 1)) {
		return tbp;
	}

	bp = trypbuf(&cluster_pbuf_freecnt);
	if (bp == NULL) {
		return tbp;
	}

	/*
	 * We are synthesizing a buffer out of vm_page_t's, but
	 * if the block size is not page aligned then the starting
	 * address may not be either.  Inherit the b_data offset
	 * from the original buffer.
	 */
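	/*
	 * (Worked example, assuming PAGE_SIZE is 4096: with a 6144-byte
	 * block the second block of a file begins 2048 bytes into a page,
	 * so the intra-page bits of tbp->b_data are OR'd into the pbuf's
	 * page-aligned KVA to preserve that offset.)
	 */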
	bp->b_data = (char *)((vm_offset_t)bp->b_data |
	    ((vm_offset_t)tbp->b_data & PAGE_MASK));
	bp->b_flags |= B_CLUSTER | B_VMIO;
	bp->b_cmd = BUF_CMD_READ;
	bp->b_bio1.bio_done = cluster_callback;		/* default to async */
	bp->b_bio1.bio_caller_info1.cluster_head = NULL;
	bp->b_bio1.bio_caller_info2.cluster_tail = NULL;
	bp->b_loffset = loffset;
	bp->b_bio2.bio_offset = doffset;
	KASSERT(bp->b_loffset != NOOFFSET,
		("cluster_rbuild: no buffer offset"));

	bp->b_bcount = 0;
	bp->b_bufsize = 0;
	bp->b_xio.xio_npages = 0;

	for (boffset = doffset, i = 0; i < run; ++i, boffset += blksize) {
		if (i) {
			if ((bp->b_xio.xio_npages * PAGE_SIZE) +
			    round_page(blksize) > maxiosize) {
				break;
			}

			/*
			 * Shortcut some checks and try to avoid buffers that
			 * would block in the lock.  The same checks have to
			 * be made again after we officially get the buffer.
			 */
			tbp = getblk(vp, loffset + i * blksize, blksize,
				     GETBLK_SZMATCH|GETBLK_NOWAIT, 0);
			if (tbp == NULL)
				break;
			for (j = 0; j < tbp->b_xio.xio_npages; j++) {
				if (tbp->b_xio.xio_pages[j]->valid)
					break;
			}
			if (j != tbp->b_xio.xio_npages) {
				bqrelse(tbp);
				break;
			}

			/*
			 * Stop scanning if the buffer is fully valid
			 * (marked B_CACHE), or locked (may be doing a
			 * background write), or if the buffer is not
			 * VMIO backed.  The clustering code can only deal
			 * with VMIO-backed buffers.
			 */
			if ((tbp->b_flags & (B_CACHE|B_LOCKED)) ||
			    (tbp->b_flags & B_VMIO) == 0 ||
			    (LIST_FIRST(&tbp->b_dep) != NULL &&
			     buf_checkread(tbp))
			) {
				bqrelse(tbp);
				break;
			}

			/*
			 * The buffer must be completely invalid in order to
			 * take part in the cluster.  If it is partially valid
			 * then we stop.
			 */
			for (j = 0; j < tbp->b_xio.xio_npages; j++) {
				if (tbp->b_xio.xio_pages[j]->valid)
					break;
			}
			if (j != tbp->b_xio.xio_npages) {
				bqrelse(tbp);
				break;
			}

			/*
			 * Set a read-ahead mark as appropriate
			 */
			if (i == 1 || i == (run - 1))
				tbp->b_flags |= B_RAM;

			/*
			 * Depress the priority of buffers not explicitly
			 * requested.
			 */
			/* tbp->b_flags |= B_AGE; */

			/*
			 * Set the block number if it isn't set, otherwise
			 * if it is make sure it matches the block number we
			 * expect.
			 */
			if (tbp->b_bio2.bio_offset == NOOFFSET) {
				tbp->b_bio2.bio_offset = boffset;
			} else if (tbp->b_bio2.bio_offset != boffset) {
				brelse(tbp);
				break;
			}
		}
		/*
		 * The passed-in tbp (i == 0) will already be set up for
		 * async or sync operation.  All other tbp's acquired in
		 * our loop are set up for async operation.
		 */
		tbp->b_cmd = BUF_CMD_READ;
		BUF_KERNPROC(tbp);
		cluster_append(&bp->b_bio1, tbp);
		for (j = 0; j < tbp->b_xio.xio_npages; ++j) {
			vm_page_t m;
			m = tbp->b_xio.xio_pages[j];
			vm_page_io_start(m);
			vm_object_pip_add(m->object, 1);
			if ((bp->b_xio.xio_npages == 0) ||
				(bp->b_xio.xio_pages[bp->b_xio.xio_npages-1] != m)) {
				bp->b_xio.xio_pages[bp->b_xio.xio_npages] = m;
				bp->b_xio.xio_npages++;
			}
			if ((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL)
				tbp->b_xio.xio_pages[j] = bogus_page;
		}
		/*
		 * Don't inherit tbp->b_bufsize as it may be larger due to
		 * a non-page-aligned size.  Instead just aggregate using
		 * 'blksize' for both b_bcount and b_bufsize, as
		 * cluster_wbuild() does.
		 */
		if (tbp->b_bcount != blksize)
		    kprintf("warning: tbp->b_bcount wrong %d vs %d\n", tbp->b_bcount, blksize);
		if (tbp->b_bufsize != blksize)
		    kprintf("warning: tbp->b_bufsize wrong %d vs %d\n", tbp->b_bufsize, blksize);
		bp->b_bcount += blksize;
		bp->b_bufsize += blksize;
	}

	/*
	 * Fully valid pages in the cluster are already good and do not need
	 * to be re-read from disk.  Replace the page with bogus_page
	 */
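	/*
	 * (bogus_page is a kernel-wide throwaway page: the device still
	 * transfers the full contiguous range, but the bytes destined for
	 * already-valid pages land in bogus_page instead of overwriting
	 * good cached data.)
	 */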
	for (j = 0; j < bp->b_xio.xio_npages; j++) {
		if ((bp->b_xio.xio_pages[j]->valid & VM_PAGE_BITS_ALL) ==
		    VM_PAGE_BITS_ALL) {
			bp->b_xio.xio_pages[j] = bogus_page;
		}
	}
	if (bp->b_bufsize > bp->b_kvasize) {
		panic("cluster_rbuild: b_bufsize(%d) > b_kvasize(%d)",
		    bp->b_bufsize, bp->b_kvasize);
	}
	pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
		(vm_page_t *)bp->b_xio.xio_pages, bp->b_xio.xio_npages);
	BUF_KERNPROC(bp);
	return (bp);
}

/*
 * Cleanup after a clustered read or write.
 * This is complicated by the fact that any of the buffers might have
 * extra memory (if there were no empty buffer headers at allocbuf time)
 * that we will need to shift around.
 *
 * The returned bio is &bp->b_bio1
 */
void
cluster_callback(struct bio *bio)
{
	struct buf *bp = bio->bio_buf;
	struct buf *tbp;
	int error = 0;
	/*
	 * Must propagate errors to all the components.  A short read (EOF)
	 * is a critical error.
	 */
	if (bp->b_flags & B_ERROR) {
		error = bp->b_error;
	} else if (bp->b_bcount != bp->b_bufsize) {
		panic("cluster_callback: unexpected EOF on cluster %p!", bio);
	}

	pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_xio.xio_npages);
	/*
	 * Move memory from the large cluster buffer into the component
	 * buffers and mark IO as done on these.  Since the memory map
	 * is the same, no actual copying is required.
	 */
	while ((tbp = bio->bio_caller_info1.cluster_head) != NULL) {
		bio->bio_caller_info1.cluster_head = tbp->b_cluster_next;
		if (error) {
			tbp->b_flags |= B_ERROR;
			tbp->b_error = error;
		} else {
			tbp->b_dirtyoff = tbp->b_dirtyend = 0;
			tbp->b_flags &= ~(B_ERROR|B_INVAL);
			/*
			 * XXX the bdwrite()/bqrelse() issued during
			 * cluster building clears B_RELBUF (see bqrelse()
			 * comment).  If direct I/O was specified, we have
			 * to restore it here to allow the buffer and VM
			 * to be freed.
			 */
			if (tbp->b_flags & B_DIRECT)
				tbp->b_flags |= B_RELBUF;
		}
		biodone(&tbp->b_bio1);
	}
	relpbuf(bp, &cluster_pbuf_freecnt);
}

/*
 *	cluster_wbuild_wb:
 *
 *	Implement modified write build for cluster.
 *
 *		write_behind = 0	write behind disabled
 *		write_behind = 1	write behind normal (default)
 *		write_behind = 2	write behind backed-off
 */
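/*
 * (In the backed-off mode the window is shifted back by 'len' before
 * the flush, so the most recently dirtied range is left in the cache
 * and only the older range is pushed out.)
 */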

static __inline int
cluster_wbuild_wb(struct vnode *vp, int blksize, off_t start_loffset, int len)
{
	int r = 0;

	switch(write_behind) {
	case 2:
		if (start_loffset < len)
			break;
		start_loffset -= len;
		/* fall through */
	case 1:
		r = cluster_wbuild(vp, blksize, start_loffset, len);
		/* fall through */
	default:
		break;
	}
	return(r);
}

/*
 * Do clustered write for FFS.
 *
 * Four cases:
 *	1. Write is not sequential (write asynchronously)
 *	Write is sequential:
 *	2.	beginning of cluster - begin cluster
 *	3.	middle of a cluster - add to cluster
 *	4.	end of a cluster - asynchronously write cluster
 */
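/*
 * (Illustrative call, hypothetical names: a filesystem write path might
 * invoke "cluster_write(bp, ip->i_size, bsize, seqcount);" in place of a
 * plain bdwrite(bp) once it has marked bp B_CLUSTEROK.)
 */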
void
cluster_write(struct buf *bp, off_t filesize, int blksize, int seqcount)
{
	struct vnode *vp;
	off_t loffset;
	int maxclen, cursize;
	int async;

	vp = bp->b_vp;
	if (vp->v_type == VREG)
		async = vp->v_mount->mnt_flag & MNT_ASYNC;
	else
		async = 0;
	loffset = bp->b_loffset;
	KASSERT(bp->b_loffset != NOOFFSET,
		("cluster_write: no buffer offset"));

	/* Initialize vnode to beginning of file. */
	if (loffset == 0)
		vp->v_lasta = vp->v_clen = vp->v_cstart = vp->v_lastw = 0;

	if (vp->v_clen == 0 || loffset != vp->v_lastw + blksize ||
	    bp->b_bio2.bio_offset == NOOFFSET ||
	    (bp->b_bio2.bio_offset != vp->v_lasta + blksize)) {
		maxclen = vmaxiosize(vp);
		if (vp->v_clen != 0) {
			/*
			 * Next block is not sequential.
			 *
			 * If we are not writing at end of file, the process
			 * seeked to another point in the file since its last
			 * write, or we have reached our maximum cluster size,
			 * then push the previous cluster. Otherwise try
			 * reallocating to make it sequential.
			 *
			 * Change to algorithm: only push previous cluster if
			 * it was sequential from the point of view of the
			 * seqcount heuristic, otherwise leave the buffer
			 * intact so we can potentially optimize the I/O
			 * later on in the buf_daemon or update daemon
			 * flush.
			 */
			cursize = vp->v_lastw - vp->v_cstart + blksize;
			if (bp->b_loffset + blksize != filesize ||
			    loffset != vp->v_lastw + blksize || vp->v_clen <= cursize) {
				if (!async && seqcount > 0) {
					cluster_wbuild_wb(vp, blksize,
						vp->v_cstart, cursize);
				}
			} else {
				struct buf **bpp, **endbp;
				struct cluster_save *buflist;

				buflist = cluster_collectbufs(vp, bp, blksize);
				endbp = &buflist->bs_children
				    [buflist->bs_nchildren - 1];
				if (VOP_REALLOCBLKS(vp, buflist)) {
					/*
					 * Failed, push the previous cluster
					 * if *really* writing sequentially
					 * in the logical file (seqcount > 1),
					 * otherwise delay it in the hopes that
					 * the low level disk driver can
					 * optimize the write ordering.
					 */
					for (bpp = buflist->bs_children;
					     bpp < endbp; bpp++)
						brelse(*bpp);
					kfree(buflist, M_SEGMENT);
					if (seqcount > 1) {
						cluster_wbuild_wb(vp,
						    blksize, vp->v_cstart,
						    cursize);
					}
				} else {
					/*
					 * Succeeded, keep building cluster.
					 */
					for (bpp = buflist->bs_children;
					     bpp <= endbp; bpp++)
						bdwrite(*bpp);
					kfree(buflist, M_SEGMENT);
					vp->v_lastw = loffset;
					vp->v_lasta = bp->b_bio2.bio_offset;
					return;
				}
			}
		}
		/*
		 * Consider beginning a cluster. If at end of file, make
		 * cluster as large as possible, otherwise find size of
		 * existing cluster.
		 */
		if ((vp->v_type == VREG) &&
		    bp->b_loffset + blksize != filesize &&
		    (bp->b_bio2.bio_offset == NOOFFSET) &&
		    (VOP_BMAP(vp, loffset, &bp->b_bio2.bio_offset,
			      &maxclen, NULL, BUF_CMD_WRITE) ||
		     bp->b_bio2.bio_offset == NOOFFSET)) {
			bawrite(bp);
			vp->v_clen = 0;
			vp->v_lasta = bp->b_bio2.bio_offset;
			vp->v_cstart = loffset + blksize;
			vp->v_lastw = loffset;
			return;
		}
		if (maxclen > blksize)
			vp->v_clen = maxclen - blksize;
		else
			vp->v_clen = 0;
		if (!async && vp->v_clen == 0) { /* I/O not contiguous */
			vp->v_cstart = loffset + blksize;
			bawrite(bp);
		} else {	/* Wait for rest of cluster */
			vp->v_cstart = loffset;
			bdwrite(bp);
		}
	} else if (loffset == vp->v_cstart + vp->v_clen) {
		/*
		 * At end of cluster, write it out if seqcount tells us we
		 * are operating sequentially, otherwise let the buf or
		 * update daemon handle it.
		 */
		bdwrite(bp);
		if (seqcount > 1)
			cluster_wbuild_wb(vp, blksize, vp->v_cstart,
					  vp->v_clen + blksize);
		vp->v_clen = 0;
		vp->v_cstart = loffset + blksize;
	} else if (vm_page_count_severe()) {
		/*
		 * We are low on memory, get it going NOW
		 */
		bawrite(bp);
	} else {
		/*
		 * In the middle of a cluster, so just delay the I/O for now.
		 */
		bdwrite(bp);
	}
	vp->v_lastw = loffset;
	vp->v_lasta = bp->b_bio2.bio_offset;
}

/*
 * This is an awful lot like cluster_rbuild...wish they could be combined.
 *
 * Scan [start_loffset, start_loffset + bytes) for dirty, clusterable
 * buffers and write them out in maximal clusters.  Returns the number
 * of bytes of write I/O initiated.
 */
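/*
 * (Illustrative: this is how cluster_wbuild_wb() above drives it, e.g.
 * cluster_wbuild(vp, blksize, vp->v_cstart, vp->v_clen + blksize) when
 * cluster_write() decides to push a completed cluster.)
 */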
int
cluster_wbuild(struct vnode *vp, int blksize, off_t start_loffset, int bytes)
{
	struct buf *bp, *tbp;
	int i, j;
	int totalwritten = 0;
	int maxiosize = vmaxiosize(vp);

	while (bytes > 0) {
		/*
		 * If the buffer is not delayed-write (i.e. dirty), or it
		 * is delayed-write but either locked or inval, it cannot
		 * partake in the clustered write.
		 */
		tbp = findblk(vp, start_loffset, FINDBLK_NBLOCK);
		if (tbp == NULL ||
		    (tbp->b_flags & (B_LOCKED | B_INVAL | B_DELWRI)) != B_DELWRI ||
		    (LIST_FIRST(&tbp->b_dep) && buf_checkwrite(tbp))) {
			if (tbp)
				BUF_UNLOCK(tbp);
			start_loffset += blksize;
			bytes -= blksize;
			continue;
		}
		bremfree(tbp);
		KKASSERT(tbp->b_cmd == BUF_CMD_DONE);

		/*
		 * Extra memory in the buffer, punt on this buffer.
		 * XXX we could handle this in most cases, but we would
		 * have to push the extra memory down to after our max
		 * possible cluster size and then potentially pull it back
		 * up if the cluster was terminated prematurely--too much
		 * hassle.
		 */
		if (((tbp->b_flags & (B_CLUSTEROK|B_MALLOC)) != B_CLUSTEROK) ||
		    (tbp->b_bcount != tbp->b_bufsize) ||
		    (tbp->b_bcount != blksize) ||
		    (bytes == blksize) ||
		    ((bp = getpbuf(&cluster_pbuf_freecnt)) == NULL)) {
			totalwritten += tbp->b_bufsize;
			bawrite(tbp);
			start_loffset += blksize;
			bytes -= blksize;
			continue;
		}

		/*
		 * Set up the pbuf.  Track our append point with b_bcount
		 * and b_bufsize.  b_bufsize is not used by the device but
		 * our caller uses it to loop clusters and we use it to
		 * detect a premature EOF on the block device.
		 */
		bp->b_bcount = 0;
		bp->b_bufsize = 0;
		bp->b_xio.xio_npages = 0;
		bp->b_loffset = tbp->b_loffset;
		bp->b_bio2.bio_offset = tbp->b_bio2.bio_offset;

		/*
		 * We are synthesizing a buffer out of vm_page_t's, but
		 * if the block size is not page aligned then the starting
		 * address may not be either.  Inherit the b_data offset
		 * from the original buffer.
		 */
		bp->b_data = (char *)((vm_offset_t)bp->b_data |
		    ((vm_offset_t)tbp->b_data & PAGE_MASK));
		bp->b_flags &= ~B_ERROR;
		bp->b_flags |= B_CLUSTER | B_BNOCLIP |
			(tbp->b_flags & (B_VMIO | B_NEEDCOMMIT));
		bp->b_bio1.bio_caller_info1.cluster_head = NULL;
		bp->b_bio1.bio_caller_info2.cluster_tail = NULL;

		/*
		 * From this location in the file, scan forward to see
		 * if there are buffers with adjacent data that need to
		 * be written as well.
		 */
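		/*
		 * (Note: 'i' is the byte offset of tbp within the growing
		 * cluster, so the (bp->b_bio2.bio_offset + i) check below
		 * is what enforces physical contiguity on the device.)
		 */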
		for (i = 0; i < bytes; (i += blksize), (start_loffset += blksize)) {
			if (i != 0) { /* If not the first buffer */
				tbp = findblk(vp, start_loffset,
					      FINDBLK_NBLOCK);
				/*
				 * Buffer not found or could not be locked
				 * non-blocking.
				 */
				if (tbp == NULL)
					break;

				/*
				 * If it IS in core, but has different
				 * characteristics, then don't cluster
				 * with it.
				 */
				if ((tbp->b_flags & (B_VMIO | B_CLUSTEROK |
				     B_INVAL | B_DELWRI | B_NEEDCOMMIT))
				    != (B_DELWRI | B_CLUSTEROK |
				     (bp->b_flags & (B_VMIO | B_NEEDCOMMIT))) ||
				    (tbp->b_flags & B_LOCKED) ||
				    (LIST_FIRST(&tbp->b_dep) &&
				     buf_checkwrite(tbp))
				) {
					BUF_UNLOCK(tbp);
					break;
				}

				/*
				 * Check that the combined cluster
				 * would make sense with regard to pages
				 * and would not be too large
				 */
				if ((tbp->b_bcount != blksize) ||
				  ((bp->b_bio2.bio_offset + i) !=
				    tbp->b_bio2.bio_offset) ||
				  ((tbp->b_xio.xio_npages + bp->b_xio.xio_npages) >
				    (maxiosize / PAGE_SIZE))) {
					BUF_UNLOCK(tbp);
					break;
				}
				/*
				 * Ok, it's passed all the tests,
				 * so remove it from the free list
				 * and mark it busy. We will use it.
				 */
				bremfree(tbp);
				KKASSERT(tbp->b_cmd == BUF_CMD_DONE);
			} /* end of code for non-first buffers only */

			/*
			 * If the IO is via the VM then we do some
			 * special VM hackery (yuck).  Since the buffer's
			 * block size may not be page-aligned it is possible
			 * for a page to be shared between two buffers.  We
			 * have to get rid of the duplication when building
			 * the cluster.
			 */
			if (tbp->b_flags & B_VMIO) {
				vm_page_t m;

				if (i != 0) { /* if not first buffer */
					for (j = 0; j < tbp->b_xio.xio_npages; ++j) {
						m = tbp->b_xio.xio_pages[j];
						if (m->flags & PG_BUSY) {
							bqrelse(tbp);
							goto finishcluster;
						}
					}
				}

				for (j = 0; j < tbp->b_xio.xio_npages; ++j) {
					m = tbp->b_xio.xio_pages[j];
					vm_page_io_start(m);
					vm_object_pip_add(m->object, 1);
					if ((bp->b_xio.xio_npages == 0) ||
					  (bp->b_xio.xio_pages[bp->b_xio.xio_npages - 1] != m)) {
						bp->b_xio.xio_pages[bp->b_xio.xio_npages] = m;
						bp->b_xio.xio_npages++;
					}
				}
			}
			bp->b_bcount += blksize;
			bp->b_bufsize += blksize;

			bundirty(tbp);
			tbp->b_flags &= ~B_ERROR;
			tbp->b_cmd = BUF_CMD_WRITE;
			BUF_KERNPROC(tbp);
			cluster_append(&bp->b_bio1, tbp);

			/*
			 * check for latent dependencies to be handled
			 */
			if (LIST_FIRST(&tbp->b_dep) != NULL)
				buf_start(tbp);
		}
	finishcluster:
		pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
			(vm_page_t *) bp->b_xio.xio_pages, bp->b_xio.xio_npages);
		if (bp->b_bufsize > bp->b_kvasize) {
			panic(
			    "cluster_wbuild: b_bufsize(%d) > b_kvasize(%d)\n",
			    bp->b_bufsize, bp->b_kvasize);
		}
		totalwritten += bp->b_bufsize;
		bp->b_dirtyoff = 0;
		bp->b_dirtyend = bp->b_bufsize;
		bp->b_bio1.bio_done = cluster_callback;
		bp->b_cmd = BUF_CMD_WRITE;

		vfs_busy_pages(vp, bp);
		bp->b_runningbufspace = bp->b_bufsize;
		if (bp->b_runningbufspace) {
			runningbufspace += bp->b_runningbufspace;
			++runningbufcount;
		}
		BUF_KERNPROC(bp);
		vn_strategy(vp, &bp->b_bio1);

		bytes -= i;
	}
	return totalwritten;
}

/*
 * Collect together all the buffers in a cluster.
 * Plus add one additional buffer.
 */
static struct cluster_save *
cluster_collectbufs(struct vnode *vp, struct buf *last_bp, int blksize)
{
	struct cluster_save *buflist;
	struct buf *bp;
	off_t loffset;
	int i, len;

	len = (int)(vp->v_lastw - vp->v_cstart + blksize) / blksize;
	buflist = kmalloc(sizeof(struct buf *) * (len + 1) + sizeof(*buflist),
			 M_SEGMENT, M_WAITOK);
	buflist->bs_nchildren = 0;
	buflist->bs_children = (struct buf **) (buflist + 1);
	for (loffset = vp->v_cstart, i = 0; i < len; (loffset += blksize), i++) {
		(void) bread(vp, loffset, last_bp->b_bcount, &bp);
		buflist->bs_children[i] = bp;
		if (bp->b_bio2.bio_offset == NOOFFSET) {
			VOP_BMAP(bp->b_vp, bp->b_loffset,
				 &bp->b_bio2.bio_offset,
				 NULL, NULL, BUF_CMD_WRITE);
		}
	}
	buflist->bs_children[i] = bp = last_bp;
	if (bp->b_bio2.bio_offset == NOOFFSET) {
		VOP_BMAP(bp->b_vp, bp->b_loffset, &bp->b_bio2.bio_offset,
			 NULL, NULL, BUF_CMD_WRITE);
	}
	buflist->bs_nchildren = i + 1;
	return (buflist);
}

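/*
 * Append tbp to the cluster list hung off 'bio', maintaining the
 * head/tail pointers kept in the bio's caller_info fields.  The list
 * is singly linked through b_cluster_next and is consumed by
 * cluster_callback() when the cluster I/O completes.
 */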
void
cluster_append(struct bio *bio, struct buf *tbp)
{
	tbp->b_cluster_next = NULL;
	if (bio->bio_caller_info1.cluster_head == NULL) {
		bio->bio_caller_info1.cluster_head = tbp;
		bio->bio_caller_info2.cluster_tail = tbp;
	} else {
		bio->bio_caller_info2.cluster_tail->b_cluster_next = tbp;
		bio->bio_caller_info2.cluster_tail = tbp;
	}
}