kernel: Adjust some comments to the unionfs removal.
[dragonfly.git] / sys / kern / vfs_cluster.c
1 /*-
2  * Copyright (c) 1993
3  *      The Regents of the University of California.  All rights reserved.
4  * Modifications/enhancements:
5  *      Copyright (c) 1995 John S. Dyson.  All rights reserved.
6  *      Copyright (c) 2012-2013 Matthew Dillon.  All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. Neither the name of the University nor the names of its contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  */
32
33 #include "opt_debug_cluster.h"
34
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/kernel.h>
38 #include <sys/proc.h>
39 #include <sys/buf.h>
40 #include <sys/vnode.h>
41 #include <sys/malloc.h>
42 #include <sys/mount.h>
43 #include <sys/resourcevar.h>
44 #include <sys/vmmeter.h>
45 #include <vm/vm.h>
46 #include <vm/vm_object.h>
47 #include <vm/vm_page.h>
48 #include <sys/sysctl.h>
49
50 #include <sys/buf2.h>
51 #include <vm/vm_page2.h>
52
53 #include <machine/limits.h>
54
55 /*
56  * Cluster tracking cache - replaces the original vnode v_* fields which had
57  * limited utility and were not MP safe.
58  *
59  * The cluster tracking cache is a simple 4-way set-associative non-chained
60  * cache.  It is capable of tracking up to four zones separated by 1MB or
61  * more per vnode.
62  *
63  * NOTE: We want this structure to be cache-line friendly so the iterator
64  *       is embedded rather than in a separate array.
65  *
66  * NOTE: A cluster cache entry can become stale when a vnode is recycled.
67  *       For now we treat the values as heuristic but also self-consistent,
68  *       i.e. the values cannot be completely random and cannot be SMP unsafe
69  *       or the cluster code might end up clustering non-contiguous buffers
70  *       at the wrong offsets.
71  */
72 struct cluster_cache {
73         struct vnode *vp;
74         u_int   locked;
75         off_t   v_lastw;                /* last write (write cluster) */
76         off_t   v_cstart;               /* start block of cluster */
77         off_t   v_lasta;                /* last allocation */
78         u_int   v_clen;                 /* length of current cluster */
79         u_int   iterator;
80 } __cachealign;
81
82 typedef struct cluster_cache cluster_cache_t;
83
84 #define CLUSTER_CACHE_SIZE      512
85 #define CLUSTER_CACHE_MASK      (CLUSTER_CACHE_SIZE - 1)
86
87 #define CLUSTER_ZONE            ((off_t)(1024 * 1024))
88
89 cluster_cache_t cluster_array[CLUSTER_CACHE_SIZE];
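
/*
 * Worked example (illustrative): CLUSTER_ZONE is 1MB (2^20), so two file
 * offsets fall into the same tracking zone when they agree in every bit
 * from bit 20 upward.  cluster_getcache() below uses exactly that test,
 * ((v_cstart ^ loffset) & ~(CLUSTER_ZONE - 1)) == 0, to decide whether an
 * existing entry already covers the offset being accessed.
 */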
90
91 #if defined(CLUSTERDEBUG)
92 #include <sys/sysctl.h>
93 static int      rcluster = 0;
94 SYSCTL_INT(_debug, OID_AUTO, rcluster, CTLFLAG_RW, &rcluster, 0, "");
95 #endif
96
97 static MALLOC_DEFINE(M_SEGMENT, "cluster_save", "cluster_save buffer");
98
99 static struct cluster_save *
100         cluster_collectbufs (cluster_cache_t *cc, struct vnode *vp,
101                                 struct buf *last_bp, int blksize);
102 static struct buf *
103         cluster_rbuild (struct vnode *vp, off_t filesize, off_t loffset,
104                             off_t doffset, int blksize, int run, 
105                             struct buf *fbp);
106 static void cluster_callback (struct bio *);
107 static void cluster_setram (struct buf *);
108 static int cluster_wbuild(struct vnode *vp, struct buf **bpp, int blksize,
109                             off_t start_loffset, int bytes);
110
111 static int write_behind = 1;
112 SYSCTL_INT(_vfs, OID_AUTO, write_behind, CTLFLAG_RW, &write_behind, 0,
113     "Cluster write-behind setting");
114 static quad_t write_behind_minfilesize = 10 * 1024 * 1024;
115 SYSCTL_QUAD(_vfs, OID_AUTO, write_behind_minfilesize, CTLFLAG_RW,
116     &write_behind_minfilesize, 0, "Cluster write-behind setting");
117 static int max_readahead = 2 * 1024 * 1024;
118 SYSCTL_INT(_vfs, OID_AUTO, max_readahead, CTLFLAG_RW, &max_readahead, 0,
119     "Limit in bytes for desired cluster read-ahead");
120
121 extern vm_page_t        bogus_page;
122
123 extern int cluster_pbuf_freecnt;
124
125 /*
126  * Acquire/release cluster cache (can return dummy entry)
127  */
128 static
129 cluster_cache_t *
130 cluster_getcache(cluster_cache_t *dummy, struct vnode *vp, off_t loffset)
131 {
132         cluster_cache_t *cc;
133         size_t hv;
134         int i;
135         int xact;
136
137         hv = (size_t)(intptr_t)vp ^ (size_t)(intptr_t)vp / sizeof(*vp);
138         hv &= CLUSTER_CACHE_MASK & ~3;
139         cc = &cluster_array[hv];
140
141         xact = -1;
142         for (i = 0; i < 4; ++i) {
143                 if (cc[i].vp != vp)
144                         continue;
145                 if (((cc[i].v_cstart ^ loffset) & ~(CLUSTER_ZONE - 1)) == 0) {
146                         xact = i;
147                         break;
148                 }
149         }
150         if (xact >= 0 && atomic_swap_int(&cc[xact].locked, 1) == 0) {
151                 if (cc[xact].vp == vp &&
152                     ((cc[xact].v_cstart ^ loffset) & ~(CLUSTER_ZONE - 1)) == 0) {
153                         return(&cc[xact]);
154                 }
155                 atomic_swap_int(&cc[xact].locked, 0);
156         }
157
158         /*
159          * New entry.  If we can't acquire the cache line then use the
160          * passed-in dummy element and reset all fields.
161          *
162          * When we are able to acquire the cache line we only clear the
163          * fields if the vp does not match.  This allows us to multi-zone
164          * a vp and for excessive zones / partial clusters to be retired.
165          */
166         i = cc->iterator++ & 3;
167         cc += i;
168         if (atomic_swap_int(&cc->locked, 1) != 0) {
169                 cc = dummy;
170                 cc->locked = 1;
171                 cc->vp = NULL;
172         }
173         if (cc->vp != vp) {
174                 cc->vp = vp;
175                 cc->v_lasta = 0;
176                 cc->v_clen = 0;
177                 cc->v_cstart = 0;
178                 cc->v_lastw = 0;
179         }
180         return(cc);
181 }
182
183 static
184 void
185 cluster_putcache(cluster_cache_t *cc)
186 {
187         atomic_swap_int(&cc->locked, 0);
188 }
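
/*
 * Usage sketch (hedged; cluster_write() further below is the real in-tree
 * consumer): callers bracket their use of the tracking state with
 * cluster_getcache()/cluster_putcache().  The on-stack 'dummy' absorbs the
 * case where no cache line can be acquired.
 *
 *	cluster_cache_t dummy;
 *	cluster_cache_t *cc;
 *
 *	cc = cluster_getcache(&dummy, vp, loffset);
 *	... read/update cc->v_cstart, cc->v_clen, cc->v_lastw, cc->v_lasta
 *	    (heuristic values only, see the NOTEs above) ...
 *	cluster_putcache(cc);
 */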
189
190 /*
191  * This replaces bread(), providing a synchronous read of the requested
192  * buffer plus asynchronous read-ahead within the specified bounds.
193  *
194  * The caller may pre-populate *bpp if it already has the requested buffer
195  * in-hand, else must set *bpp to NULL.  Note that the cluster_read() inline
196  * sets *bpp to NULL and then calls cluster_readx() for compatibility.
197  *
198  * filesize     - read-ahead @ blksize will not cross this boundary
199  * loffset      - loffset for returned *bpp
200  * blksize      - blocksize for returned *bpp and read-ahead bps
201  * minreq       - minimum (not a hard minimum) in bytes, typically reflects
202  *                a higher level uio resid.
203  * maxreq       - maximum (sequential heuristic) in bytes (highest typically ~2MB)
204  * bpp          - return buffer (*bpp) for (loffset,blksize)
205  */
206 int
207 cluster_readx(struct vnode *vp, off_t filesize, off_t loffset,
208              int blksize, size_t minreq, size_t maxreq, struct buf **bpp)
209 {
210         struct buf *bp, *rbp, *reqbp;
211         off_t origoffset;
212         off_t doffset;
213         int error;
214         int i;
215         int maxra;
216         int maxrbuild;
217
218         error = 0;
219
220         /*
221          * Calculate the desired read-ahead in blksize'd blocks (maxra).
222          * To do this we calculate maxreq.
223          *
224          * maxreq typically starts out as a sequential heuristic.  If the
225          * high level uio/resid is bigger (minreq), we pop maxreq up to
226  * minreq.  This represents the case where userland is performing
227  * random I/O by issuing big read()'s.
228          *
229          * Then we limit maxreq to max_readahead to ensure it is a reasonable
230          * value.
231          *
232          * Finally we must ensure that (loffset + maxreq) does not cross the
233          * boundary (filesize) for the current blocksize.  If we allowed it
234          * to cross we could end up with buffers past the boundary with the
235          * wrong block size (HAMMER large-data areas use mixed block sizes).
236          * minreq is also absolutely limited to filesize.
237          */
238         if (maxreq < minreq)
239                 maxreq = minreq;
240         /* minreq not used beyond this point */
241
242         if (maxreq > max_readahead) {
243                 maxreq = max_readahead;
244                 if (maxreq > 16 * 1024 * 1024)
245                         maxreq = 16 * 1024 * 1024;
246         }
247         if (maxreq < blksize)
248                 maxreq = blksize;
249         if (loffset + maxreq > filesize) {
250                 if (loffset > filesize)
251                         maxreq = 0;
252                 else
253                         maxreq = filesize - loffset;
254         }
255
256         maxra = (int)(maxreq / blksize);
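
        /*
         * Worked example (illustrative numbers): with the default
         * vfs.max_readahead of 2MB and blksize = 16384, a large
         * sequential uio yields maxreq = 2MB and therefore
         * maxra = 2097152 / 16384 = 128 read-ahead blocks, after maxreq
         * was clipped above so that (loffset + maxreq) does not pass
         * filesize.
         */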
257
258         /*
259          * Get the requested block.
260          */
261         if (*bpp)
262                 reqbp = bp = *bpp;
263         else
264                 *bpp = reqbp = bp = getblk(vp, loffset, blksize, 0, 0);
265         origoffset = loffset;
266
267         /*
268          * Calculate the maximum cluster size for a single I/O, used
269          * by cluster_rbuild().
270          */
271         maxrbuild = vmaxiosize(vp) / blksize;
272
273         /*
274          * if it is in the cache, then check to see if the reads have been
275          * sequential.  If they have, then try some read-ahead, otherwise
276          * back-off on prospective read-aheads.
277          */
278         if (bp->b_flags & B_CACHE) {
279                 /*
280                  * Not sequential, do not do any read-ahead
281                  */
282                 if (maxra <= 1)
283                         return 0;
284
285                 /*
286                  * No read-ahead mark, do not do any read-ahead
287                  * yet.
288                  */
289                 if ((bp->b_flags & B_RAM) == 0)
290                         return 0;
291
292                 /*
293                  * We hit a read-ahead-mark, figure out how much read-ahead
294                  * to do (maxra) and where to start (loffset).
295                  *
296                  * Shortcut the scan.  Typically the way this works is that
297                  * we've built up all the blocks in between except for the
298                  * last in previous iterations, so if the second-to-last
299                  * block is present we just skip ahead to it.
300                  *
301                  * This algorithm has O(1) cpu in the steady state no
302                  * matter how large maxra is.
303                  */
304                 bp->b_flags &= ~B_RAM;
305
306                 if (findblk(vp, loffset + (maxra - 2) * blksize, FINDBLK_TEST))
307                         i = maxra - 1;
308                 else
309                         i = 1;
310                 while (i < maxra) {
311                         if (findblk(vp, loffset + i * blksize,
312                                     FINDBLK_TEST) == NULL) {
313                                 break;
314                         }
315                         ++i;
316                 }
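
                /*
                 * Illustrative example of the shortcut above: with
                 * maxra = 32 and the blocks at indices 1..30 of the
                 * window already instantiated by earlier passes, the
                 * findblk() probe at (maxra - 2) hits, the scan starts
                 * at i = 31, and the loop terminates after a single
                 * additional findblk() call instead of walking all of
                 * the intermediate blocks.
                 */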
317
318                 /*
319                  * We got everything or everything is in the cache, no
320                  * point continuing.
321                  */
322                 if (i >= maxra)
323                         return 0;
324
325                 /*
326                  * Calculate where to start the read-ahead and how much
327                  * to do.  Generally speaking we want to read-ahead by
328                  * (maxra) when we've found a read-ahead mark.  We do
329                  * not want to reduce maxra here as it will cause
330                  * successive read-ahead I/O's to be smaller and smaller.
331                  *
332                  * However, we have to make sure we don't break the
333                  * filesize limitation for the clustered operation.
334                  */
335                 loffset += i * blksize;
336                 reqbp = bp = NULL;
337
338                 if (loffset >= filesize)
339                         return 0;
340                 if (loffset + maxra * blksize > filesize) {
341                         maxreq = filesize - loffset;
342                         maxra = (int)(maxreq / blksize);
343                 }
344         } else {
345                 __debugvar off_t firstread = bp->b_loffset;
346                 int nblks;
347
348                 /*
349                  * Set-up synchronous read for bp.
350                  */
351                 bp->b_cmd = BUF_CMD_READ;
352                 bp->b_bio1.bio_done = biodone_sync;
353                 bp->b_bio1.bio_flags |= BIO_SYNC;
354
355                 KASSERT(firstread != NOOFFSET, 
356                         ("cluster_read: no buffer offset"));
357
358                 /*
359                  * nblks is our cluster_rbuild request size, limited
360                  * primarily by the device.
361                  */
362                 if ((nblks = maxra) > maxrbuild)
363                         nblks = maxrbuild;
364
365                 if (nblks > 1) {
366                         int burstbytes;
367
368                         error = VOP_BMAP(vp, loffset, &doffset,
369                                          &burstbytes, NULL, BUF_CMD_READ);
370                         if (error)
371                                 goto single_block_read;
372                         if (nblks > burstbytes / blksize)
373                                 nblks = burstbytes / blksize;
374                         if (doffset == NOOFFSET)
375                                 goto single_block_read;
376                         if (nblks <= 1)
377                                 goto single_block_read;
378
379                         bp = cluster_rbuild(vp, filesize, loffset,
380                                             doffset, blksize, nblks, bp);
381                         loffset += bp->b_bufsize;
382                         maxra -= bp->b_bufsize / blksize;
383                 } else {
384 single_block_read:
385                         /*
386                          * If it isn't in the cache, then get a chunk from
387                          * disk if sequential, otherwise just get the block.
388                          */
389                         cluster_setram(bp);
390                         loffset += blksize;
391                         --maxra;
392                 }
393         }
394
395         /*
396          * If B_CACHE was not set issue bp.  bp will either be an
397          * asynchronous cluster buf or a synchronous single-buf.
398          * If it is a single buf it will be the same as reqbp.
399          *
400          * NOTE: Once an async cluster buf is issued bp becomes invalid.
401          */
402         if (bp) {
403 #if defined(CLUSTERDEBUG)
404                 if (rcluster)
405                         kprintf("S(%012jx,%d,%d)\n",
406                             (intmax_t)bp->b_loffset, bp->b_bcount, maxra);
407 #endif
408                 if ((bp->b_flags & B_CLUSTER) == 0)
409                         vfs_busy_pages(vp, bp);
410                 bp->b_flags &= ~(B_ERROR|B_INVAL);
411                 vn_strategy(vp, &bp->b_bio1);
412                 error = 0;
413                 /* bp invalid now */
414                 bp = NULL;
415         }
416
417         /*
418          * If we have been doing sequential I/O, then do some read-ahead.
419          * The code above us should have positioned us at the next likely
420          * offset.
421          *
422          * Only mess with buffers which we can immediately lock.  HAMMER
423          * will do device-readahead irrespective of what the blocks
424          * represent.
425          */
426         while (error == 0 && maxra > 0) {
427                 int burstbytes;
428                 int tmp_error;
429                 int nblks;
430
431                 rbp = getblk(vp, loffset, blksize,
432                              GETBLK_SZMATCH|GETBLK_NOWAIT, 0);
433                 if (rbp == NULL)
434                         goto no_read_ahead;
435                 if ((rbp->b_flags & B_CACHE)) {
436                         bqrelse(rbp);
437                         goto no_read_ahead;
438                 }
439
440                 /*
441                  * An error from the read-ahead bmap has nothing to do
442                  * with the caller's original request.
443                  */
444                 tmp_error = VOP_BMAP(vp, loffset, &doffset,
445                                      &burstbytes, NULL, BUF_CMD_READ);
446                 if (tmp_error || doffset == NOOFFSET) {
447                         rbp->b_flags |= B_INVAL;
448                         brelse(rbp);
449                         rbp = NULL;
450                         goto no_read_ahead;
451                 }
452                 if ((nblks = maxra) > maxrbuild)
453                         nblks = maxrbuild;
454                 if (nblks > burstbytes / blksize)
455                         nblks = burstbytes / blksize;
456
457                 /*
458                  * rbp: async read
459                  */
460                 rbp->b_cmd = BUF_CMD_READ;
461                 /*rbp->b_flags |= B_AGE*/;
462                 cluster_setram(rbp);
463
464                 if (nblks > 1) {
465                         rbp = cluster_rbuild(vp, filesize, loffset,
466                                              doffset, blksize, 
467                                              nblks, rbp);
468                 } else {
469                         rbp->b_bio2.bio_offset = doffset;
470                 }
471
472                 rbp->b_flags &= ~(B_ERROR|B_INVAL);
473
474                 if ((rbp->b_flags & B_CLUSTER) == 0)
475                         vfs_busy_pages(vp, rbp);
476                 BUF_KERNPROC(rbp);
477                 loffset += rbp->b_bufsize;
478                 maxra -= rbp->b_bufsize / blksize;
479                 vn_strategy(vp, &rbp->b_bio1);
480                 /* rbp invalid now */
481         }
482
483         /*
484          * Wait for our original buffer to complete its I/O.  reqbp will
485          * be NULL if the original buffer was B_CACHE.  We are returning
486          * (*bpp) which is the same as reqbp when reqbp != NULL.
487          */
488 no_read_ahead:
489         if (reqbp) {
490                 KKASSERT(reqbp->b_bio1.bio_flags & BIO_SYNC);
491                 error = biowait(&reqbp->b_bio1, "clurd");
492         }
493         return (error);
494 }
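
/*
 * Usage sketch (hedged): a typical VOP_READ loop in a filesystem reads one
 * logical block at a time through this interface.  'ip', 'lbn', 'lblksize'
 * and 'ra_bytes' below are illustrative names, not taken from any
 * particular filesystem; 'ra_bytes' stands for the caller's sequential
 * read-ahead heuristic in bytes.
 *
 *	struct buf *bp = NULL;
 *
 *	error = cluster_readx(vp, (off_t)ip->i_size, lbn * lblksize,
 *			      lblksize, uio->uio_resid, ra_bytes, &bp);
 *	if (error == 0) {
 *		... uiomove() out of bp->b_data ...
 *		bqrelse(bp);
 *	}
 */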
495
496 /*
497  * This replaces breadcb(), providing an asynchronous read of the requested
498  * buffer with a callback, plus an asynchronous read-ahead within the
499  * specified bounds.
500  *
501  * The callback must check whether BIO_DONE is set in the bio and issue
502  * the bpdone(bp, 0) if it isn't.  The callback is responsible for clearing
503  * BIO_DONE and disposing of the I/O (bqrelse()ing it).
504  *
505  * filesize     - read-ahead @ blksize will not cross this boundary
506  * loffset      - loffset for returned *bpp
507  * blksize      - blocksize for returned *bpp and read-ahead bps
508  * minreq       - minimum (not a hard minimum) in bytes, typically reflects
509  *                a higher level uio resid.
510  * maxreq       - maximum (sequential heuristic) in bytes (highest typically ~2MB)
511  * bpp          - return buffer (*bpp) for (loffset,blksize)
512  */
513 void
514 cluster_readcb(struct vnode *vp, off_t filesize, off_t loffset,
515              int blksize, size_t minreq, size_t maxreq,
516              void (*func)(struct bio *), void *arg)
517 {
518         struct buf *bp, *rbp, *reqbp;
519         off_t origoffset;
520         off_t doffset;
521         int i;
522         int maxra;
523         int maxrbuild;
524
525         /*
526          * Calculate the desired read-ahead in blksize'd blocks (maxra).
527          * To do this we calculate maxreq.
528          *
529          * maxreq typically starts out as a sequential heuristic.  If the
530          * high level uio/resid is bigger (minreq), we pop maxreq up to
531  * minreq.  This represents the case where userland is performing
532  * random I/O by issuing big read()'s.
533          *
534          * Then we limit maxreq to max_readahead to ensure it is a reasonable
535          * value.
536          *
537          * Finally we must ensure that (loffset + maxreq) does not cross the
538          * boundary (filesize) for the current blocksize.  If we allowed it
539          * to cross we could end up with buffers past the boundary with the
540          * wrong block size (HAMMER large-data areas use mixed block sizes).
541          * minreq is also absolutely limited to filesize.
542          */
543         if (maxreq < minreq)
544                 maxreq = minreq;
545         /* minreq not used beyond this point */
546
547         if (maxreq > max_readahead) {
548                 maxreq = max_readahead;
549                 if (maxreq > 16 * 1024 * 1024)
550                         maxreq = 16 * 1024 * 1024;
551         }
552         if (maxreq < blksize)
553                 maxreq = blksize;
554         if (loffset + maxreq > filesize) {
555                 if (loffset > filesize)
556                         maxreq = 0;
557                 else
558                         maxreq = filesize - loffset;
559         }
560
561         maxra = (int)(maxreq / blksize);
562
563         /*
564          * Get the requested block.
565          */
566         reqbp = bp = getblk(vp, loffset, blksize, 0, 0);
567         origoffset = loffset;
568
569         /*
570          * Calculate the maximum cluster size for a single I/O, used
571          * by cluster_rbuild().
572          */
573         maxrbuild = vmaxiosize(vp) / blksize;
574
575         /*
576          * if it is in the cache, then check to see if the reads have been
577          * sequential.  If they have, then try some read-ahead, otherwise
578          * back-off on prospective read-aheads.
579          */
580         if (bp->b_flags & B_CACHE) {
581                 /*
582                  * Setup for func() call whether we do read-ahead or not.
583                  */
584                 bp->b_bio1.bio_caller_info1.ptr = arg;
585                 bp->b_bio1.bio_flags |= BIO_DONE;
586
587                 /*
588                  * Not sequential, do not do any read-ahead
589                  */
590                 if (maxra <= 1)
591                         goto no_read_ahead;
592
593                 /*
594                  * No read-ahead mark, do not do any read-ahead
595                  * yet.
596                  */
597                 if ((bp->b_flags & B_RAM) == 0)
598                         goto no_read_ahead;
599                 bp->b_flags &= ~B_RAM;
600
601                 /*
602                  * We hit a read-ahead-mark, figure out how much read-ahead
603                  * to do (maxra) and where to start (loffset).
604                  *
605                  * Shortcut the scan.  Typically the way this works is that
606                  * we've built up all the blocks in between except for the
607                  * last in previous iterations, so if the second-to-last
608                  * block is present we just skip ahead to it.
609                  *
610                  * This algorithm has O(1) cpu in the steady state no
611                  * matter how large maxra is.
612                  */
613                 if (findblk(vp, loffset + (maxra - 2) * blksize, FINDBLK_TEST))
614                         i = maxra - 1;
615                 else
616                         i = 1;
617                 while (i < maxra) {
618                         if (findblk(vp, loffset + i * blksize,
619                                     FINDBLK_TEST) == NULL) {
620                                 break;
621                         }
622                         ++i;
623                 }
624
625                 /*
626                  * We got everything or everything is in the cache, no
627                  * point continuing.
628                  */
629                 if (i >= maxra)
630                         goto no_read_ahead;
631
632                 /*
633                  * Calculate where to start the read-ahead and how much
634                  * to do.  Generally speaking we want to read-ahead by
635                  * (maxra) when we've found a read-ahead mark.  We do
636                  * not want to reduce maxra here as it will cause
637                  * successive read-ahead I/O's to be smaller and smaller.
638                  *
639                  * However, we have to make sure we don't break the
640                  * filesize limitation for the clustered operation.
641                  */
642                 loffset += i * blksize;
643                 bp = NULL;
644                 /* leave reqbp intact to force function callback */
645
646                 if (loffset >= filesize)
647                         goto no_read_ahead;
648                 if (loffset + maxra * blksize > filesize) {
649                         maxreq = filesize - loffset;
650                         maxra = (int)(maxreq / blksize);
651                 }
652         } else {
653                 __debugvar off_t firstread = bp->b_loffset;
654                 int nblks;
655                 int tmp_error;
656
657                 /*
658                  * Set-up synchronous read for bp.
659                  */
660                 bp->b_flags &= ~(B_ERROR | B_EINTR | B_INVAL);
661                 bp->b_cmd = BUF_CMD_READ;
662                 bp->b_bio1.bio_done = func;
663                 bp->b_bio1.bio_caller_info1.ptr = arg;
664                 BUF_KERNPROC(bp);
665                 reqbp = NULL;   /* don't func() reqbp, it's running async */
666
667                 KASSERT(firstread != NOOFFSET,
668                         ("cluster_read: no buffer offset"));
669
670                 /*
671                  * nblks is our cluster_rbuild request size, limited
672                  * primarily by the device.
673                  */
674                 if ((nblks = maxra) > maxrbuild)
675                         nblks = maxrbuild;
676
677                 if (nblks > 1) {
678                         int burstbytes;
679
680                         tmp_error = VOP_BMAP(vp, loffset, &doffset,
681                                              &burstbytes, NULL, BUF_CMD_READ);
682                         if (tmp_error)
683                                 goto single_block_read;
684                         if (nblks > burstbytes / blksize)
685                                 nblks = burstbytes / blksize;
686                         if (doffset == NOOFFSET)
687                                 goto single_block_read;
688                         if (nblks <= 1)
689                                 goto single_block_read;
690
691                         bp = cluster_rbuild(vp, filesize, loffset,
692                                             doffset, blksize, nblks, bp);
693                         loffset += bp->b_bufsize;
694                         maxra -= bp->b_bufsize / blksize;
695                 } else {
696 single_block_read:
697                         /*
698                          * If it isn't in the cache, then get a chunk from
699                          * disk if sequential, otherwise just get the block.
700                          */
701                         cluster_setram(bp);
702                         loffset += blksize;
703                         --maxra;
704                 }
705         }
706
707         /*
708          * If bp != NULL then B_CACHE was *NOT* set and bp must be issued.
709          * bp will either be an asynchronous cluster buf or an asynchronous
710          * single-buf.
711          *
712          * NOTE: Once an async cluster buf is issued bp becomes invalid.
713          */
714         if (bp) {
715 #if defined(CLUSTERDEBUG)
716                 if (rcluster)
717                         kprintf("S(%012jx,%d,%d)\n",
718                             (intmax_t)bp->b_loffset, bp->b_bcount, maxra);
719 #endif
720                 if ((bp->b_flags & B_CLUSTER) == 0)
721                         vfs_busy_pages(vp, bp);
722                 bp->b_flags &= ~(B_ERROR|B_INVAL);
723                 vn_strategy(vp, &bp->b_bio1);
724                 /* bp invalid now */
725                 bp = NULL;
726         }
727
728         /*
729          * If we have been doing sequential I/O, then do some read-ahead.
730          * The code above us should have positioned us at the next likely
731          * offset.
732          *
733          * Only mess with buffers which we can immediately lock.  HAMMER
734          * will do device-readahead irrespective of what the blocks
735          * represent.
736          */
737         while (maxra > 0) {
738                 int burstbytes;
739                 int tmp_error;
740                 int nblks;
741
742                 rbp = getblk(vp, loffset, blksize,
743                              GETBLK_SZMATCH|GETBLK_NOWAIT, 0);
744                 if (rbp == NULL)
745                         goto no_read_ahead;
746                 if ((rbp->b_flags & B_CACHE)) {
747                         bqrelse(rbp);
748                         goto no_read_ahead;
749                 }
750
751                 /*
752                  * An error from the read-ahead bmap has nothing to do
753                  * with the caller's original request.
754                  */
755                 tmp_error = VOP_BMAP(vp, loffset, &doffset,
756                                      &burstbytes, NULL, BUF_CMD_READ);
757                 if (tmp_error || doffset == NOOFFSET) {
758                         rbp->b_flags |= B_INVAL;
759                         brelse(rbp);
760                         rbp = NULL;
761                         goto no_read_ahead;
762                 }
763                 if ((nblks = maxra) > maxrbuild)
764                         nblks = maxrbuild;
765                 if (nblks > burstbytes / blksize)
766                         nblks = burstbytes / blksize;
767
768                 /*
769                  * rbp: async read
770                  */
771                 rbp->b_cmd = BUF_CMD_READ;
772                 /*rbp->b_flags |= B_AGE*/;
773                 cluster_setram(rbp);
774
775                 if (nblks > 1) {
776                         rbp = cluster_rbuild(vp, filesize, loffset,
777                                              doffset, blksize,
778                                              nblks, rbp);
779                 } else {
780                         rbp->b_bio2.bio_offset = doffset;
781                 }
782
783                 rbp->b_flags &= ~(B_ERROR|B_INVAL);
784
785                 if ((rbp->b_flags & B_CLUSTER) == 0)
786                         vfs_busy_pages(vp, rbp);
787                 BUF_KERNPROC(rbp);
788                 loffset += rbp->b_bufsize;
789                 maxra -= rbp->b_bufsize / blksize;
790                 vn_strategy(vp, &rbp->b_bio1);
791                 /* rbp invalid now */
792         }
793
794         /*
795          * If reqbp is non-NULL it had B_CACHE set and we issue the
796          * function callback synchronously.
797          *
798          * Note that we may start additional asynchronous I/O before doing
799          * the func() callback for the B_CACHE case.
800          */
801 no_read_ahead:
802         if (reqbp)
803                 func(&reqbp->b_bio1);
804 }
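
/*
 * Callback sketch (illustrative only; the real consumers live in the
 * filesystems): per the contract documented above, the callback finishes
 * the buffer if the I/O has not already been finished, clears BIO_DONE,
 * and disposes of the buffer itself.
 *
 *	static void
 *	example_read_done(struct bio *bio)
 *	{
 *		struct buf *bp = bio->bio_buf;
 *		void *arg = bio->bio_caller_info1.ptr;
 *
 *		if ((bio->bio_flags & BIO_DONE) == 0)
 *			bpdone(bp, 0);
 *		bio->bio_flags &= ~BIO_DONE;
 *		... consume bp->b_data and arg ...
 *		bqrelse(bp);
 *	}
 */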
805
806 /*
807  * If blocks are contiguous on disk, use this to provide clustered
808  * read ahead.  We will read as many blocks as possible sequentially
809  * and then parcel them up into logical blocks in the buffer hash table.
810  *
811  * This function either returns a cluster buf or it returns fbp.  fbp is
812  * already expected to be set up as a synchronous or asynchronous request.
813  *
814  * If a cluster buf is returned it will always be async.
815  */
816 static struct buf *
817 cluster_rbuild(struct vnode *vp, off_t filesize, off_t loffset, off_t doffset,
818                int blksize, int run, struct buf *fbp)
819 {
820         struct buf *bp, *tbp;
821         off_t boffset;
822         int i, j;
823         int maxiosize = vmaxiosize(vp);
824
825         /*
826          * avoid a division
827          */
828         while (loffset + run * blksize > filesize) {
829                 --run;
830         }
831
832         tbp = fbp;
833         tbp->b_bio2.bio_offset = doffset;
834         if ((tbp->b_flags & B_MALLOC) ||
835             ((tbp->b_flags & B_VMIO) == 0) || (run <= 1)) {
836                 return tbp;
837         }
838
839         bp = trypbuf_kva(&cluster_pbuf_freecnt);
840         if (bp == NULL) {
841                 return tbp;
842         }
843
844         /*
845          * We are synthesizing a buffer out of vm_page_t's, but
846          * if the block size is not page aligned then the starting
847          * address may not be either.  Inherit the b_data offset
848          * from the original buffer.
849          */
850         bp->b_data = (char *)((vm_offset_t)bp->b_data |
851             ((vm_offset_t)tbp->b_data & PAGE_MASK));
852         bp->b_flags |= B_CLUSTER | B_VMIO;
853         bp->b_cmd = BUF_CMD_READ;
854         bp->b_bio1.bio_done = cluster_callback;         /* default to async */
855         bp->b_bio1.bio_caller_info1.cluster_head = NULL;
856         bp->b_bio1.bio_caller_info2.cluster_tail = NULL;
857         bp->b_loffset = loffset;
858         bp->b_bio2.bio_offset = doffset;
859         KASSERT(bp->b_loffset != NOOFFSET,
860                 ("cluster_rbuild: no buffer offset"));
861
862         bp->b_bcount = 0;
863         bp->b_bufsize = 0;
864         bp->b_xio.xio_npages = 0;
865
866         for (boffset = doffset, i = 0; i < run; ++i, boffset += blksize) {
867                 if (i) {
868                         if ((bp->b_xio.xio_npages * PAGE_SIZE) +
869                             round_page(blksize) > maxiosize) {
870                                 break;
871                         }
872
873                         /*
874                          * Shortcut some checks and try to avoid buffers that
875                          * would block in the lock.  The same checks have to
876                          * be made again after we officially get the buffer.
877                          */
878                         tbp = getblk(vp, loffset + i * blksize, blksize,
879                                      GETBLK_SZMATCH|GETBLK_NOWAIT, 0);
880                         if (tbp == NULL)
881                                 break;
882                         for (j = 0; j < tbp->b_xio.xio_npages; j++) {
883                                 if (tbp->b_xio.xio_pages[j]->valid)
884                                         break;
885                         }
886                         if (j != tbp->b_xio.xio_npages) {
887                                 bqrelse(tbp);
888                                 break;
889                         }
890
891                         /*
892                          * Stop scanning if the buffer is fully valid
893                          * (marked B_CACHE), or locked (may be doing a
894                          * background write), or if the buffer is not
895                          * VMIO backed.  The clustering code can only deal
896                          * with VMIO-backed buffers.
897                          */
898                         if ((tbp->b_flags & (B_CACHE|B_LOCKED)) ||
899                             (tbp->b_flags & B_VMIO) == 0 ||
900                             (LIST_FIRST(&tbp->b_dep) != NULL &&
901                              buf_checkread(tbp))
902                         ) {
903                                 bqrelse(tbp);
904                                 break;
905                         }
906
907                         /*
908                          * The buffer must be completely invalid in order to
909                          * take part in the cluster.  If it is partially valid
910                          * then we stop.
911                          */
912                         for (j = 0; j < tbp->b_xio.xio_npages; j++) {
913                                 if (tbp->b_xio.xio_pages[j]->valid)
914                                         break;
915                         }
916                         if (j != tbp->b_xio.xio_npages) {
917                                 bqrelse(tbp);
918                                 break;
919                         }
920
921                         /*
922                          * Set a read-ahead mark as appropriate.  Always
923                          * set the read-ahead mark at (run - 1).  It is
924                          * unclear why we were also setting it at i == 1.
925                          */
926                         if (/*i == 1 ||*/ i == (run - 1))
927                                 cluster_setram(tbp);
928
929                         /*
930                          * Depress the priority of buffers not explicitly
931                          * requested.
932                          */
933                         /* tbp->b_flags |= B_AGE; */
934
935                         /*
936                          * Set the block number if it isn't set, otherwise
937                          * if it is make sure it matches the block number we
938                          * expect.
939                          */
940                         if (tbp->b_bio2.bio_offset == NOOFFSET) {
941                                 tbp->b_bio2.bio_offset = boffset;
942                         } else if (tbp->b_bio2.bio_offset != boffset) {
943                                 brelse(tbp);
944                                 break;
945                         }
946                 }
947
948                 /*
949                  * The passed-in tbp (i == 0) will already be set up for
950                  * async or sync operation.  All other tbp's acquired in
951                  * our loop are set up for async operation.
952                  */
953                 tbp->b_cmd = BUF_CMD_READ;
954                 BUF_KERNPROC(tbp);
955                 cluster_append(&bp->b_bio1, tbp);
956                 for (j = 0; j < tbp->b_xio.xio_npages; ++j) {
957                         vm_page_t m;
958
959                         m = tbp->b_xio.xio_pages[j];
960                         vm_page_busy_wait(m, FALSE, "clurpg");
961                         vm_page_io_start(m);
962                         vm_page_wakeup(m);
963                         vm_object_pip_add(m->object, 1);
964                         if ((bp->b_xio.xio_npages == 0) ||
965                                 (bp->b_xio.xio_pages[bp->b_xio.xio_npages-1] != m)) {
966                                 bp->b_xio.xio_pages[bp->b_xio.xio_npages] = m;
967                                 bp->b_xio.xio_npages++;
968                         }
969                         if ((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL)
970                                 tbp->b_xio.xio_pages[j] = bogus_page;
971                 }
972                 /*
973                  * XXX shouldn't this be += size for both, like in 
974                  * cluster_wbuild()?
975                  *
976                  * Don't inherit tbp->b_bufsize as it may be larger due to
977                  * a non-page-aligned size.  Instead just aggregate using
978                  * 'blksize'.
979                  */
980                 if (tbp->b_bcount != blksize)
981                     kprintf("warning: tbp->b_bcount wrong %d vs %d\n", tbp->b_bcount, blksize);
982                 if (tbp->b_bufsize != blksize)
983                     kprintf("warning: tbp->b_bufsize wrong %d vs %d\n", tbp->b_bufsize, blksize);
984                 bp->b_bcount += blksize;
985                 bp->b_bufsize += blksize;
986         }
987
988         /*
989          * Fully valid pages in the cluster are already good and do not need
990          * to be re-read from disk.  Replace the page with bogus_page
991          */
992         for (j = 0; j < bp->b_xio.xio_npages; j++) {
993                 if ((bp->b_xio.xio_pages[j]->valid & VM_PAGE_BITS_ALL) ==
994                     VM_PAGE_BITS_ALL) {
995                         bp->b_xio.xio_pages[j] = bogus_page;
996                 }
997         }
998         if (bp->b_bufsize > bp->b_kvasize) {
999                 panic("cluster_rbuild: b_bufsize(%d) > b_kvasize(%d)",
1000                     bp->b_bufsize, bp->b_kvasize);
1001         }
1002         pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
1003                 (vm_page_t *)bp->b_xio.xio_pages, bp->b_xio.xio_npages);
1004         BUF_KERNPROC(bp);
1005         return (bp);
1006 }
1007
1008 /*
1009  * Cleanup after a clustered read or write.
1010  * This is complicated by the fact that any of the buffers might have
1011  * extra memory (if there were no empty buffer headers at allocbuf time)
1012  * that we will need to shift around.
1013  *
1014  * The returned bio is &bp->b_bio1
1015  */
1016 static void
1017 cluster_callback(struct bio *bio)
1018 {
1019         struct buf *bp = bio->bio_buf;
1020         struct buf *tbp;
1021         int error = 0;
1022
1023         /*
1024          * Must propagate errors to all the components.  A short read (EOF)
1025          * is a critical error.
1026          */
1027         if (bp->b_flags & B_ERROR) {
1028                 error = bp->b_error;
1029         } else if (bp->b_bcount != bp->b_bufsize) {
1030                 panic("cluster_callback: unexpected EOF on cluster %p!", bio);
1031         }
1032
1033         pmap_qremove(trunc_page((vm_offset_t) bp->b_data),
1034                      bp->b_xio.xio_npages);
1035         /*
1036          * Move memory from the large cluster buffer into the component
1037          * buffers and mark IO as done on these.  Since the memory map
1038          * is the same, no actual copying is required.
1039          */
1040         while ((tbp = bio->bio_caller_info1.cluster_head) != NULL) {
1041                 bio->bio_caller_info1.cluster_head = tbp->b_cluster_next;
1042                 if (error) {
1043                         tbp->b_flags |= B_ERROR | B_IODEBUG;
1044                         tbp->b_error = error;
1045                 } else {
1046                         tbp->b_dirtyoff = tbp->b_dirtyend = 0;
1047                         tbp->b_flags &= ~(B_ERROR|B_INVAL);
1048                         tbp->b_flags |= B_IODEBUG;
1049                         /*
1050                          * XXX the bdwrite()/bqrelse() issued during
1051                          * cluster building clears B_RELBUF (see bqrelse()
1052                          * comment).  If direct I/O was specified, we have
1053                          * to restore it here to allow the buffer and VM
1054                          * to be freed.
1055                          */
1056                         if (tbp->b_flags & B_DIRECT)
1057                                 tbp->b_flags |= B_RELBUF;
1058
1059                         /*
1060                          * XXX I think biodone() below will do this, but do
1061                          *     it here anyway for consistency.
1062                          */
1063                         if (tbp->b_cmd == BUF_CMD_WRITE)
1064                                 bundirty(tbp);
1065                 }
1066                 biodone(&tbp->b_bio1);
1067         }
1068         relpbuf(bp, &cluster_pbuf_freecnt);
1069 }
1070
1071 /*
1072  * Implement modified write build for cluster.
1073  *
1074  *      write_behind = 0        write behind disabled
1075  *      write_behind = 1        write behind normal (default)
1076  *      write_behind = 2        write behind backed-off
1077  *
1078  * In addition, write_behind is only activated for files that have
1079  * grown past a certain size (default 10MB).  Otherwise temporary files
1080  * wind up generating a lot of unnecessary disk I/O.
1081  */
1082 static __inline int
1083 cluster_wbuild_wb(struct vnode *vp, int blksize, off_t start_loffset, int len)
1084 {
1085         int r = 0;
1086
1087         switch(write_behind) {
1088         case 2:
1089                 if (start_loffset < len)
1090                         break;
1091                 start_loffset -= len;
1092                 /* fall through */
1093         case 1:
1094                 if (vp->v_filesize >= write_behind_minfilesize) {
1095                         r = cluster_wbuild(vp, NULL, blksize,
1096                                            start_loffset, len);
1097                 }
1098                 /* fall through */
1099         default:
1100                 /* fall through */
1101                 break;
1102         }
1103         return(r);
1104 }
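
/*
 * Tuning note (illustrative): both knobs used above are exported as
 * sysctls, so write-behind can be backed off or restricted to larger
 * files at run-time, e.g.:
 *
 *	sysctl vfs.write_behind=2
 *	sysctl vfs.write_behind_minfilesize=33554432
 *
 * The second line raises the activation threshold from the default 10MB
 * to 32MB so that more medium-sized temporary files avoid the extra
 * disk I/O.
 */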
1105
1106 /*
1107  * Do clustered write for FFS.
1108  *
1109  * Four cases:
1110  *      1. Write is not sequential (write asynchronously)
1111  *      Write is sequential:
1112  *      2.      beginning of cluster - begin cluster
1113  *      3.      middle of a cluster - add to cluster
1114  *      4.      end of a cluster - asynchronously write cluster
1115  *
1116  * WARNING! vnode fields are not locked and must ONLY be used heuristically.
1117  */
1118 void
1119 cluster_write(struct buf *bp, off_t filesize, int blksize, int seqcount)
1120 {
1121         struct vnode *vp;
1122         off_t loffset;
1123         int maxclen, cursize;
1124         int async;
1125         cluster_cache_t dummy;
1126         cluster_cache_t *cc;
1127
1128         vp = bp->b_vp;
1129         if (vp->v_type == VREG)
1130                 async = vp->v_mount->mnt_flag & MNT_ASYNC;
1131         else
1132                 async = 0;
1133         loffset = bp->b_loffset;
1134         KASSERT(bp->b_loffset != NOOFFSET, 
1135                 ("cluster_write: no buffer offset"));
1136
1137         cc = cluster_getcache(&dummy, vp, loffset);
1138
1139         /*
1140          * Initialize vnode to beginning of file.
1141          */
1142         if (loffset == 0)
1143                 cc->v_lasta = cc->v_clen = cc->v_cstart = cc->v_lastw = 0;
1144
1145         if (cc->v_clen == 0 || loffset != cc->v_lastw + blksize ||
1146             bp->b_bio2.bio_offset == NOOFFSET ||
1147             (bp->b_bio2.bio_offset != cc->v_lasta + blksize)) {
1148                 maxclen = vmaxiosize(vp);
1149                 if (cc->v_clen != 0) {
1150                         /*
1151                          * Next block is not sequential.
1152                          *
1153                          * If we are not writing at end of file, the process
1154                          * seeked to another point in the file since its last
1155                          * write, or we have reached our maximum cluster size,
1156                          * then push the previous cluster. Otherwise try
1157                          * reallocating to make it sequential.
1158                          *
1159                          * Change to algorithm: only push previous cluster if
1160                          * it was sequential from the point of view of the
1161                          * seqcount heuristic, otherwise leave the buffer 
1162                          * intact so we can potentially optimize the I/O
1163                          * later on in the buf_daemon or update daemon
1164                          * flush.
1165                          */
1166                         cursize = cc->v_lastw - cc->v_cstart + blksize;
1167                         if (bp->b_loffset + blksize < filesize ||
1168                             loffset != cc->v_lastw + blksize ||
1169                             cc->v_clen <= cursize) {
1170                                 if (!async && seqcount > 0) {
1171                                         cluster_wbuild_wb(vp, blksize,
1172                                                 cc->v_cstart, cursize);
1173                                 }
1174                         } else {
1175                                 struct buf **bpp, **endbp;
1176                                 struct cluster_save *buflist;
1177
1178                                 buflist = cluster_collectbufs(cc, vp,
1179                                                               bp, blksize);
1180                                 endbp = &buflist->bs_children
1181                                     [buflist->bs_nchildren - 1];
1182                                 if (VOP_REALLOCBLKS(vp, buflist)) {
1183                                         /*
1184                                          * Failed, push the previous cluster
1185                                          * if *really* writing sequentially
1186                                          * in the logical file (seqcount > 1),
1187                                          * otherwise delay it in the hopes that
1188                                          * the low level disk driver can
1189                                          * optimize the write ordering.
1190                                          *
1191                                          * NOTE: We do not brelse the last
1192                                          *       element which is bp, and we
1193                                          *       do not return here.
1194                                          */
1195                                         for (bpp = buflist->bs_children;
1196                                              bpp < endbp; bpp++)
1197                                                 brelse(*bpp);
1198                                         kfree(buflist, M_SEGMENT);
1199                                         if (seqcount > 1) {
1200                                                 cluster_wbuild_wb(vp, 
1201                                                     blksize, cc->v_cstart,
1202                                                     cursize);
1203                                         }
1204                                 } else {
1205                                         /*
1206                                          * Succeeded, keep building cluster.
1207                                          */
1208                                         for (bpp = buflist->bs_children;
1209                                              bpp <= endbp; bpp++)
1210                                                 bdwrite(*bpp);
1211                                         kfree(buflist, M_SEGMENT);
1212                                         cc->v_lastw = loffset;
1213                                         cc->v_lasta = bp->b_bio2.bio_offset;
1214                                         cluster_putcache(cc);
1215                                         return;
1216                                 }
1217                         }
1218                 }
1219                 /*
1220                  * Consider beginning a cluster. If at end of file, make
1221                  * cluster as large as possible, otherwise find size of
1222                  * existing cluster.
1223                  */
1224                 if ((vp->v_type == VREG) &&
1225                     bp->b_loffset + blksize < filesize &&
1226                     (bp->b_bio2.bio_offset == NOOFFSET) &&
1227                     (VOP_BMAP(vp, loffset, &bp->b_bio2.bio_offset, &maxclen, NULL, BUF_CMD_WRITE) ||
1228                      bp->b_bio2.bio_offset == NOOFFSET)) {
1229                         bdwrite(bp);
1230                         cc->v_clen = 0;
1231                         cc->v_lasta = bp->b_bio2.bio_offset;
1232                         cc->v_cstart = loffset + blksize;
1233                         cc->v_lastw = loffset;
1234                         cluster_putcache(cc);
1235                         return;
1236                 }
1237                 if (maxclen > blksize)
1238                         cc->v_clen = maxclen - blksize;
1239                 else
1240                         cc->v_clen = 0;
1241                 if (!async && cc->v_clen == 0) { /* I/O not contiguous */
1242                         cc->v_cstart = loffset + blksize;
1243                         bdwrite(bp);
1244                 } else {        /* Wait for rest of cluster */
1245                         cc->v_cstart = loffset;
1246                         bdwrite(bp);
1247                 }
1248         } else if (loffset == cc->v_cstart + cc->v_clen) {
1249                 /*
1250                  * At end of cluster, write it out if seqcount tells us we
1251                  * are operating sequentially, otherwise let the buf or
1252                  * update daemon handle it.
1253                  */
1254                 bdwrite(bp);
1255                 if (seqcount > 1)
1256                         cluster_wbuild_wb(vp, blksize, cc->v_cstart,
1257                                           cc->v_clen + blksize);
1258                 cc->v_clen = 0;
1259                 cc->v_cstart = loffset + blksize;
1260         } else if (vm_page_count_severe() &&
1261                    bp->b_loffset + blksize < filesize) {
1262                 /*
1263                  * We are low on memory, get it going NOW.  However, do not
1264                  * try to push out a partial block at the end of the file
1265                  * as this could lead to extremely non-optimal write activity.
1266                  */
1267                 bawrite(bp);
1268         } else {
1269                 /*
1270                  * In the middle of a cluster, so just delay the I/O for now.
1271                  */
1272                 bdwrite(bp);
1273         }
1274         cc->v_lastw = loffset;
1275         cc->v_lasta = bp->b_bio2.bio_offset;
1276         cluster_putcache(cc);
1277 }
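
/*
 * Usage sketch (hedged, modeled loosely on how a filesystem write path
 * hands off a dirty, clusterable logical block; 'ip', 'lblksize' and
 * 'seqcount' are illustrative names and the B_CLUSTEROK gate is one
 * plausible policy, not a requirement of this API):
 *
 *	if (bp->b_flags & B_CLUSTEROK)
 *		cluster_write(bp, (off_t)ip->i_size, lblksize, seqcount);
 *	else
 *		bdwrite(bp);
 */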
1278
1279 /*
1280  * This is the clustered version of bawrite().  It works similarly to
1281  * cluster_write() except I/O on the buffer is guaranteed to occur.
1282  */
1283 int
1284 cluster_awrite(struct buf *bp)
1285 {
1286         int total;
1287
1288         /*
1289          * Don't bother if it isn't clusterable.
1290          */
1291         if ((bp->b_flags & B_CLUSTEROK) == 0 ||
1292             bp->b_vp == NULL ||
1293             (bp->b_vp->v_flag & VOBJBUF) == 0) {
1294                 total = bp->b_bufsize;
1295                 bawrite(bp);
1296                 return (total);
1297         }
1298
1299         total = cluster_wbuild(bp->b_vp, &bp, bp->b_bufsize,
1300                                bp->b_loffset, vmaxiosize(bp->b_vp));
1301         if (bp)
1302                 bawrite(bp);
1303
1304         return total;
1305 }
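
/*
 * Usage note (illustrative): a flush path that must guarantee the write is
 * initiated, such as a buffer-daemon style flusher, can substitute
 * cluster_awrite() for bawrite() when the buffer may be part of a larger
 * contiguous dirty range:
 *
 *	bytes = cluster_awrite(bp);	(bp is consumed either way)
 *
 * where the return value is the number of bytes actually pushed, which
 * the caller can use for flush accounting.
 */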
1306
1307 /*
1308  * This is an awful lot like cluster_rbuild...wish they could be combined.
1309  * Write out dirty, clusterable buffers for vp in the byte range
1310  * (start_loffset, bytes), aggregating contiguous B_DELWRI buffers into
1311  * larger pbuf-based writes where possible.
1312  *
1313  * cluster_wbuild() normally does not guarantee anything.  If bpp is
1314  * non-NULL and cluster_wbuild() is able to incorporate it into the
1315  * I/O it will set *bpp to NULL, otherwise it will leave it alone and
1316  * the caller must dispose of *bpp.
1317  */
1318 static int
1319 cluster_wbuild(struct vnode *vp, struct buf **bpp,
1320                int blksize, off_t start_loffset, int bytes)
1321 {
1322         struct buf *bp, *tbp;
1323         int i, j;
1324         int totalwritten = 0;
1325         int must_initiate;
1326         int maxiosize = vmaxiosize(vp);
1327
1328         while (bytes > 0) {
1329                 /*
1330                  * If the buffer matches the passed locked & removed buffer
1331                  * we use the passed buffer (which might not be B_DELWRI).
1332                  *
1333                  * Otherwise locate the buffer and determine if it is
1334                  * compatible.
1335                  */
1336                 if (bpp && (*bpp)->b_loffset == start_loffset) {
1337                         tbp = *bpp;
1338                         *bpp = NULL;
1339                         bpp = NULL;
1340                 } else {
1341                         tbp = findblk(vp, start_loffset, FINDBLK_NBLOCK);
1342                         if (tbp == NULL ||
1343                             (tbp->b_flags & (B_LOCKED | B_INVAL | B_DELWRI)) !=
1344                              B_DELWRI ||
1345                             (LIST_FIRST(&tbp->b_dep) && buf_checkwrite(tbp))) {
1346                                 if (tbp)
1347                                         BUF_UNLOCK(tbp);
1348                                 start_loffset += blksize;
1349                                 bytes -= blksize;
1350                                 continue;
1351                         }
1352                         bremfree(tbp);
1353                 }
1354                 KKASSERT(tbp->b_cmd == BUF_CMD_DONE);
1355
1356                 /*
1357                  * Extra memory in the buffer, punt on this buffer.
1358                  * XXX we could handle this in most cases, but we would
1359                  * have to push the extra memory down to after our max
1360                  * possible cluster size and then potentially pull it back
1361                  * up if the cluster was terminated prematurely--too much
1362                  * hassle.
1363                  */
1364                 if (((tbp->b_flags & (B_CLUSTEROK|B_MALLOC)) != B_CLUSTEROK) ||
1365                     (tbp->b_bcount != tbp->b_bufsize) ||
1366                     (tbp->b_bcount != blksize) ||
1367                     (bytes == blksize) ||
1368                     ((bp = getpbuf_kva(&cluster_pbuf_freecnt)) == NULL)) {
1369                         totalwritten += tbp->b_bufsize;
1370                         bawrite(tbp);
1371                         start_loffset += blksize;
1372                         bytes -= blksize;
1373                         continue;
1374                 }
1375
1376                 /*
1377                  * Set up the pbuf.  Track our append point with b_bcount
1378                  * and b_bufsize.  b_bufsize is not used by the device but
1379                  * our caller uses it to loop clusters and we use it to
1380                  * detect a premature EOF on the block device.
1381                  */
1382                 bp->b_bcount = 0;
1383                 bp->b_bufsize = 0;
1384                 bp->b_xio.xio_npages = 0;
1385                 bp->b_loffset = tbp->b_loffset;
1386                 bp->b_bio2.bio_offset = tbp->b_bio2.bio_offset;
1387
1388                 /*
1389                  * We are synthesizing a buffer out of vm_page_t's, but
1390                  * if the block size is not page aligned then the starting
1391                  * address may not be either.  Inherit the b_data offset
1392                  * from the original buffer.
1393                  */
1394                 bp->b_data = (char *)((vm_offset_t)bp->b_data |
1395                     ((vm_offset_t)tbp->b_data & PAGE_MASK));
1396                 bp->b_flags &= ~B_ERROR;
1397                 bp->b_flags |= B_CLUSTER | B_BNOCLIP |
1398                         (tbp->b_flags & (B_VMIO | B_NEEDCOMMIT));
1399                 bp->b_bio1.bio_caller_info1.cluster_head = NULL;
1400                 bp->b_bio1.bio_caller_info2.cluster_tail = NULL;
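                     /*
                      * bio1's cluster_head/cluster_tail anchor the chain of
                      * underlying buffers built by cluster_append() below;
                      * cluster_callback(), installed as bio_done further down,
                      * disposes of them when the cluster I/O completes.
                      */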
1401
1402                 /*
1403                  * From this location in the file, scan forward to see
1404                  * if there are buffers with adjacent data that need to
1405                  * be written as well.
1406                  *
1407                  * IO *must* be initiated on index 0 at this point
1408                  * (particularly when called from cluster_awrite()).
1409                  */
1410                 for (i = 0; i < bytes; (i += blksize), (start_loffset += blksize)) {
1411                         if (i == 0) {
1412                                 must_initiate = 1;
1413                         } else {
1414                                 /*
1415                                  * Not first buffer.
1416                                  */
1417                                 must_initiate = 0;
1418                                 tbp = findblk(vp, start_loffset,
1419                                               FINDBLK_NBLOCK);
1420                                 /*
1421                                  * Buffer not found or could not be locked
1422                                  * non-blocking.
1423                                  */
1424                                 if (tbp == NULL)
1425                                         break;
1426
1427                                 /*
1428                                  * If it IS in core, but has different
1429                                  * characteristics, then don't cluster
1430                                  * with it.
1431                                  */
1432                                 if ((tbp->b_flags & (B_VMIO | B_CLUSTEROK |
1433                                      B_INVAL | B_DELWRI | B_NEEDCOMMIT))
1434                                     != (B_DELWRI | B_CLUSTEROK |
1435                                      (bp->b_flags & (B_VMIO | B_NEEDCOMMIT))) ||
1436                                     (tbp->b_flags & B_LOCKED)
1437                                 ) {
1438                                         BUF_UNLOCK(tbp);
1439                                         break;
1440                                 }
1441
1442                                 /*
1443                                  * Check that the combined cluster
1444                                  * would make sense with regard to pages
1445                                  * and would not be too large
1446                                  * and would not be too large.
1447                                  * WARNING! buf_checkwrite() must be the last
1448                                  *          check made.  If it returns 0 then
1449                                  *          we must initiate the I/O.
1450                                  */
1451                                 if ((tbp->b_bcount != blksize) ||
1452                                   ((bp->b_bio2.bio_offset + i) !=
1453                                     tbp->b_bio2.bio_offset) ||
1454                                   ((tbp->b_xio.xio_npages + bp->b_xio.xio_npages) >
1455                                     (maxiosize / PAGE_SIZE)) ||
1456                                   (LIST_FIRST(&tbp->b_dep) &&
1457                                    buf_checkwrite(tbp))
1458                                 ) {
1459                                         BUF_UNLOCK(tbp);
1460                                         break;
1461                                 }
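                                     /*
                                      * If dependencies exist, buf_checkwrite()
                                      * was called above and returned 0, so we
                                      * are committed to initiating this I/O.
                                      */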
1462                                 if (LIST_FIRST(&tbp->b_dep))
1463                                         must_initiate = 1;
1464                                 /*
1465                                  * Ok, it's passed all the tests,
1466                                  * so remove it from the free list
1467                                  * and mark it busy. We will use it.
1468                                  */
1469                                 bremfree(tbp);
1470                                 KKASSERT(tbp->b_cmd == BUF_CMD_DONE);
1471                         }
1472
1473                         /*
1474                          * If the IO is via the VM then we do some
1475                          * special VM hackery (yuck).  Since the buffer's
1476                          * block size may not be page-aligned it is possible
1477                          * for a page to be shared between two buffers.  We
1478                          * have to get rid of the duplication when building
1479                          * the cluster.
1480                          */
1481                         if (tbp->b_flags & B_VMIO) {
1482                                 vm_page_t m;
1483
1484                                 /*
1485                                  * Try to avoid deadlocks with the VM system.
1486                                  * However, we cannot abort the I/O if
1487                                  * must_initiate is non-zero.
1488                                  */
1489                                 if (must_initiate == 0) {
1490                                         for (j = 0;
1491                                              j < tbp->b_xio.xio_npages;
1492                                              ++j) {
1493                                                 m = tbp->b_xio.xio_pages[j];
1494                                                 if (m->flags & PG_BUSY) {
1495                                                         bqrelse(tbp);
1496                                                         goto finishcluster;
1497                                                 }
1498                                         }
1499                                 }
1500
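                                     /*
                                      * Mark each page busy for I/O and add it
                                      * to the pbuf's page array, avoiding a
                                      * duplicate entry for a page shared with
                                      * the previously appended buffer.
                                      */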
1501                                 for (j = 0; j < tbp->b_xio.xio_npages; ++j) {
1502                                         m = tbp->b_xio.xio_pages[j];
1503                                         vm_page_busy_wait(m, FALSE, "clurpg");
1504                                         vm_page_io_start(m);
1505                                         vm_page_wakeup(m);
1506                                         vm_object_pip_add(m->object, 1);
1507                                         if ((bp->b_xio.xio_npages == 0) ||
1508                                           (bp->b_xio.xio_pages[bp->b_xio.xio_npages - 1] != m)) {
1509                                                 bp->b_xio.xio_pages[bp->b_xio.xio_npages] = m;
1510                                                 bp->b_xio.xio_npages++;
1511                                         }
1512                                 }
1513                         }
1514                         bp->b_bcount += blksize;
1515                         bp->b_bufsize += blksize;
1516
1517                         /*
1518                          * NOTE: see bwrite/bawrite code for why we no longer
1519                          *       undirty tbp here.
1520                          *
1521                          *       bundirty(tbp); REMOVED
1522                          */
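                             /*
                              * Chain tbp onto the cluster.  BUF_KERNPROC()
                              * disassociates the buffer lock from this thread
                              * so it can be released from the I/O completion
                              * path.
                              */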
1523                         tbp->b_flags &= ~B_ERROR;
1524                         tbp->b_cmd = BUF_CMD_WRITE;
1525                         BUF_KERNPROC(tbp);
1526                         cluster_append(&bp->b_bio1, tbp);
1527
1528                         /*
1529                          * Check for latent dependencies to be handled
1530                          */
1531                         if (LIST_FIRST(&tbp->b_dep) != NULL)
1532                                 buf_start(tbp);
1533                 }
1534         finishcluster:
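                     /*
                      * Map the gathered pages into the pbuf's KVA window.
                      * The cluster must fit within the pbuf's KVA reservation
                      * (b_kvasize) or we panic below.
                      */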
1535                 pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
1536                             (vm_page_t *)bp->b_xio.xio_pages,
1537                             bp->b_xio.xio_npages);
1538                 if (bp->b_bufsize > bp->b_kvasize) {
1539                         panic("cluster_wbuild: b_bufsize(%d) "
1540                               "> b_kvasize(%d)\n",
1541                               bp->b_bufsize, bp->b_kvasize);
1542                 }
1543                 totalwritten += bp->b_bufsize;
1544                 bp->b_dirtyoff = 0;
1545                 bp->b_dirtyend = bp->b_bufsize;
1546                 bp->b_bio1.bio_done = cluster_callback;
1547                 bp->b_cmd = BUF_CMD_WRITE;
1548
1549                 vfs_busy_pages(vp, bp);
1550                 bsetrunningbufspace(bp, bp->b_bufsize);
1551                 BUF_KERNPROC(bp);
1552                 vn_strategy(vp, &bp->b_bio1);
1553
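                     /*
                      * start_loffset was advanced inside the scan loop above;
                      * account for the bytes covered and continue.
                      */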
1554                 bytes -= i;
1555         }
1556         return totalwritten;
1557 }
1558
1559 /*
1560  * Collect together all the buffers in a cluster, plus add one
1561  * additional buffer passed-in.
1562  *
1563  * Only pre-existing buffers whose block size matches blksize are collected.
1564  * (this is primarily because HAMMER1 uses varying block sizes and we don't
1565  * want to override its choices).
1566  *
1567  * This code will not try to collect buffers that it cannot lock, otherwise
1568  * it might deadlock against SMP-friendly filesystems.
1569  */
1570 static struct cluster_save *
1571 cluster_collectbufs(cluster_cache_t *cc, struct vnode *vp,
1572                     struct buf *last_bp, int blksize)
1573 {
1574         struct cluster_save *buflist;
1575         struct buf *bp;
1576         off_t loffset;
1577         int i, len;
1578         int j;
1579         int k;
1580
1581         len = (int)(cc->v_lastw - cc->v_cstart + blksize) / blksize;
1582         KKASSERT(len > 0);
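             /*
              * A single allocation holds the cluster_save header followed by
              * the array of len + 1 child buffer pointers (the extra slot is
              * for last_bp, appended at the end).
              */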
1583         buflist = kmalloc(sizeof(struct buf *) * (len + 1) + sizeof(*buflist),
1584                          M_SEGMENT, M_WAITOK);
1585         buflist->bs_nchildren = 0;
1586         buflist->bs_children = (struct buf **) (buflist + 1);
1587         for (loffset = cc->v_cstart, i = 0, j = 0;
1588              i < len;
1589              (loffset += blksize), i++) {
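                     /*
                      * GETBLK_SZMATCH only accepts a buffer whose size matches
                      * and GETBLK_NOWAIT avoids blocking on a locked buffer,
                      * per the deadlock note above.
                      */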
1590                 bp = getcacheblk(vp, loffset,
1591                                  last_bp->b_bcount, GETBLK_SZMATCH |
1592                                                     GETBLK_NOWAIT);
1593                 buflist->bs_children[i] = bp;
1594                 if (bp == NULL) {
1595                         j = i + 1;
1596                 } else if (bp->b_bio2.bio_offset == NOOFFSET) {
1597                         VOP_BMAP(bp->b_vp, bp->b_loffset,
1598                                  &bp->b_bio2.bio_offset,
1599                                  NULL, NULL, BUF_CMD_WRITE);
1600                 }
1601         }
1602
1603         /*
1604          * Get rid of gaps
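              * (j indexes one past the last missing buffer, if any; release
              *  everything before it and shift the contiguous tail down to
              *  index 0)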
1605          */
1606         for (k = 0; k < j; ++k) {
1607                 if (buflist->bs_children[k]) {
1608                         bqrelse(buflist->bs_children[k]);
1609                         buflist->bs_children[k] = NULL;
1610                 }
1611         }
1612         if (j != 0) {
1613                 if (j != i) {
1614                         bcopy(buflist->bs_children + j,
1615                               buflist->bs_children + 0,
1616                               sizeof(buflist->bs_children[0]) * (i - j));
1617                 }
1618                 i -= j;
1619         }
1620         buflist->bs_children[i] = bp = last_bp;
1621         if (bp->b_bio2.bio_offset == NOOFFSET) {
1622                 VOP_BMAP(bp->b_vp, bp->b_loffset, &bp->b_bio2.bio_offset,
1623                          NULL, NULL, BUF_CMD_WRITE);
1624         }
1625         buflist->bs_nchildren = i + 1;
1626         return (buflist);
1627 }
1628
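     /*
      * Append tbp to the chain of buffers belonging to a cluster pbuf.
      * The chain is anchored by the bio's cluster_head/cluster_tail
      * caller_info fields and linked through each buffer's b_cluster_next.
      */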
1629 void
1630 cluster_append(struct bio *bio, struct buf *tbp)
1631 {
1632         tbp->b_cluster_next = NULL;
1633         if (bio->bio_caller_info1.cluster_head == NULL) {
1634                 bio->bio_caller_info1.cluster_head = tbp;
1635                 bio->bio_caller_info2.cluster_tail = tbp;
1636         } else {
1637                 bio->bio_caller_info2.cluster_tail->b_cluster_next = tbp;
1638                 bio->bio_caller_info2.cluster_tail = tbp;
1639         }
1640 }
1641
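     /*
      * Set the read-ahead mark on the buffer and, if present, on its first
      * backing page, so a future access can trigger further read-ahead.
      */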
1642 static
1643 void
1644 cluster_setram(struct buf *bp)
1645 {
1646         bp->b_flags |= B_RAM;
1647         if (bp->b_xio.xio_npages)
1648                 vm_page_flag_set(bp->b_xio.xio_pages[0], PG_RAM);
1649 }