sys/kern/vfs_cluster.c
1 /*-
2  * Copyright (c) 1993
3  *      The Regents of the University of California.  All rights reserved.
4  * Modifications/enhancements:
5  *      Copyright (c) 1995 John S. Dyson.  All rights reserved.
6  *      Copyright (c) 2012-2013 Matthew Dillon.  All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. Neither the name of the University nor the names of its contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  */
32
33 #include "opt_debug_cluster.h"
34
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/kernel.h>
38 #include <sys/proc.h>
39 #include <sys/buf.h>
40 #include <sys/vnode.h>
41 #include <sys/malloc.h>
42 #include <sys/mount.h>
43 #include <sys/resourcevar.h>
44 #include <sys/vmmeter.h>
45 #include <vm/vm.h>
46 #include <vm/vm_object.h>
47 #include <vm/vm_page.h>
48 #include <sys/sysctl.h>
49
50 #include <sys/buf2.h>
51 #include <vm/vm_page2.h>
52
53 #include <machine/limits.h>
54
55 /*
56  * Cluster tracking cache - replaces the original vnode v_* fields which had
57  * limited utility and were not MP safe.
58  *
59  * The cluster tracking cache is a simple 4-way set-associative non-chained
60  * cache.  It is capable of tracking up to four zones separated by 1MB or
61  * more per vnode.
62  *
63  * NOTE: We want this structure to be cache-line friendly so the iterator
64  *       is embedded rather than in a separate array.
65  *
66  * NOTE: A cluster cache entry can become stale when a vnode is recycled.
67  *       For now we treat the values as heuristic but also self-consistent,
68  *       i.e. the values cannot be completely random and cannot be SMP unsafe
69  *       or the cluster code might end up clustering non-contiguous buffers
70  *       at the wrong offsets.
71  */
72 struct cluster_cache {
73         struct vnode *vp;
74         u_int   locked;
75         off_t   v_lastw;                /* last write (write cluster) */
76         off_t   v_cstart;               /* start block of cluster */
77         off_t   v_lasta;                /* last allocation */
78         u_int   v_clen;                 /* length of current cluster */
79         u_int   iterator;
80 } __cachealign;
81
82 typedef struct cluster_cache cluster_cache_t;
83
84 #define CLUSTER_CACHE_SIZE      512
85 #define CLUSTER_CACHE_MASK      (CLUSTER_CACHE_SIZE - 1)
86
87 #define CLUSTER_ZONE            ((off_t)(1024 * 1024))
88
89 cluster_cache_t cluster_array[CLUSTER_CACHE_SIZE];
90
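/*
 * Illustrative note (derived from cluster_getcache() below): the vnode
 * pointer is hashed and masked with (CLUSTER_CACHE_MASK & ~3), selecting
 * one of 128 four-entry sets in cluster_array[].  Within a set an entry
 * matches a request when the vnode matches and
 *
 *	((v_cstart ^ loffset) & ~(CLUSTER_ZONE - 1)) == 0
 *
 * i.e. when the cached cluster start and the requested offset fall within
 * the same 1MB-aligned zone.
 */
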
91 #if defined(CLUSTERDEBUG)
92 #include <sys/sysctl.h>
93 static int      rcluster = 0;
94 SYSCTL_INT(_debug, OID_AUTO, rcluster, CTLFLAG_RW, &rcluster, 0, "");
95 #endif
96
97 static MALLOC_DEFINE(M_SEGMENT, "cluster_save", "cluster_save buffer");
98
99 static struct cluster_save *
100         cluster_collectbufs (cluster_cache_t *cc, struct vnode *vp,
101                                 struct buf *last_bp, int blksize);
102 static struct buf *
103         cluster_rbuild (struct vnode *vp, off_t filesize, off_t loffset,
104                             off_t doffset, int blksize, int run, 
105                             struct buf *fbp, int *srp);
106 static void cluster_callback (struct bio *);
107 static void cluster_setram (struct buf *);
108 static void cluster_clrram (struct buf *);
109 static int cluster_wbuild(struct vnode *vp, struct buf **bpp, int blksize,
110                             off_t start_loffset, int bytes);
111
112 static int write_behind = 1;
113 SYSCTL_INT(_vfs, OID_AUTO, write_behind, CTLFLAG_RW, &write_behind, 0,
114     "Cluster write-behind setting");
115 static quad_t write_behind_minfilesize = 10 * 1024 * 1024;
116 SYSCTL_QUAD(_vfs, OID_AUTO, write_behind_minfilesize, CTLFLAG_RW,
117     &write_behind_minfilesize, 0, "Minimum file size for cluster write-behind");
118 static int max_readahead = 2 * 1024 * 1024;
119 SYSCTL_INT(_vfs, OID_AUTO, max_readahead, CTLFLAG_RW, &max_readahead, 0,
120     "Limit in bytes for desired cluster read-ahead");
121
122 extern vm_page_t        bogus_page;
123
124 /*
125  * nblks is our cluster_rbuild request size.  The approximate number of
126  * physical read-ahead requests is maxra / nblks.  The physical request
127  * size is limited by the device (maxrbuild).  We also do not want to make
128  * the request size too big or it will mess up the B_RAM streaming.
129  */
130 static __inline
131 int
132 calc_rbuild_reqsize(int maxra, int maxrbuild)
133 {
134         int nblks;
135
136         if ((nblks = maxra / 4) > maxrbuild)
137                 nblks = maxrbuild;
138         if (nblks < 1)
139                 nblks = maxra;
140         return nblks;
141 }
142
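/*
 * Worked example (illustrative numbers only): with maxra = 128 blocks and
 * a device limit of maxrbuild = 8, nblks = min(128 / 4, 8) = 8, so the
 * read-ahead is issued as roughly 128 / 8 = 16 physical requests.  With a
 * tiny maxra such as 2, maxra / 4 truncates to 0 and nblks falls back to
 * maxra itself.
 */
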
143 /*
144  * Acquire/release cluster cache (can return dummy entry)
145  */
146 static
147 cluster_cache_t *
148 cluster_getcache(cluster_cache_t *dummy, struct vnode *vp, off_t loffset)
149 {
150         cluster_cache_t *cc;
151         size_t hv;
152         int i;
153         int xact;
154
155         hv = (size_t)(intptr_t)vp ^ (size_t)(intptr_t)vp / sizeof(*vp);
156         hv &= CLUSTER_CACHE_MASK & ~3;
157         cc = &cluster_array[hv];
158
159         xact = -1;
160         for (i = 0; i < 4; ++i) {
161                 if (cc[i].vp != vp)
162                         continue;
163                 if (((cc[i].v_cstart ^ loffset) & ~(CLUSTER_ZONE - 1)) == 0) {
164                         xact = i;
165                         break;
166                 }
167         }
168         if (xact >= 0 && atomic_swap_int(&cc[xact].locked, 1) == 0) {
169                 if (cc[xact].vp == vp &&
170                     ((cc[xact].v_cstart ^ loffset) & ~(CLUSTER_ZONE - 1)) == 0) {
171                         return(&cc[xact]);
172                 }
173                 atomic_swap_int(&cc[xact].locked, 0);
174         }
175
176         /*
177          * New entry.  If we can't acquire the cache line then use the
178          * passed-in dummy element and reset all fields.
179          *
180          * When we are able to acquire the cache line we only clear the
181          * fields if the vp does not match.  This allows us to multi-zone
182          * a vp and for excessive zones / partial clusters to be retired.
183          */
184         i = cc->iterator++ & 3;
185         cc += i;
186         if (atomic_swap_int(&cc->locked, 1) != 0) {
187                 cc = dummy;
188                 cc->locked = 1;
189                 cc->vp = NULL;
190         }
191         if (cc->vp != vp) {
192                 cc->vp = vp;
193                 cc->v_lasta = 0;
194                 cc->v_clen = 0;
195                 cc->v_cstart = 0;
196                 cc->v_lastw = 0;
197         }
198         return(cc);
199 }
200
201 static
202 void
203 cluster_putcache(cluster_cache_t *cc)
204 {
205         atomic_swap_int(&cc->locked, 0);
206 }
207
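/*
 * Usage pattern within this file (see cluster_write() below).  The dummy
 * entry lives on the caller's stack and is substituted when the shared
 * cache line cannot be locked:
 *
 *	cluster_cache_t dummy;
 *	cluster_cache_t *cc;
 *
 *	cc = cluster_getcache(&dummy, vp, loffset);
 *	(inspect/update cc->v_cstart, cc->v_clen, cc->v_lastw, cc->v_lasta
 *	 while holding the entry)
 *	cluster_putcache(cc);
 */
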
208 /*
209  * This replaces bread(), providing a synchronous read of the requested
210  * buffer plus asynchronous read-ahead within the specified bounds.
211  *
212  * The caller may pre-populate *bpp if it already has the requested buffer
213  * in-hand, else must set *bpp to NULL.  Note that the cluster_read() inline
214  * sets *bpp to NULL and then calls cluster_readx() for compatibility.
215  *
216  * filesize     - read-ahead @ blksize will not cross this boundary
217  * loffset      - loffset for returned *bpp
218  * blksize      - blocksize for returned *bpp and read-ahead bps
219  * minreq       - minimum (not a hard minimum) in bytes, typically reflects
220  *                a higher level uio resid.
221  * maxreq       - maximum (sequential heuristic) in bytes (highest typically ~2MB)
222  * bpp          - return buffer (*bpp) for (loffset,blksize)
223  */
224 int
225 cluster_readx(struct vnode *vp, off_t filesize, off_t loffset,
226              int blksize, size_t minreq, size_t maxreq, struct buf **bpp)
227 {
228         struct buf *bp, *rbp, *reqbp;
229         off_t origoffset;
230         off_t doffset;
231         int error;
232         int i;
233         int maxra;
234         int maxrbuild;
235         int sr;
236
237         sr = 0;
238
239         /*
240          * Calculate the desired read-ahead in blksize'd blocks (maxra).
241          * To do this we calculate maxreq.
242          *
243          * maxreq typically starts out as a sequential heuristic.  If the
244          * high level uio/resid is bigger (minreq), we pop maxreq up to
245          * minreq.  This represents the case where random I/O is being
246  * performed by userland issuing big read()'s.
247          *
248          * Then we limit maxreq to max_readahead to ensure it is a reasonable
249          * value.
250          *
251          * Finally we must ensure that (loffset + maxreq) does not cross the
252          * boundary (filesize) for the current blocksize.  If we allowed it
253          * to cross we could end up with buffers past the boundary with the
254          * wrong block size (HAMMER large-data areas use mixed block sizes).
255          * minreq is also absolutely limited to filesize.
256          */
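        /*
         * Worked example (illustrative numbers): with blksize = 16KB, a
         * sequential-heuristic maxreq of 512KB and a 1MB uio resid
         * (minreq), maxreq is bumped to 1MB, which is under the 2MB
         * max_readahead default, so maxra = 1MB / 16KB = 64 blocks,
         * further clipped at filesize below.
         */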
257         if (maxreq < minreq)
258                 maxreq = minreq;
259         /* minreq not used beyond this point */
260
261         if (maxreq > max_readahead) {
262                 maxreq = max_readahead;
263                 if (maxreq > 16 * 1024 * 1024)
264                         maxreq = 16 * 1024 * 1024;
265         }
266         if (maxreq < blksize)
267                 maxreq = blksize;
268         if (loffset + maxreq > filesize) {
269                 if (loffset > filesize)
270                         maxreq = 0;
271                 else
272                         maxreq = filesize - loffset;
273         }
274
275         maxra = (int)(maxreq / blksize);
276
277         /*
278          * Get the requested block.
279          */
280         if (*bpp)
281                 reqbp = bp = *bpp;
282         else
283                 *bpp = reqbp = bp = getblk(vp, loffset, blksize, 0, 0);
284         origoffset = loffset;
285
286         /*
287          * Calculate the maximum cluster size for a single I/O, used
288          * by cluster_rbuild().
289          */
290         maxrbuild = vmaxiosize(vp) / blksize;
291
292         /*
293          * If it is in the cache, then check to see if the reads have been
294          * sequential.  If they have, then try some read-ahead, otherwise
295          * back-off on prospective read-aheads.
296          */
297         if (bp->b_flags & B_CACHE) {
298                 /*
299                  * Not sequential, do not do any read-ahead
300                  */
301                 if (maxra <= 1)
302                         return 0;
303
304                 /*
305                  * No read-ahead mark, do not do any read-ahead
306                  * yet.
307                  */
308                 if ((bp->b_flags & B_RAM) == 0)
309                         return 0;
310
311                 /*
312                  * We hit a read-ahead-mark, figure out how much read-ahead
313                  * to do (maxra) and where to start (loffset).
314                  *
315                  * Typically the way this works is that B_RAM is set in the
316                  * middle of the cluster and triggers an overlapping
317                  * read-ahead of 1/2 a cluster more blocks.  This ensures
318                  * that the cluster read-ahead scales with the read-ahead
319                  * count and is thus better able to absorb the caller's
320                  * latency.
321                  *
322                  * Estimate where the next unread block will be by assuming
323                  * that the B_RAM's are placed at the half-way point.
324                  */
325                 bp->b_flags &= ~B_RAM;
326
327                 i = maxra / 2;
328                 rbp = findblk(vp, loffset + i * blksize, FINDBLK_TEST);
329                 if (rbp == NULL || (rbp->b_flags & B_CACHE) == 0) {
330                         while (i) {
331                                 --i;
332                                 rbp = findblk(vp, loffset + i * blksize,
333                                               FINDBLK_TEST);
334                                 if (rbp) {
335                                         ++i;
336                                         break;
337                                 }
338                         }
339                 } else {
340                         while (i < maxra) {
341                                 rbp = findblk(vp, loffset + i * blksize,
342                                               FINDBLK_TEST);
343                                 if (rbp == NULL)
344                                         break;
345                                 ++i;
346                         }
347                 }
348
349                 /*
350                  * We got everything or everything is in the cache, no
351                  * point continuing.
352                  */
353                 if (i >= maxra)
354                         return 0;
355
356                 /*
357                  * Calculate where to start the read-ahead and how much
358                  * to do.  Generally speaking we want to read-ahead by
359                  * (maxra) when we've found a read-ahead mark.  We do
360                  * not want to reduce maxra here as it will cause
361                  * successive read-ahead I/O's to be smaller and smaller.
362                  *
363                  * However, we have to make sure we don't break the
364                  * filesize limitation for the clustered operation.
365                  */
366                 loffset += i * blksize;
367                 reqbp = bp = NULL;
368
369                 if (loffset >= filesize)
370                         return 0;
371                 if (loffset + maxra * blksize > filesize) {
372                         maxreq = filesize - loffset;
373                         maxra = (int)(maxreq / blksize);
374                 }
375
376                 /*
377                  * Set RAM on first read-ahead block since we still have
378                  * approximately maxra/2 blocks ahead of us that are already
379                  * cached or in-progress.
380                  */
381                 sr = 1;
382         } else {
383                 /*
384                  * Start block is not valid, we will want to do a
385                  * full read-ahead.
386                  */
387                 __debugvar off_t firstread = bp->b_loffset;
388                 int nblks;
389
390                 /*
391                  * Set-up synchronous read for bp.
392                  */
393                 bp->b_cmd = BUF_CMD_READ;
394                 bp->b_bio1.bio_done = biodone_sync;
395                 bp->b_bio1.bio_flags |= BIO_SYNC;
396
397                 KASSERT(firstread != NOOFFSET, 
398                         ("cluster_read: no buffer offset"));
399
400                 nblks = calc_rbuild_reqsize(maxra, maxrbuild);
401
402                 /*
403                  * Set RAM half-way through the full-cluster.
404                  */
405                 sr = (maxra + 1) / 2;
406
407                 if (nblks > 1) {
408                         int burstbytes;
409
410                         error = VOP_BMAP(vp, loffset, &doffset,
411                                          &burstbytes, NULL, BUF_CMD_READ);
412                         if (error)
413                                 goto single_block_read;
414                         if (nblks > burstbytes / blksize)
415                                 nblks = burstbytes / blksize;
416                         if (doffset == NOOFFSET)
417                                 goto single_block_read;
418                         if (nblks <= 1)
419                                 goto single_block_read;
420
421                         bp = cluster_rbuild(vp, filesize, loffset,
422                                             doffset, blksize, nblks, bp, &sr);
423                         loffset += bp->b_bufsize;
424                         maxra -= bp->b_bufsize / blksize;
425                 } else {
426 single_block_read:
427                         /*
428                          * If it isn't in the cache, then get a chunk from
429                          * disk if sequential, otherwise just get the block.
430                          */
431                         loffset += blksize;
432                         --maxra;
433                 }
434         }
435
436         /*
437          * If B_CACHE was not set issue bp.  bp will either be an
438          * asynchronous cluster buf or a synchronous single-buf.
439          * If it is a single buf it will be the same as reqbp.
440          *
441          * NOTE: Once an async cluster buf is issued bp becomes invalid.
442          */
443         if (bp) {
444 #if defined(CLUSTERDEBUG)
445                 if (rcluster)
446                         kprintf("S(%012jx,%d,%d)\n",
447                             (intmax_t)bp->b_loffset, bp->b_bcount, maxra);
448 #endif
449                 if ((bp->b_flags & B_CLUSTER) == 0)
450                         vfs_busy_pages(vp, bp);
451                 bp->b_flags &= ~(B_ERROR|B_INVAL);
452                 vn_strategy(vp, &bp->b_bio1);
453                 /* bp invalid now */
454                 bp = NULL;
455         }
456
457 #if defined(CLUSTERDEBUG)
458         if (rcluster)
459                 kprintf("cluster_rd %016jx/%d maxra=%d sr=%d\n",
460                         loffset, blksize, maxra, sr);
461 #endif
462
463         /*
464          * If we have been doing sequential I/O, then do some read-ahead.
465          * The code above us should have positioned us at the next likely
466          * offset.
467          *
468          * Only mess with buffers which we can immediately lock.  HAMMER
469          * will do device-readahead irrespective of what the blocks
470          * represent.
471          *
472          * Set B_RAM on the first buffer (the next likely offset needing
473          * read-ahead), under the assumption that there are still
474          * approximately maxra/2 blocks good ahead of us.
475          */
476         while (maxra > 0) {
477                 int burstbytes;
478                 int nblks;
479
480                 rbp = getblk(vp, loffset, blksize,
481                              GETBLK_SZMATCH|GETBLK_NOWAIT, 0);
482 #if defined(CLUSTERDEBUG)
483                 if (rcluster) {
484                         kprintf("read-ahead %016jx rbp=%p ",
485                                 loffset, rbp);
486                 }
487 #endif
488                 if (rbp == NULL)
489                         goto no_read_ahead;
490                 if ((rbp->b_flags & B_CACHE)) {
491                         bqrelse(rbp);
492                         goto no_read_ahead;
493                 }
494
495                 /*
496                  * If BMAP is not supported or has an issue, we still do
497                  * (maxra) read-ahead, but we do not try to use rbuild.
498                  */
499                 error = VOP_BMAP(vp, loffset, &doffset,
500                                  &burstbytes, NULL, BUF_CMD_READ);
501                 if (error || doffset == NOOFFSET) {
502                         nblks = 1;
503                         doffset = NOOFFSET;
504                 } else {
505                         nblks = calc_rbuild_reqsize(maxra, maxrbuild);
506                         if (nblks > burstbytes / blksize)
507                                 nblks = burstbytes / blksize;
508                 }
509                 rbp->b_cmd = BUF_CMD_READ;
510
511                 if (nblks > 1) {
512                         rbp = cluster_rbuild(vp, filesize, loffset,
513                                              doffset, blksize, 
514                                              nblks, rbp, &sr);
515                 } else {
516                         rbp->b_bio2.bio_offset = doffset;
517                         if (--sr == 0)
518                                 cluster_setram(rbp);
519                 }
520
521                 rbp->b_flags &= ~(B_ERROR|B_INVAL);
522
523                 if ((rbp->b_flags & B_CLUSTER) == 0)
524                         vfs_busy_pages(vp, rbp);
525                 BUF_KERNPROC(rbp);
526                 loffset += rbp->b_bufsize;
527                 maxra -= rbp->b_bufsize / blksize;
528                 vn_strategy(vp, &rbp->b_bio1);
529                 /* rbp invalid now */
530         }
531
532         /*
533          * Wait for our original buffer to complete its I/O.  reqbp will
534          * be NULL if the original buffer was B_CACHE.  We are returning
535          * (*bpp) which is the same as reqbp when reqbp != NULL.
536          */
537 no_read_ahead:
538         if (reqbp) {
539                 KKASSERT(reqbp->b_bio1.bio_flags & BIO_SYNC);
540                 error = biowait(&reqbp->b_bio1, "clurd");
541         } else {
542                 error = 0;
543         }
544         return (error);
545 }
546
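/*
 * Minimal caller sketch (not lifted from any particular filesystem; boff,
 * xfersize and ra_bytes are hypothetical locals).  A VOP_READ
 * implementation would typically loop over the uio doing something like:
 *
 *	struct buf *bp = NULL;
 *	int error;
 *
 *	error = cluster_readx(vp, filesize, loffset, blksize,
 *			      uio->uio_resid, ra_bytes, &bp);
 *	if (error) {
 *		brelse(bp);
 *		return (error);
 *	}
 *	error = uiomove(bp->b_data + boff, xfersize, uio);
 *	bqrelse(bp);
 *
 * where ra_bytes is whatever sequential read-ahead heuristic the
 * filesystem maintains (it is clamped internally to vfs.max_readahead).
 */
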
547 /*
548  * This replaces breadcb(), providing an asynchronous read of the requested
549  * buffer with a callback, plus an asynchronous read-ahead within the
550  * specified bounds.
551  *
552  * The callback must check whether BIO_DONE is set in the bio and issue
553  * the bpdone(bp, 0) if it isn't.  The callback is responsible for clearing
554  * BIO_DONE and disposing of the I/O (bqrelse()ing it).
555  *
556  * filesize     - read-ahead @ blksize will not cross this boundary
557  * loffset      - loffset for returned *bpp
558  * blksize      - blocksize for returned *bpp and read-ahead bps
559  * minreq       - minimum (not a hard minimum) in bytes, typically reflects
560  *                a higher level uio resid.
561  * maxreq       - maximum (sequential heuristic) in bytes (highest typically ~2MB)
562  * bpp          - return buffer (*bpp) for (loffset,blksize)
563  */
564 void
565 cluster_readcb(struct vnode *vp, off_t filesize, off_t loffset,
566              int blksize, size_t minreq, size_t maxreq,
567              void (*func)(struct bio *), void *arg)
568 {
569         struct buf *bp, *rbp, *reqbp;
570         off_t origoffset;
571         off_t doffset;
572         int i;
573         int maxra;
574         int maxrbuild;
575         int sr;
576
577         sr = 0;
578
579         /*
580          * Calculate the desired read-ahead in blksize'd blocks (maxra).
581          * To do this we calculate maxreq.
582          *
583          * maxreq typically starts out as a sequential heuristic.  If the
584          * high level uio/resid is bigger (minreq), we pop maxreq up to
585          * minreq.  This represents the case where random I/O is being
586  * performed by userland issuing big read()'s.
587          *
588          * Then we limit maxreq to max_readahead to ensure it is a reasonable
589          * value.
590          *
591          * Finally we must ensure that (loffset + maxreq) does not cross the
592          * boundary (filesize) for the current blocksize.  If we allowed it
593          * to cross we could end up with buffers past the boundary with the
594          * wrong block size (HAMMER large-data areas use mixed block sizes).
595          * minreq is also absolutely limited to filesize.
596          */
597         if (maxreq < minreq)
598                 maxreq = minreq;
599         /* minreq not used beyond this point */
600
601         if (maxreq > max_readahead) {
602                 maxreq = max_readahead;
603                 if (maxreq > 16 * 1024 * 1024)
604                         maxreq = 16 * 1024 * 1024;
605         }
606         if (maxreq < blksize)
607                 maxreq = blksize;
608         if (loffset + maxreq > filesize) {
609                 if (loffset > filesize)
610                         maxreq = 0;
611                 else
612                         maxreq = filesize - loffset;
613         }
614
615         maxra = (int)(maxreq / blksize);
616
617         /*
618          * Get the requested block.
619          */
620         reqbp = bp = getblk(vp, loffset, blksize, 0, 0);
621         origoffset = loffset;
622
623         /*
624          * Calculate the maximum cluster size for a single I/O, used
625          * by cluster_rbuild().
626          */
627         maxrbuild = vmaxiosize(vp) / blksize;
628
629         /*
630          * if it is in the cache, then check to see if the reads have been
631          * sequential.  If they have, then try some read-ahead, otherwise
632          * back-off on prospective read-aheads.
633          */
634         if (bp->b_flags & B_CACHE) {
635                 /*
636                  * Setup for func() call whether we do read-ahead or not.
637                  */
638                 bp->b_bio1.bio_caller_info1.ptr = arg;
639                 bp->b_bio1.bio_flags |= BIO_DONE;
640
641                 /*
642                  * Not sequential, do not do any read-ahead
643                  */
644                 if (maxra <= 1)
645                         goto no_read_ahead;
646
647                 /*
648                  * No read-ahead mark, do not do any read-ahead
649                  * yet.
650                  */
651                 if ((bp->b_flags & B_RAM) == 0)
652                         goto no_read_ahead;
653                 bp->b_flags &= ~B_RAM;
654
655                 /*
656                  * We hit a read-ahead-mark, figure out how much read-ahead
657                  * to do (maxra) and where to start (loffset).
658                  *
659                  * Shortcut the scan.  Typically the way this works is that
660                  * we've built up all the blocks in between except for the
661                  * last in previous iterations, so if the second-to-last
662                  * block is present we just skip ahead to it.
663                  *
664                  * This algorithm has O(1) cpu in the steady state no
665                  * matter how large maxra is.
666                  */
667                 if (findblk(vp, loffset + (maxra - 2) * blksize, FINDBLK_TEST))
668                         i = maxra - 1;
669                 else
670                         i = 1;
671                 while (i < maxra) {
672                         if (findblk(vp, loffset + i * blksize,
673                                     FINDBLK_TEST) == NULL) {
674                                 break;
675                         }
676                         ++i;
677                 }
678
679                 /*
680                  * We got everything or everything is in the cache, no
681                  * point continuing.
682                  */
683                 if (i >= maxra)
684                         goto no_read_ahead;
685
686                 /*
687                  * Calculate where to start the read-ahead and how much
688                  * to do.  Generally speaking we want to read-ahead by
689                  * (maxra) when we've found a read-ahead mark.  We do
690                  * not want to reduce maxra here as it will cause
691                  * successive read-ahead I/O's to be smaller and smaller.
692                  *
693                  * However, we have to make sure we don't break the
694                  * filesize limitation for the clustered operation.
695                  */
696                 loffset += i * blksize;
697                 bp = NULL;
698                 /* leave reqbp intact to force function callback */
699
700                 if (loffset >= filesize)
701                         goto no_read_ahead;
702                 if (loffset + maxra * blksize > filesize) {
703                         maxreq = filesize - loffset;
704                         maxra = (int)(maxreq / blksize);
705                 }
706                 sr = 1;
707         } else {
708                 /*
709                  * bp is not valid, no prior cluster in progress so get a
710                  * full cluster read-ahead going.
711                  */
712                 __debugvar off_t firstread = bp->b_loffset;
713                 int nblks;
714                 int error;
715
716                 /*
717                  * Set-up synchronous read for bp.
718                  */
719                 bp->b_flags &= ~(B_ERROR | B_EINTR | B_INVAL);
720                 bp->b_cmd = BUF_CMD_READ;
721                 bp->b_bio1.bio_done = func;
722                 bp->b_bio1.bio_caller_info1.ptr = arg;
723                 BUF_KERNPROC(bp);
724                 reqbp = NULL;   /* don't func() reqbp, it's running async */
725
726                 KASSERT(firstread != NOOFFSET,
727                         ("cluster_read: no buffer offset"));
728
729                 /*
730                  * nblks is our cluster_rbuild request size, limited
731                  * primarily by the device.
732                  */
733                 nblks = calc_rbuild_reqsize(maxra, maxrbuild);
734
735                 /*
736                  * Set RAM half-way through the full-cluster.
737                  */
738                 sr = (maxra + 1) / 2;
739
740                 if (nblks > 1) {
741                         int burstbytes;
742
743                         error = VOP_BMAP(vp, loffset, &doffset,
744                                          &burstbytes, NULL, BUF_CMD_READ);
745                         if (error)
746                                 goto single_block_read;
747                         if (nblks > burstbytes / blksize)
748                                 nblks = burstbytes / blksize;
749                         if (doffset == NOOFFSET)
750                                 goto single_block_read;
751                         if (nblks <= 1)
752                                 goto single_block_read;
753
754                         bp = cluster_rbuild(vp, filesize, loffset,
755                                             doffset, blksize, nblks, bp, &sr);
756                         loffset += bp->b_bufsize;
757                         maxra -= bp->b_bufsize / blksize;
758                 } else {
759 single_block_read:
760                         /*
761                          * If it isn't in the cache, then get a chunk from
762                          * disk if sequential, otherwise just get the block.
763                          */
764                         loffset += blksize;
765                         --maxra;
766                 }
767         }
768
769         /*
770          * If bp != NULL then B_CACHE was *NOT* set and bp must be issued.
771          * bp will either be an asynchronous cluster buf or an asynchronous
772          * single-buf.
773          *
774          * NOTE: Once an async cluster buf is issued bp becomes invalid.
775          */
776         if (bp) {
777 #if defined(CLUSTERDEBUG)
778                 if (rcluster)
779                         kprintf("S(%012jx,%d,%d)\n",
780                             (intmax_t)bp->b_loffset, bp->b_bcount, maxra);
781 #endif
782                 if ((bp->b_flags & B_CLUSTER) == 0)
783                         vfs_busy_pages(vp, bp);
784                 bp->b_flags &= ~(B_ERROR|B_INVAL);
785                 vn_strategy(vp, &bp->b_bio1);
786                 /* bp invalid now */
787                 bp = NULL;
788         }
789
790 #if defined(CLUSTERDEBUG)
791         if (rcluster)
792                 kprintf("cluster_rd %016jx/%d maxra=%d sr=%d\n",
793                         loffset, blksize, maxra, sr);
794 #endif
795
796         /*
797          * If we have been doing sequential I/O, then do some read-ahead.
798          * The code above us should have positioned us at the next likely
799          * offset.
800          *
801          * Only mess with buffers which we can immediately lock.  HAMMER
802          * will do device-readahead irrespective of what the blocks
803          * represent.
804          */
805         while (maxra > 0) {
806                 int burstbytes;
807                 int error;
808                 int nblks;
809
810                 rbp = getblk(vp, loffset, blksize,
811                              GETBLK_SZMATCH|GETBLK_NOWAIT, 0);
812                 if (rbp == NULL)
813                         goto no_read_ahead;
814                 if ((rbp->b_flags & B_CACHE)) {
815                         bqrelse(rbp);
816                         goto no_read_ahead;
817                 }
818
819                 /*
820                  * If BMAP is not supported or has an issue, we still do
821                  * (maxra) read-ahead, but we do not try to use rbuild.
822                  */
823                 error = VOP_BMAP(vp, loffset, &doffset,
824                                  &burstbytes, NULL, BUF_CMD_READ);
825                 if (error || doffset == NOOFFSET) {
826                         nblks = 1;
827                         doffset = NOOFFSET;
828                 } else {
829                         nblks = calc_rbuild_reqsize(maxra, maxrbuild);
830                         if (nblks > burstbytes / blksize)
831                                 nblks = burstbytes / blksize;
832                 }
833                 rbp->b_cmd = BUF_CMD_READ;
834
835                 if (nblks > 1) {
836                         rbp = cluster_rbuild(vp, filesize, loffset,
837                                              doffset, blksize,
838                                              nblks, rbp, &sr);
839                 } else {
840                         rbp->b_bio2.bio_offset = doffset;
841                         if (--sr == 0)
842                                 cluster_setram(rbp);
843                 }
844
845                 rbp->b_flags &= ~(B_ERROR|B_INVAL);
846
847                 if ((rbp->b_flags & B_CLUSTER) == 0)
848                         vfs_busy_pages(vp, rbp);
849                 BUF_KERNPROC(rbp);
850                 loffset += rbp->b_bufsize;
851                 maxra -= rbp->b_bufsize / blksize;
852                 vn_strategy(vp, &rbp->b_bio1);
853                 /* rbp invalid now */
854         }
855
856         /*
857          * If reqbp is non-NULL it had B_CACHE set and we issue the
858          * function callback synchronously.
859          *
860          * Note that we may start additional asynchronous I/O before doing
861          * the func() callback for the B_CACHE case
862          */
863 no_read_ahead:
864         if (reqbp)
865                 func(&reqbp->b_bio1);
866 }
867
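/*
 * Minimal sketch of a cluster_readcb() completion function conforming to
 * the contract documented above (myfs_read_callback and the consumption
 * step are hypothetical):
 *
 *	static void
 *	myfs_read_callback(struct bio *bio)
 *	{
 *		struct buf *bp = bio->bio_buf;
 *		void *arg = bio->bio_caller_info1.ptr;
 *
 *		if ((bio->bio_flags & BIO_DONE) == 0)
 *			bpdone(bp, 0);
 *		bio->bio_flags &= ~BIO_DONE;
 *		(consume bp->b_data / check B_ERROR, using arg)
 *		bqrelse(bp);
 *	}
 */
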
868 /*
869  * If blocks are contiguous on disk, use this to provide clustered
870  * read ahead.  We will read as many blocks as possible sequentially
871  * and then parcel them up into logical blocks in the buffer hash table.
872  *
873  * This function either returns a cluster buf or it returns fbp.  fbp is
874  * already expected to be set up as a synchronous or asynchronous request.
875  *
876  * If a cluster buf is returned it will always be async.
877  *
878  * (*srp) counts down original blocks to determine where B_RAM should be set.
879  * Set B_RAM when *srp drops to 0.  If (*srp) starts at 0, B_RAM will not be
880  * set on any buffer.  Make sure B_RAM is cleared on any other buffers to
881  * prevent degenerate read-aheads from being generated.
882  */
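/*
 * Example: cluster_readx()/cluster_readcb() start *srp at (maxra + 1) / 2,
 * so with maxra = 16 the B_RAM mark lands on roughly the 8th block queued
 * for read-ahead, i.e. about half-way through the window, re-arming the
 * next read-ahead before the caller consumes the remaining blocks.
 */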
883 static struct buf *
884 cluster_rbuild(struct vnode *vp, off_t filesize, off_t loffset, off_t doffset,
885                int blksize, int run, struct buf *fbp, int *srp)
886 {
887         struct buf *bp, *tbp;
888         off_t boffset;
889         int i, j;
890         int maxiosize = vmaxiosize(vp);
891
892         /*
893          * avoid a division
894          */
895         while (loffset + run * blksize > filesize) {
896                 --run;
897         }
898
899         tbp = fbp;
900         tbp->b_bio2.bio_offset = doffset;
901         if ((tbp->b_flags & B_MALLOC) ||
902             ((tbp->b_flags & B_VMIO) == 0) || (run <= 1)) {
903                 if (--*srp == 0)
904                         cluster_setram(tbp);
905                 else
906                         cluster_clrram(tbp);
907                 return tbp;
908         }
909
910         /*
911          * Get a pbuf, limit cluster I/O on a per-device basis.  If
912          * doing cluster I/O for a file, limit cluster I/O on a
913          * per-mount basis.
914          */
915         if (vp->v_type == VCHR || vp->v_type == VBLK)
916                 bp = trypbuf_kva(&vp->v_pbuf_count);
917         else
918                 bp = trypbuf_kva(&vp->v_mount->mnt_pbuf_count);
919
920         if (bp == NULL)
921                 return tbp;
922
923         /*
924          * We are synthesizing a buffer out of vm_page_t's, but
925          * if the block size is not page aligned then the starting
926          * address may not be either.  Inherit the b_data offset
927          * from the original buffer.
928          */
929         bp->b_vp = vp;
930         bp->b_data = (char *)((vm_offset_t)bp->b_data |
931             ((vm_offset_t)tbp->b_data & PAGE_MASK));
932         bp->b_flags |= B_CLUSTER | B_VMIO;
933         bp->b_cmd = BUF_CMD_READ;
934         bp->b_bio1.bio_done = cluster_callback;         /* default to async */
935         bp->b_bio1.bio_caller_info1.cluster_head = NULL;
936         bp->b_bio1.bio_caller_info2.cluster_tail = NULL;
937         bp->b_loffset = loffset;
938         bp->b_bio2.bio_offset = doffset;
939         KASSERT(bp->b_loffset != NOOFFSET,
940                 ("cluster_rbuild: no buffer offset"));
941
942         bp->b_bcount = 0;
943         bp->b_bufsize = 0;
944         bp->b_xio.xio_npages = 0;
945
946         for (boffset = doffset, i = 0; i < run; ++i, boffset += blksize) {
947                 if (i) {
948                         if ((bp->b_xio.xio_npages * PAGE_SIZE) +
949                             round_page(blksize) > maxiosize) {
950                                 break;
951                         }
952
953                         /*
954                          * Shortcut some checks and try to avoid buffers that
955                          * would block in the lock.  The same checks have to
956                          * be made again after we officially get the buffer.
957                          */
958                         tbp = getblk(vp, loffset + i * blksize, blksize,
959                                      GETBLK_SZMATCH|GETBLK_NOWAIT, 0);
960                         if (tbp == NULL)
961                                 break;
962                         for (j = 0; j < tbp->b_xio.xio_npages; j++) {
963                                 if (tbp->b_xio.xio_pages[j]->valid)
964                                         break;
965                         }
966                         if (j != tbp->b_xio.xio_npages) {
967                                 bqrelse(tbp);
968                                 break;
969                         }
970
971                         /*
972                          * Stop scanning if the buffer is fully valid
973                          * (marked B_CACHE), or locked (may be doing a
974                          * background write), or if the buffer is not
975                          * VMIO backed.  The clustering code can only deal
976                          * with VMIO-backed buffers.
977                          */
978                         if ((tbp->b_flags & (B_CACHE|B_LOCKED)) ||
979                             (tbp->b_flags & B_VMIO) == 0 ||
980                             (LIST_FIRST(&tbp->b_dep) != NULL &&
981                              buf_checkread(tbp))
982                         ) {
983                                 bqrelse(tbp);
984                                 break;
985                         }
986
987                         /*
988                          * The buffer must be completely invalid in order to
989                          * take part in the cluster.  If it is partially valid
990                          * then we stop.
991                          */
992                         for (j = 0; j < tbp->b_xio.xio_npages; j++) {
993                                 if (tbp->b_xio.xio_pages[j]->valid)
994                                         break;
995                         }
996                         if (j != tbp->b_xio.xio_npages) {
997                                 bqrelse(tbp);
998                                 break;
999                         }
1000
1001                         /*
1002                          * Depress the priority of buffers not explicitly
1003                          * requested.
1004                          */
1005                         /* tbp->b_flags |= B_AGE; */
1006
1007                         /*
1008                          * Set the block number if it isn't set, otherwise
1009                          * if it is make sure it matches the block number we
1010                          * expect.
1011                          */
1012                         if (tbp->b_bio2.bio_offset == NOOFFSET) {
1013                                 tbp->b_bio2.bio_offset = boffset;
1014                         } else if (tbp->b_bio2.bio_offset != boffset) {
1015                                 brelse(tbp);
1016                                 break;
1017                         }
1018                 }
1019
1020                 /*
1021                  * Set B_RAM if (*srp) is 1.  B_RAM is only set on one buffer
1022                  * in the cluster, including potentially the first buffer
1023                  * once we start streaming the read-aheads.
1024                  */
1025                 if (--*srp == 0)
1026                         cluster_setram(tbp);
1027                 else
1028                         cluster_clrram(tbp);
1029
1030                 /*
1031                  * The passed-in tbp (i == 0) will already be set up for
1032                  * async or sync operation.  All other tbp's acquired in
1033                  * our loop are set up for async operation.
1034                  */
1035                 tbp->b_cmd = BUF_CMD_READ;
1036                 BUF_KERNPROC(tbp);
1037                 cluster_append(&bp->b_bio1, tbp);
1038                 for (j = 0; j < tbp->b_xio.xio_npages; ++j) {
1039                         vm_page_t m;
1040
1041                         m = tbp->b_xio.xio_pages[j];
1042                         vm_page_busy_wait(m, FALSE, "clurpg");
1043                         vm_page_io_start(m);
1044                         vm_page_wakeup(m);
1045                         vm_object_pip_add(m->object, 1);
1046                         if ((bp->b_xio.xio_npages == 0) ||
1047                                 (bp->b_xio.xio_pages[bp->b_xio.xio_npages-1] != m)) {
1048                                 bp->b_xio.xio_pages[bp->b_xio.xio_npages] = m;
1049                                 bp->b_xio.xio_npages++;
1050                         }
1051                         if ((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) {
1052                                 tbp->b_xio.xio_pages[j] = bogus_page;
1053                                 tbp->b_flags |= B_HASBOGUS;
1054                         }
1055                 }
1056                 /*
1057                  * XXX shouldn't this be += size for both, like in 
1058                  * cluster_wbuild()?
1059                  *
1060                  * Don't inherit tbp->b_bufsize as it may be larger due to
1061                  * a non-page-aligned size.  Instead just aggregate using
1062                  * blksize.
1063                  */
1064                 if (tbp->b_bcount != blksize)
1065                     kprintf("warning: tbp->b_bcount wrong %d vs %d\n", tbp->b_bcount, blksize);
1066                 if (tbp->b_bufsize != blksize)
1067                     kprintf("warning: tbp->b_bufsize wrong %d vs %d\n", tbp->b_bufsize, blksize);
1068                 bp->b_bcount += blksize;
1069                 bp->b_bufsize += blksize;
1070         }
1071
1072         /*
1073          * Fully valid pages in the cluster are already good and do not need
1074          * to be re-read from disk.  Replace the page with bogus_page
1075          */
1076         for (j = 0; j < bp->b_xio.xio_npages; j++) {
1077                 if ((bp->b_xio.xio_pages[j]->valid & VM_PAGE_BITS_ALL) ==
1078                     VM_PAGE_BITS_ALL) {
1079                         bp->b_xio.xio_pages[j] = bogus_page;
1080                         bp->b_flags |= B_HASBOGUS;
1081                 }
1082         }
1083         if (bp->b_bufsize > bp->b_kvasize) {
1084                 panic("cluster_rbuild: b_bufsize(%d) > b_kvasize(%d)",
1085                     bp->b_bufsize, bp->b_kvasize);
1086         }
1087         pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
1088                 (vm_page_t *)bp->b_xio.xio_pages, bp->b_xio.xio_npages);
1089         BUF_KERNPROC(bp);
1090         return (bp);
1091 }
1092
1093 /*
1094  * Cleanup after a clustered read or write.
1095  * This is complicated by the fact that any of the buffers might have
1096  * extra memory (if there were no empty buffer headers at allocbuf time)
1097  * that we will need to shift around.
1098  *
1099  * The returned bio is &bp->b_bio1
1100  */
1101 static void
1102 cluster_callback(struct bio *bio)
1103 {
1104         struct buf *bp = bio->bio_buf;
1105         struct buf *tbp;
1106         struct vnode *vp;
1107         int error = 0;
1108
1109         /*
1110          * Must propagate errors to all the components.  A short read (EOF)
1111          * is a critical error.
1112          */
1113         if (bp->b_flags & B_ERROR) {
1114                 error = bp->b_error;
1115         } else if (bp->b_bcount != bp->b_bufsize) {
1116                 panic("cluster_callback: unexpected EOF on cluster %p!", bio);
1117         }
1118
1119         pmap_qremove(trunc_page((vm_offset_t) bp->b_data),
1120                      bp->b_xio.xio_npages);
1121         /*
1122          * Move memory from the large cluster buffer into the component
1123          * buffers and mark IO as done on these.  Since the memory map
1124          * is the same, no actual copying is required.
1125          */
1126         while ((tbp = bio->bio_caller_info1.cluster_head) != NULL) {
1127                 bio->bio_caller_info1.cluster_head = tbp->b_cluster_next;
1128                 if (error) {
1129                         tbp->b_flags |= B_ERROR | B_IOISSUED;
1130                         tbp->b_error = error;
1131                 } else {
1132                         tbp->b_dirtyoff = tbp->b_dirtyend = 0;
1133                         tbp->b_flags &= ~(B_ERROR|B_INVAL);
1134                         tbp->b_flags |= B_IOISSUED;
1135                         /*
1136                          * XXX the bdwrite()/bqrelse() issued during
1137                          * cluster building clears B_RELBUF (see bqrelse()
1138                          * comment).  If direct I/O was specified, we have
1139                          * to restore it here to allow the buffer and VM
1140                          * to be freed.
1141                          */
1142                         if (tbp->b_flags & B_DIRECT)
1143                                 tbp->b_flags |= B_RELBUF;
1144
1145                         /*
1146                          * XXX I think biodone() below will do this, but do
1147                          *     it here anyway for consistency.
1148                          */
1149                         if (tbp->b_cmd == BUF_CMD_WRITE)
1150                                 bundirty(tbp);
1151                 }
1152                 biodone(&tbp->b_bio1);
1153         }
1154         vp = bp->b_vp;
1155         bp->b_vp = NULL;
1156         if (vp->v_type == VCHR || vp->v_type == VBLK)
1157                 relpbuf(bp, &vp->v_pbuf_count);
1158         else
1159                 relpbuf(bp, &vp->v_mount->mnt_pbuf_count);
1160 }
1161
1162 /*
1163  * Implement modified write build for cluster.
1164  *
1165  *      write_behind = 0        write behind disabled
1166  *      write_behind = 1        write behind normal (default)
1167  *      write_behind = 2        write behind backed-off
1168  *
1169  * In addition, write_behind is only activated for files that have
1170  * grown past a certain size (default 10MB).  Otherwise temporary files
1171  * wind up generating a lot of unnecessary disk I/O.
1172  */
1173 static __inline int
1174 cluster_wbuild_wb(struct vnode *vp, int blksize, off_t start_loffset, int len)
1175 {
1176         int r = 0;
1177
1178         switch(write_behind) {
1179         case 2:
1180                 if (start_loffset < len)
1181                         break;
1182                 start_loffset -= len;
1183                 /* fall through */
1184         case 1:
1185                 if (vp->v_filesize >= write_behind_minfilesize) {
1186                         r = cluster_wbuild(vp, NULL, blksize,
1187                                            start_loffset, len);
1188                 }
1189                 /* fall through */
1190         default:
1191                 /* fall through */
1192                 break;
1193         }
1194         return(r);
1195 }
1196
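/*
 * Example with the defaults (vfs.write_behind = 1,
 * vfs.write_behind_minfilesize = 10MB): completing a cluster on a 4MB
 * temporary file leaves the buffers for the buf/update daemon, while the
 * same cluster on a 100MB file is pushed out immediately via
 * cluster_wbuild().  Setting vfs.write_behind to 0 disables the push
 * entirely, and 2 flushes the window one cluster length behind the
 * current one.
 */
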
1197 /*
1198  * Do clustered write for FFS.
1199  *
1200  * Three cases:
1201  *      1. Write is not sequential (write asynchronously)
1202  *      Write is sequential:
1203  *      2.      beginning of cluster - begin cluster
1204  *      3.      middle of a cluster - add to cluster
1205  *      4.      end of a cluster - asynchronously write cluster
1206  *
1207  * WARNING! vnode fields are not locked and must ONLY be used heuristically.
1208  */
1209 void
1210 cluster_write(struct buf *bp, off_t filesize, int blksize, int seqcount)
1211 {
1212         struct vnode *vp;
1213         off_t loffset;
1214         int maxclen, cursize;
1215         int async;
1216         cluster_cache_t dummy;
1217         cluster_cache_t *cc;
1218
1219         vp = bp->b_vp;
1220         if (vp->v_type == VREG)
1221                 async = vp->v_mount->mnt_flag & MNT_ASYNC;
1222         else
1223                 async = 0;
1224         loffset = bp->b_loffset;
1225         KASSERT(bp->b_loffset != NOOFFSET, 
1226                 ("cluster_write: no buffer offset"));
1227
1228         cc = cluster_getcache(&dummy, vp, loffset);
1229
1230         /*
1231          * Initialize vnode to beginning of file.
1232          */
1233         if (loffset == 0)
1234                 cc->v_lasta = cc->v_clen = cc->v_cstart = cc->v_lastw = 0;
1235
1236         if (cc->v_clen == 0 || loffset != cc->v_lastw + blksize ||
1237             (bp->b_bio2.bio_offset != NOOFFSET &&
1238              (bp->b_bio2.bio_offset != cc->v_lasta + blksize))) {
1239                 /*
1240                  * Next block is not logically sequential, or, if physical
1241                  * block offsets are available, not physically sequential.
1242                  *
1243                  * If physical block offsets are not available we only
1244                  * get here if we weren't logically sequential.
1245                  */
1246                 maxclen = vmaxiosize(vp);
1247                 if (cc->v_clen != 0) {
1248                         /*
1249                          * Next block is not sequential.
1250                          *
1251                          * If we are not writing at end of file, the process
1252                          * seeked to another point in the file since its last
1253                          * write, or we have reached our maximum cluster size,
1254                          * then push the previous cluster. Otherwise try
1255                          * reallocating to make it sequential.
1256                          *
1257                          * Change to algorithm: only push previous cluster if
1258                          * it was sequential from the point of view of the
1259                          * seqcount heuristic, otherwise leave the buffer 
1260                          * intact so we can potentially optimize the I/O
1261                          * later on in the buf_daemon or update daemon
1262                          * flush.
1263                          */
1264                         cursize = cc->v_lastw - cc->v_cstart + blksize;
1265                         if (bp->b_loffset + blksize < filesize ||
1266                             loffset != cc->v_lastw + blksize ||
1267                             cc->v_clen <= cursize) {
1268                                 if (!async && seqcount > 0) {
1269                                         cluster_wbuild_wb(vp, blksize,
1270                                                 cc->v_cstart, cursize);
1271                                 }
1272                         } else {
1273                                 struct buf **bpp, **endbp;
1274                                 struct cluster_save *buflist;
1275
1276                                 buflist = cluster_collectbufs(cc, vp,
1277                                                               bp, blksize);
1278                                 endbp = &buflist->bs_children
1279                                     [buflist->bs_nchildren - 1];
1280                                 if (VOP_REALLOCBLKS(vp, buflist)) {
1281                                         /*
1282                                          * Failed, push the previous cluster
1283                                          * if *really* writing sequentially
1284                                          * in the logical file (seqcount > 1),
1285                                          * otherwise delay it in the hopes that
1286                                          * the low level disk driver can
1287                                          * optimize the write ordering.
1288                                          *
1289                                          * NOTE: We do not brelse the last
1290                                          *       element which is bp, and we
1291                                          *       do not return here.
1292                                          */
1293                                         for (bpp = buflist->bs_children;
1294                                              bpp < endbp; bpp++)
1295                                                 brelse(*bpp);
1296                                         kfree(buflist, M_SEGMENT);
1297                                         if (seqcount > 1) {
1298                                                 cluster_wbuild_wb(vp, 
1299                                                     blksize, cc->v_cstart,
1300                                                     cursize);
1301                                         }
1302                                 } else {
1303                                         /*
1304                                          * Succeeded, keep building cluster.
1305                                          */
1306                                         for (bpp = buflist->bs_children;
1307                                              bpp <= endbp; bpp++)
1308                                                 bdwrite(*bpp);
1309                                         kfree(buflist, M_SEGMENT);
1310                                         cc->v_lastw = loffset;
1311                                         cc->v_lasta = bp->b_bio2.bio_offset;
1312                                         cluster_putcache(cc);
1313                                         return;
1314                                 }
1315                         }
1316                 }
1317
1318                 /*
1319                  * Consider beginning a cluster. If at end of file, make
1320                  * cluster as large as possible, otherwise find size of
1321                  * existing cluster.
1322                  */
1323                 if ((vp->v_type == VREG) &&
1324                     bp->b_loffset + blksize < filesize &&
1325                     (bp->b_bio2.bio_offset == NOOFFSET) &&
1326                     (VOP_BMAP(vp, loffset, &bp->b_bio2.bio_offset, &maxclen, NULL, BUF_CMD_WRITE) ||
1327                      bp->b_bio2.bio_offset == NOOFFSET)) {
1328                         bdwrite(bp);
1329                         cc->v_clen = 0;
1330                         cc->v_lasta = bp->b_bio2.bio_offset;
1331                         cc->v_cstart = loffset + blksize;
1332                         cc->v_lastw = loffset;
1333                         cluster_putcache(cc);
1334                         return;
1335                 }
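                     /*
                      * v_clen records how many more bytes can be appended
                      * to this cluster beyond the current buffer (maxclen
                      * is the usable contiguous run); zero means the I/O
                      * is not contiguous.
                      */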
1336                 if (maxclen > blksize)
1337                         cc->v_clen = maxclen - blksize;
1338                 else
1339                         cc->v_clen = 0;
1340                 if (!async && cc->v_clen == 0) { /* I/O not contiguous */
1341                         cc->v_cstart = loffset + blksize;
1342                         bdwrite(bp);
1343                 } else {        /* Wait for rest of cluster */
1344                         cc->v_cstart = loffset;
1345                         bdwrite(bp);
1346                 }
1347         } else if (loffset == cc->v_cstart + cc->v_clen) {
1348                 /*
1349                  * At end of cluster, write it out if seqcount tells us we
1350                  * are operating sequentially, otherwise let the buf or
1351                  * update daemon handle it.
1352                  */
1353                 bdwrite(bp);
1354                 if (seqcount > 1)
1355                         cluster_wbuild_wb(vp, blksize, cc->v_cstart,
1356                                           cc->v_clen + blksize);
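                     /*
                      * Either way, begin tracking a new (empty) cluster
                      * just past the buffer we appended.
                      */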
1357                 cc->v_clen = 0;
1358                 cc->v_cstart = loffset + blksize;
1359         } else if (vm_page_count_severe() &&
1360                    bp->b_loffset + blksize < filesize) {
1361                 /*
1362                  * We are low on memory, get it going NOW.  However, do not
1363                  * try to push out a partial block at the end of the file
1364                  * as this could lead to extremely non-optimal write activity.
1365                  */
1366                 bawrite(bp);
1367         } else {
1368                 /*
1369                  * In the middle of a cluster, so just delay the I/O for now.
1370                  */
1371                 bdwrite(bp);
1372         }
1373         cc->v_lastw = loffset;
1374         cc->v_lasta = bp->b_bio2.bio_offset;
1375         cluster_putcache(cc);
1376 }
1377
1378 /*
1379  * This is the clustered version of bawrite().  It works similarly to
1380  * cluster_write() except I/O on the buffer is guaranteed to occur.
1381  */
1382 int
1383 cluster_awrite(struct buf *bp)
1384 {
1385         int total;
1386
1387         /*
1388          * Don't bother if it isn't clusterable.
1389          */
1390         if ((bp->b_flags & B_CLUSTEROK) == 0 ||
1391             bp->b_vp == NULL ||
1392             (bp->b_vp->v_flag & VOBJBUF) == 0) {
1393                 total = bp->b_bufsize;
1394                 bawrite(bp);
1395                 return (total);
1396         }
1397
1398         total = cluster_wbuild(bp->b_vp, &bp, bp->b_bufsize,
1399                                bp->b_loffset, vmaxiosize(bp->b_vp));
1400
1401         /*
1402          * If bp is still non-NULL then cluster_wbuild() did not initiate
1403          * I/O on it and we must do so here to provide the API guarantee.
1404          */
1405         if (bp)
1406                 bawrite(bp);
1407
1408         return total;
1409 }
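
     /*
      * Illustrative sketch (not part of this file): how a filesystem
      * flush path might prefer a clustered write when the buffer allows
      * it.  The helper name example_flush_buf is hypothetical.
      *
      *      static void
      *      example_flush_buf(struct buf *bp)
      *      {
      *              if (bp->b_flags & B_CLUSTEROK)
      *                      cluster_awrite(bp);  // may cluster; always initiates I/O
      *              else
      *                      bawrite(bp);         // plain asynchronous write
      *      }
      */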
1410
1411 /*
1412  * This is an awful lot like cluster_rbuild...wish they could be combined.
1413  * Write clustering begins at start_loffset and continues for at most
1414  * bytes worth of compatible delayed-write buffers of size blksize.
1415  * Buffers which cannot be locked without blocking are not clustered.
1416  *
1417  * cluster_wbuild() normally does not guarantee anything.  If bpp is
1418  * non-NULL and cluster_wbuild() is able to incorporate it into the
1419  * I/O it will set *bpp to NULL, otherwise it will leave it alone and
1420  * the caller must dispose of *bpp.
1421  */
1422 static int
1423 cluster_wbuild(struct vnode *vp, struct buf **bpp,
1424                int blksize, off_t start_loffset, int bytes)
1425 {
1426         struct buf *bp, *tbp;
1427         int i, j;
1428         int totalwritten = 0;
1429         int must_initiate;
1430         int maxiosize = vmaxiosize(vp);
1431
1432         while (bytes > 0) {
1433                 /*
1434                  * If the buffer matches the passed locked & removed buffer
1435                  * we use the passed buffer (which might not be B_DELWRI).
1436                  *
1437                  * Otherwise locate the buffer and determine if it is
1438                  * compatible.
1439                  */
1440                 if (bpp && (*bpp)->b_loffset == start_loffset) {
1441                         tbp = *bpp;
1442                         *bpp = NULL;
1443                         bpp = NULL;
1444                 } else {
1445                         tbp = findblk(vp, start_loffset, FINDBLK_NBLOCK);
1446                         if (tbp == NULL ||
1447                             (tbp->b_flags & (B_LOCKED | B_INVAL | B_DELWRI)) !=
1448                              B_DELWRI ||
1449                             (LIST_FIRST(&tbp->b_dep) && buf_checkwrite(tbp))) {
1450                                 if (tbp)
1451                                         BUF_UNLOCK(tbp);
1452                                 start_loffset += blksize;
1453                                 bytes -= blksize;
1454                                 continue;
1455                         }
1456                         bremfree(tbp);
1457                 }
1458                 KKASSERT(tbp->b_cmd == BUF_CMD_DONE);
1459
1460                 /*
1461                  * Extra memory in the buffer, punt on this buffer.
1462                  * XXX we could handle this in most cases, but we would
1463                  * have to push the extra memory down to after our max
1464                  * possible cluster size and then potentially pull it back
1465                  * up if the cluster was terminated prematurely--too much
1466                  * hassle.
1467                  */
1468                 if (((tbp->b_flags & (B_CLUSTEROK|B_MALLOC)) != B_CLUSTEROK) ||
1469                     (tbp->b_bcount != tbp->b_bufsize) ||
1470                     (tbp->b_bcount != blksize) ||
1471                     (bytes == blksize)) {
1472                         totalwritten += tbp->b_bufsize;
1473                         bawrite(tbp);
1474                         start_loffset += blksize;
1475                         bytes -= blksize;
1476                         continue;
1477                 }
1478
1479                 /*
1480                  * Get a pbuf, limit cluster I/O on a per-device basis.  If
1481                  * doing cluster I/O for a file, limit cluster I/O on a
1482                  * per-mount basis.
1483                  *
1484                  * HAMMER and other filesystems may attempt to queue a massive
1485                  * amount of write I/O; using trypbuf() here easily results in a
1486                  * situation where the I/O stream becomes non-clustered.
1487                  */
1488                 if (vp->v_type == VCHR || vp->v_type == VBLK)
1489                         bp = getpbuf_kva(&vp->v_pbuf_count);
1490                 else
1491                         bp = getpbuf_kva(&vp->v_mount->mnt_pbuf_count);
1492
1493                 /*
1494                  * Set up the pbuf.  Track our append point with b_bcount
1495                  * and b_bufsize.  b_bufsize is not used by the device but
1496                  * our caller uses it to loop clusters and we use it to
1497                  * detect a premature EOF on the block device.
1498                  */
1499                 bp->b_bcount = 0;
1500                 bp->b_bufsize = 0;
1501                 bp->b_xio.xio_npages = 0;
1502                 bp->b_loffset = tbp->b_loffset;
1503                 bp->b_bio2.bio_offset = tbp->b_bio2.bio_offset;
1504                 bp->b_vp = vp;
1505
1506                 /*
1507                  * We are synthesizing a buffer out of vm_page_t's, but
1508                  * if the block size is not page aligned then the starting
1509                  * address may not be either.  Inherit the b_data offset
1510                  * from the original buffer.
1511                  */
1512                 bp->b_data = (char *)((vm_offset_t)bp->b_data |
1513                     ((vm_offset_t)tbp->b_data & PAGE_MASK));
1514                 bp->b_flags &= ~B_ERROR;
1515                 bp->b_flags |= B_CLUSTER | B_BNOCLIP |
1516                         (tbp->b_flags & (B_VMIO | B_NEEDCOMMIT));
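                     /*
                      * Initialize the cluster list.  cluster_append()
                      * chains the component buffers onto it and
                      * cluster_callback() walks the list when the
                      * composite write completes.
                      */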
1517                 bp->b_bio1.bio_caller_info1.cluster_head = NULL;
1518                 bp->b_bio1.bio_caller_info2.cluster_tail = NULL;
1519
1520                 /*
1521                  * From this location in the file, scan forward to see
1522                  * if there are buffers with adjacent data that need to
1523                  * be written as well.
1524                  *
1525                  * IO *must* be initiated on index 0 at this point
1526                  * (particularly when called from cluster_awrite()).
1527                  */
1528                 for (i = 0; i < bytes; (i += blksize), (start_loffset += blksize)) {
1529                         if (i == 0) {
1530                                 must_initiate = 1;
1531                         } else {
1532                                 /*
1533                                  * Not first buffer.
1534                                  */
1535                                 must_initiate = 0;
1536                                 tbp = findblk(vp, start_loffset,
1537                                               FINDBLK_NBLOCK);
1538                                 /*
1539                                  * Buffer not found or could not be locked
1540                                  * non-blocking.
1541                                  */
1542                                 if (tbp == NULL)
1543                                         break;
1544
1545                                 /*
1546                                  * If it IS in core, but has different
1547                                  * characteristics, then don't cluster
1548                                  * with it.
1549                                  */
1550                                 if ((tbp->b_flags & (B_VMIO | B_CLUSTEROK |
1551                                      B_INVAL | B_DELWRI | B_NEEDCOMMIT))
1552                                     != (B_DELWRI | B_CLUSTEROK |
1553                                      (bp->b_flags & (B_VMIO | B_NEEDCOMMIT))) ||
1554                                     (tbp->b_flags & B_LOCKED)
1555                                 ) {
1556                                         BUF_UNLOCK(tbp);
1557                                         break;
1558                                 }
1559
1560                                 /*
1561                                  * Check that the combined cluster
1562                                  * would make sense with regard to pages
1563                                  * and would not be too large.
1564                                  *
1565                                  * WARNING! buf_checkwrite() must be the last
1566                                  *          check made.  If it returns 0 then
1567                                  *          we must initiate the I/O.
1568                                  */
1569                                 if ((tbp->b_bcount != blksize) ||
1570                                   ((bp->b_bio2.bio_offset + i) !=
1571                                     tbp->b_bio2.bio_offset) ||
1572                                   ((tbp->b_xio.xio_npages + bp->b_xio.xio_npages) >
1573                                     (maxiosize / PAGE_SIZE)) ||
1574                                   (LIST_FIRST(&tbp->b_dep) &&
1575                                    buf_checkwrite(tbp))
1576                                 ) {
1577                                         BUF_UNLOCK(tbp);
1578                                         break;
1579                                 }
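                                     /*
                                      * If the buffer has a dependency,
                                      * buf_checkwrite() above returned 0,
                                      * so we are now committed to
                                      * initiating I/O on this buffer.
                                      */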
1580                                 if (LIST_FIRST(&tbp->b_dep))
1581                                         must_initiate = 1;
1582                                 /*
1583                                  * Ok, it's passed all the tests,
1584                                  * so remove it from the free list
1585                                  * and mark it busy. We will use it.
1586                                  */
1587                                 bremfree(tbp);
1588                                 KKASSERT(tbp->b_cmd == BUF_CMD_DONE);
1589                         }
1590
1591                         /*
1592                          * If the IO is via the VM then we do some
1593                          * special VM hackery (yuck).  Since the buffer's
1594                          * block size may not be page-aligned it is possible
1595                          * for a page to be shared between two buffers.  We
1596                          * have to get rid of the duplication when building
1597                          * the cluster.
1598                          */
1599                         if (tbp->b_flags & B_VMIO) {
1600                                 vm_page_t m;
1601
1602                                 /*
1603                                  * Try to avoid deadlocks with the VM system.
1604                                  * However, we cannot abort the I/O if
1605                                  * must_initiate is non-zero.
1606                                  */
1607                                 if (must_initiate == 0) {
1608                                         for (j = 0;
1609                                              j < tbp->b_xio.xio_npages;
1610                                              ++j) {
1611                                                 m = tbp->b_xio.xio_pages[j];
1612                                                 if (m->flags & PG_BUSY) {
1613                                                         bqrelse(tbp);
1614                                                         goto finishcluster;
1615                                                 }
1616                                         }
1617                                 }
1618                                         
1619                                 for (j = 0; j < tbp->b_xio.xio_npages; ++j) {
1620                                         m = tbp->b_xio.xio_pages[j];
1621                                         vm_page_busy_wait(m, FALSE, "clurpg");
1622                                         vm_page_io_start(m);
1623                                         vm_page_wakeup(m);
1624                                         vm_object_pip_add(m->object, 1);
1625                                         if ((bp->b_xio.xio_npages == 0) ||
1626                                           (bp->b_xio.xio_pages[bp->b_xio.xio_npages - 1] != m)) {
1627                                                 bp->b_xio.xio_pages[bp->b_xio.xio_npages] = m;
1628                                                 bp->b_xio.xio_npages++;
1629                                         }
1630                                 }
1631                         }
1632                         bp->b_bcount += blksize;
1633                         bp->b_bufsize += blksize;
1634
1635                         /*
1636                          * NOTE: see bwrite/bawrite code for why we no longer
1637                          *       undirty tbp here.
1638                          *
1639                          *       bundirty(tbp); REMOVED
1640                          */
1641                         tbp->b_flags &= ~B_ERROR;
1642                         tbp->b_cmd = BUF_CMD_WRITE;
1643                         BUF_KERNPROC(tbp);
1644                         cluster_append(&bp->b_bio1, tbp);
1645
1646                         /*
1647                          * Check for latent dependencies that must be handled.
1648                          */
1649                         if (LIST_FIRST(&tbp->b_dep) != NULL)
1650                                 buf_start(tbp);
1651                 }
1652         finishcluster:
1653                 pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
1654                             (vm_page_t *)bp->b_xio.xio_pages,
1655                             bp->b_xio.xio_npages);
1656                 if (bp->b_bufsize > bp->b_kvasize) {
1657                         panic("cluster_wbuild: b_bufsize(%d) "
1658                               "> b_kvasize(%d)\n",
1659                               bp->b_bufsize, bp->b_kvasize);
1660                 }
1661                 totalwritten += bp->b_bufsize;
1662                 bp->b_dirtyoff = 0;
1663                 bp->b_dirtyend = bp->b_bufsize;
1664                 bp->b_bio1.bio_done = cluster_callback;
1665                 bp->b_cmd = BUF_CMD_WRITE;
1666
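                     /*
                      * Flag the underlying pages busy for I/O, account for
                      * the in-flight buffer space, hand buffer lock
                      * ownership off for asynchronous completion, and
                      * dispatch the composite write.
                      */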
1667                 vfs_busy_pages(vp, bp);
1668                 bsetrunningbufspace(bp, bp->b_bufsize);
1669                 BUF_KERNPROC(bp);
1670                 vn_strategy(vp, &bp->b_bio1);
1671
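                     /*
                      * start_loffset was already advanced inside the scan
                      * loop; charge the bytes consumed by this cluster.
                      */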
1672                 bytes -= i;
1673         }
1674         return totalwritten;
1675 }
1676
1677 /*
1678  * Collect all the buffers in a cluster, plus the one additional buffer
1679  * passed in.
1680  *
1681  * Only pre-existing buffers whose block size matches blksize are collected
1682  * (this is primarily because HAMMER1 uses varying block sizes and we don't
1683  * want to override its choices).
1684  *
1685  * This code will not try to collect buffers that it cannot lock; otherwise
1686  * it might deadlock against SMP-friendly filesystems.
1687  */
1688 static struct cluster_save *
1689 cluster_collectbufs(cluster_cache_t *cc, struct vnode *vp,
1690                     struct buf *last_bp, int blksize)
1691 {
1692         struct cluster_save *buflist;
1693         struct buf *bp;
1694         off_t loffset;
1695         int i, len;
1696         int j;
1697         int k;
1698
1699         len = (int)(cc->v_lastw - cc->v_cstart + blksize) / blksize;
1700         KKASSERT(len > 0);
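             /*
              * A single allocation holds the cluster_save header followed
              * by the child pointer array (len existing buffers plus
              * last_bp).
              */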
1701         buflist = kmalloc(sizeof(struct buf *) * (len + 1) + sizeof(*buflist),
1702                          M_SEGMENT, M_WAITOK);
1703         buflist->bs_nchildren = 0;
1704         buflist->bs_children = (struct buf **) (buflist + 1);
1705         for (loffset = cc->v_cstart, i = 0, j = 0;
1706              i < len;
1707              (loffset += blksize), i++) {
1708                 bp = getcacheblk(vp, loffset,
1709                                  last_bp->b_bcount, GETBLK_SZMATCH |
1710                                                     GETBLK_NOWAIT);
1711                 buflist->bs_children[i] = bp;
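                     /*
                      * A missing buffer leaves a gap; j remembers the index
                      * just past the last gap so the cleanup pass below can
                      * discard everything up to and including it, keeping
                      * only the contiguous tail.  Otherwise make sure the
                      * buffer has a translated disk offset.
                      */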
1712                 if (bp == NULL) {
1713                         j = i + 1;
1714                 } else if (bp->b_bio2.bio_offset == NOOFFSET) {
1715                         VOP_BMAP(bp->b_vp, bp->b_loffset,
1716                                  &bp->b_bio2.bio_offset,
1717                                  NULL, NULL, BUF_CMD_WRITE);
1718                 }
1719         }
1720
1721         /*
1722          * Get rid of gaps
1723          */
1724         for (k = 0; k < j; ++k) {
1725                 if (buflist->bs_children[k]) {
1726                         bqrelse(buflist->bs_children[k]);
1727                         buflist->bs_children[k] = NULL;
1728                 }
1729         }
1730         if (j != 0) {
1731                 if (j != i) {
1732                         bcopy(buflist->bs_children + j,
1733                               buflist->bs_children + 0,
1734                               sizeof(buflist->bs_children[0]) * (i - j));
1735                 }
1736                 i -= j;
1737         }
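             /*
              * Finally, append the caller's buffer as the last child and
              * make sure it also has a translated disk offset.
              */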
1738         buflist->bs_children[i] = bp = last_bp;
1739         if (bp->b_bio2.bio_offset == NOOFFSET) {
1740                 VOP_BMAP(bp->b_vp, bp->b_loffset, &bp->b_bio2.bio_offset,
1741                          NULL, NULL, BUF_CMD_WRITE);
1742         }
1743         buflist->bs_nchildren = i + 1;
1744         return (buflist);
1745 }
1746
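     /*
      * Append tbp to the cluster list anchored in the composite buffer's
      * bio.  cluster_callback() later walks this singly-linked list
      * (b_cluster_next) to finish up the component buffers when the
      * composite I/O completes.
      */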
1747 void
1748 cluster_append(struct bio *bio, struct buf *tbp)
1749 {
1750         tbp->b_cluster_next = NULL;
1751         if (bio->bio_caller_info1.cluster_head == NULL) {
1752                 bio->bio_caller_info1.cluster_head = tbp;
1753                 bio->bio_caller_info2.cluster_tail = tbp;
1754         } else {
1755                 bio->bio_caller_info2.cluster_tail->b_cluster_next = tbp;
1756                 bio->bio_caller_info2.cluster_tail = tbp;
1757         }
1758 }
1759
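     /*
      * Helpers to set or clear the read-ahead mark: B_RAM on the buffer,
      * mirrored as PG_RAM on its first underlying VM page.
      */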
1760 static
1761 void
1762 cluster_setram(struct buf *bp)
1763 {
1764         bp->b_flags |= B_RAM;
1765         if (bp->b_xio.xio_npages)
1766                 vm_page_flag_set(bp->b_xio.xio_pages[0], PG_RAM);
1767 }
1768
1769 static
1770 void
1771 cluster_clrram(struct buf *bp)
1772 {
1773         bp->b_flags &= ~B_RAM;
1774         if (bp->b_xio.xio_npages)
1775                 vm_page_flag_clear(bp->b_xio.xio_pages[0], PG_RAM);
1776 }