kernel - Add callout debugging
[dragonfly.git] / sys / kern / vfs_cluster.c
1 /*-
2  * Copyright (c) 1993
3  *      The Regents of the University of California.  All rights reserved.
4  * Modifications/enhancements:
5  *      Copyright (c) 1995 John S. Dyson.  All rights reserved.
6  *      Copyright (c) 2012-2013 Matthew Dillon.  All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. Neither the name of the University nor the names of its contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  */
32
33 #include "opt_debug_cluster.h"
34
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/kernel.h>
38 #include <sys/proc.h>
39 #include <sys/buf.h>
40 #include <sys/vnode.h>
41 #include <sys/malloc.h>
42 #include <sys/mount.h>
43 #include <sys/resourcevar.h>
44 #include <sys/vmmeter.h>
45 #include <vm/vm.h>
46 #include <vm/vm_object.h>
47 #include <vm/vm_page.h>
48 #include <sys/sysctl.h>
49
50 #include <sys/buf2.h>
51 #include <vm/vm_page2.h>
52
53 #include <machine/limits.h>
54
55 /*
56  * Cluster tracking cache - replaces the original vnode v_* fields which had
57  * limited utility and were not MP safe.
58  *
59  * The cluster tracking cache is a simple 4-way set-associative non-chained
60  * cache.  It is capable of tracking up to four zones separated by 1MB or
61  * more per vnode.
62  *
63  * NOTE: We want this structure to be cache-line friendly so the iterator
64  *       is embedded rather than in a separate array.
65  *
66  * NOTE: A cluster cache entry can become stale when a vnode is recycled.
67  *       For now we treat the values as heuristical but also self-consistent.
68  *       i.e. the values cannot be completely random and cannot be SMP unsafe
69  *       or the cluster code might end-up clustering non-contiguous buffers
70  *       at the wrong offsets.
71  */
72 struct cluster_cache {
73         struct vnode *vp;
74         u_int   locked;
75         off_t   v_lastw;                /* last write (end) (write cluster) */
76         off_t   v_cstart;               /* start block (beg) of cluster */
77         off_t   v_lasta;                /* last allocation (end) */
78         u_int   v_clen;                 /* length of current cluster */
79         u_int   iterator;
80 } __cachealign;
81
82 typedef struct cluster_cache cluster_cache_t;
83
84 #define CLUSTER_CACHE_SIZE      512
85 #define CLUSTER_CACHE_MASK      (CLUSTER_CACHE_SIZE - 1)
86
87 #define CLUSTER_ZONE            ((off_t)(1024 * 1024))
88
89 cluster_cache_t cluster_array[CLUSTER_CACHE_SIZE];
90
91 #if defined(CLUSTERDEBUG)
92 #include <sys/sysctl.h>
93 static int      rcluster= 0;
94 SYSCTL_INT(_debug, OID_AUTO, rcluster, CTLFLAG_RW, &rcluster, 0, "");
95 #endif
96
97 static MALLOC_DEFINE(M_SEGMENT, "cluster_save", "cluster_save buffer");
98
99 static struct cluster_save *
100         cluster_collectbufs (cluster_cache_t *cc, struct vnode *vp,
101                                 struct buf *last_bp, int blksize);
102 static struct buf *
103         cluster_rbuild (struct vnode *vp, off_t filesize, off_t loffset,
104                             off_t doffset, int blksize, int run, 
105                             struct buf *fbp, int *srp);
106 static void cluster_callback (struct bio *);
107 static void cluster_setram (struct buf *);
108 static void cluster_clrram (struct buf *);
109 static int cluster_wbuild(struct vnode *vp, struct buf **bpp, int blksize,
110                             off_t start_loffset, int bytes);
111
112 static int write_behind = 1;
113 SYSCTL_INT(_vfs, OID_AUTO, write_behind, CTLFLAG_RW, &write_behind, 0,
114     "Cluster write-behind setting");
115 static quad_t write_behind_minfilesize = 10 * 1024 * 1024;
116 SYSCTL_QUAD(_vfs, OID_AUTO, write_behind_minfilesize, CTLFLAG_RW,
117     &write_behind_minfilesize, 0, "Cluster write-behind setting");
118 static int max_readahead = 2 * 1024 * 1024;
119 SYSCTL_INT(_vfs, OID_AUTO, max_readahead, CTLFLAG_RW, &max_readahead, 0,
120     "Limit in bytes for desired cluster read-ahead");
121
122 extern vm_page_t        bogus_page;
123
124 /*
125  * nblks is our cluster_rbuild request size.  The approximate number of
126  * physical read-ahead requests is maxra / nblks.  The physical request
127  * size is limited by the device (maxrbuild).  We also do not want to make
128  * the request size too big or it will mess up the B_RAM streaming.
129  */
130 static __inline
131 int
132 calc_rbuild_reqsize(int maxra, int maxrbuild)
133 {
134         int nblks;
135
136         if ((nblks = maxra / 4) > maxrbuild)
137                 nblks = maxrbuild;
138         if (nblks < 1)
139                 nblks = maxra;
140         return nblks;
141 }
142
143 /*
144  * Acquire/release cluster cache (can return dummy entry)
145  */
146 static
147 cluster_cache_t *
148 cluster_getcache(cluster_cache_t *dummy, struct vnode *vp, off_t loffset)
149 {
150         cluster_cache_t *cc;
151         size_t hv;
152         int i;
153         int xact;
154
155         hv = (size_t)(intptr_t)vp ^ (size_t)(intptr_t)vp / sizeof(*vp);
156         hv &= CLUSTER_CACHE_MASK & ~3;
157         cc = &cluster_array[hv];
158
159         xact = -1;
160         for (i = 0; i < 4; ++i) {
161                 if (cc[i].vp != vp)
162                         continue;
163                 if (((cc[i].v_cstart ^ loffset) & ~(CLUSTER_ZONE - 1)) == 0) {
164                         xact = i;
165                         break;
166                 }
167         }
168         if (xact >= 0 && atomic_swap_int(&cc[xact].locked, 1) == 0) {
169                 if (cc[xact].vp == vp &&
170                     ((cc[i].v_cstart ^ loffset) & ~(CLUSTER_ZONE - 1)) == 0) {
171                         return(&cc[xact]);
172                 }
173                 atomic_swap_int(&cc[xact].locked, 0);
174         }
175
176         /*
177          * New entry.  If we can't acquire the cache line then use the
178          * passed-in dummy element and reset all fields.
179          *
180          * When we are able to acquire the cache line we only clear the
181          * fields if the vp does not match.  This allows us to multi-zone
182          * a vp and for excessive zones / partial clusters to be retired.
183          */
184         i = cc->iterator++ & 3;
185         cc += i;
186         if (atomic_swap_int(&cc->locked, 1) != 0) {
187                 cc = dummy;
188                 cc->locked = 1;
189                 cc->vp = NULL;
190         }
191         if (cc->vp != vp) {
192                 cc->vp = vp;
193                 cc->v_lasta = 0;
194                 cc->v_clen = 0;
195                 cc->v_cstart = 0;
196                 cc->v_lastw = 0;
197         }
198         return(cc);
199 }
200
201 static
202 void
203 cluster_putcache(cluster_cache_t *cc)
204 {
205         atomic_swap_int(&cc->locked, 0);
206 }
207
208 /*
209  * This replaces bread(), providing a synchronous read of the requested
210  * buffer plus asynchronous read-ahead within the specified bounds.
211  *
212  * The caller may pre-populate *bpp if it already has the requested buffer
213  * in-hand, else must set *bpp to NULL.  Note that the cluster_read() inline
214  * sets *bpp to NULL and then calls cluster_readx() for compatibility.
215  *
216  * filesize     - read-ahead @ blksize will not cross this boundary
217  * loffset      - loffset for returned *bpp
218  * blksize      - blocksize for returned *bpp and read-ahead bps
219  * minreq       - minimum (not a hard minimum) in bytes, typically reflects
220  *                a higher level uio resid.
221  * maxreq       - maximum (sequential heuristic) in bytes (highet typ ~2MB)
222  * bpp          - return buffer (*bpp) for (loffset,blksize)
223  */
224 int
225 cluster_readx(struct vnode *vp, off_t filesize, off_t loffset, int blksize,
226               int bflags, size_t minreq, size_t maxreq,
227               struct buf **bpp)
228 {
229         struct buf *bp, *rbp, *reqbp;
230         off_t origoffset;
231         off_t doffset;
232         int error;
233         int i;
234         int maxra;
235         int maxrbuild;
236         int sr;
237         int blkflags = (bflags & B_KVABIO) ? GETBLK_KVABIO : 0;
238
239         sr = 0;
240
241         /*
242          * Calculate the desired read-ahead in blksize'd blocks (maxra).
243          * To do this we calculate maxreq.
244          *
245          * maxreq typically starts out as a sequential heuristic.  If the
246          * high level uio/resid is bigger (minreq), we pop maxreq up to
247          * minreq.  This represents the case where random I/O is being
248          * performed by the userland is issuing big read()'s.
249          *
250          * Then we limit maxreq to max_readahead to ensure it is a reasonable
251          * value.
252          *
253          * Finally we must ensure that (loffset + maxreq) does not cross the
254          * boundary (filesize) for the current blocksize.  If we allowed it
255          * to cross we could end up with buffers past the boundary with the
256          * wrong block size (HAMMER large-data areas use mixed block sizes).
257          * minreq is also absolutely limited to filesize.
258          */
259         if (maxreq < minreq)
260                 maxreq = minreq;
261         /* minreq not used beyond this point */
262
263         if (maxreq > max_readahead) {
264                 maxreq = max_readahead;
265                 if (maxreq > 16 * 1024 * 1024)
266                         maxreq = 16 * 1024 * 1024;
267         }
268         if (maxreq < blksize)
269                 maxreq = blksize;
270         if (loffset + maxreq > filesize) {
271                 if (loffset > filesize)
272                         maxreq = 0;
273                 else
274                         maxreq = filesize - loffset;
275         }
276
277         maxra = (int)(maxreq / blksize);
278
279         /*
280          * Get the requested block.
281          */
282         if (*bpp)
283                 reqbp = bp = *bpp;
284         else
285                 *bpp = reqbp = bp = getblk(vp, loffset, blksize, blkflags, 0);
286         origoffset = loffset;
287
288         /*
289          * Calculate the maximum cluster size for a single I/O, used
290          * by cluster_rbuild().
291          */
292         maxrbuild = vmaxiosize(vp) / blksize;
293
294         /*
295          * If it is in the cache, then check to see if the reads have been
296          * sequential.  If they have, then try some read-ahead, otherwise
297          * back-off on prospective read-aheads.
298          */
299         if (bp->b_flags & B_CACHE) {
300                 /*
301                  * Not sequential, do not do any read-ahead
302                  */
303                 if (maxra <= 1)
304                         return 0;
305
306                 /*
307                  * No read-ahead mark, do not do any read-ahead
308                  * yet.
309                  */
310                 if ((bp->b_flags & B_RAM) == 0)
311                         return 0;
312
313                 /*
314                  * We hit a read-ahead-mark, figure out how much read-ahead
315                  * to do (maxra) and where to start (loffset).
316                  *
317                  * Typically the way this works is that B_RAM is set in the
318                  * middle of the cluster and triggers an overlapping
319                  * read-ahead of 1/2 a cluster more blocks.  This ensures
320                  * that the cluster read-ahead scales with the read-ahead
321                  * count and is thus better-able to absorb the caller's
322                  * latency.
323                  *
324                  * Estimate where the next unread block will be by assuming
325                  * that the B_RAM's are placed at the half-way point.
326                  */
327                 bp->b_flags &= ~B_RAM;
328
329                 i = maxra / 2;
330                 rbp = findblk(vp, loffset + i * blksize, FINDBLK_TEST);
331                 if (rbp == NULL || (rbp->b_flags & B_CACHE) == 0) {
332                         while (i) {
333                                 --i;
334                                 rbp = findblk(vp, loffset + i * blksize,
335                                               FINDBLK_TEST);
336                                 if (rbp) {
337                                         ++i;
338                                         break;
339                                 }
340                         }
341                 } else {
342                         while (i < maxra) {
343                                 rbp = findblk(vp, loffset + i * blksize,
344                                               FINDBLK_TEST);
345                                 if (rbp == NULL)
346                                         break;
347                                 ++i;
348                         }
349                 }
350
351                 /*
352                  * We got everything or everything is in the cache, no
353                  * point continuing.
354                  */
355                 if (i >= maxra)
356                         return 0;
357
358                 /*
359                  * Calculate where to start the read-ahead and how much
360                  * to do.  Generally speaking we want to read-ahead by
361                  * (maxra) when we've found a read-ahead mark.  We do
362                  * not want to reduce maxra here as it will cause
363                  * successive read-ahead I/O's to be smaller and smaller.
364                  *
365                  * However, we have to make sure we don't break the
366                  * filesize limitation for the clustered operation.
367                  */
368                 loffset += i * blksize;
369                 reqbp = bp = NULL;
370
371                 if (loffset >= filesize)
372                         return 0;
373                 if (loffset + maxra * blksize > filesize) {
374                         maxreq = filesize - loffset;
375                         maxra = (int)(maxreq / blksize);
376                 }
377
378                 /*
379                  * Set RAM on first read-ahead block since we still have
380                  * approximate maxra/2 blocks ahead of us that are already
381                  * cached or in-progress.
382                  */
383                 sr = 1;
384         } else {
385                 /*
386                  * Start block is not valid, we will want to do a
387                  * full read-ahead.
388                  */
389                 __debugvar off_t firstread = bp->b_loffset;
390                 int nblks;
391
392                 /*
393                  * Set-up synchronous read for bp.
394                  */
395                 bp->b_cmd = BUF_CMD_READ;
396                 bp->b_bio1.bio_done = biodone_sync;
397                 bp->b_bio1.bio_flags |= BIO_SYNC;
398
399                 KASSERT(firstread != NOOFFSET, 
400                         ("cluster_read: no buffer offset"));
401
402                 nblks = calc_rbuild_reqsize(maxra, maxrbuild);
403
404                 /*
405                  * Set RAM half-way through the full-cluster.
406                  */
407                 sr = (maxra + 1) / 2;
408
409                 if (nblks > 1) {
410                         int burstbytes;
411
412                         error = VOP_BMAP(vp, loffset, &doffset,
413                                          &burstbytes, NULL, BUF_CMD_READ);
414                         if (error)
415                                 goto single_block_read;
416                         if (nblks > burstbytes / blksize)
417                                 nblks = burstbytes / blksize;
418                         if (doffset == NOOFFSET)
419                                 goto single_block_read;
420                         if (nblks <= 1)
421                                 goto single_block_read;
422
423                         bp = cluster_rbuild(vp, filesize, loffset,
424                                             doffset, blksize, nblks, bp, &sr);
425                         loffset += bp->b_bufsize;
426                         maxra -= bp->b_bufsize / blksize;
427                 } else {
428 single_block_read:
429                         /*
430                          * If it isn't in the cache, then get a chunk from
431                          * disk if sequential, otherwise just get the block.
432                          */
433                         loffset += blksize;
434                         --maxra;
435                 }
436         }
437
438         /*
439          * If B_CACHE was not set issue bp.  bp will either be an
440          * asynchronous cluster buf or a synchronous single-buf.
441          * If it is a single buf it will be the same as reqbp.
442          *
443          * NOTE: Once an async cluster buf is issued bp becomes invalid.
444          */
445         if (bp) {
446 #if defined(CLUSTERDEBUG)
447                 if (rcluster)
448                         kprintf("S(%012jx,%d,%d)\n",
449                             (intmax_t)bp->b_loffset, bp->b_bcount, maxra);
450 #endif
451                 if ((bp->b_flags & B_CLUSTER) == 0)
452                         vfs_busy_pages(vp, bp);
453                 bp->b_flags &= ~(B_ERROR | B_INVAL | B_NOTMETA);
454                 bp->b_flags |= bflags;
455                 vn_strategy(vp, &bp->b_bio1);
456                 /* bp invalid now */
457                 bp = NULL;
458         }
459
460 #if defined(CLUSTERDEBUG)
461         if (rcluster)
462                 kprintf("cluster_rd %016jx/%d maxra=%d sr=%d\n",
463                         loffset, blksize, maxra, sr);
464 #endif
465
466         /*
467          * If we have been doing sequential I/O, then do some read-ahead.
468          * The code above us should have positioned us at the next likely
469          * offset.
470          *
471          * Only mess with buffers which we can immediately lock.  HAMMER
472          * will do device-readahead irrespective of what the blocks
473          * represent.
474          *
475          * Set B_RAM on the first buffer (the next likely offset needing
476          * read-ahead), under the assumption that there are still
477          * approximately maxra/2 blocks good ahead of us.
478          */
479         while (maxra > 0) {
480                 int burstbytes;
481                 int nblks;
482
483                 rbp = getblk(vp, loffset, blksize,
484                              GETBLK_SZMATCH | GETBLK_NOWAIT | GETBLK_KVABIO,
485                              0);
486 #if defined(CLUSTERDEBUG)
487                 if (rcluster) {
488                         kprintf("read-ahead %016jx rbp=%p ",
489                                 loffset, rbp);
490                 }
491 #endif
492                 if (rbp == NULL)
493                         goto no_read_ahead;
494                 if ((rbp->b_flags & B_CACHE)) {
495                         bqrelse(rbp);
496                         goto no_read_ahead;
497                 }
498
499                 /*
500                  * If BMAP is not supported or has an issue, we still do
501                  * (maxra) read-ahead, but we do not try to use rbuild.
502                  */
503                 error = VOP_BMAP(vp, loffset, &doffset,
504                                  &burstbytes, NULL, BUF_CMD_READ);
505                 if (error || doffset == NOOFFSET) {
506                         nblks = 1;
507                         doffset = NOOFFSET;
508                 } else {
509                         nblks = calc_rbuild_reqsize(maxra, maxrbuild);
510                         if (nblks > burstbytes / blksize)
511                                 nblks = burstbytes / blksize;
512                 }
513                 rbp->b_cmd = BUF_CMD_READ;
514
515                 if (nblks > 1) {
516                         rbp = cluster_rbuild(vp, filesize, loffset,
517                                              doffset, blksize, 
518                                              nblks, rbp, &sr);
519                 } else {
520                         rbp->b_bio2.bio_offset = doffset;
521                         if (--sr == 0)
522                                 cluster_setram(rbp);
523                 }
524
525                 rbp->b_flags &= ~(B_ERROR | B_INVAL | B_NOTMETA);
526                 rbp->b_flags |= bflags;
527
528                 if ((rbp->b_flags & B_CLUSTER) == 0)
529                         vfs_busy_pages(vp, rbp);
530                 BUF_KERNPROC(rbp);
531                 loffset += rbp->b_bufsize;
532                 maxra -= rbp->b_bufsize / blksize;
533                 vn_strategy(vp, &rbp->b_bio1);
534                 /* rbp invalid now */
535         }
536
537         /*
538          * Wait for our original buffer to complete its I/O.  reqbp will
539          * be NULL if the original buffer was B_CACHE.  We are returning
540          * (*bpp) which is the same as reqbp when reqbp != NULL.
541          */
542 no_read_ahead:
543         if (reqbp) {
544                 KKASSERT(reqbp->b_bio1.bio_flags & BIO_SYNC);
545                 error = biowait(&reqbp->b_bio1, "clurd");
546         } else {
547                 error = 0;
548         }
549         return (error);
550 }
551
552 /*
553  * This replaces breadcb(), providing an asynchronous read of the requested
554  * buffer with a callback, plus an asynchronous read-ahead within the
555  * specified bounds.
556  *
557  * The callback must check whether BIO_DONE is set in the bio and issue
558  * the bpdone(bp, 0) if it isn't.  The callback is responsible for clearing
559  * BIO_DONE and disposing of the I/O (bqrelse()ing it).
560  *
561  * filesize     - read-ahead @ blksize will not cross this boundary
562  * loffset      - loffset for returned *bpp
563  * blksize      - blocksize for returned *bpp and read-ahead bps
564  * minreq       - minimum (not a hard minimum) in bytes, typically reflects
565  *                a higher level uio resid.
566  * maxreq       - maximum (sequential heuristic) in bytes (highet typ ~2MB)
567  * bpp          - return buffer (*bpp) for (loffset,blksize)
568  */
569 void
570 cluster_readcb(struct vnode *vp, off_t filesize, off_t loffset, int blksize,
571                int bflags, size_t minreq, size_t maxreq,
572                void (*func)(struct bio *), void *arg)
573 {
574         struct buf *bp, *rbp, *reqbp;
575         off_t origoffset;
576         off_t doffset;
577         int i;
578         int maxra;
579         int maxrbuild;
580         int sr;
581         int blkflags = (bflags & B_KVABIO) ? GETBLK_KVABIO : 0;
582
583         sr = 0;
584
585         /*
586          * Calculate the desired read-ahead in blksize'd blocks (maxra).
587          * To do this we calculate maxreq.
588          *
589          * maxreq typically starts out as a sequential heuristic.  If the
590          * high level uio/resid is bigger (minreq), we pop maxreq up to
591          * minreq.  This represents the case where random I/O is being
592          * performed by the userland is issuing big read()'s.
593          *
594          * Then we limit maxreq to max_readahead to ensure it is a reasonable
595          * value.
596          *
597          * Finally we must ensure that (loffset + maxreq) does not cross the
598          * boundary (filesize) for the current blocksize.  If we allowed it
599          * to cross we could end up with buffers past the boundary with the
600          * wrong block size (HAMMER large-data areas use mixed block sizes).
601          * minreq is also absolutely limited to filesize.
602          */
603         if (maxreq < minreq)
604                 maxreq = minreq;
605         /* minreq not used beyond this point */
606
607         if (maxreq > max_readahead) {
608                 maxreq = max_readahead;
609                 if (maxreq > 16 * 1024 * 1024)
610                         maxreq = 16 * 1024 * 1024;
611         }
612         if (maxreq < blksize)
613                 maxreq = blksize;
614         if (loffset + maxreq > filesize) {
615                 if (loffset > filesize)
616                         maxreq = 0;
617                 else
618                         maxreq = filesize - loffset;
619         }
620
621         maxra = (int)(maxreq / blksize);
622
623         /*
624          * Get the requested block.
625          */
626         reqbp = bp = getblk(vp, loffset, blksize, blkflags, 0);
627         origoffset = loffset;
628
629         /*
630          * Calculate the maximum cluster size for a single I/O, used
631          * by cluster_rbuild().
632          */
633         maxrbuild = vmaxiosize(vp) / blksize;
634
635         /*
636          * if it is in the cache, then check to see if the reads have been
637          * sequential.  If they have, then try some read-ahead, otherwise
638          * back-off on prospective read-aheads.
639          */
640         if (bp->b_flags & B_CACHE) {
641                 /*
642                  * Setup for func() call whether we do read-ahead or not.
643                  */
644                 bp->b_bio1.bio_caller_info1.ptr = arg;
645                 bp->b_bio1.bio_flags |= BIO_DONE;
646
647                 /*
648                  * Not sequential, do not do any read-ahead
649                  */
650                 if (maxra <= 1)
651                         goto no_read_ahead;
652
653                 /*
654                  * No read-ahead mark, do not do any read-ahead
655                  * yet.
656                  */
657                 if ((bp->b_flags & B_RAM) == 0)
658                         goto no_read_ahead;
659                 bp->b_flags &= ~B_RAM;
660
661                 /*
662                  * We hit a read-ahead-mark, figure out how much read-ahead
663                  * to do (maxra) and where to start (loffset).
664                  *
665                  * Shortcut the scan.  Typically the way this works is that
666                  * we've built up all the blocks inbetween except for the
667                  * last in previous iterations, so if the second-to-last
668                  * block is present we just skip ahead to it.
669                  *
670                  * This algorithm has O(1) cpu in the steady state no
671                  * matter how large maxra is.
672                  */
673                 if (findblk(vp, loffset + (maxra - 2) * blksize, FINDBLK_TEST))
674                         i = maxra - 1;
675                 else
676                         i = 1;
677                 while (i < maxra) {
678                         if (findblk(vp, loffset + i * blksize,
679                                     FINDBLK_TEST) == NULL) {
680                                 break;
681                         }
682                         ++i;
683                 }
684
685                 /*
686                  * We got everything or everything is in the cache, no
687                  * point continuing.
688                  */
689                 if (i >= maxra)
690                         goto no_read_ahead;
691
692                 /*
693                  * Calculate where to start the read-ahead and how much
694                  * to do.  Generally speaking we want to read-ahead by
695                  * (maxra) when we've found a read-ahead mark.  We do
696                  * not want to reduce maxra here as it will cause
697                  * successive read-ahead I/O's to be smaller and smaller.
698                  *
699                  * However, we have to make sure we don't break the
700                  * filesize limitation for the clustered operation.
701                  */
702                 loffset += i * blksize;
703                 bp = NULL;
704                 /* leave reqbp intact to force function callback */
705
706                 if (loffset >= filesize)
707                         goto no_read_ahead;
708                 if (loffset + maxra * blksize > filesize) {
709                         maxreq = filesize - loffset;
710                         maxra = (int)(maxreq / blksize);
711                 }
712                 sr = 1;
713         } else {
714                 /*
715                  * bp is not valid, no prior cluster in progress so get a
716                  * full cluster read-ahead going.
717                  */
718                 __debugvar off_t firstread = bp->b_loffset;
719                 int nblks;
720                 int error;
721
722                 /*
723                  * Set-up synchronous read for bp.
724                  */
725                 bp->b_flags &= ~(B_ERROR | B_EINTR | B_INVAL | B_NOTMETA);
726                 bp->b_flags |= bflags;
727                 bp->b_cmd = BUF_CMD_READ;
728                 bp->b_bio1.bio_done = func;
729                 bp->b_bio1.bio_caller_info1.ptr = arg;
730                 BUF_KERNPROC(bp);
731                 reqbp = NULL;   /* don't func() reqbp, it's running async */
732
733                 KASSERT(firstread != NOOFFSET,
734                         ("cluster_read: no buffer offset"));
735
736                 /*
737                  * nblks is our cluster_rbuild request size, limited
738                  * primarily by the device.
739                  */
740                 nblks = calc_rbuild_reqsize(maxra, maxrbuild);
741
742                 /*
743                  * Set RAM half-way through the full-cluster.
744                  */
745                 sr = (maxra + 1) / 2;
746
747                 if (nblks > 1) {
748                         int burstbytes;
749
750                         error = VOP_BMAP(vp, loffset, &doffset,
751                                          &burstbytes, NULL, BUF_CMD_READ);
752                         if (error)
753                                 goto single_block_read;
754                         if (nblks > burstbytes / blksize)
755                                 nblks = burstbytes / blksize;
756                         if (doffset == NOOFFSET)
757                                 goto single_block_read;
758                         if (nblks <= 1)
759                                 goto single_block_read;
760
761                         bp = cluster_rbuild(vp, filesize, loffset,
762                                             doffset, blksize, nblks, bp, &sr);
763                         loffset += bp->b_bufsize;
764                         maxra -= bp->b_bufsize / blksize;
765                 } else {
766 single_block_read:
767                         /*
768                          * If it isn't in the cache, then get a chunk from
769                          * disk if sequential, otherwise just get the block.
770                          */
771                         loffset += blksize;
772                         --maxra;
773                 }
774         }
775
776         /*
777          * If bp != NULL then B_CACHE was *NOT* set and bp must be issued.
778          * bp will either be an asynchronous cluster buf or an asynchronous
779          * single-buf.
780          *
781          * NOTE: Once an async cluster buf is issued bp becomes invalid.
782          */
783         if (bp) {
784 #if defined(CLUSTERDEBUG)
785                 if (rcluster)
786                         kprintf("S(%012jx,%d,%d)\n",
787                             (intmax_t)bp->b_loffset, bp->b_bcount, maxra);
788 #endif
789                 if ((bp->b_flags & B_CLUSTER) == 0)
790                         vfs_busy_pages(vp, bp);
791                 bp->b_flags &= ~(B_ERROR | B_INVAL | B_NOTMETA);
792                 bp->b_flags |= bflags;
793                 vn_strategy(vp, &bp->b_bio1);
794                 /* bp invalid now */
795                 bp = NULL;
796         }
797
798 #if defined(CLUSTERDEBUG)
799         if (rcluster)
800                 kprintf("cluster_rd %016jx/%d maxra=%d sr=%d\n",
801                         loffset, blksize, maxra, sr);
802 #endif
803
804         /*
805          * If we have been doing sequential I/O, then do some read-ahead.
806          * The code above us should have positioned us at the next likely
807          * offset.
808          *
809          * Only mess with buffers which we can immediately lock.  HAMMER
810          * will do device-readahead irrespective of what the blocks
811          * represent.
812          */
813         while (maxra > 0) {
814                 int burstbytes;
815                 int error;
816                 int nblks;
817
818                 rbp = getblk(vp, loffset, blksize,
819                              GETBLK_SZMATCH | GETBLK_NOWAIT | GETBLK_KVABIO,
820                              0);
821                 if (rbp == NULL)
822                         goto no_read_ahead;
823                 if ((rbp->b_flags & B_CACHE)) {
824                         bqrelse(rbp);
825                         goto no_read_ahead;
826                 }
827
828                 /*
829                  * If BMAP is not supported or has an issue, we still do
830                  * (maxra) read-ahead, but we do not try to use rbuild.
831                  */
832                 error = VOP_BMAP(vp, loffset, &doffset,
833                                  &burstbytes, NULL, BUF_CMD_READ);
834                 if (error || doffset == NOOFFSET) {
835                         nblks = 1;
836                         doffset = NOOFFSET;
837                 } else {
838                         nblks = calc_rbuild_reqsize(maxra, maxrbuild);
839                         if (nblks > burstbytes / blksize)
840                                 nblks = burstbytes / blksize;
841                 }
842                 rbp->b_cmd = BUF_CMD_READ;
843
844                 if (nblks > 1) {
845                         rbp = cluster_rbuild(vp, filesize, loffset,
846                                              doffset, blksize,
847                                              nblks, rbp, &sr);
848                 } else {
849                         rbp->b_bio2.bio_offset = doffset;
850                         if (--sr == 0)
851                                 cluster_setram(rbp);
852                 }
853
854                 rbp->b_flags &= ~(B_ERROR | B_INVAL | B_NOTMETA);
855                 rbp->b_flags |= bflags;
856
857                 if ((rbp->b_flags & B_CLUSTER) == 0)
858                         vfs_busy_pages(vp, rbp);
859                 BUF_KERNPROC(rbp);
860                 loffset += rbp->b_bufsize;
861                 maxra -= rbp->b_bufsize / blksize;
862                 vn_strategy(vp, &rbp->b_bio1);
863                 /* rbp invalid now */
864         }
865
866         /*
867          * If reqbp is non-NULL it had B_CACHE set and we issue the
868          * function callback synchronously.
869          *
870          * Note that we may start additional asynchronous I/O before doing
871          * the func() callback for the B_CACHE case
872          */
873 no_read_ahead:
874         if (reqbp)
875                 func(&reqbp->b_bio1);
876 }
877
878 /*
879  * If blocks are contiguous on disk, use this to provide clustered
880  * read ahead.  We will read as many blocks as possible sequentially
881  * and then parcel them up into logical blocks in the buffer hash table.
882  *
883  * This function either returns a cluster buf or it returns fbp.  fbp is
884  * already expected to be set up as a synchronous or asynchronous request.
885  *
886  * If a cluster buf is returned it will always be async.
887  *
888  * (*srp) counts down original blocks to determine where B_RAM should be set.
889  * Set B_RAM when *srp drops to 0.  If (*srp) starts at 0, B_RAM will not be
890  * set on any buffer.  Make sure B_RAM is cleared on any other buffers to
891  * prevent degenerate read-aheads from being generated.
892  */
893 static struct buf *
894 cluster_rbuild(struct vnode *vp, off_t filesize, off_t loffset, off_t doffset,
895                int blksize, int run, struct buf *fbp, int *srp)
896 {
897         struct buf *bp, *tbp;
898         off_t boffset;
899         int i, j;
900         int maxiosize = vmaxiosize(vp);
901
902         /*
903          * avoid a division
904          */
905         while (loffset + run * blksize > filesize) {
906                 --run;
907         }
908
909         tbp = fbp;
910         tbp->b_bio2.bio_offset = doffset;
911         if (((tbp->b_flags & B_VMIO) == 0) || (run <= 1)) {
912                 if (--*srp == 0)
913                         cluster_setram(tbp);
914                 else
915                         cluster_clrram(tbp);
916                 return tbp;
917         }
918
919         /*
920          * Get a pbuf, limit cluster I/O on a per-device basis.  If
921          * doing cluster I/O for a file, limit cluster I/O on a
922          * per-mount basis.
923          */
924         if (vp->v_type == VCHR || vp->v_type == VBLK)
925                 bp = trypbuf_kva(&vp->v_pbuf_count);
926         else
927                 bp = trypbuf_kva(&vp->v_mount->mnt_pbuf_count);
928
929         if (bp == NULL)
930                 return tbp;
931
932         /*
933          * We are synthesizing a buffer out of vm_page_t's, but
934          * if the block size is not page aligned then the starting
935          * address may not be either.  Inherit the b_data offset
936          * from the original buffer.
937          */
938         bp->b_vp = vp;
939         bp->b_data = (char *)((vm_offset_t)bp->b_data |
940                               ((vm_offset_t)tbp->b_data & PAGE_MASK));
941         bp->b_flags |= B_CLUSTER | B_VMIO | B_KVABIO;
942         bp->b_cmd = BUF_CMD_READ;
943         bp->b_bio1.bio_done = cluster_callback;         /* default to async */
944         bp->b_bio1.bio_caller_info1.cluster_head = NULL;
945         bp->b_bio1.bio_caller_info2.cluster_tail = NULL;
946         bp->b_loffset = loffset;
947         bp->b_bio2.bio_offset = doffset;
948         KASSERT(bp->b_loffset != NOOFFSET,
949                 ("cluster_rbuild: no buffer offset"));
950
951         bp->b_bcount = 0;
952         bp->b_bufsize = 0;
953         bp->b_xio.xio_npages = 0;
954
955         for (boffset = doffset, i = 0; i < run; ++i, boffset += blksize) {
956                 if (i) {
957                         if ((bp->b_xio.xio_npages * PAGE_SIZE) +
958                             round_page(blksize) > maxiosize) {
959                                 break;
960                         }
961
962                         /*
963                          * Shortcut some checks and try to avoid buffers that
964                          * would block in the lock.  The same checks have to
965                          * be made again after we officially get the buffer.
966                          */
967                         tbp = getblk(vp, loffset + i * blksize, blksize,
968                                      GETBLK_SZMATCH |
969                                      GETBLK_NOWAIT |
970                                      GETBLK_KVABIO,
971                                      0);
972                         if (tbp == NULL)
973                                 break;
974                         for (j = 0; j < tbp->b_xio.xio_npages; j++) {
975                                 if (tbp->b_xio.xio_pages[j]->valid)
976                                         break;
977                         }
978                         if (j != tbp->b_xio.xio_npages) {
979                                 bqrelse(tbp);
980                                 break;
981                         }
982
983                         /*
984                          * Stop scanning if the buffer is fuly valid 
985                          * (marked B_CACHE), or locked (may be doing a
986                          * background write), or if the buffer is not
987                          * VMIO backed.  The clustering code can only deal
988                          * with VMIO-backed buffers.
989                          */
990                         if ((tbp->b_flags & (B_CACHE|B_LOCKED)) ||
991                             (tbp->b_flags & B_VMIO) == 0 ||
992                             (LIST_FIRST(&tbp->b_dep) != NULL &&
993                              buf_checkread(tbp))
994                         ) {
995                                 bqrelse(tbp);
996                                 break;
997                         }
998
999                         /*
1000                          * The buffer must be completely invalid in order to
1001                          * take part in the cluster.  If it is partially valid
1002                          * then we stop.
1003                          */
1004                         for (j = 0;j < tbp->b_xio.xio_npages; j++) {
1005                                 if (tbp->b_xio.xio_pages[j]->valid)
1006                                         break;
1007                         }
1008                         if (j != tbp->b_xio.xio_npages) {
1009                                 bqrelse(tbp);
1010                                 break;
1011                         }
1012
1013                         /*
1014                          * Depress the priority of buffers not explicitly
1015                          * requested.
1016                          */
1017                         /* tbp->b_flags |= B_AGE; */
1018
1019                         /*
1020                          * Set the block number if it isn't set, otherwise
1021                          * if it is make sure it matches the block number we
1022                          * expect.
1023                          */
1024                         if (tbp->b_bio2.bio_offset == NOOFFSET) {
1025                                 tbp->b_bio2.bio_offset = boffset;
1026                         } else if (tbp->b_bio2.bio_offset != boffset) {
1027                                 brelse(tbp);
1028                                 break;
1029                         }
1030                 }
1031
1032                 /*
1033                  * Set B_RAM if (*srp) is 1.  B_RAM is only set on one buffer
1034                  * in the cluster, including potentially the first buffer
1035                  * once we start streaming the read-aheads.
1036                  */
1037                 if (--*srp == 0)
1038                         cluster_setram(tbp);
1039                 else
1040                         cluster_clrram(tbp);
1041
1042                 /*
1043                  * The passed-in tbp (i == 0) will already be set up for
1044                  * async or sync operation.  All other tbp's acquire in
1045                  * our loop are set up for async operation.
1046                  */
1047                 tbp->b_cmd = BUF_CMD_READ;
1048                 BUF_KERNPROC(tbp);
1049                 cluster_append(&bp->b_bio1, tbp);
1050                 for (j = 0; j < tbp->b_xio.xio_npages; ++j) {
1051                         vm_page_t m;
1052
1053                         m = tbp->b_xio.xio_pages[j];
1054                         vm_page_busy_wait(m, FALSE, "clurpg");
1055                         vm_page_io_start(m);
1056                         vm_page_wakeup(m);
1057                         vm_object_pip_add(m->object, 1);
1058                         if ((bp->b_xio.xio_npages == 0) ||
1059                             (bp->b_xio.xio_pages[bp->b_xio.xio_npages-1] != m)) {
1060                                 bp->b_xio.xio_pages[bp->b_xio.xio_npages] = m;
1061                                 bp->b_xio.xio_npages++;
1062                         }
1063                         if ((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) {
1064                                 tbp->b_xio.xio_pages[j] = bogus_page;
1065                                 tbp->b_flags |= B_HASBOGUS;
1066                         }
1067                 }
1068                 /*
1069                  * XXX shouldn't this be += size for both, like in 
1070                  * cluster_wbuild()?
1071                  *
1072                  * Don't inherit tbp->b_bufsize as it may be larger due to
1073                  * a non-page-aligned size.  Instead just aggregate using
1074                  * 'size'.
1075                  */
1076                 if (tbp->b_bcount != blksize)
1077                     kprintf("warning: tbp->b_bcount wrong %d vs %d\n", tbp->b_bcount, blksize);
1078                 if (tbp->b_bufsize != blksize)
1079                     kprintf("warning: tbp->b_bufsize wrong %d vs %d\n", tbp->b_bufsize, blksize);
1080                 bp->b_bcount += blksize;
1081                 bp->b_bufsize += blksize;
1082         }
1083
1084         /*
1085          * Fully valid pages in the cluster are already good and do not need
1086          * to be re-read from disk.  Replace the page with bogus_page
1087          */
1088         for (j = 0; j < bp->b_xio.xio_npages; j++) {
1089                 if ((bp->b_xio.xio_pages[j]->valid & VM_PAGE_BITS_ALL) ==
1090                     VM_PAGE_BITS_ALL) {
1091                         bp->b_xio.xio_pages[j] = bogus_page;
1092                         bp->b_flags |= B_HASBOGUS;
1093                 }
1094         }
1095         if (bp->b_bufsize > bp->b_kvasize) {
1096                 panic("cluster_rbuild: b_bufsize(%d) > b_kvasize(%d)",
1097                     bp->b_bufsize, bp->b_kvasize);
1098         }
1099         pmap_qenter_noinval(trunc_page((vm_offset_t)bp->b_data),
1100                             (vm_page_t *)bp->b_xio.xio_pages,
1101                             bp->b_xio.xio_npages);
1102         BUF_KERNPROC(bp);
1103         return (bp);
1104 }
1105
1106 /*
1107  * Cleanup after a clustered read or write.
1108  * This is complicated by the fact that any of the buffers might have
1109  * extra memory (if there were no empty buffer headers at allocbuf time)
1110  * that we will need to shift around.
1111  *
1112  * The returned bio is &bp->b_bio1
1113  */
1114 static void
1115 cluster_callback(struct bio *bio)
1116 {
1117         struct buf *bp = bio->bio_buf;
1118         struct buf *tbp;
1119         struct buf *next;
1120         struct vnode *vp;
1121         int error = 0;
1122         int bpflags;
1123
1124         /*
1125          * Must propogate errors to all the components.  A short read (EOF)
1126          * is a critical error.
1127          */
1128         if (bp->b_flags & B_ERROR) {
1129                 error = bp->b_error;
1130         } else if (bp->b_bcount != bp->b_bufsize) {
1131                 panic("cluster_callback: unexpected EOF on cluster %p!", bio);
1132         }
1133
1134         pmap_qremove_noinval(trunc_page((vm_offset_t) bp->b_data),
1135                              bp->b_xio.xio_npages);
1136
1137         /*
1138          * Retrieve the cluster head and dispose of the cluster buffer.
1139          * the vp is only valid while we hold one or more cluster elements,
1140          * so we have to do this before disposing of them.
1141          */
1142         tbp = bio->bio_caller_info1.cluster_head;
1143         bio->bio_caller_info1.cluster_head = NULL;
1144         bpflags = bp->b_flags;
1145         vp = bp->b_vp;
1146         bp->b_vp = NULL;
1147
1148         if (vp->v_type == VCHR || vp->v_type == VBLK)
1149                 relpbuf(bp, &vp->v_pbuf_count);
1150         else
1151                 relpbuf(bp, &vp->v_mount->mnt_pbuf_count);
1152         bp = NULL;      /* SAFETY */
1153
1154         /*
1155          * Move memory from the large cluster buffer into the component
1156          * buffers and mark IO as done on these.  Since the memory map
1157          * is the same, no actual copying is required.
1158          *
1159          * (And we already disposed of the larger cluster buffer)
1160          */
1161         while (tbp) {
1162                 next = tbp->b_cluster_next;
1163                 if (error) {
1164                         tbp->b_flags |= B_ERROR | B_IOISSUED;
1165                         tbp->b_error = error;
1166                 } else {
1167                         tbp->b_dirtyoff = tbp->b_dirtyend = 0;
1168                         tbp->b_flags &= ~(B_ERROR | B_INVAL);
1169                         if (tbp->b_cmd == BUF_CMD_READ) {
1170                                 tbp->b_flags = (tbp->b_flags & ~B_NOTMETA) |
1171                                                (bpflags & B_NOTMETA);
1172                         }
1173                         tbp->b_flags |= B_IOISSUED;
1174
1175                         /*
1176                          * XXX the bdwrite()/bqrelse() issued during
1177                          * cluster building clears B_RELBUF (see bqrelse()
1178                          * comment).  If direct I/O was specified, we have
1179                          * to restore it here to allow the buffer and VM
1180                          * to be freed.
1181                          */
1182                         if (tbp->b_flags & B_DIRECT)
1183                                 tbp->b_flags |= B_RELBUF;
1184
1185                         /*
1186                          * XXX I think biodone() below will do this, but do
1187                          *     it here anyway for consistency.
1188                          */
1189                         if (tbp->b_cmd == BUF_CMD_WRITE)
1190                                 bundirty(tbp);
1191                 }
1192                 biodone(&tbp->b_bio1);
1193                 tbp = next;
1194         }
1195 }
1196
1197 /*
1198  * Implement modified write build for cluster.
1199  *
1200  *      write_behind = 0        write behind disabled
1201  *      write_behind = 1        write behind normal (default)
1202  *      write_behind = 2        write behind backed-off
1203  *
1204  * In addition, write_behind is only activated for files that have
1205  * grown past a certain size (default 10MB).  Otherwise temporary files
1206  * wind up generating a lot of unnecessary disk I/O.
1207  */
1208 static __inline int
1209 cluster_wbuild_wb(struct vnode *vp, int blksize, off_t start_loffset, int len)
1210 {
1211         int r = 0;
1212
1213         switch(write_behind) {
1214         case 2:
1215                 if (start_loffset < len)
1216                         break;
1217                 start_loffset -= len;
1218                 /* fall through */
1219         case 1:
1220                 if (vp->v_filesize >= write_behind_minfilesize) {
1221                         r = cluster_wbuild(vp, NULL, blksize,
1222                                            start_loffset, len);
1223                 }
1224                 /* fall through */
1225         default:
1226                 /* fall through */
1227                 break;
1228         }
1229         return(r);
1230 }
1231
1232 /*
1233  * Do clustered write for FFS.
1234  *
1235  * Three cases:
1236  *      1. Write is not sequential (write asynchronously)
1237  *      Write is sequential:
1238  *      2.      beginning of cluster - begin cluster
1239  *      3.      middle of a cluster - add to cluster
1240  *      4.      end of a cluster - asynchronously write cluster
1241  *
1242  * WARNING! vnode fields are not locked and must ONLY be used heuristically.
1243  */
1244 void
1245 cluster_write(struct buf *bp, off_t filesize, int blksize, int seqcount)
1246 {
1247         struct vnode *vp;
1248         off_t loffset;
1249         int maxclen, cursize;
1250         int async;
1251         cluster_cache_t dummy;
1252         cluster_cache_t *cc;
1253
1254         vp = bp->b_vp;
1255         if (vp->v_type == VREG)
1256                 async = vp->v_mount->mnt_flag & MNT_ASYNC;
1257         else
1258                 async = 0;
1259         loffset = bp->b_loffset;
1260         KASSERT(bp->b_loffset != NOOFFSET, 
1261                 ("cluster_write: no buffer offset"));
1262
1263         cc = cluster_getcache(&dummy, vp, loffset);
1264
1265         /*
1266          * Initialize vnode to beginning of file.
1267          */
1268         if (loffset == 0)
1269                 cc->v_lasta = cc->v_clen = cc->v_cstart = cc->v_lastw = 0;
1270
1271         if (cc->v_clen == 0 || loffset != cc->v_lastw ||
1272             (bp->b_bio2.bio_offset != NOOFFSET &&
1273              (bp->b_bio2.bio_offset != cc->v_lasta))) {
1274                 /*
1275                  * Next block is not logically sequential, or, if physical
1276                  * block offsets are available, not physically sequential.
1277                  *
1278                  * If physical block offsets are not available we only
1279                  * get here if we weren't logically sequential.
1280                  */
1281                 maxclen = vmaxiosize(vp);
1282                 if (cc->v_clen != 0) {
1283                         /*
1284                          * Next block is not sequential.
1285                          *
1286                          * If we are not writing at end of file, the process
1287                          * seeked to another point in the file since its last
1288                          * write, or we have reached our maximum cluster size,
1289                          * then push the previous cluster. Otherwise try
1290                          * reallocating to make it sequential.
1291                          *
1292                          * Change to algorithm: only push previous cluster if
1293                          * it was sequential from the point of view of the
1294                          * seqcount heuristic, otherwise leave the buffer 
1295                          * intact so we can potentially optimize the I/O
1296                          * later on in the buf_daemon or update daemon
1297                          * flush.
1298                          */
1299                         cursize = cc->v_lastw - cc->v_cstart;
1300                         if (bp->b_loffset + blksize < filesize ||
1301                             loffset != cc->v_lastw ||
1302                             cc->v_clen <= cursize) {
1303                                 if (!async && seqcount > 0) {
1304                                         cluster_wbuild_wb(vp, blksize,
1305                                                 cc->v_cstart, cursize);
1306                                 }
1307                         } else {
1308                                 struct buf **bpp, **endbp;
1309                                 struct cluster_save *buflist;
1310
1311                                 buflist = cluster_collectbufs(cc, vp,
1312                                                               bp, blksize);
1313                                 endbp = &buflist->bs_children
1314                                         [buflist->bs_nchildren - 1];
1315                                 if (VOP_REALLOCBLKS(vp, buflist)) {
1316                                         /*
1317                                          * Failed, push the previous cluster
1318                                          * if *really* writing sequentially
1319                                          * in the logical file (seqcount > 1),
1320                                          * otherwise delay it in the hopes that
1321                                          * the low level disk driver can
1322                                          * optimize the write ordering.
1323                                          *
1324                                          * NOTE: We do not brelse the last
1325                                          *       element which is bp, and we
1326                                          *       do not return here.
1327                                          */
1328                                         for (bpp = buflist->bs_children;
1329                                              bpp < endbp; bpp++)
1330                                                 brelse(*bpp);
1331                                         kfree(buflist, M_SEGMENT);
1332                                         if (seqcount > 1) {
1333                                                 cluster_wbuild_wb(vp, 
1334                                                     blksize, cc->v_cstart,
1335                                                     cursize);
1336                                         }
1337                                 } else {
1338                                         /*
1339                                          * Succeeded, keep building cluster.
1340                                          */
1341                                         for (bpp = buflist->bs_children;
1342                                              bpp <= endbp; bpp++)
1343                                                 bdwrite(*bpp);
1344                                         kfree(buflist, M_SEGMENT);
1345                                         cc->v_lastw = loffset + blksize;
1346                                         cc->v_lasta = bp->b_bio2.bio_offset +
1347                                                       blksize;
1348                                         cluster_putcache(cc);
1349                                         return;
1350                                 }
1351                         }
1352                 }
1353
1354                 /*
1355                  * Consider beginning a cluster. If at end of file, make
1356                  * cluster as large as possible, otherwise find size of
1357                  * existing cluster.
1358                  */
1359                 if ((vp->v_type == VREG) &&
1360                     bp->b_loffset + blksize < filesize &&
1361                     (bp->b_bio2.bio_offset == NOOFFSET) &&
1362                     (VOP_BMAP(vp, loffset, &bp->b_bio2.bio_offset, &maxclen, NULL, BUF_CMD_WRITE) ||
1363                      bp->b_bio2.bio_offset == NOOFFSET)) {
1364                         bdwrite(bp);
1365                         cc->v_clen = 0;
1366                         cc->v_lasta = bp->b_bio2.bio_offset + blksize;
1367                         cc->v_cstart = loffset;
1368                         cc->v_lastw = loffset + blksize;
1369                         cluster_putcache(cc);
1370                         return;
1371                 }
1372                 if (maxclen > blksize)
1373                         cc->v_clen = maxclen;
1374                 else
1375                         cc->v_clen = blksize;
1376                 if (!async && cc->v_clen == 0) { /* I/O not contiguous */
1377                         cc->v_cstart = loffset;
1378                         bdwrite(bp);
1379                 } else {        /* Wait for rest of cluster */
1380                         cc->v_cstart = loffset;
1381                         bdwrite(bp);
1382                 }
1383         } else if (loffset == cc->v_cstart + cc->v_clen) {
1384                 /*
1385                  * At end of cluster, write it out if seqcount tells us we
1386                  * are operating sequentially, otherwise let the buf or
1387                  * update daemon handle it.
1388                  */
1389                 bdwrite(bp);
1390                 if (seqcount > 1)
1391                         cluster_wbuild_wb(vp, blksize, cc->v_cstart,
1392                                           cc->v_clen + blksize);
1393                 cc->v_clen = 0;
1394                 cc->v_cstart = loffset;
1395         } else if (vm_page_count_severe() &&
1396                    bp->b_loffset + blksize < filesize) {
1397                 /*
1398                  * We are low on memory, get it going NOW.  However, do not
1399                  * try to push out a partial block at the end of the file
1400                  * as this could lead to extremely non-optimal write activity.
1401                  */
1402                 bawrite(bp);
1403         } else {
1404                 /*
1405                  * In the middle of a cluster, so just delay the I/O for now.
1406                  */
1407                 bdwrite(bp);
1408         }
1409         cc->v_lastw = loffset + blksize;
1410         cc->v_lasta = bp->b_bio2.bio_offset + blksize;
1411         cluster_putcache(cc);
1412 }
1413
1414 /*
1415  * This is the clustered version of bawrite().  It works similarly to
1416  * cluster_write() except I/O on the buffer is guaranteed to occur.
1417  */
1418 int
1419 cluster_awrite(struct buf *bp)
1420 {
1421         int total;
1422
1423         /*
1424          * Don't bother if it isn't clusterable.
1425          */
1426         if ((bp->b_flags & B_CLUSTEROK) == 0 ||
1427             bp->b_vp == NULL ||
1428             (bp->b_vp->v_flag & VOBJBUF) == 0) {
1429                 total = bp->b_bufsize;
1430                 bawrite(bp);
1431                 return (total);
1432         }
1433
1434         total = cluster_wbuild(bp->b_vp, &bp, bp->b_bufsize,
1435                                bp->b_loffset, vmaxiosize(bp->b_vp));
1436
1437         /*
1438          * If bp is still non-NULL then cluster_wbuild() did not initiate
1439          * I/O on it and we must do so here to provide the API guarantee.
1440          */
1441         if (bp)
1442                 bawrite(bp);
1443
1444         return total;
1445 }
1446
1447 /*
1448  * This is an awful lot like cluster_rbuild...wish they could be combined.
1449  * The last lbn argument is the current block on which I/O is being
1450  * performed.  Check to see that it doesn't fall in the middle of
1451  * the current block (if last_bp == NULL).
1452  *
1453  * cluster_wbuild() normally does not guarantee anything.  If bpp is
1454  * non-NULL and cluster_wbuild() is able to incorporate it into the
1455  * I/O it will set *bpp to NULL, otherwise it will leave it alone and
1456  * the caller must dispose of *bpp.
1457  */
1458 static int
1459 cluster_wbuild(struct vnode *vp, struct buf **bpp,
1460                int blksize, off_t start_loffset, int bytes)
1461 {
1462         struct buf *bp, *tbp;
1463         int i, j;
1464         int totalwritten = 0;
1465         int must_initiate;
1466         int maxiosize = vmaxiosize(vp);
1467
1468         while (bytes > 0) {
1469                 /*
1470                  * If the buffer matches the passed locked & removed buffer
1471                  * we used the passed buffer (which might not be B_DELWRI).
1472                  *
1473                  * Otherwise locate the buffer and determine if it is
1474                  * compatible.
1475                  */
1476                 if (bpp && (*bpp)->b_loffset == start_loffset) {
1477                         tbp = *bpp;
1478                         *bpp = NULL;
1479                         bpp = NULL;
1480                 } else {
1481                         tbp = findblk(vp, start_loffset, FINDBLK_NBLOCK |
1482                                                          FINDBLK_KVABIO);
1483                         if (tbp == NULL ||
1484                             (tbp->b_flags & (B_LOCKED | B_INVAL | B_DELWRI)) !=
1485                              B_DELWRI ||
1486                             (LIST_FIRST(&tbp->b_dep) && buf_checkwrite(tbp))) {
1487                                 if (tbp)
1488                                         BUF_UNLOCK(tbp);
1489                                 start_loffset += blksize;
1490                                 bytes -= blksize;
1491                                 continue;
1492                         }
1493                         bremfree(tbp);
1494                 }
1495                 KKASSERT(tbp->b_cmd == BUF_CMD_DONE);
1496
1497                 /*
1498                  * Extra memory in the buffer, punt on this buffer.
1499                  * XXX we could handle this in most cases, but we would
1500                  * have to push the extra memory down to after our max
1501                  * possible cluster size and then potentially pull it back
1502                  * up if the cluster was terminated prematurely--too much
1503                  * hassle.
1504                  */
1505                 if ((tbp->b_flags & B_CLUSTEROK) == 0 ||
1506                     tbp->b_bcount != tbp->b_bufsize ||
1507                     tbp->b_bcount != blksize ||
1508                     bytes == blksize) {
1509                         totalwritten += tbp->b_bufsize;
1510                         bawrite(tbp);
1511                         start_loffset += blksize;
1512                         bytes -= blksize;
1513                         continue;
1514                 }
1515
1516                 /*
1517                  * Get a pbuf, limit cluster I/O on a per-device basis.  If
1518                  * doing cluster I/O for a file, limit cluster I/O on a
1519                  * per-mount basis.
1520                  *
1521                  * HAMMER and other filesystems may attempt to queue a massive
1522                  * amount of write I/O, using trypbuf() here easily results in
1523                  * situation where the I/O stream becomes non-clustered.
1524                  */
1525                 if (vp->v_type == VCHR || vp->v_type == VBLK)
1526                         bp = getpbuf_kva(&vp->v_pbuf_count);
1527                 else
1528                         bp = getpbuf_kva(&vp->v_mount->mnt_pbuf_count);
1529
1530                 /*
1531                  * Set up the pbuf.  Track our append point with b_bcount
1532                  * and b_bufsize.  b_bufsize is not used by the device but
1533                  * our caller uses it to loop clusters and we use it to
1534                  * detect a premature EOF on the block device.
1535                  */
1536                 bp->b_bcount = 0;
1537                 bp->b_bufsize = 0;
1538                 bp->b_xio.xio_npages = 0;
1539                 bp->b_loffset = tbp->b_loffset;
1540                 bp->b_bio2.bio_offset = tbp->b_bio2.bio_offset;
1541                 bp->b_vp = vp;
1542
1543                 /*
1544                  * We are synthesizing a buffer out of vm_page_t's, but
1545                  * if the block size is not page aligned then the starting
1546                  * address may not be either.  Inherit the b_data offset
1547                  * from the original buffer.
1548                  */
1549                 bp->b_data = (char *)((vm_offset_t)bp->b_data |
1550                                       ((vm_offset_t)tbp->b_data & PAGE_MASK));
1551                 bp->b_flags &= ~(B_ERROR | B_NOTMETA);
1552                 bp->b_flags |= B_CLUSTER | B_BNOCLIP | B_KVABIO |
1553                                (tbp->b_flags & (B_VMIO | B_NEEDCOMMIT |
1554                                                 B_NOTMETA));
1555                 bp->b_bio1.bio_caller_info1.cluster_head = NULL;
1556                 bp->b_bio1.bio_caller_info2.cluster_tail = NULL;
1557
1558                 /*
1559                  * From this location in the file, scan forward to see
1560                  * if there are buffers with adjacent data that need to
1561                  * be written as well.
1562                  *
1563                  * IO *must* be initiated on index 0 at this point
1564                  * (particularly when called from cluster_awrite()).
1565                  */
1566                 for (i = 0; i < bytes; (i += blksize), (start_loffset += blksize)) {
1567                         if (i == 0) {
1568                                 must_initiate = 1;
1569                         } else {
1570                                 /*
1571                                  * Not first buffer.
1572                                  */
1573                                 must_initiate = 0;
1574                                 tbp = findblk(vp, start_loffset,
1575                                               FINDBLK_NBLOCK | FINDBLK_KVABIO);
1576                                 /*
1577                                  * Buffer not found or could not be locked
1578                                  * non-blocking.
1579                                  */
1580                                 if (tbp == NULL)
1581                                         break;
1582
1583                                 /*
1584                                  * If it IS in core, but has different
1585                                  * characteristics, then don't cluster
1586                                  * with it.
1587                                  */
1588                                 if ((tbp->b_flags & (B_VMIO | B_CLUSTEROK |
1589                                      B_INVAL | B_DELWRI | B_NEEDCOMMIT))
1590                                     != (B_DELWRI | B_CLUSTEROK |
1591                                      (bp->b_flags & (B_VMIO | B_NEEDCOMMIT))) ||
1592                                     (tbp->b_flags & B_LOCKED)
1593                                 ) {
1594                                         BUF_UNLOCK(tbp);
1595                                         break;
1596                                 }
1597
1598                                 /*
1599                                  * Check that the combined cluster
1600                                  * would make sense with regard to pages
1601                                  * and would not be too large
1602                                  *
1603                                  * WARNING! buf_checkwrite() must be the last
1604                                  *          check made.  If it returns 0 then
1605                                  *          we must initiate the I/O.
1606                                  */
1607                                 if ((tbp->b_bcount != blksize) ||
1608                                   ((bp->b_bio2.bio_offset + i) !=
1609                                     tbp->b_bio2.bio_offset) ||
1610                                   ((tbp->b_xio.xio_npages + bp->b_xio.xio_npages) >
1611                                     (maxiosize / PAGE_SIZE)) ||
1612                                   (LIST_FIRST(&tbp->b_dep) &&
1613                                    buf_checkwrite(tbp))
1614                                 ) {
1615                                         BUF_UNLOCK(tbp);
1616                                         break;
1617                                 }
1618                                 if (LIST_FIRST(&tbp->b_dep))
1619                                         must_initiate = 1;
1620                                 /*
1621                                  * Ok, it's passed all the tests,
1622                                  * so remove it from the free list
1623                                  * and mark it busy. We will use it.
1624                                  */
1625                                 bremfree(tbp);
1626                                 KKASSERT(tbp->b_cmd == BUF_CMD_DONE);
1627                         }
1628
1629                         /*
1630                          * If the IO is via the VM then we do some
1631                          * special VM hackery (yuck).  Since the buffer's
1632                          * block size may not be page-aligned it is possible
1633                          * for a page to be shared between two buffers.  We
1634                          * have to get rid of the duplication when building
1635                          * the cluster.
1636                          */
1637                         if (tbp->b_flags & B_VMIO) {
1638                                 vm_page_t m;
1639
1640                                 /*
1641                                  * Try to avoid deadlocks with the VM system.
1642                                  * However, we cannot abort the I/O if
1643                                  * must_initiate is non-zero.
1644                                  */
1645                                 if (must_initiate == 0) {
1646                                         for (j = 0;
1647                                              j < tbp->b_xio.xio_npages;
1648                                              ++j) {
1649                                                 m = tbp->b_xio.xio_pages[j];
1650                                                 if (m->busy_count &
1651                                                     PBUSY_LOCKED) {
1652                                                         bqrelse(tbp);
1653                                                         goto finishcluster;
1654                                                 }
1655                                         }
1656                                 }
1657                                         
1658                                 for (j = 0; j < tbp->b_xio.xio_npages; ++j) {
1659                                         m = tbp->b_xio.xio_pages[j];
1660                                         vm_page_busy_wait(m, FALSE, "clurpg");
1661                                         vm_page_io_start(m);
1662                                         vm_page_wakeup(m);
1663                                         vm_object_pip_add(m->object, 1);
1664                                         if ((bp->b_xio.xio_npages == 0) ||
1665                                           (bp->b_xio.xio_pages[bp->b_xio.xio_npages - 1] != m)) {
1666                                                 bp->b_xio.xio_pages[bp->b_xio.xio_npages] = m;
1667                                                 bp->b_xio.xio_npages++;
1668                                         }
1669                                 }
1670                         }
1671                         bp->b_bcount += blksize;
1672                         bp->b_bufsize += blksize;
1673
1674                         /*
1675                          * NOTE: see bwrite/bawrite code for why we no longer
1676                          *       undirty tbp here.
1677                          *
1678                          *       bundirty(tbp); REMOVED
1679                          */
1680                         tbp->b_flags &= ~B_ERROR;
1681                         tbp->b_cmd = BUF_CMD_WRITE;
1682                         BUF_KERNPROC(tbp);
1683                         cluster_append(&bp->b_bio1, tbp);
1684
1685                         /*
1686                          * check for latent dependencies to be handled 
1687                          */
1688                         if (LIST_FIRST(&tbp->b_dep) != NULL)
1689                                 buf_start(tbp);
1690                 }
1691         finishcluster:
1692                 pmap_qenter_noinval(trunc_page((vm_offset_t)bp->b_data),
1693                                     (vm_page_t *)bp->b_xio.xio_pages,
1694                                     bp->b_xio.xio_npages);
1695                 if (bp->b_bufsize > bp->b_kvasize) {
1696                         panic("cluster_wbuild: b_bufsize(%d) "
1697                               "> b_kvasize(%d)\n",
1698                               bp->b_bufsize, bp->b_kvasize);
1699                 }
1700                 totalwritten += bp->b_bufsize;
1701                 bp->b_dirtyoff = 0;
1702                 bp->b_dirtyend = bp->b_bufsize;
1703                 bp->b_bio1.bio_done = cluster_callback;
1704                 bp->b_cmd = BUF_CMD_WRITE;
1705
1706                 vfs_busy_pages(vp, bp);
1707                 bsetrunningbufspace(bp, bp->b_bufsize);
1708                 BUF_KERNPROC(bp);
1709                 vn_strategy(vp, &bp->b_bio1);
1710
1711                 bytes -= i;
1712         }
1713         return totalwritten;
1714 }
1715
1716 /*
1717  * Collect together all the buffers in a cluster, plus add one
1718  * additional buffer passed-in.
1719  *
1720  * Only pre-existing buffers whos block size matches blksize are collected.
1721  * (this is primarily because HAMMER1 uses varying block sizes and we don't
1722  * want to override its choices).
1723  *
1724  * This code will not try to collect buffers that it cannot lock, otherwise
1725  * it might deadlock against SMP-friendly filesystems.
1726  */
1727 static struct cluster_save *
1728 cluster_collectbufs(cluster_cache_t *cc, struct vnode *vp,
1729                     struct buf *last_bp, int blksize)
1730 {
1731         struct cluster_save *buflist;
1732         struct buf *bp;
1733         off_t loffset;
1734         int i, len;
1735         int j;
1736         int k;
1737
1738         len = (int)(cc->v_lastw - cc->v_cstart) / blksize;
1739         KKASSERT(len > 0);
1740         buflist = kmalloc(sizeof(struct buf *) * (len + 1) + sizeof(*buflist),
1741                          M_SEGMENT, M_WAITOK);
1742         buflist->bs_nchildren = 0;
1743         buflist->bs_children = (struct buf **) (buflist + 1);
1744         for (loffset = cc->v_cstart, i = 0, j = 0;
1745              i < len;
1746              (loffset += blksize), i++) {
1747                 bp = getcacheblk(vp, loffset,
1748                                  last_bp->b_bcount, GETBLK_SZMATCH |
1749                                                     GETBLK_NOWAIT);
1750                 buflist->bs_children[i] = bp;
1751                 if (bp == NULL) {
1752                         j = i + 1;
1753                 } else if (bp->b_bio2.bio_offset == NOOFFSET) {
1754                         VOP_BMAP(bp->b_vp, bp->b_loffset,
1755                                  &bp->b_bio2.bio_offset,
1756                                  NULL, NULL, BUF_CMD_WRITE);
1757                 }
1758         }
1759
1760         /*
1761          * Get rid of gaps
1762          */
1763         for (k = 0; k < j; ++k) {
1764                 if (buflist->bs_children[k]) {
1765                         bqrelse(buflist->bs_children[k]);
1766                         buflist->bs_children[k] = NULL;
1767                 }
1768         }
1769         if (j != 0) {
1770                 if (j != i) {
1771                         bcopy(buflist->bs_children + j,
1772                               buflist->bs_children + 0,
1773                               sizeof(buflist->bs_children[0]) * (i - j));
1774                 }
1775                 i -= j;
1776         }
1777         buflist->bs_children[i] = bp = last_bp;
1778         if (bp->b_bio2.bio_offset == NOOFFSET) {
1779                 VOP_BMAP(bp->b_vp, bp->b_loffset, &bp->b_bio2.bio_offset,
1780                          NULL, NULL, BUF_CMD_WRITE);
1781         }
1782         buflist->bs_nchildren = i + 1;
1783         return (buflist);
1784 }
1785
1786 void
1787 cluster_append(struct bio *bio, struct buf *tbp)
1788 {
1789         tbp->b_cluster_next = NULL;
1790         if (bio->bio_caller_info1.cluster_head == NULL) {
1791                 bio->bio_caller_info1.cluster_head = tbp;
1792                 bio->bio_caller_info2.cluster_tail = tbp;
1793         } else {
1794                 bio->bio_caller_info2.cluster_tail->b_cluster_next = tbp;
1795                 bio->bio_caller_info2.cluster_tail = tbp;
1796         }
1797 }
1798
1799 static
1800 void
1801 cluster_setram(struct buf *bp)
1802 {
1803         bp->b_flags |= B_RAM;
1804         if (bp->b_xio.xio_npages)
1805                 vm_page_flag_set(bp->b_xio.xio_pages[0], PG_RAM);
1806 }
1807
1808 static
1809 void
1810 cluster_clrram(struct buf *bp)
1811 {
1812         bp->b_flags &= ~B_RAM;
1813         if (bp->b_xio.xio_npages)
1814                 vm_page_flag_clear(bp->b_xio.xio_pages[0], PG_RAM);
1815 }