/*-
 * Copyright (c) 1993
 *	The Regents of the University of California.  All rights reserved.
 * Modifications/enhancements:
 *	Copyright (c) 1995 John S. Dyson.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)vfs_cluster.c	8.7 (Berkeley) 2/13/94
 * $FreeBSD: src/sys/kern/vfs_cluster.c,v 1.92.2.9 2001/11/18 07:10:59 dillon Exp $
 * $DragonFly: src/sys/kern/vfs_cluster.c,v 1.25 2006/05/04 18:32:22 dillon Exp $
 */

#include "opt_debug_cluster.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/resourcevar.h>
#include <sys/vmmeter.h>
#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <sys/sysctl.h>
#include <sys/buf2.h>
#include <vm/vm_page2.h>

#if defined(CLUSTERDEBUG)
#include <sys/sysctl.h>
static int rcluster = 0;
SYSCTL_INT(_debug, OID_AUTO, rcluster, CTLFLAG_RW, &rcluster, 0, "");
#endif

static MALLOC_DEFINE(M_SEGMENT, "cluster_save", "cluster_save buffer");

static struct cluster_save *
	cluster_collectbufs (struct vnode *vp, struct buf *last_bp,
			     int lblocksize);
static struct buf *
	cluster_rbuild (struct vnode *vp, off_t filesize, off_t loffset,
			off_t doffset, int size, int run,
			struct buf *fbp, int doasync);
static void cluster_callback (struct bio *);

static int write_behind = 1;
SYSCTL_INT(_vfs, OID_AUTO, write_behind, CTLFLAG_RW, &write_behind, 0, "");

extern vm_page_t bogus_page;

extern int cluster_pbuf_freecnt;

/*
 * Maximum number of blocks for read-ahead.
 */
#define MAXRA 32

/*
 * This replaces bread.  Acquire the requested block, and if the read
 * access pattern appears sequential, issue clustered read-ahead
 * behind it.
 */
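/*
 * Typical call (a sketch only; the surrounding names are the caller's
 * own, not part of this file):
 *
 *	error = cluster_read(vp, filesize, loffset, blksize,
 *			     resid, seqcount, &bp);
 *
 * *bpp always returns the buffer for loffset; the return value
 * reflects the synchronous portion of the I/O, via biowait() when a
 * synchronous read was actually issued.
 */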
int
cluster_read(struct vnode *vp, off_t filesize, off_t loffset,
	     int size, int totread, int seqcount, struct buf **bpp)
{
	struct buf *bp, *rbp, *reqbp;
	off_t origoffset;
	off_t doffset;
	int error;
	int i;
	int maxra, racluster;

	error = 0;

	/*
	 * Try to limit the amount of read-ahead by a few
	 * ad-hoc parameters.  This needs work!!!
	 */
	racluster = vp->v_mount->mnt_iosize_max / size;
	maxra = 2 * racluster + (totread / size);
	if (maxra > MAXRA)
		maxra = MAXRA;
	if (maxra > nbuf/8)
		maxra = nbuf/8;
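	/*
	 * Worked example (hypothetical numbers): with mnt_iosize_max of
	 * 64KB, an 8KB block size and a 32KB read, racluster is 8 and
	 * maxra is 2 * 8 + 4 = 20, further capped by MAXRA and nbuf/8.
	 */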

	/*
	 * Get the requested block.
	 */
	*bpp = reqbp = bp = getblk(vp, loffset, size, 0, 0);
	origoffset = loffset;

	/*
	 * If it is in the cache, then check to see if the reads have been
	 * sequential.  If they have, then try some read-ahead, otherwise
	 * back-off on prospective read-aheads.
	 */
	if (bp->b_flags & B_CACHE) {
		if (!seqcount) {
			return 0;
		} else if ((bp->b_flags & B_RAM) == 0) {
			return 0;
		} else {
			struct buf *tbp;
			bp->b_flags &= ~B_RAM;
			/*
			 * We do the crit here so that there is no window
			 * between the findblk and the b_usecount increment
			 * below.  We opt to keep the crit out of the loop
			 * for efficiency.
			 */
			crit_enter();
			for (i = 1; i < maxra; i++) {
				if (!(tbp = findblk(vp, loffset + i * size))) {
					break;
				}

				/*
				 * Set another read-ahead mark so we know
				 * to check again.
				 */
				if (((i % racluster) == (racluster - 1)) ||
				    (i == (maxra - 1)))
					tbp->b_flags |= B_RAM;
			}
			crit_exit();
			if (i >= maxra) {
				return 0;
			}
			loffset += i * size;
		}
		reqbp = bp = NULL;
	} else {
		off_t firstread = bp->b_loffset;
		int nblks;

		KASSERT(firstread != NOOFFSET,
			("cluster_read: no buffer offset"));
		if (firstread + totread > filesize)
			totread = (int)(filesize - firstread);
		nblks = totread / size;
		if (nblks) {
			int burstbytes;

			if (nblks > racluster)
				nblks = racluster;

			error = VOP_BMAP(vp, loffset, NULL,
					 &doffset, &burstbytes, NULL);
			if (error)
				goto single_block_read;
			if (doffset == NOOFFSET)
				goto single_block_read;
			if (burstbytes < size * 2)
				goto single_block_read;
			if (nblks > burstbytes / size)
				nblks = burstbytes / size;

			bp = cluster_rbuild(vp, filesize, loffset,
					    doffset, size, nblks, bp, 0);
			loffset += bp->b_bufsize;
		} else {
single_block_read:
			/*
			 * If it isn't in the cache, then get a chunk from
			 * disk if sequential, otherwise just get the block.
			 */
			bp->b_flags |= B_RAM;
			loffset += size;
		}
	}
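
	/*
	 * Whichever path was taken, loffset now points just past the
	 * last block handled above, i.e. at the first candidate block
	 * for read-ahead.
	 */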
	/*
	 * If we have been doing sequential I/O, then do some read-ahead.
	 */
	rbp = NULL;
	if (seqcount &&
	    loffset < origoffset + seqcount * size &&
	    loffset + size <= filesize
	) {
		rbp = getblk(vp, loffset, size, 0, 0);
		if ((rbp->b_flags & B_CACHE) == 0) {
			int nblksread;
			int ntoread;
			int burstbytes;

			error = VOP_BMAP(vp, loffset, NULL,
					 &doffset, &burstbytes, NULL);
			if (error || doffset == NOOFFSET) {
				brelse(rbp);
				rbp = NULL;
				goto no_read_ahead;
			}
			ntoread = burstbytes / size;
			nblksread = (totread + size - 1) / size;
			if (seqcount < nblksread)
				seqcount = nblksread;
			if (seqcount < ntoread)
				ntoread = seqcount;

			rbp->b_flags |= B_RAM;
			if (burstbytes) {
				rbp = cluster_rbuild(vp, filesize, loffset,
						     doffset, size,
						     ntoread, rbp, 1);
			} else {
				rbp->b_bio2.bio_offset = doffset;
			}
		}
	}
no_read_ahead:
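	/*
	 * Dispatch phase.  bp, if not NULL, is the synchronous request
	 * and rbp, if not NULL, is the asynchronous read-ahead; either
	 * one may be a cluster pbuf built by cluster_rbuild().
	 */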

	/*
	 * Handle the synchronous read.  This only occurs if B_CACHE was
	 * not set.  bp (and rbp) could be either a cluster bp or a normal
	 * bp depending on what cluster_rbuild() decided to do.  If it is
	 * a cluster bp, vfs_busy_pages() has already been called.
	 */
	if (bp) {
#if defined(CLUSTERDEBUG)
		if (rcluster)
			printf("S(%lld,%d,%d) ",
			    bp->b_loffset, bp->b_bcount, seqcount);
#endif
		bp->b_cmd = BUF_CMD_READ;
		if ((bp->b_flags & B_CLUSTER) == 0)
			vfs_busy_pages(vp, bp);
		bp->b_flags &= ~(B_ERROR|B_INVAL);
		if ((bp->b_flags & B_ASYNC) || bp->b_bio1.bio_done != NULL)
			BUF_KERNPROC(bp);
		vn_strategy(vp, &bp->b_bio1);
		error = bp->b_error;
	}

	/*
	 * And if we have read-aheads, do them too.
	 */
	if (rbp) {
		if (error) {
			brelse(rbp);
		} else if (rbp->b_flags & B_CACHE) {
			bqrelse(rbp);
		} else {
#if defined(CLUSTERDEBUG)
			if (rcluster) {
				if (bp)
					printf("A+(%lld,%d,%lld,%d) ",
					    rbp->b_loffset, rbp->b_bcount,
					    rbp->b_loffset - origoffset,
					    seqcount);
				else
					printf("A(%lld,%d,%lld,%d) ",
					    rbp->b_loffset, rbp->b_bcount,
					    rbp->b_loffset - origoffset,
					    seqcount);
			}
#endif
			rbp->b_flags &= ~(B_ERROR|B_INVAL);
			rbp->b_flags |= B_ASYNC;
			rbp->b_cmd = BUF_CMD_READ;

			if ((rbp->b_flags & B_CLUSTER) == 0)
				vfs_busy_pages(vp, rbp);
			if ((rbp->b_flags & B_ASYNC) || rbp->b_bio1.bio_done != NULL)
				BUF_KERNPROC(rbp);
			vn_strategy(vp, &rbp->b_bio1);
		}
	}
	if (reqbp)
		return (biowait(reqbp));
	else
		return (error);
}

/*
 * If blocks are contiguous on disk, use this to provide clustered
 * read ahead.  We will read as many blocks as possible sequentially
 * and then parcel them up into logical blocks in the buffer hash table.
 */
static struct buf *
cluster_rbuild(struct vnode *vp, off_t filesize, off_t loffset,
	       off_t doffset, int size, int run, struct buf *fbp, int doasync)
{
	struct buf *bp, *tbp;
	off_t boffset;
	int i, j;

	KASSERT(size == vp->v_mount->mnt_stat.f_iosize,
	    ("cluster_rbuild: size %d != f_iosize %ld\n",
	     size, vp->v_mount->mnt_stat.f_iosize));

	/*
	 * Clamp run so the cluster does not extend past EOF; count down
	 * in a loop rather than doing a 64-bit division.
	 */
	while (loffset + run * size > filesize) {
		--run;
	}

	tbp = fbp;
	tbp->b_bio2.bio_offset = doffset;
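	/*
	 * Malloc-backed and non-VMIO buffers have no VM pages to borrow,
	 * and a run of a single block gains nothing, so fall back to a
	 * plain single-block read in those cases.
	 */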
	if ((tbp->b_flags & B_MALLOC) ||
	    ((tbp->b_flags & B_VMIO) == 0) || (run <= 1)) {
		return tbp;
	}

	bp = trypbuf(&cluster_pbuf_freecnt);
	if (bp == NULL)
		return tbp;

	/*
	 * We are synthesizing a buffer out of vm_page_t's, but
	 * if the block size is not page aligned then the starting
	 * address may not be either.  Inherit the b_data offset
	 * from the original buffer.
	 */
	bp->b_data = (char *)((vm_offset_t)bp->b_data |
	    ((vm_offset_t)tbp->b_data & PAGE_MASK));
	bp->b_flags |= B_ASYNC | B_CLUSTER | B_VMIO;
	bp->b_cmd = BUF_CMD_READ;
	bp->b_bio1.bio_done = cluster_callback;
	bp->b_bio1.bio_caller_info1.cluster_head = NULL;
	bp->b_bio1.bio_caller_info2.cluster_tail = NULL;
	bp->b_loffset = loffset;
	bp->b_bio2.bio_offset = NOOFFSET;
	KASSERT(bp->b_loffset != NOOFFSET,
		("cluster_rbuild: no buffer offset"));

	bp->b_bcount = 0;
	bp->b_bufsize = 0;
	bp->b_xio.xio_npages = 0;
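
	/*
	 * Assemble the cluster.  For each block in the run, pick up (or
	 * create) the corresponding buffer, link it into the cluster
	 * list, and fold its VM pages into the pbuf.
	 */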
	for (boffset = doffset, i = 0; i < run; ++i, boffset += size) {
		if (i) {
			if ((bp->b_xio.xio_npages * PAGE_SIZE) +
			    round_page(size) > vp->v_mount->mnt_iosize_max) {
				break;
			}

			/*
			 * Shortcut some checks and try to avoid buffers that
			 * would block in the lock.  The same checks have to
			 * be made again after we officially get the buffer.
			 */
			if ((tbp = findblk(vp, loffset + i * size)) != NULL) {
				if (BUF_LOCK(tbp, LK_EXCLUSIVE | LK_NOWAIT))
					break;
				BUF_UNLOCK(tbp);

				for (j = 0; j < tbp->b_xio.xio_npages; j++) {
					if (tbp->b_xio.xio_pages[j]->valid)
						break;
				}

				if (j != tbp->b_xio.xio_npages)
					break;

				if (tbp->b_bcount != size)
					break;
			}

			tbp = getblk(vp, loffset + i * size, size, 0, 0);

			/*
			 * Stop scanning if the buffer is fully valid
			 * (marked B_CACHE), or locked (may be doing a
			 * background write), or if the buffer is not
			 * VMIO backed.  The clustering code can only deal
			 * with VMIO-backed buffers.
			 */
			if ((tbp->b_flags & (B_CACHE|B_LOCKED)) ||
			    (tbp->b_flags & B_VMIO) == 0) {
				bqrelse(tbp);
				break;
			}

			/*
			 * The buffer must be completely invalid in order to
			 * take part in the cluster.  If it is partially valid
			 * then we stop.
			 */
			for (j = 0; j < tbp->b_xio.xio_npages; j++) {
				if (tbp->b_xio.xio_pages[j]->valid)
					break;
			}
			if (j != tbp->b_xio.xio_npages) {
				bqrelse(tbp);
				break;
			}

			/*
			 * Set a read-ahead mark as appropriate
			 */
			if (i == 1 || i == (run - 1))
				tbp->b_flags |= B_RAM;

			/*
			 * Set the block number if it isn't set, otherwise
			 * if it is make sure it matches the block number we
			 * expect.
			 */
			if (tbp->b_bio2.bio_offset == NOOFFSET) {
				tbp->b_bio2.bio_offset = boffset;
			} else if (tbp->b_bio2.bio_offset != boffset) {
				brelse(tbp);
				break;
			}
		}
		/*
		 * The first buffer is setup async if doasync is specified.
		 * All other buffers in the cluster are always setup async.
		 * This way the caller can decide how to deal with the
		 * requested buffer.
		 */
		if (i || doasync)
			tbp->b_flags |= B_ASYNC;
		tbp->b_cmd = BUF_CMD_READ;
		BUF_KERNPROC(tbp);
		cluster_append(&bp->b_bio1, tbp);
		for (j = 0; j < tbp->b_xio.xio_npages; ++j) {
			vm_page_t m;
			m = tbp->b_xio.xio_pages[j];
			vm_page_io_start(m);
			vm_object_pip_add(m->object, 1);
			if ((bp->b_xio.xio_npages == 0) ||
			    (bp->b_xio.xio_pages[bp->b_xio.xio_npages-1] != m)) {
				bp->b_xio.xio_pages[bp->b_xio.xio_npages] = m;
				bp->b_xio.xio_npages++;
			}
			if ((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL)
				tbp->b_xio.xio_pages[j] = bogus_page;
		}
		/*
		 * XXX shouldn't this be += size for both, like in
		 * cluster_wbuild()?
		 *
		 * Don't inherit tbp->b_bufsize as it may be larger due to
		 * a non-page-aligned size.  Instead just aggregate using
		 * 'size'.
		 */
		if (tbp->b_bcount != size)
			printf("warning: tbp->b_bcount wrong %d vs %d\n", tbp->b_bcount, size);
		if (tbp->b_bufsize != size)
			printf("warning: tbp->b_bufsize wrong %d vs %d\n", tbp->b_bufsize, size);
		bp->b_bcount += size;
		bp->b_bufsize += size;
	}

	/*
	 * Fully valid pages in the cluster are already good and do not
	 * need to be re-read from disk.  Replace the page with bogus_page.
	 */
	for (j = 0; j < bp->b_xio.xio_npages; j++) {
		if ((bp->b_xio.xio_pages[j]->valid & VM_PAGE_BITS_ALL) ==
		    VM_PAGE_BITS_ALL) {
			bp->b_xio.xio_pages[j] = bogus_page;
		}
	}
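	/*
	 * bogus_page acts as a placeholder: the device transfer still
	 * covers the full run, but bytes destined for an already-valid
	 * page land in the shared bogus page and are discarded rather
	 * than overwriting good data.
	 */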
	if (bp->b_bufsize > bp->b_kvasize) {
		panic("cluster_rbuild: b_bufsize(%d) > b_kvasize(%d)",
		    bp->b_bufsize, bp->b_kvasize);
	}

	pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
	    (vm_page_t *)bp->b_xio.xio_pages, bp->b_xio.xio_npages);
	return (bp);
}

/*
 * Cleanup after a clustered read or write.
 * This is complicated by the fact that any of the buffers might have
 * extra memory (if there were no empty buffer headers at allocbuf time)
 * that we will need to shift around.
 *
 * The bio we are called with is &bp->b_bio1 of the cluster pbuf.
 */
void
cluster_callback(struct bio *bio)
{
	struct buf *bp = bio->bio_buf;
	struct buf *tbp;
	int error = 0;

	/*
	 * Must propagate errors to all the components.  A short read (EOF)
	 * is a critical error.
	 */
	if (bp->b_flags & B_ERROR) {
		error = bp->b_error;
	} else if (bp->b_bcount != bp->b_bufsize) {
		panic("cluster_callback: unexpected EOF on cluster %p!", bio);
	}

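	/*
	 * Tear down the cluster's temporary KVA mapping of the component
	 * pages before handing the pages back to their buffers.
	 */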
	pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_xio.xio_npages);
	/*
	 * Move memory from the large cluster buffer into the component
	 * buffers and mark IO as done on these.  Since the memory map
	 * is the same, no actual copying is required.
	 */
	while ((tbp = bio->bio_caller_info1.cluster_head) != NULL) {
		bio->bio_caller_info1.cluster_head = tbp->b_cluster_next;
		if (error) {
			tbp->b_flags |= B_ERROR;
			tbp->b_error = error;
		} else {
			tbp->b_dirtyoff = tbp->b_dirtyend = 0;
			tbp->b_flags &= ~(B_ERROR|B_INVAL);
			/*
			 * XXX the bdwrite()/bqrelse() issued during
			 * cluster building clears B_RELBUF (see bqrelse()
			 * comment).  If direct I/O was specified, we have
			 * to restore it here to allow the buffer and VM
			 * to be freed.
			 */
			if (tbp->b_flags & B_DIRECT)
				tbp->b_flags |= B_RELBUF;
		}
		biodone(&tbp->b_bio1);
	}
	relpbuf(bp, &cluster_pbuf_freecnt);
}

/*
 * cluster_wbuild_wb:
 *
 *	Implement modified write build for cluster.
 *
 *	write_behind = 0	write behind disabled
 *	write_behind = 1	write behind normal (default)
 *	write_behind = 2	write behind backed-off
 */
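/*
 * In the backed-off case, a call covering [start_loffset,
 * start_loffset + len) flushes the preceding window
 * [start_loffset - len, start_loffset) instead, leaving the most
 * recently written blocks dirty a while longer in the hope of
 * combining them with future writes.
 */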

static __inline int
cluster_wbuild_wb(struct vnode *vp, int size, off_t start_loffset, int len)
{
	int r = 0;

	switch(write_behind) {
	case 2:
		if (start_loffset < len)
			break;
		start_loffset -= len;
		/* fall through */
	case 1:
		r = cluster_wbuild(vp, size, start_loffset, len);
		/* fall through */
	default:
		/* fall through */
		break;
	}
	return(r);
}

/*
 * Do clustered write for FFS.
 *
 * Four cases:
 * 1. Write is not sequential (write asynchronously)
 * Write is sequential:
 * 2. beginning of a cluster - begin cluster
 * 3. middle of a cluster - add to cluster
 * 4. end of a cluster - asynchronously write cluster
 */
void
cluster_write(struct buf *bp, off_t filesize, int seqcount)
{
	struct vnode *vp;
	off_t loffset;
	int maxclen, cursize;
	int lblocksize;
	int async;

	vp = bp->b_vp;
	if (vp->v_type == VREG) {
		async = vp->v_mount->mnt_flag & MNT_ASYNC;
		lblocksize = vp->v_mount->mnt_stat.f_iosize;
	} else {
		async = 0;
		lblocksize = bp->b_bufsize;
	}
	loffset = bp->b_loffset;
	KASSERT(bp->b_loffset != NOOFFSET,
		("cluster_write: no buffer offset"));

	/* Initialize vnode to beginning of file. */
	if (loffset == 0)
		vp->v_lasta = vp->v_clen = vp->v_cstart = vp->v_lastw = 0;

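	/*
	 * The vnode tracks the cluster being accumulated: v_cstart is
	 * the logical start of the cluster, v_lastw the last logical
	 * offset written, v_lasta the last physical (bio2) offset, and
	 * v_clen the maximum cluster extent, in bytes, beyond the
	 * first block.
	 */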
	if (vp->v_clen == 0 || loffset != vp->v_lastw + lblocksize ||
	    bp->b_bio2.bio_offset == NOOFFSET ||
	    (bp->b_bio2.bio_offset != vp->v_lasta + lblocksize)) {
		maxclen = vp->v_mount->mnt_iosize_max;
		if (vp->v_clen != 0) {
			/*
			 * Next block is not sequential.
			 *
			 * If we are not writing at end of file, the process
			 * seeked to another point in the file since its last
			 * write, or we have reached our maximum cluster size,
			 * then push the previous cluster.  Otherwise try
			 * reallocating to make it sequential.
			 *
			 * Change to algorithm: only push previous cluster if
			 * it was sequential from the point of view of the
			 * seqcount heuristic, otherwise leave the buffer
			 * intact so we can potentially optimize the I/O
			 * later on in the buf_daemon or update daemon
			 * flush.
			 */
			cursize = vp->v_lastw - vp->v_cstart + lblocksize;
			if (bp->b_loffset + lblocksize != filesize ||
			    loffset != vp->v_lastw + lblocksize ||
			    vp->v_clen <= cursize) {
				if (!async && seqcount > 0) {
					cluster_wbuild_wb(vp, lblocksize,
						vp->v_cstart, cursize);
				}
			} else {
				struct buf **bpp, **endbp;
				struct cluster_save *buflist;

				buflist = cluster_collectbufs(vp, bp,
							      lblocksize);
				endbp = &buflist->bs_children
				    [buflist->bs_nchildren - 1];
				if (VOP_REALLOCBLKS(vp, buflist)) {
					/*
					 * Failed, push the previous cluster
					 * if *really* writing sequentially
					 * in the logical file (seqcount > 1),
					 * otherwise delay it in the hopes that
					 * the low level disk driver can
					 * optimize the write ordering.
					 */
					for (bpp = buflist->bs_children;
					     bpp < endbp; bpp++)
						brelse(*bpp);
					free(buflist, M_SEGMENT);
					if (seqcount > 1) {
						cluster_wbuild_wb(vp,
						    lblocksize, vp->v_cstart,
						    cursize);
					}
				} else {
					/*
					 * Succeeded, keep building cluster.
					 */
					for (bpp = buflist->bs_children;
					     bpp <= endbp; bpp++)
						bdwrite(*bpp);
					free(buflist, M_SEGMENT);
					vp->v_lastw = loffset;
					vp->v_lasta = bp->b_bio2.bio_offset;
					return;
				}
			}
		}
		/*
		 * Consider beginning a cluster.  If at end of file, make
		 * cluster as large as possible, otherwise find size of
		 * existing cluster.
		 */
		if ((vp->v_type == VREG) &&
		    bp->b_loffset + lblocksize != filesize &&
		    (bp->b_bio2.bio_offset == NOOFFSET) &&
		    (VOP_BMAP(vp, loffset, NULL, &bp->b_bio2.bio_offset, &maxclen, NULL) ||
		     bp->b_bio2.bio_offset == NOOFFSET)) {
			bawrite(bp);
			vp->v_clen = 0;
			vp->v_lasta = bp->b_bio2.bio_offset;
			vp->v_cstart = loffset + lblocksize;
			vp->v_lastw = loffset;
			return;
		}
		if (maxclen > lblocksize)
			vp->v_clen = maxclen - lblocksize;
		else
			vp->v_clen = 0;
		if (!async && vp->v_clen == 0) {	/* I/O not contiguous */
			vp->v_cstart = loffset + lblocksize;
			bawrite(bp);
		} else {	/* Wait for rest of cluster */
			vp->v_cstart = loffset;
			bdwrite(bp);
		}
	} else if (loffset == vp->v_cstart + vp->v_clen) {
		/*
		 * At end of cluster, write it out if seqcount tells us we
		 * are operating sequentially, otherwise let the buf or
		 * update daemon handle it.
		 */
		bdwrite(bp);
		if (seqcount > 1)
			cluster_wbuild_wb(vp, lblocksize, vp->v_cstart,
					  vp->v_clen + lblocksize);
		vp->v_clen = 0;
		vp->v_cstart = loffset + lblocksize;
	} else if (vm_page_count_severe()) {
		/*
		 * We are low on memory, get it going NOW.
		 */
		bawrite(bp);
	} else {
		/*
		 * In the middle of a cluster, so just delay the I/O for now.
		 */
		bdwrite(bp);
	}
	vp->v_lastw = loffset;
	vp->v_lasta = bp->b_bio2.bio_offset;
}

/*
 * This is an awful lot like cluster_rbuild...wish they could be combined.
 * Scan forward from start_loffset for up to 'bytes' worth of dirty,
 * clusterable buffers and write them to disk in one or more clusters.
 */
int
cluster_wbuild(struct vnode *vp, int size, off_t start_loffset, int bytes)
{
	struct buf *bp, *tbp;
	int i, j;
	int totalwritten = 0;

	while (bytes > 0) {
		crit_enter();
		/*
		 * If the buffer is not delayed-write (i.e. dirty), or it
		 * is delayed-write but either locked or inval, it cannot
		 * partake in the clustered write.
		 */
		if (((tbp = findblk(vp, start_loffset)) == NULL) ||
		    ((tbp->b_flags & (B_LOCKED | B_INVAL | B_DELWRI)) != B_DELWRI) ||
		    BUF_LOCK(tbp, LK_EXCLUSIVE | LK_NOWAIT)) {
			start_loffset += size;
			bytes -= size;
			crit_exit();
			continue;
		}
		bremfree(tbp);
		KKASSERT(tbp->b_cmd == BUF_CMD_DONE);
		crit_exit();

		/*
		 * Extra memory in the buffer, punt on this buffer.
		 * XXX we could handle this in most cases, but we would
		 * have to push the extra memory down to after our max
		 * possible cluster size and then potentially pull it back
		 * up if the cluster was terminated prematurely--too much
		 * hassle.
		 */
		if (((tbp->b_flags & (B_CLUSTEROK|B_MALLOC)) != B_CLUSTEROK) ||
		    (tbp->b_bcount != tbp->b_bufsize) ||
		    (tbp->b_bcount != size) ||
		    (bytes == size) ||
		    ((bp = getpbuf(&cluster_pbuf_freecnt)) == NULL)) {
			totalwritten += tbp->b_bufsize;
			bawrite(tbp);
			start_loffset += size;
			bytes -= size;
			continue;
		}

		/*
		 * Set up the pbuf.  Track our append point with b_bcount
		 * and b_bufsize.  b_bufsize is not used by the device but
		 * our caller uses it to loop clusters and we use it to
		 * detect a premature EOF on the block device.
		 */
		bp->b_bcount = 0;
		bp->b_bufsize = 0;
		bp->b_xio.xio_npages = 0;
		bp->b_loffset = tbp->b_loffset;
		bp->b_bio2.bio_offset = tbp->b_bio2.bio_offset;

		/*
		 * We are synthesizing a buffer out of vm_page_t's, but
		 * if the block size is not page aligned then the starting
		 * address may not be either.  Inherit the b_data offset
		 * from the original buffer.
		 */
		bp->b_data = (char *)((vm_offset_t)bp->b_data |
		    ((vm_offset_t)tbp->b_data & PAGE_MASK));
		bp->b_flags &= ~B_ERROR;
		bp->b_flags |= B_CLUSTER | B_BNOCLIP |
		    (tbp->b_flags & (B_VMIO | B_NEEDCOMMIT | B_NOWDRAIN));
		bp->b_bio1.bio_done = cluster_callback;
		bp->b_bio1.bio_caller_info1.cluster_head = NULL;
		bp->b_bio1.bio_caller_info2.cluster_tail = NULL;
		/*
		 * From this location in the file, scan forward to see
		 * if there are buffers with adjacent data that need to
		 * be written as well.
		 */
		for (i = 0; i < bytes; (i += size), (start_loffset += size)) {
			if (i != 0) { /* If not the first buffer */
				crit_enter();
				/*
				 * If the adjacent data is not even in core it
				 * can't need to be written.
				 */
				if ((tbp = findblk(vp, start_loffset)) == NULL) {
					crit_exit();
					break;
				}

				/*
				 * If it IS in core, but has different
				 * characteristics, or is locked (which
				 * means it could be undergoing a background
				 * I/O or be in a weird state), then don't
				 * cluster with it.
				 */
				if ((tbp->b_flags & (B_VMIO | B_CLUSTEROK |
				    B_INVAL | B_DELWRI | B_NEEDCOMMIT))
				    != (B_DELWRI | B_CLUSTEROK |
				    (bp->b_flags & (B_VMIO | B_NEEDCOMMIT))) ||
				    (tbp->b_flags & B_LOCKED) ||
				    BUF_LOCK(tbp, LK_EXCLUSIVE | LK_NOWAIT)) {
					crit_exit();
					break;
				}

				/*
				 * Check that the combined cluster
				 * would make sense with regard to pages
				 * and would not be too large
				 */
				if ((tbp->b_bcount != size) ||
				    ((bp->b_bio2.bio_offset + i) !=
				     tbp->b_bio2.bio_offset) ||
				    ((tbp->b_xio.xio_npages + bp->b_xio.xio_npages) >
				     (vp->v_mount->mnt_iosize_max / PAGE_SIZE))) {
					BUF_UNLOCK(tbp);
					crit_exit();
					break;
				}
				/*
				 * Ok, it's passed all the tests,
				 * so remove it from the free list
				 * and mark it busy.  We will use it.
				 */
				bremfree(tbp);
				KKASSERT(tbp->b_cmd == BUF_CMD_DONE);
				crit_exit();
			} /* end of code for non-first buffers only */

			/*
			 * If the IO is via the VM then we do some
			 * special VM hackery (yuck).  Since the buffer's
			 * block size may not be page-aligned it is possible
			 * for a page to be shared between two buffers.  We
			 * have to get rid of the duplication when building
			 * the cluster.
			 */
			if (tbp->b_flags & B_VMIO) {
				vm_page_t m;

				if (i != 0) { /* if not first buffer */
					for (j = 0; j < tbp->b_xio.xio_npages; ++j) {
						m = tbp->b_xio.xio_pages[j];
						if (m->flags & PG_BUSY) {
							bqrelse(tbp);
							goto finishcluster;
						}
					}
				}

				for (j = 0; j < tbp->b_xio.xio_npages; ++j) {
					m = tbp->b_xio.xio_pages[j];
					vm_page_io_start(m);
					vm_object_pip_add(m->object, 1);
					if ((bp->b_xio.xio_npages == 0) ||
					    (bp->b_xio.xio_pages[bp->b_xio.xio_npages - 1] != m)) {
						bp->b_xio.xio_pages[bp->b_xio.xio_npages] = m;
						bp->b_xio.xio_npages++;
					}
				}
			}
			bp->b_bcount += size;
			bp->b_bufsize += size;

			crit_enter();
			bundirty(tbp);
			tbp->b_flags &= ~B_ERROR;
			tbp->b_flags |= B_ASYNC;
			tbp->b_cmd = BUF_CMD_WRITE;
			crit_exit();
			BUF_KERNPROC(tbp);
			cluster_append(&bp->b_bio1, tbp);

			/*
			 * check for latent dependencies to be handled
			 */
			if (LIST_FIRST(&tbp->b_dep) != NULL && bioops.io_start)
				(*bioops.io_start)(tbp);

		}
	finishcluster:
		pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
		    (vm_page_t *) bp->b_xio.xio_pages, bp->b_xio.xio_npages);
		if (bp->b_bufsize > bp->b_kvasize) {
			panic(
			    "cluster_wbuild: b_bufsize(%d) > b_kvasize(%d)\n",
			    bp->b_bufsize, bp->b_kvasize);
		}
		totalwritten += bp->b_bufsize;
		bp->b_dirtyoff = 0;
		bp->b_dirtyend = bp->b_bufsize;
		bp->b_flags |= B_ASYNC;
		bp->b_cmd = BUF_CMD_WRITE;
		vfs_busy_pages(vp, bp);
		bp->b_runningbufspace = bp->b_bufsize;
		runningbufspace += bp->b_runningbufspace;
		BUF_KERNPROC(bp);	/* B_ASYNC */
		vn_strategy(vp, &bp->b_bio1);

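		/*
		 * i is the number of bytes actually folded into the
		 * cluster by the scan above; it may be less than the
		 * full window if the forward scan stopped early.
		 */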
		bytes -= i;
	}
	return totalwritten;
}

/*
 * Collect together all the buffers in a cluster, plus add one
 * additional buffer passed in as last_bp.
 */
static struct cluster_save *
cluster_collectbufs(struct vnode *vp, struct buf *last_bp, int lblocksize)
{
	struct cluster_save *buflist;
	struct buf *bp;
	off_t loffset;
	int i, len;

	len = (int)(vp->v_lastw - vp->v_cstart + lblocksize) / lblocksize;
	buflist = malloc(sizeof(struct buf *) * (len + 1) + sizeof(*buflist),
			 M_SEGMENT, M_WAITOK);
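	/*
	 * A single allocation holds both the header and the children
	 * array; bs_children points just past the cluster_save header.
	 */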
	buflist->bs_nchildren = 0;
	buflist->bs_children = (struct buf **) (buflist + 1);
	for (loffset = vp->v_cstart, i = 0; i < len; (loffset += lblocksize), i++) {
		(void) bread(vp, loffset, last_bp->b_bcount, &bp);
		buflist->bs_children[i] = bp;
		if (bp->b_bio2.bio_offset == NOOFFSET) {
			VOP_BMAP(bp->b_vp, bp->b_loffset, NULL,
				 &bp->b_bio2.bio_offset, NULL, NULL);
		}
	}
	buflist->bs_children[i] = bp = last_bp;
	if (bp->b_bio2.bio_offset == NOOFFSET) {
		VOP_BMAP(bp->b_vp, bp->b_loffset, NULL,
			 &bp->b_bio2.bio_offset, NULL, NULL);
	}
	buflist->bs_nchildren = i + 1;
	return (buflist);
}
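/*
 * Append tbp to the cluster bio's singly-linked list of component
 * buffers.  The list head and tail live in the bio's caller-info
 * fields and are consumed by cluster_callback().
 */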
void
cluster_append(struct bio *bio, struct buf *tbp)
{
	tbp->b_cluster_next = NULL;
	if (bio->bio_caller_info1.cluster_head == NULL) {
		bio->bio_caller_info1.cluster_head = tbp;
		bio->bio_caller_info2.cluster_tail = tbp;
	} else {
		bio->bio_caller_info2.cluster_tail->b_cluster_next = tbp;
		bio->bio_caller_info2.cluster_tail = tbp;
	}
}