 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ufs_readwrite.c	8.11 (Berkeley) 5/8/95
 * $FreeBSD: src/sys/ufs/ufs/ufs_readwrite.c,v 1.65.2.14 2003/04/04 22:21:29 tegge Exp $
 * $DragonFly: src/sys/vfs/ufs/ufs_readwrite.c,v 1.13 2004/10/25 19:14:34 dillon Exp $
 */
#define	BLKSIZE(a, b, c)	blksize(a, b, c)
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vm_map.h>
#include <vm/vnode_pager.h>
#include <sys/event.h>
#include <sys/vmmeter.h>
#include <vm/vm_page2.h>

#include "opt_directio.h"
#define VN_KNOTE(vp, b) \
        KNOTE((struct klist *)&vp->v_pollinfo.vpi_selinfo.si_note, (b))
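/*
 * (VN_KNOTE() delivers a vnode event to any kqueue listeners attached
 * to the vnode's knote list; ffs_write() uses it below to post
 * NOTE_WRITE and NOTE_EXTEND.)
 */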
#ifdef DIRECTIO
extern int ffs_rawread(struct vnode *vp, struct uio *uio, int *workdone);
#endif
/*
 * Vnode op for reading.
 *
 * ffs_read(struct vnode *a_vp, struct uio *a_uio, int a_ioflag,
 *	    struct ucred *a_cred)
 */
int
ffs_read(struct vop_read_args *ap)
{
        struct vnode *vp;
        struct inode *ip;
        struct uio *uio;
        struct fs *fs;
        struct buf *bp;
        vm_object_t object;
        off_t bytesinfile;
        ufs_daddr_t lbn, nextlbn;
        long size, xfersize, blkoffset;
        int error, orig_resid;
        int seqcount;
        int ioflag;

        vp = ap->a_vp;
        seqcount = ap->a_ioflag >> 16;
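        /*
         * (The upper 16 bits of a_ioflag carry the caller's sequential-
         * access heuristic; it is handed to cluster_read() below to
         * scale read-ahead.)
         */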
        ip = VTOI(vp);
        uio = ap->a_uio;
        ioflag = ap->a_ioflag;
#ifdef DIRECTIO
        if ((ioflag & IO_DIRECT) != 0) {
                int workdone;

                error = ffs_rawread(vp, uio, &workdone);
                if (error || workdone)
                        return (error);
        }
#endif
        if (uio->uio_rw != UIO_READ)
                panic("ffs_read: mode");

        if (vp->v_type == VLNK) {
                if ((int)ip->i_size < vp->v_mount->mnt_maxsymlinklen)
                        panic("ffs_read: short symlink");
        } else if (vp->v_type != VREG && vp->v_type != VDIR)
                panic("ffs_read: type %d", vp->v_type);
        fs = ip->i_fs;
        if ((uint64_t)uio->uio_offset > fs->fs_maxfilesize)
                return (EFBIG);

        orig_resid = uio->uio_resid;
        object = vp->v_object;

        bytesinfile = ip->i_size - uio->uio_offset;
        if (bytesinfile <= 0) {
                if ((vp->v_mount->mnt_flag & MNT_NOATIME) == 0)
                        ip->i_flag |= IN_ACCESS;
                return (0);
        }
        if (object)
                vm_object_reference(object);
        /*
         * Ok so we couldn't do it all in one vm trick...
         * so cycle around trying smaller bites..
         */
        for (error = 0, bp = NULL; uio->uio_resid > 0; bp = NULL) {
                if ((bytesinfile = ip->i_size - uio->uio_offset) <= 0)
                        break;

                lbn = lblkno(fs, uio->uio_offset);
                nextlbn = lbn + 1;
                /*
                 * size of buffer.  The buffer representing the
                 * end of the file is rounded up to the size of
                 * the block type (fragment or full block,
                 * depending).
                 */
                size = BLKSIZE(fs, ip, lbn);
                blkoffset = blkoff(fs, uio->uio_offset);
                /*
                 * The amount we want to transfer in this iteration is
                 * one FS block less the amount of the data before
                 * our startpoint (duh!)
                 */
                xfersize = fs->fs_bsize - blkoffset;
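                /*
                 * (Worked example, assuming an 8K block size for
                 * illustration: a read starting at uio_offset 10000
                 * gives lbn 1, blkoffset 10000 - 8192 = 1808, and an
                 * initial xfersize of 8192 - 1808 = 6384 bytes.)
                 */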
                /*
                 * But if we actually want less than the block,
                 * or the file doesn't have a whole block more of data,
                 * then use the lesser number.
                 */
                if (uio->uio_resid < xfersize)
                        xfersize = uio->uio_resid;
                if (bytesinfile < xfersize)
                        xfersize = bytesinfile;
                if (lblktosize(fs, nextlbn) >= ip->i_size) {
                        /*
                         * Don't do readahead if this is the end of the file.
                         */
                        error = bread(vp, lbn, size, &bp);
                } else if ((vp->v_mount->mnt_flag & MNT_NOCLUSTERR) == 0) {
                        /*
                         * Otherwise if we are allowed to cluster,
                         * grab as much as we can.
                         *
                         * XXX  This may not be a win if we are not
                         * doing sequential access.
                         */
                        error = cluster_read(vp, ip->i_size, lbn,
                                size, uio->uio_resid, seqcount, &bp);
                } else if (seqcount > 1) {
                        /*
                         * If we are NOT allowed to cluster, then
                         * if we appear to be acting sequentially,
                         * fire off a request for a readahead
                         * as well as a read. Note that the 4th and 5th
                         * arguments point to arrays of the size specified in
                         * the 6th argument.
                         */
                        int nextsize = BLKSIZE(fs, ip, nextlbn);
                        error = breadn(vp, lbn,
                                size, &nextlbn, &nextsize, 1, &bp);
                } else {
                        /*
                         * Failing all of the above, just read what the
                         * user asked for. Interestingly, the same as
                         * the first option above.
                         */
                        error = bread(vp, lbn, size, &bp);
                }
                if (error) {
                        brelse(bp);
                        bp = NULL;
                        break;
                }
                /*
                 * If IO_DIRECT then set B_DIRECT for the buffer.  This
                 * will cause us to attempt to release the buffer later on
                 * and will cause the buffer cache to attempt to free the
                 * underlying pages.
                 */
                if (ioflag & IO_DIRECT)
                        bp->b_flags |= B_DIRECT;
                /*
                 * We should only get non-zero b_resid when an I/O error
                 * has occurred, which should cause us to break above.
                 * However, if the short read did not cause an error,
                 * then we want to ensure that we do not uiomove bad
                 * or uninitialized data.
                 *
                 * XXX b_resid is only valid when an actual I/O has occurred
                 * and may be incorrect if the buffer is B_CACHE or if the
                 * last op on the buffer was a failed write.  This KASSERT
                 * is a precursor to removing it from the UFS code.
                 */
                KASSERT(bp->b_resid == 0, ("bp->b_resid != 0"));
                size -= bp->b_resid;
                if (size < xfersize) {
                        if (size == 0)
                                break;
                        xfersize = size;
                }

                /*
                 * Use the general uiomove() form to copy the data out.
                 */
                error = uiomove((char *)bp->b_data + blkoffset,
                                (int)xfersize, uio);
                if (error)
                        break;
                if ((ioflag & (IO_VMIO|IO_DIRECT)) &&
                    (LIST_FIRST(&bp->b_dep) == NULL)) {
                        /*
                         * If there are no dependencies, and it's VMIO,
                         * then we don't need the buf, mark it available
                         * for freeing. The VM has the data.
                         */
                        bp->b_flags |= B_RELBUF;
                        brelse(bp);
                } else {
                        /*
                         * Otherwise let whoever
                         * made the request take care of
                         * freeing it. We just queue
                         * it onto another list.
                         */
                        bqrelse(bp);
                }
        }
        /*
         * This can only happen in the case of an error because the loop
         * above resets bp to NULL on each iteration and on normal
         * completion has not set a new value into it, so it must have
         * come from a 'break' statement.
         */
        if (bp != NULL) {
                if ((ioflag & (IO_VMIO|IO_DIRECT)) &&
                    (LIST_FIRST(&bp->b_dep) == NULL)) {
                        bp->b_flags |= B_RELBUF;
                        brelse(bp);
                } else {
                        bqrelse(bp);
                }
        }
        if (object)
                vm_object_vndeallocate(object);
        if ((error == 0 || uio->uio_resid != orig_resid) &&
            (vp->v_mount->mnt_flag & MNT_NOATIME) == 0)
                ip->i_flag |= IN_ACCESS;
        return (error);
}
/*
 * Vnode op for writing.
 *
 * ffs_write(struct vnode *a_vp, struct uio *a_uio, int a_ioflag,
 *	     struct ucred *a_cred)
 */
int
ffs_write(struct vop_write_args *ap)
{
        struct vnode *vp;
        struct uio *uio;
        struct inode *ip;
        struct fs *fs;
        struct buf *bp;
        struct thread *td;
        ufs_daddr_t lbn;
        off_t osize;
        vm_object_t object;
        int seqcount;
        int blkoffset, error, extended, flags, ioflag, resid, size, xfersize;

        extended = 0;
        seqcount = ap->a_ioflag >> 16;
        ioflag = ap->a_ioflag;
        uio = ap->a_uio;
        vp = ap->a_vp;
        ip = VTOI(vp);
        object = vp->v_object;
        if (object)
                vm_object_reference(object);
        if (uio->uio_rw != UIO_WRITE)
                panic("ffs_write: mode");
        switch (vp->v_type) {
        case VREG:
                if (ioflag & IO_APPEND)
                        uio->uio_offset = ip->i_size;
                if ((ip->i_flags & APPEND) && uio->uio_offset != ip->i_size) {
                        if (object)
                                vm_object_vndeallocate(object);
                        return (EPERM);
                }
                break;
        case VLNK:
                break;
        case VDIR:
                panic("ffs_write: dir write");
                break;
        default:
                panic("ffs_write: type %p %d (%d,%d)", vp, (int)vp->v_type,
                        (int)uio->uio_offset, (int)uio->uio_resid);
        }
        fs = ip->i_fs;
        if (uio->uio_offset < 0 ||
            (uint64_t)uio->uio_offset + uio->uio_resid > fs->fs_maxfilesize) {
                if (object)
                        vm_object_vndeallocate(object);
                return (EFBIG);
        }
        /*
         * Maybe this should be above the vnode op call, but so long as
         * file servers have no limits, I don't think it matters.
         */
        td = uio->uio_td;
        if (vp->v_type == VREG && td && td->td_proc &&
            uio->uio_offset + uio->uio_resid >
            td->td_proc->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
                psignal(td->td_proc, SIGXFSZ);
                if (object)
                        vm_object_vndeallocate(object);
                return (EFBIG);
        }
        resid = uio->uio_resid;
        osize = ip->i_size;
        /*
         * NOTE! These B_ flags are actually balloc-only flags, not buffer
         * flags.  They are similar to the BA_ flags in FreeBSD.
         */
        if (seqcount > B_SEQMAX)
                flags = B_SEQMAX << B_SEQSHIFT;
        else
                flags = seqcount << B_SEQSHIFT;
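        /*
         * (The sequential-access hint thus survives into the allocator:
         * e.g. a seqcount of 5 is passed down to VOP_BALLOC() below as
         * 5 << B_SEQSHIFT in the flags word, clamped at B_SEQMAX.)
         */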
        if ((ioflag & IO_SYNC) && !DOINGASYNC(vp))
                flags |= B_SYNC;
        if (object && (object->flags & OBJ_OPT)) {
                vm_freeze_copyopts(object,
                        OFF_TO_IDX(uio->uio_offset),
                        OFF_TO_IDX(uio->uio_offset + uio->uio_resid + PAGE_MASK));
        }
        for (error = 0; uio->uio_resid > 0;) {
                lbn = lblkno(fs, uio->uio_offset);
                blkoffset = blkoff(fs, uio->uio_offset);
                xfersize = fs->fs_bsize - blkoffset;
                if (uio->uio_resid < xfersize)
                        xfersize = uio->uio_resid;
                if (uio->uio_offset + xfersize > ip->i_size)
                        vnode_pager_setsize(vp, uio->uio_offset + xfersize);
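                /*
                 * (Grow the backing VM object first so the pager will
                 * accept pages for the extended range before any data
                 * is copied in; ip->i_size itself is updated further
                 * below.)
                 */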
                /*
                 * We must perform a read-before-write if the transfer
                 * size does not cover the entire buffer.
                 */
                if (fs->fs_bsize > xfersize)
                        flags |= B_CLRBUF;
                else
                        flags &= ~B_CLRBUF;
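                /*
                 * (For example, assuming an 8K-block filesystem: a
                 * 100-byte write leaves xfersize 100 < 8192, so B_CLRBUF
                 * asks the balloc below to read or zero the rest of the
                 * block, preserving the bytes this write does not cover.)
                 */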
                /* XXX is uio->uio_offset the right thing here? */
                error = VOP_BALLOC(vp, uio->uio_offset, xfersize,
                    ap->a_cred, flags, &bp);
                if (error)
                        break;
                /*
                 * If the buffer is not valid and we did not clear garbage
                 * out above, we have to do so here even though the write
                 * covers the entire buffer in order to avoid a mmap()/write
                 * race where another process may see the garbage prior to
                 * the uiomove() for a write replacing it.
                 */
                if ((bp->b_flags & B_CACHE) == 0 && fs->fs_bsize <= xfersize)
                        vfs_bio_clrbuf(bp);
                if (ioflag & IO_DIRECT)
                        bp->b_flags |= B_DIRECT;
                if (ioflag & IO_NOWDRAIN)
                        bp->b_flags |= B_NOWDRAIN;
                if ((ioflag & (IO_SYNC|IO_INVAL)) == (IO_SYNC|IO_INVAL))
                        bp->b_flags |= B_NOCACHE;
                if (uio->uio_offset + xfersize > ip->i_size) {
                        ip->i_size = uio->uio_offset + xfersize;
                        extended = 1;
                }

                size = BLKSIZE(fs, ip, lbn) - bp->b_resid;
                if (size < xfersize)
                        xfersize = size;
                error =
                    uiomove((char *)bp->b_data + blkoffset, (int)xfersize, uio);
                if ((ioflag & (IO_VMIO|IO_DIRECT)) &&
                    (LIST_FIRST(&bp->b_dep) == NULL)) {
                        bp->b_flags |= B_RELBUF;
                }
                /*
                 * If IO_SYNC each buffer is written synchronously.  Otherwise
                 * if we have a severe page deficiency write the buffer
                 * asynchronously.  Otherwise try to cluster, and if that
                 * doesn't do it then either do an async write (if O_DIRECT),
                 * or a delayed write (if not).
                 */
                if (ioflag & IO_SYNC) {
                        (void)bwrite(bp);
                } else if (vm_page_count_severe() ||
                            buf_dirty_count_severe() ||
                            (ioflag & IO_ASYNC)) {
                        bp->b_flags |= B_CLUSTEROK;
                        bawrite(bp);
                } else if (xfersize + blkoffset == fs->fs_bsize) {
                        if ((vp->v_mount->mnt_flag & MNT_NOCLUSTERW) == 0) {
                                bp->b_flags |= B_CLUSTEROK;
                                cluster_write(bp, ip->i_size, seqcount);
                        } else {
                                bawrite(bp);
                        }
                } else if (ioflag & IO_DIRECT) {
                        bp->b_flags |= B_CLUSTEROK;
                        bawrite(bp);
                } else {
                        bp->b_flags |= B_CLUSTEROK;
                        bdwrite(bp);
                }
                if (error || xfersize == 0)
                        break;
                ip->i_flag |= IN_CHANGE | IN_UPDATE;
        }
        /*
         * If we successfully wrote any data, and we are not the superuser
         * we clear the setuid and setgid bits as a precaution against
         * tampering.
         */
        if (resid > uio->uio_resid && ap->a_cred && ap->a_cred->cr_uid != 0)
                ip->i_mode &= ~(ISUID | ISGID);
        if (resid > uio->uio_resid)
                VN_KNOTE(vp, NOTE_WRITE | (extended ? NOTE_EXTEND : 0));
        if (error) {
                if (ioflag & IO_UNIT) {
                        (void)UFS_TRUNCATE(vp, osize,
                            ioflag & IO_SYNC, ap->a_cred, uio->uio_td);
                        uio->uio_offset -= resid - uio->uio_resid;
                        uio->uio_resid = resid;
                }
        } else if (resid > uio->uio_resid && (ioflag & IO_SYNC))
                error = UFS_UPDATE(vp, 1);
        if (object)
                vm_object_vndeallocate(object);
        return (error);
}
int
ffs_getpages(struct vop_getpages_args *ap)
{
        off_t foff, physoffset;
        int i, size, bsize;
        struct vnode *dp, *vp;
        vm_object_t obj;
        vm_pindex_t pindex, firstindex;
        vm_page_t mreq;
        int bbackwards, bforwards;
        int pbackwards, pforwards;
        int firstpage;
        daddr_t reqblkno, reqlblkno;
        int poff;
        int pcount;
        int rtval;
        int pagesperblock;
        pcount = round_page(ap->a_count) / PAGE_SIZE;
        mreq = ap->a_m[ap->a_reqpage];
        firstindex = ap->a_m[0]->pindex;
        /*
         * if ANY DEV_BSIZE blocks are valid on a large filesystem block,
         * then the entire page is valid.  Since the page may be mapped,
         * user programs might reference data beyond the actual end of file
         * occurring within the page.  We have to zero that data.
         */
        if (mreq->valid) {
                if (mreq->valid != VM_PAGE_BITS_ALL)
                        vm_page_zero_invalid(mreq, TRUE);
                for (i = 0; i < pcount; i++) {
                        if (i != ap->a_reqpage) {
                                vm_page_free(ap->a_m[i]);
                        }
                }
                return VM_PAGER_OK;
        }
        vp = ap->a_vp;
        obj = vp->v_object;
        bsize = vp->v_mount->mnt_stat.f_iosize;
        pindex = mreq->pindex;
        foff = IDX_TO_OFF(pindex) /* + ap->a_offset should be zero */;
        if (bsize < PAGE_SIZE)
                return vnode_pager_generic_getpages(ap->a_vp, ap->a_m,
                                                    ap->a_count,
                                                    ap->a_reqpage);
        /*
         * foff is the file offset of the required page
         * reqlblkno is the logical block that contains the page
         * poff is the index of the page into the logical block
         */
        reqlblkno = foff / bsize;
        poff = (foff % bsize) / PAGE_SIZE;
        if (VOP_BMAP(vp, reqlblkno, &dp, &reqblkno,
            &bforwards, &bbackwards) || (reqblkno == -1)) {
                for (i = 0; i < pcount; i++) {
                        if (i != ap->a_reqpage)
                                vm_page_free(ap->a_m[i]);
                }
                if (reqblkno == -1) {
                        if ((mreq->flags & PG_ZERO) == 0)
                                vm_page_zero_fill(mreq);
                        vm_page_undirty(mreq);
                        mreq->valid = VM_PAGE_BITS_ALL;
                        return VM_PAGER_OK;
                } else {
                        return VM_PAGER_ERROR;
                }
        }
        physoffset = (off_t)reqblkno * DEV_BSIZE + poff * PAGE_SIZE;
        pagesperblock = bsize / PAGE_SIZE;
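        /*
         * (Worked example, assuming 4K pages, an 8K filesystem block,
         * and 512-byte device blocks: a request at file offset 20480
         * gives reqlblkno 2 and poff 1; if VOP_BMAP() maps block 2 to,
         * say, device block 160, then physoffset = 160 * 512 + 1 * 4096
         * = 86016, and pagesperblock = 8192 / 4096 = 2.)
         */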
        /*
         * find the first page that is contiguous...
         * note that pbackwards is the number of pages that are contiguous
         * backwards.
         */
        firstpage = 0;
        if (ap->a_reqpage > 0) {
                pbackwards = poff + bbackwards * pagesperblock;
                if (ap->a_reqpage > pbackwards) {
                        firstpage = ap->a_reqpage - pbackwards;
                        for (i = 0; i < firstpage; i++)
                                vm_page_free(ap->a_m[i]);
                }
        }
        /*
         * pforwards is the number of pages that are contiguous
         * after the current page.
         */
        pforwards = (pagesperblock - (poff + 1)) +
            bforwards * pagesperblock;
        if (pforwards < (pcount - (ap->a_reqpage + 1))) {
                for (i = ap->a_reqpage + pforwards + 1; i < pcount; i++)
                        vm_page_free(ap->a_m[i]);
                pcount = ap->a_reqpage + pforwards + 1;
        }
        /*
         * number of pages for I/O corrected for the non-contiguous pages at
         * the beginning of the array.
         */
        pcount -= firstpage;

        /*
         * calculate the size of the transfer
         */
        size = pcount * PAGE_SIZE;
        if ((IDX_TO_OFF(ap->a_m[firstpage]->pindex) + size) >
            obj->un_pager.vnp.vnp_size)
                size = obj->un_pager.vnp.vnp_size -
                    IDX_TO_OFF(ap->a_m[firstpage]->pindex);
        rtval = VOP_GETPAGES(dp, &ap->a_m[firstpage], size,
            (ap->a_reqpage - firstpage), physoffset);
        return (rtval);
}
/*
 * XXX By default, wimp out... note that a_offset is ignored (and always
 * XXX has been).
 */
int
ffs_putpages(struct vop_putpages_args *ap)
{
        return vnode_pager_generic_putpages(ap->a_vp, ap->a_m, ap->a_count,
                ap->a_sync, ap->a_rtvals);
}