 * The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ufs_readwrite.c	8.11 (Berkeley) 5/8/95
 * $FreeBSD: src/sys/ufs/ufs/ufs_readwrite.c,v 1.65.2.14 2003/04/04 22:21:29 tegge Exp $
 * $DragonFly: src/sys/vfs/ufs/ufs_readwrite.c,v 1.23 2007/08/21 17:26:48 dillon Exp $
#define	BLKSIZE(a, b, c)	blksize(a, b, c)

#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vm_map.h>
#include <vm/vnode_pager.h>
#include <sys/event.h>
#include <sys/vmmeter.h>
#include <vm/vm_page2.h>

#include "opt_directio.h"
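
/*
 * Post a kqueue note on the vnode's klist.  ffs_write() uses this below to
 * deliver NOTE_WRITE/NOTE_EXTEND to EVFILT_VNODE watchers.
 */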
#define VN_KNOTE(vp, b) \
	KNOTE((struct klist *)&vp->v_pollinfo.vpi_selinfo.si_note, (b))

extern int	ffs_rawread(struct vnode *vp, struct uio *uio, int *workdone);

 * Vnode op for reading.
 *
 * ffs_read(struct vnode *a_vp, struct uio *a_uio, int a_ioflag,
 *	    struct ucred *a_cred)

ffs_read(struct vop_read_args *ap)
	int xfersize, blkoffset;
	int error, orig_resid;
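
	/*
	 * The caller's sequential-access heuristic is carried in the upper
	 * bits of a_ioflag; the ordinary IO_* flags live in the low 16 bits.
	 */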
	seqcount = ap->a_ioflag >> 16;
	ioflag = ap->a_ioflag;

	if ((ioflag & IO_DIRECT) != 0) {
		error = ffs_rawread(vp, uio, &workdone);
		if (error || workdone)
	if (uio->uio_rw != UIO_READ)
		panic("ffs_read: mode");
	if (vp->v_type == VLNK) {
		if ((int)ip->i_size < vp->v_mount->mnt_maxsymlinklen)
			panic("ffs_read: short symlink");
	} else if (vp->v_type != VREG && vp->v_type != VDIR)
		panic("ffs_read: type %d", vp->v_type);
	if ((uint64_t)uio->uio_offset > fs->fs_maxfilesize)
	orig_resid = uio->uio_resid;
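	/*
	 * orig_resid is compared against uio_resid at the end of the routine
	 * to decide whether any data was actually transferred and the access
	 * time therefore needs updating.
	 */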
	bytesinfile = ip->i_size - uio->uio_offset;
	if (bytesinfile <= 0) {
		if ((vp->v_mount->mnt_flag & MNT_NOATIME) == 0)
			ip->i_flag |= IN_ACCESS;
	 * We could not satisfy the request with a single VM operation, so
	 * loop, copying the data in smaller, buffer-sized pieces.
	for (error = 0, bp = NULL; uio->uio_resid > 0; bp = NULL) {
		if ((bytesinfile = ip->i_size - uio->uio_offset) <= 0)
		error = ffs_blkatoff_ra(vp, uio->uio_offset, NULL,
		 * If IO_DIRECT then set B_DIRECT for the buffer.  This
		 * will cause us to attempt to release the buffer later on
		 * and will cause the buffer cache to attempt to free the
		if (ioflag & IO_DIRECT)
			bp->b_flags |= B_DIRECT;
		 * We should only get non-zero b_resid when an I/O error
		 * has occurred, which should cause us to break above.
		 * However, if the short read did not cause an error,
		 * then we want to ensure that we do not uiomove bad
		 * or uninitialized data.
		 * XXX b_resid is only valid when an actual I/O has occurred
		 * and may be incorrect if the buffer is B_CACHE or if the
		 * last op on the buffer was a failed write.  This KASSERT
		 * is a precursor to removing it from the UFS code.
		KASSERT(bp->b_resid == 0, ("bp->b_resid != 0"));
		 * Calculate how much data we can copy
		blkoffset = blkoff(fs, uio->uio_offset);
		xfersize = bp->b_bufsize - blkoffset;
		if (xfersize > uio->uio_resid)
			xfersize = uio->uio_resid;
		if (xfersize > bytesinfile)
			xfersize = bytesinfile;
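		/*
		 * Illustrative numbers (assuming a full 8K filesystem
		 * block): a read starting at offset 10000 gives blkoffset
		 * 1808, so at most 8192 - 1808 = 6384 bytes can be copied
		 * out of this buffer before the uio_resid and bytesinfile
		 * clamps above apply.
		 */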
			panic("ufs_readwrite: impossible xfersize: %d",
		 * otherwise use the general form
		error = uiomove((char *)bp->b_data + blkoffset,
		if ((ioflag & (IO_VMIO|IO_DIRECT)) &&
		    (LIST_FIRST(&bp->b_dep) == NULL)) {
			 * If there are no dependencies, and it's VMIO,
			 * then we don't need the buf, mark it available
			 * for freeing.  The VM has the data.
			bp->b_flags |= B_RELBUF;
			 * Otherwise let whoever
			 * made the request take care of
			 * freeing it.  We just queue
			 * it onto another list.
	 * This can only happen in the case of an error, because the loop
	 * above resets bp to NULL on each iteration and on normal completion
	 * has not set a new value into it, so it must have come from a
	 * 'break' statement.
		if ((ioflag & (IO_VMIO|IO_DIRECT)) &&
		    (LIST_FIRST(&bp->b_dep) == NULL)) {
			bp->b_flags |= B_RELBUF;
	if ((error == 0 || uio->uio_resid != orig_resid) &&
	    (vp->v_mount->mnt_flag & MNT_NOATIME) == 0)
		ip->i_flag |= IN_ACCESS;
 * Vnode op for writing.
 *
 * ffs_write(struct vnode *a_vp, struct uio *a_uio, int a_ioflag,
 *	    struct ucred *a_cred)

ffs_write(struct vop_write_args *ap)
	int blkoffset, error, extended, flags, ioflag, resid, size, xfersize;

	seqcount = ap->a_ioflag >> 16;
	ioflag = ap->a_ioflag;

	if (uio->uio_rw != UIO_WRITE)
		panic("ffs_write: mode");
	switch (vp->v_type) {
		if (ioflag & IO_APPEND)
			uio->uio_offset = ip->i_size;
		if ((ip->i_flags & APPEND) && uio->uio_offset != ip->i_size)
		panic("ffs_write: dir write");
		panic("ffs_write: type %p %d (%d,%d)", vp, (int)vp->v_type,
		      (int)uio->uio_offset,
	if (uio->uio_offset < 0 ||
	    (uint64_t)uio->uio_offset + uio->uio_resid > fs->fs_maxfilesize) {
	 * Maybe this should be above the vnode op call, but so long as
	 * file servers have no limits, I don't think it matters.
	if (vp->v_type == VREG && td && td->td_proc &&
	    uio->uio_offset + uio->uio_resid >
	    td->td_proc->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
		lwpsignal(td->td_proc, td->td_lwp, SIGXFSZ);
	resid = uio->uio_resid;
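	/*
	 * resid records the size of the original request; it is used after
	 * the write loop to detect whether anything was written (for the
	 * setuid/setgid stripping and the kqueue note) and to back the uio
	 * out again if an IO_UNIT write fails part way through.
	 */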
	 * NOTE!  These B_ flags are actually balloc-only flags, not buffer
	 * flags.  They are similar to the BA_ flags in FreeBSD.
	if (seqcount > B_SEQMAX)
		flags = B_SEQMAX << B_SEQSHIFT;
		flags = seqcount << B_SEQSHIFT;
	if ((ioflag & IO_SYNC) && !DOINGASYNC(vp))

	for (error = 0; uio->uio_resid > 0;) {
		lbn = lblkno(fs, uio->uio_offset);
		blkoffset = blkoff(fs, uio->uio_offset);
		xfersize = fs->fs_bsize - blkoffset;
		if (uio->uio_resid < xfersize)
			xfersize = uio->uio_resid;

		if (uio->uio_offset + xfersize > ip->i_size)
			vnode_pager_setsize(vp, uio->uio_offset + xfersize);
		 * We must perform a read-before-write if the transfer
		 * size does not cover the entire buffer, or if doing
		 * a dummy write to flush the buffer.
		if (xfersize < fs->fs_bsize || uio->uio_segflg == UIO_NOCOPY)
		/* XXX is uio->uio_offset the right thing here? */
		error = VOP_BALLOC(vp, uio->uio_offset, xfersize,
				   ap->a_cred, flags, &bp);
		 * If the buffer is not valid and we did not clear garbage
		 * out above, we have to do so here even though the write
		 * covers the entire buffer in order to avoid a mmap()/write
		 * race where another process may see the garbage prior to
		 * the uiomove() for a write replacing it.
		if ((bp->b_flags & B_CACHE) == 0 && (flags & B_CLRBUF) == 0)
		if (ioflag & IO_DIRECT)
			bp->b_flags |= B_DIRECT;
		if (ioflag & IO_NOWDRAIN)
			bp->b_flags |= B_NOWDRAIN;
		if ((ioflag & (IO_SYNC|IO_INVAL)) == (IO_SYNC|IO_INVAL))
			bp->b_flags |= B_NOCACHE;

		if (uio->uio_offset + xfersize > ip->i_size) {
			ip->i_size = uio->uio_offset + xfersize;

		size = BLKSIZE(fs, ip, lbn) - bp->b_resid;
		    uiomove((char *)bp->b_data + blkoffset, (int)xfersize, uio);
		if ((ioflag & (IO_VMIO|IO_DIRECT)) &&
		    (LIST_FIRST(&bp->b_dep) == NULL)) {
			bp->b_flags |= B_RELBUF;
		 * If IO_SYNC each buffer is written synchronously.  Otherwise
		 * if we have a severe page deficiency write the buffer
		 * asynchronously.  Otherwise try to cluster, and if that
		 * doesn't do it then either do an async write (if O_DIRECT),
		 * or a delayed write (if not).
		if (ioflag & IO_SYNC) {
		} else if (vm_page_count_severe() ||
			   buf_dirty_count_severe() ||
			   (ioflag & IO_ASYNC)) {
			bp->b_flags |= B_CLUSTEROK;
		} else if (xfersize + blkoffset == fs->fs_bsize) {
			if ((vp->v_mount->mnt_flag & MNT_NOCLUSTERW) == 0) {
				bp->b_flags |= B_CLUSTEROK;
				cluster_write(bp, (off_t)ip->i_size, seqcount);
		} else if (ioflag & IO_DIRECT) {
			bp->b_flags |= B_CLUSTEROK;
			bp->b_flags |= B_CLUSTEROK;
		if (error || xfersize == 0)
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
	 * If we successfully wrote any data and we are not the superuser,
	 * we clear the setuid and setgid bits as a precaution against
	if (resid > uio->uio_resid && ap->a_cred && ap->a_cred->cr_uid != 0)
		ip->i_mode &= ~(ISUID | ISGID);
	if (resid > uio->uio_resid)
		VN_KNOTE(vp, NOTE_WRITE | (extended ? NOTE_EXTEND : 0));
		if (ioflag & IO_UNIT) {
			(void)ffs_truncate(vp, osize, ioflag & IO_SYNC,
			uio->uio_offset -= resid - uio->uio_resid;
			uio->uio_resid = resid;
	} else if (resid > uio->uio_resid && (ioflag & IO_SYNC)) {
		error = ffs_update(vp, 1);
ffs_getpages(struct vop_getpages_args *ap)
	off_t foff, physoffset;
	struct vnode *dp, *vp;
	vm_pindex_t pindex, firstindex;
	int bbackwards, bforwards;
	int pbackwards, pforwards;
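
	/*
	 * a_count is a byte count; round it up to whole pages to get the
	 * number of pages in the request.
	 */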
	pcount = round_page(ap->a_count) / PAGE_SIZE;
	mreq = ap->a_m[ap->a_reqpage];
	firstindex = ap->a_m[0]->pindex;
	 * If ANY DEV_BSIZE blocks are valid on a large filesystem block,
	 * then the entire page is valid.  Since the page may be mapped,
	 * user programs might reference data beyond the actual end of file
	 * occurring within the page.  We have to zero that data.
		if (mreq->valid != VM_PAGE_BITS_ALL)
			vm_page_zero_invalid(mreq, TRUE);
		for (i = 0; i < pcount; i++) {
			if (i != ap->a_reqpage) {
				vm_page_free(ap->a_m[i]);

	bsize = vp->v_mount->mnt_stat.f_iosize;
	pindex = mreq->pindex;
	foff = IDX_TO_OFF(pindex) /* + ap->a_offset should be zero */;

	if (bsize < PAGE_SIZE)
		return vnode_pager_generic_getpages(ap->a_vp, ap->a_m,
	 * foff is the file offset of the required page
	 * reqlblkno is the logical block that contains the page
	 * poff is the bytes offset of the page in the logical block
	poff = (int)(foff % bsize);
	reqoffset = foff - poff;

	if (VOP_BMAP(vp, reqoffset, &doffset, &bforwards, &bbackwards) ||
		for (i = 0; i < pcount; i++) {
			if (i != ap->a_reqpage)
				vm_page_free(ap->a_m[i]);
		if (doffset == NOOFFSET) {
			if ((mreq->flags & PG_ZERO) == 0)
				vm_page_zero_fill(mreq);
			vm_page_undirty(mreq);
			mreq->valid = VM_PAGE_BITS_ALL;
			return VM_PAGER_ERROR;

	physoffset = doffset + poff;
	pagesperblock = bsize / PAGE_SIZE;
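	/*
	 * bsize is at least PAGE_SIZE here (the small-block case bailed out
	 * to the generic pager above); e.g. a 16K filesystem block with 4K
	 * pages gives pagesperblock == 4.
	 */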
	 * find the first page that is contiguous.
	 *
	 * bforwards and bbackwards are the number of contiguous bytes
	 * available before and after the block offset.  poff is the page
	 * offset, in bytes, relative to the block offset.
	 *
	 * pforwards and pbackwards are the number of contiguous pages
	 * relative to the requested page, non-inclusive of the requested
	 * page (so a pbackwards and pforwards of 0 indicates just the
	 * Calculate pbackwards and clean up any requested
	 * pages that are too far back.
	pbackwards = (poff + bbackwards) >> PAGE_SHIFT;
	if (ap->a_reqpage > pbackwards) {
		firstpage = ap->a_reqpage - pbackwards;
		for (i = 0; i < firstpage; i++)
			vm_page_free(ap->a_m[i]);

	 * Calculate pforwards
	pforwards = (bforwards - poff - PAGE_SIZE) >> PAGE_SHIFT;
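	/*
	 * Worked example (illustrative values): with 4K pages, poff = 8192
	 * and bforwards = 16384 yield
	 * pforwards = (16384 - 8192 - 4096) >> PAGE_SHIFT = 1,
	 * i.e. one whole page is contiguous beyond the requested page.
	 */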
	if (pforwards < (pcount - (ap->a_reqpage + 1))) {
		for (i = ap->a_reqpage + pforwards + 1; i < pcount; i++)
			vm_page_free(ap->a_m[i]);
		pcount = ap->a_reqpage + pforwards + 1;

	 * Adjust pcount to be relative to firstpage.  All pages prior
	 * to firstpage in the array have been cleaned up.

	 * calculate the size of the transfer
	size = pcount * PAGE_SIZE;
	if ((IDX_TO_OFF(ap->a_m[firstpage]->pindex) + size) > vp->v_filesize) {
		size = vp->v_filesize - IDX_TO_OFF(ap->a_m[firstpage]->pindex);

	dp = VTOI(ap->a_vp)->i_devvp;
	rtval = VOP_GETPAGES(dp, &ap->a_m[firstpage], size,
			     (ap->a_reqpage - firstpage), physoffset);
 * XXX By default, wimp out... note that a_offset is ignored (and always

ffs_putpages(struct vop_putpages_args *ap)
	return vnode_pager_generic_putpages(ap->a_vp, ap->a_m, ap->a_count,
					    ap->a_sync, ap->a_rtvals);