/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_vnops.c	8.2 (Berkeley) 1/21/94
 * $FreeBSD: src/sys/kern/vfs_vnops.c,v 1.87.2.13 2002/12/29 18:19:53 dillon Exp $
 * $DragonFly: src/sys/kern/vfs_vnops.c,v 1.41 2006/06/13 08:12:03 dillon Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/nlookup.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/filio.h>
#include <sys/ttycom.h>
#include <sys/conf.h>
#include <sys/syslog.h>
static int vn_closefile (struct file *fp);
static int vn_ioctl (struct file *fp, u_long com, caddr_t data,
		struct ucred *cred);
static int vn_read (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags);
static int svn_read (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags);
static int vn_poll (struct file *fp, int events, struct ucred *cred);
static int vn_kqfilter (struct file *fp, struct knote *kn);
static int vn_statfile (struct file *fp, struct stat *sb, struct ucred *cred);
static int vn_write (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags);
static int svn_write (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags);
struct fileops vnode_fileops = {
	vn_read, vn_write, vn_ioctl, vn_poll, vn_kqfilter,
	vn_statfile, vn_closefile, nofo_shutdown
};

struct fileops specvnode_fileops = {
	svn_read, svn_write, vn_ioctl, vn_poll, vn_kqfilter,
	vn_statfile, vn_closefile, nofo_shutdown
};
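/*
 * Restored declaration (the line was lost in extraction): vn_setspecops()
 * below tests vfs_fastdev, which in the original source is a sysctl
 * tunable (vfs.fastdev) that enables the device fast path.
 */
int vfs_fastdev = 1;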
/*
 * Shortcut the device read/write.  This avoids a lot of vnode junk.
 * Basically the specfs vnops for read and write take the locked vnode,
 * unlock it (because we can't hold the vnode locked while reading or writing
 * a device which may block indefinitely), issue the device operation, then
 * relock the vnode before returning, plus other junk.  This bypasses all
 * of that and just does the device operation.
 */
void
vn_setspecops(struct file *fp)
{
	if (vfs_fastdev && fp->f_ops == &vnode_fileops) {
		fp->f_ops = &specvnode_fileops;
	}
}
/*
 * Common code for vnode open operations.  Check permissions, and call
 * the VOP_NOPEN or VOP_NCREATE routine.
 *
 * The caller is responsible for setting up nd with nlookup_init() and
 * for cleaning it up with nlookup_done(), whether we return an error
 * or not.
 *
 * On success nd->nl_open_vp will hold a referenced and, if requested,
 * locked vnode.  A locked vnode is requested via NLC_LOCKVP.  If fp
 * is non-NULL the vnode will be installed in the file pointer.
 *
 * NOTE: The vnode is referenced just once on return whether or not it
 * is also installed in the file pointer.
 */
int
vn_open(struct nlookupdata *nd, struct file *fp, int fmode, int cmode)
{
	struct vnode *vp;
	struct ucred *cred = nd->nl_cred;
	struct vattr vat;
	struct vattr *vap = &vat;
	struct namecache *ncp;
	int mode, error;
	/*
	 * Lookup the path and create or obtain the vnode.  After a
	 * successful lookup a locked nd->nl_ncp will be returned.
	 *
	 * The result of this section should be a locked vnode.
	 *
	 * XXX with only a little work we should be able to avoid locking
	 * the vnode if FWRITE, O_CREAT, and O_TRUNC are *not* set.
	 */
	if (fmode & O_CREAT) {
		/*
		 * CONDITIONAL CREATE FILE CASE
		 *
		 * Setting NLC_CREATE causes a negative hit to store
		 * the negative hit ncp and not return an error.  Then
		 * nc_error or nc_vp may be checked to see if the ncp
		 * represents a negative hit.  NLC_CREATE also requires
		 * write permission on the governing directory or EPERM
		 * is returned.
		 */
		if ((fmode & O_EXCL) == 0 && (fmode & O_NOFOLLOW) == 0)
			nd->nl_flags |= NLC_FOLLOW;
		nd->nl_flags |= NLC_CREATE;
		bwillwrite();
	} else {
		/*
		 * NORMAL OPEN FILE CASE
		 */
	}

	error = nlookup(nd);
	if (error)
		return (error);
	ncp = nd->nl_ncp;

	/*
	 * split case to allow us to re-resolve and retry the ncp in case
	 * we get an ESTALE error.
	 */
again:
	if (fmode & O_CREAT) {
		if (ncp->nc_vp == NULL) {
			VATTR_NULL(vap);
			vap->va_type = VREG;
			vap->va_mode = cmode;
			if (fmode & O_EXCL)
				vap->va_vaflags |= VA_EXCLUSIVE;
			error = VOP_NCREATE(ncp, &vp, nd->nl_cred, vap);
			if (error)
				return (error);
			fmode &= ~O_TRUNC;
			ASSERT_VOP_LOCKED(vp, "create");
			/* locked vnode is returned */
		} else {
			if (fmode & O_EXCL) {
				error = EEXIST;
			} else {
				error = cache_vget(ncp, cred,
						   LK_EXCLUSIVE, &vp);
			}
			if (error)
				return (error);
			fmode &= ~O_CREAT;
		}
	} else {
		error = cache_vget(ncp, cred, LK_EXCLUSIVE, &vp);
		if (error)
			return (error);
	}
	/*
	 * We have a locked vnode and ncp now.  Note that the ncp will
	 * be cleaned up by the caller if nd->nl_ncp is left intact.
	 */
	if (vp->v_type == VLNK) {
		error = EMLINK;
		goto bad;
	}
	if (vp->v_type == VSOCK) {
		error = EOPNOTSUPP;
		goto bad;
	}
	if ((fmode & O_CREAT) == 0) {
		mode = 0;
		if (fmode & (FWRITE | O_TRUNC)) {
			if (vp->v_type == VDIR) {
				error = EISDIR;
				goto bad;
			}
			error = vn_writechk(vp);
			if (error) {
				/*
				 * Special stale handling, re-resolve the
				 * vnode.
				 */
				if (error == ESTALE) {
					vput(vp);
					vp = NULL;
					cache_setunresolved(ncp);
					error = cache_resolve(ncp, cred);
					if (error == 0)
						goto again;
				}
				goto bad;
			}
			mode |= VWRITE;
		}
		if (fmode & FREAD)
			mode |= VREAD;
		if (mode) {
			error = VOP_ACCESS(vp, mode, cred);
			if (error) {
				/*
				 * Special stale handling, re-resolve the
				 * vnode.
				 */
				if (error == ESTALE) {
					vput(vp);
					vp = NULL;
					cache_setunresolved(ncp);
					error = cache_resolve(ncp, cred);
					if (error == 0)
						goto again;
				}
				goto bad;
			}
		}
	}
	if (fmode & O_TRUNC) {
		VOP_UNLOCK(vp, 0);			/* XXX */
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);	/* XXX */
		VATTR_NULL(vap);
		vap->va_size = 0;
		error = VOP_SETATTR(vp, vap, cred);
		if (error)
			goto bad;
	}
	/*
	 * Setup the fp so VOP_OPEN can override it.  No descriptor has been
	 * associated with the fp yet so we own it clean.  f_ncp inherits
	 * nl_ncp.
	 */
	if (fp) {
		if (vp->v_type == VDIR) {
			fp->f_ncp = nd->nl_ncp;
			nd->nl_ncp = NULL;
			cache_unlock(fp->f_ncp);
		}
	}

	/*
	 * Get rid of nl_ncp.  vn_open does not return it (it returns the
	 * vnode or the file pointer).  Note: we can't leave nl_ncp locked
	 * through the VOP_OPEN anyway since the VOP_OPEN may block, e.g.
	 * on the tty.
	 */
	if (nd->nl_ncp) {
		cache_put(nd->nl_ncp);
		nd->nl_ncp = NULL;
	}
	error = VOP_OPEN(vp, fmode, cred, fp);
	if (error) {
		/*
		 * Setting f_ops to &badfileops will prevent the descriptor
		 * code from trying to close and release the vnode.  Since
		 * the open failed we do not want to call close.
		 */
		if (fp) {
			fp->f_data = NULL;
			fp->f_ops = &badfileops;
		}
		goto bad;
	}

	/*
	 * Assert that VREG files have been setup for vmio.
	 */
	KASSERT(vp->v_type != VREG || vp->v_object != NULL,
		("vn_open: regular file was not VMIO enabled!"));

	/*
	 * Return the vnode.  XXX needs some cleaning up.  The vnode is
	 * only returned in the fp == NULL case.
	 */
	if (fp == NULL) {
		nd->nl_open_vp = vp;
		nd->nl_vp_fmode = fmode;
		if ((nd->nl_flags & NLC_LOCKVP) == 0)
			VOP_UNLOCK(vp, 0);
	}
	return (0);
bad:
	if (vp)
		vput(vp);
	return (error);
}
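/*
 * Typical vn_open() calling pattern, per the comments above (a sketch,
 * not code from this file; error handling abbreviated):
 *
 *	struct nlookupdata nd;
 *	int error;
 *
 *	error = nlookup_init(&nd, path, UIO_SYSSPACE, NLC_FOLLOW);
 *	if (error == 0)
 *		error = vn_open(&nd, NULL, FREAD, 0);
 *	nlookup_done(&nd);
 */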
/*
 * Check for write permissions on the specified vnode.
 * Prototype text segments cannot be written.
 */
int
vn_writechk(struct vnode *vp)
{
	/*
	 * If there's shared text associated with
	 * the vnode, try to free it up once.  If
	 * we fail, we can't allow writing.
	 */
	if (vp->v_flag & VTEXT)
		return (ETXTBSY);
	return (0);
}
/*
 * Vnode close call
 */
int
vn_close(struct vnode *vp, int flags)
{
	int error;

	if ((error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY)) == 0) {
		error = VOP_CLOSE(vp, flags);
		VOP_UNLOCK(vp, 0);
	}
	vrele(vp);
	return (error);
}
static int
sequential_heuristic(struct uio *uio, struct file *fp)
{
	/*
	 * Sequential heuristic - detect sequential operation
	 */
	if ((uio->uio_offset == 0 && fp->f_seqcount > 0) ||
	    uio->uio_offset == fp->f_nextoff) {
		int tmpseq = fp->f_seqcount;
		/*
		 * XXX we assume that the filesystem block size is
		 * the default.  Not true, but still gives us a pretty
		 * good indicator of how sequential the read operations
		 * are.
		 */
		tmpseq += (uio->uio_resid + BKVASIZE - 1) / BKVASIZE;
		if (tmpseq > IO_SEQMAX)
			tmpseq = IO_SEQMAX;
		fp->f_seqcount = tmpseq;
		return(fp->f_seqcount << IO_SEQSHIFT);
	}

	/*
	 * Not sequential, quick draw-down of seqcount
	 */
	if (fp->f_seqcount > 1)
		fp->f_seqcount = 1;
	else
		fp->f_seqcount = 0;
	return(0);
}
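/*
 * Worked example of the heuristic above (illustrative comment, not from
 * the original source): a read with uio_resid = 64KB and BKVASIZE = 16KB
 * bumps f_seqcount by 4 (rounding up).  The count, saturated at
 * IO_SEQMAX, is shifted left by IO_SEQSHIFT so it lands in the
 * sequential-hint bits of ioflag, where filesystems can use it to scale
 * read-ahead.
 */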
/*
 * Package up an I/O request on a vnode into a uio and do it.
 */
int
vn_rdwr(enum uio_rw rw, struct vnode *vp, caddr_t base, int len,
	off_t offset, enum uio_seg segflg, int ioflg,
	struct ucred *cred, int *aresid)
{
	struct uio auio;
	struct iovec aiov;
	int error;

	if ((ioflg & IO_NODELOCKED) == 0)
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_base = base;
	aiov.iov_len = len;
	auio.uio_resid = len;
	auio.uio_offset = offset;
	auio.uio_segflg = segflg;
	auio.uio_rw = rw;
	auio.uio_td = curthread;
	if (rw == UIO_READ) {
		error = VOP_READ(vp, &auio, ioflg, cred);
	} else {
		error = VOP_WRITE(vp, &auio, ioflg, cred);
	}
	if (aresid)
		*aresid = auio.uio_resid;
	else if (auio.uio_resid && error == 0)
		error = EIO;
	if ((ioflg & IO_NODELOCKED) == 0)
		VOP_UNLOCK(vp, 0);
	return (error);
}
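/*
 * Example use of vn_rdwr() (a sketch based on the signature above, not
 * code from this file): read the first sizeof(buf) bytes of a vnode
 * into a kernel buffer, with any shortfall left in resid.
 *
 *	char buf[512];
 *	int resid, error;
 *
 *	error = vn_rdwr(UIO_READ, vp, buf, sizeof(buf), (off_t)0,
 *			UIO_SYSSPACE, 0, cred, &resid);
 */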
/*
 * Package up an I/O request on a vnode into a uio and do it.  The I/O
 * request is split up into smaller chunks and we try to avoid saturating
 * the buffer cache while potentially holding a vnode locked, so we
 * check bwillwrite() before calling vn_rdwr().  We also call uio_yield()
 * to give other processes a chance to lock the vnode (either other processes
 * core'ing the same binary, or unrelated processes scanning the directory).
 */
int
vn_rdwr_inchunks(enum uio_rw rw, struct vnode *vp, caddr_t base, int len,
	off_t offset, enum uio_seg segflg, int ioflg,
	struct ucred *cred, int *aresid)
{
	int error = 0;

	do {
		int chunk;

		/*
		 * Force `offset' to a multiple of MAXBSIZE except possibly
		 * for the first chunk, so that filesystems only need to
		 * write full blocks except possibly for the first and last
		 * chunks.
		 */
		chunk = MAXBSIZE - (uoff_t)offset % MAXBSIZE;

		if (chunk > len)
			chunk = len;
		if (rw != UIO_READ && vp->v_type == VREG)
			bwillwrite();
		error = vn_rdwr(rw, vp, base, chunk, offset, segflg,
				ioflg, cred, aresid);
		len -= chunk;	/* aresid calc already includes length */
		if (error)
			break;
		offset += chunk;
		base += chunk;
		uio_yield();
	} while (len);
	if (aresid)
		*aresid += len;
	return (error);
}
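/*
 * Worked example of the chunk calculation above (illustrative): with
 * MAXBSIZE = 65536 and offset = 100000, offset % MAXBSIZE = 34464, so
 * the first chunk is 65536 - 34464 = 31072 bytes.  That brings the
 * offset to 131072 (= 2 * MAXBSIZE), after which every chunk except
 * possibly the last is a full, aligned MAXBSIZE block.
 */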
/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
vn_read(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	struct vnode *vp;
	int error, ioflag;

	get_mplock();
	KASSERT(uio->uio_td == curthread,
	    ("uio_td %p is not td %p", uio->uio_td, curthread));
	vp = (struct vnode *)fp->f_data;
	ioflag = 0;

	if (flags & O_FBLOCKING) {
		/* ioflag &= ~IO_NDELAY; */
	} else if (flags & O_FNONBLOCKING) {
		ioflag |= IO_NDELAY;
	} else if (fp->f_flag & FNONBLOCK) {
		ioflag |= IO_NDELAY;
	}
	if (flags & O_FBUFFERED) {
		/* ioflag &= ~IO_DIRECT; */
	} else if (flags & O_FUNBUFFERED) {
		ioflag |= IO_DIRECT;
	} else if (fp->f_flag & O_DIRECT) {
		ioflag |= IO_DIRECT;
	}
	vn_lock(vp, LK_SHARED | LK_NOPAUSE | LK_RETRY);
	if ((flags & O_FOFFSET) == 0)
		uio->uio_offset = fp->f_offset;

	ioflag |= sequential_heuristic(uio, fp);

	error = VOP_READ(vp, uio, ioflag, cred);
	if ((flags & O_FOFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;
	VOP_UNLOCK(vp, 0);
	rel_mplock();
	return (error);
}
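/*
 * Note on the O_FOFFSET tests above and in the other read/write paths
 * (explanatory comment, not from the original source): when O_FOFFSET
 * is passed the caller supplies uio->uio_offset explicitly (pread/pwrite
 * style) and fp->f_offset is left alone; otherwise the implicit file
 * position is used and updated.  f_nextoff is always updated so that
 * sequential_heuristic() can detect back-to-back sequential I/O.
 */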
/*
 * Device-optimized file table vnode read routine.
 *
 * This bypasses the VOP table and talks directly to the device.  Most
 * filesystems just route to specfs and can make this optimization.
 *
 * MPALMOSTSAFE - acquires mplock
 */
static int
svn_read(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	struct vnode *vp;
	int ioflag;
	int error;
	dev_t dev;

	get_mplock();
	KASSERT(uio->uio_td == curthread,
	    ("uio_td %p is not td %p", uio->uio_td, curthread));

	vp = (struct vnode *)fp->f_data;
	if (vp == NULL || vp->v_type == VBAD) {
		error = EBADF;
		goto done;
	}
	if ((dev = vp->v_rdev) == NULL) {
		error = EBADF;
		goto done;
	}
	reference_dev(dev);

	if (uio->uio_resid == 0) {
		error = 0;
		release_dev(dev);
		goto done;
	}
	if ((flags & O_FOFFSET) == 0)
		uio->uio_offset = fp->f_offset;

	ioflag = 0;
	if (flags & O_FBLOCKING) {
		/* ioflag &= ~IO_NDELAY; */
	} else if (flags & O_FNONBLOCKING) {
		ioflag |= IO_NDELAY;
	} else if (fp->f_flag & FNONBLOCK) {
		ioflag |= IO_NDELAY;
	}
	if (flags & O_FBUFFERED) {
		/* ioflag &= ~IO_DIRECT; */
	} else if (flags & O_FUNBUFFERED) {
		ioflag |= IO_DIRECT;
	} else if (fp->f_flag & O_DIRECT) {
		ioflag |= IO_DIRECT;
	}
	ioflag |= sequential_heuristic(uio, fp);

	error = dev_dread(dev, uio, ioflag);

	release_dev(dev);
	if ((flags & O_FOFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;
done:
	rel_mplock();
	return (error);
}
/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
vn_write(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	struct vnode *vp;
	int error, ioflag;

	get_mplock();
	KASSERT(uio->uio_td == curthread,
	    ("uio_td %p is not td %p", uio->uio_td, curthread));
	vp = (struct vnode *)fp->f_data;
	if (vp->v_type == VREG)
		bwillwrite();
	vp = (struct vnode *)fp->f_data;	/* XXX needed? */

	ioflag = IO_UNIT;
	if (vp->v_type == VREG &&
	    ((fp->f_flag & O_APPEND) || (flags & O_FAPPEND))) {
		ioflag |= IO_APPEND;
	}

	if (flags & O_FBLOCKING) {
		/* ioflag &= ~IO_NDELAY; */
	} else if (flags & O_FNONBLOCKING) {
		ioflag |= IO_NDELAY;
	} else if (fp->f_flag & FNONBLOCK) {
		ioflag |= IO_NDELAY;
	}
	if (flags & O_FBUFFERED) {
		/* ioflag &= ~IO_DIRECT; */
	} else if (flags & O_FUNBUFFERED) {
		ioflag |= IO_DIRECT;
	} else if (fp->f_flag & O_DIRECT) {
		ioflag |= IO_DIRECT;
	}
	if (flags & O_FASYNCWRITE) {
		/* ioflag &= ~IO_SYNC; */
	} else if (flags & O_FSYNCWRITE) {
		ioflag |= IO_SYNC;
	} else if (fp->f_flag & O_FSYNC) {
		ioflag |= IO_SYNC;
	}

	if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS))
		ioflag |= IO_SYNC;
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	if ((flags & O_FOFFSET) == 0)
		uio->uio_offset = fp->f_offset;
	ioflag |= sequential_heuristic(uio, fp);
	error = VOP_WRITE(vp, uio, ioflag, cred);
	if ((flags & O_FOFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;
	VOP_UNLOCK(vp, 0);
	rel_mplock();
	return (error);
}
/*
 * Device-optimized file table vnode write routine.
 *
 * This bypasses the VOP table and talks directly to the device.  Most
 * filesystems just route to specfs and can make this optimization.
 *
 * MPALMOSTSAFE - acquires mplock
 */
static int
svn_write(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	struct vnode *vp;
	int ioflag;
	int error;
	dev_t dev;

	get_mplock();
	KASSERT(uio->uio_td == curthread,
	    ("uio_td %p is not td %p", uio->uio_td, curthread));

	vp = (struct vnode *)fp->f_data;
	if (vp == NULL || vp->v_type == VBAD) {
		error = EBADF;
		goto done;
	}
	if (vp->v_type == VREG)
		bwillwrite();
	vp = (struct vnode *)fp->f_data;	/* XXX needed? */

	if ((dev = vp->v_rdev) == NULL) {
		error = EBADF;
		goto done;
	}
	reference_dev(dev);

	if ((flags & O_FOFFSET) == 0)
		uio->uio_offset = fp->f_offset;

	ioflag = IO_UNIT;
	if (vp->v_type == VREG &&
	    ((fp->f_flag & O_APPEND) || (flags & O_FAPPEND))) {
		ioflag |= IO_APPEND;
	}

	if (flags & O_FBLOCKING) {
		/* ioflag &= ~IO_NDELAY; */
	} else if (flags & O_FNONBLOCKING) {
		ioflag |= IO_NDELAY;
	} else if (fp->f_flag & FNONBLOCK) {
		ioflag |= IO_NDELAY;
	}
	if (flags & O_FBUFFERED) {
		/* ioflag &= ~IO_DIRECT; */
	} else if (flags & O_FUNBUFFERED) {
		ioflag |= IO_DIRECT;
	} else if (fp->f_flag & O_DIRECT) {
		ioflag |= IO_DIRECT;
	}
	if (flags & O_FASYNCWRITE) {
		/* ioflag &= ~IO_SYNC; */
	} else if (flags & O_FSYNCWRITE) {
		ioflag |= IO_SYNC;
	} else if (fp->f_flag & O_FSYNC) {
		ioflag |= IO_SYNC;
	}

	if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS))
		ioflag |= IO_SYNC;
	ioflag |= sequential_heuristic(uio, fp);

	error = dev_dwrite(dev, uio, ioflag);

	release_dev(dev);
	if ((flags & O_FOFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;
done:
	rel_mplock();
	return (error);
}
/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
vn_statfile(struct file *fp, struct stat *sb, struct ucred *cred)
{
	struct vnode *vp;
	int error;

	get_mplock();
	vp = (struct vnode *)fp->f_data;
	error = vn_stat(vp, sb, cred);
	rel_mplock();
	return (error);
}
int
vn_stat(struct vnode *vp, struct stat *sb, struct ucred *cred)
{
	struct vattr vattr;
	struct vattr *vap;
	int error;
	u_short mode;
	dev_t dev;

	vap = &vattr;
	error = VOP_GETATTR(vp, vap);
	if (error)
		return (error);

	/*
	 * Zero the spare stat fields
	 */
	sb->st_lspare = 0;
	sb->st_qspare[0] = 0;
	sb->st_qspare[1] = 0;

	/*
	 * Copy from vattr table
	 */
	if (vap->va_fsid != VNOVAL)
		sb->st_dev = vap->va_fsid;
	else
		sb->st_dev = vp->v_mount->mnt_stat.f_fsid.val[0];
	sb->st_ino = vap->va_fileid;
	mode = vap->va_mode;
	switch (vap->va_type) {
	case VREG:
		mode |= S_IFREG;
		break;
	case VDIR:
		mode |= S_IFDIR;
		break;
	case VBLK:
		mode |= S_IFBLK;
		break;
	case VCHR:
		mode |= S_IFCHR;
		break;
	case VLNK:
		mode |= S_IFLNK;
		/* This is a cosmetic change, symlinks do not have a mode. */
		if (vp->v_mount->mnt_flag & MNT_NOSYMFOLLOW)
			mode &= ~ACCESSPERMS;	/* 0000 */
		else
			mode |= ACCESSPERMS;	/* 0777 */
		break;
	case VSOCK:
		mode |= S_IFSOCK;
		break;
	case VFIFO:
		mode |= S_IFIFO;
		break;
	default:
		return (EBADF);
	}
	sb->st_mode = mode;
	sb->st_nlink = vap->va_nlink;
	sb->st_uid = vap->va_uid;
	sb->st_gid = vap->va_gid;
	sb->st_rdev = vap->va_rdev;
	sb->st_size = vap->va_size;
	sb->st_atimespec = vap->va_atime;
	sb->st_mtimespec = vap->va_mtime;
	sb->st_ctimespec = vap->va_ctime;

	/*
	 * A VCHR and VBLK device may track the last access and last modified
	 * time independently of the filesystem.  This is particularly true
	 * because device read and write calls may bypass the filesystem.
	 */
	if (vp->v_type == VCHR || vp->v_type == VBLK) {
		if ((dev = vp->v_rdev) != NULL) {
			if (dev->si_lastread) {
				sb->st_atimespec.tv_sec = dev->si_lastread;
				sb->st_atimespec.tv_nsec = 0;
			}
			if (dev->si_lastwrite) {
				sb->st_mtimespec.tv_sec = dev->si_lastwrite;
				sb->st_mtimespec.tv_nsec = 0;
			}
		}
	}
	/*
	 * According to www.opengroup.org, the meaning of st_blksize is
	 * "a filesystem-specific preferred I/O block size for this
	 * object.  In some filesystem types, this may vary from file
	 * to file."
	 * Default to PAGE_SIZE after much discussion.
	 */
	if (vap->va_type == VREG) {
		sb->st_blksize = vap->va_blocksize;
	} else if (vn_isdisk(vp, NULL)) {
		/*
		 * XXX this is broken.  If the device is not yet open (aka
		 * stat() call, aka v_rdev == NULL), how are we supposed
		 * to get a valid block size out of it?
		 */
		if ((dev = vp->v_rdev) == NULL)
			dev = udev2dev(vp->v_udev, vp->v_type == VBLK);
		sb->st_blksize = dev->si_bsize_best;
		if (sb->st_blksize < dev->si_bsize_phys)
			sb->st_blksize = dev->si_bsize_phys;
		if (sb->st_blksize < BLKDEV_IOSIZE)
			sb->st_blksize = BLKDEV_IOSIZE;
	} else {
		sb->st_blksize = PAGE_SIZE;
	}
	sb->st_flags = vap->va_flags;
	if (suser_cred(cred, 0))
		sb->st_gen = 0;
	else
		sb->st_gen = vap->va_gen;

#if (S_BLKSIZE == 512)
	/* Optimize this case */
	sb->st_blocks = vap->va_bytes >> 9;
#else
	sb->st_blocks = vap->va_bytes / S_BLKSIZE;
#endif
	sb->st_fsmid = vap->va_fsmid;
	return (0);
}
/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
vn_ioctl(struct file *fp, u_long com, caddr_t data, struct ucred *ucred)
{
	struct vnode *vp = ((struct vnode *)fp->f_data);
	struct vattr vattr;
	int error;

	get_mplock();

	switch (vp->v_type) {
	case VREG:
	case VDIR:
		if (com == FIONREAD) {
			if ((error = VOP_GETATTR(vp, &vattr)) != 0)
				break;
			*(int *)data = vattr.va_size - fp->f_offset;
			error = 0;
			break;
		}
		if (com == FIOASYNC) {				/* XXX */
			error = 0;				/* XXX */
			break;
		}
		/* fall into ... */
	default:
	case VFIFO:
	case VCHR:
	case VBLK:
		if (com == FIODTYPE) {
			if (vp->v_type != VCHR && vp->v_type != VBLK) {
				error = ENOTTY;
				break;
			}
			*(int *)data = dev_dflags(vp->v_rdev) & D_TYPEMASK;
			error = 0;
			break;
		}
		error = VOP_IOCTL(vp, com, data, fp->f_flag, ucred);
		if (error == 0 && com == TIOCSCTTY) {
			struct proc *p = curthread->td_proc;
			struct session *sess;
			struct vnode *ttyvp;

			if (p == NULL) {
				error = ENOTTY;
				break;
			}
			sess = p->p_session;

			/* Do nothing if reassigning same control tty */
			if (sess->s_ttyvp == vp) {
				error = 0;
				break;
			}

			/* Get rid of reference to old control tty */
			ttyvp = sess->s_ttyvp;
			vref(vp);
			sess->s_ttyvp = vp;
			if (ttyvp)
				vrele(ttyvp);
		}
		break;
	}
	rel_mplock();
	return (error);
}
/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
vn_poll(struct file *fp, int events, struct ucred *cred)
{
	int error;

	get_mplock();
	error = VOP_POLL(((struct vnode *)fp->f_data), events, cred);
	rel_mplock();
	return (error);
}
/*
 * Check that the vnode is still valid, and if so
 * acquire requested lock.
 */
int
#ifndef	DEBUG_LOCKS
vn_lock(struct vnode *vp, int flags)
#else
debug_vn_lock(struct vnode *vp, int flags, const char *filename, int line)
#endif
{
	int error;

	do {
#ifdef	DEBUG_LOCKS
		vp->filename = filename;
		vp->line = line;
#endif
		error = VOP_LOCK(vp, flags | LK_NOPAUSE);
		if (error == 0)
			break;
	} while (flags & LK_RETRY);

	/*
	 * Because we (had better!) have a ref on the vnode, once it
	 * goes to VRECLAIMED state it will not be recycled until all
	 * refs go away.  So we can just check the flag.
	 */
	if (error == 0 && (vp->v_flag & VRECLAIMED)) {
		VOP_UNLOCK(vp, 0);
		error = ENOENT;
	}
	return (error);
}
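/*
 * Usage note (explanatory comment, not from the original source):
 * callers in this file follow the pattern
 *
 *	if ((error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY)) == 0) {
 *		...
 *		VOP_UNLOCK(vp, 0);
 *	}
 *
 * With LK_RETRY the lock attempt is repeated until it succeeds, and the
 * VRECLAIMED check above converts a successful lock on a reclaimed
 * vnode into ENOENT rather than handing back a dead vnode.
 */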
/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
vn_closefile(struct file *fp)
{
	int error;

	get_mplock();
	fp->f_ops = &badfileops;
	error = vn_close(((struct vnode *)fp->f_data), fp->f_flag);
	rel_mplock();
	return (error);
}
/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
vn_kqfilter(struct file *fp, struct knote *kn)
{
	int error;

	get_mplock();
	error = VOP_KQFILTER(((struct vnode *)fp->f_data), kn);
	rel_mplock();
	return (error);
}