 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)vfs_vnops.c	8.2 (Berkeley) 1/21/94
 * $FreeBSD: src/sys/kern/vfs_vnops.c,v 1.87.2.13 2002/12/29 18:19:53 dillon Exp $
 * $DragonFly: src/sys/kern/vfs_vnops.c,v 1.38 2006/05/05 21:15:09 dillon Exp $
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/mount.h>
#include <sys/nlookup.h>
#include <sys/vnode.h>
#include <sys/filio.h>
#include <sys/ttycom.h>
#include <sys/syslog.h>
static int vn_closefile (struct file *fp, struct thread *td);
static int vn_ioctl (struct file *fp, u_long com, caddr_t data,
		struct thread *td);
static int vn_read (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags, struct thread *td);
static int svn_read (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags, struct thread *td);
static int vn_poll (struct file *fp, int events, struct ucred *cred,
		struct thread *td);
static int vn_kqfilter (struct file *fp, struct knote *kn);
static int vn_statfile (struct file *fp, struct stat *sb, struct thread *td);
static int vn_write (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags, struct thread *td);
static int svn_write (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags, struct thread *td);
struct fileops vnode_fileops = {
	vn_read, vn_write, vn_ioctl, vn_poll, vn_kqfilter,
	vn_statfile, vn_closefile, nofo_shutdown

struct fileops specvnode_fileops = {
	svn_read, svn_write, vn_ioctl, vn_poll, vn_kqfilter,
	vn_statfile, vn_closefile, nofo_shutdown
 * Shortcut the device read/write.  This avoids a lot of vnode junk.
 * Basically the specfs vnops for read and write take the locked vnode,
 * unlock it (because we can't hold the vnode locked while reading or writing
 * a device which may block indefinitely), issue the device operation, then
 * relock the vnode before returning, plus other junk.  This bypasses all
 * of that and just does the device operation.
vn_setspecops(struct file *fp)
	if (vfs_fastdev && fp->f_ops == &vnode_fileops) {
		fp->f_ops = &specvnode_fileops;
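
/*
 * Note: once the switch above has been made, reads and writes on the
 * file go through svn_read()/svn_write() below, which call
 * dev_dread()/dev_dwrite() directly instead of taking the locked-vnode
 * VOP_READ/VOP_WRITE path.
 */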
 * Common code for vnode open operations.  Check permissions, and call
 * the VOP_OPEN or VOP_NCREATE routine.
 * The caller is responsible for setting up nd with nlookup_init() and
 * for cleaning it up with nlookup_done(), whether we return an error
 * or not.
 * On success nd->nl_open_vp will hold a referenced and, if requested,
 * locked vnode.  A locked vnode is requested via NLC_LOCKVP.  If fp
 * is non-NULL the vnode will be installed in the file pointer.
 * NOTE: The vnode is referenced just once on return whether or not it
 * is also installed in the file pointer.
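/*
 * Illustrative caller-side sketch (not part of this file): this mirrors
 * what a kern_open()-style caller is expected to do per the note above.
 * The nlookup_init() signature and the variable names are assumptions
 * for illustration only.
 *
 *	struct nlookupdata nd;
 *	int error;
 *
 *	error = nlookup_init(&nd, upath, UIO_USERSPACE, NLC_FOLLOW);
 *	if (error == 0)
 *		error = vn_open(&nd, fp, fmode, cmode);
 *	nlookup_done(&nd);	-- cleaned up whether vn_open failed or not
 */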
vn_open(struct nlookupdata *nd, struct file *fp, int fmode, int cmode)
	struct thread *td = nd->nl_td;
	struct ucred *cred = nd->nl_cred;
	struct vattr *vap = &vat;
	struct namecache *ncp;
	 * Lookup the path and create or obtain the vnode.  After a
	 * successful lookup a locked nd->nl_ncp will be returned.
	 * The result of this section should be a locked vnode.
	 * XXX with only a little work we should be able to avoid locking
	 * the vnode if FWRITE, O_CREAT, and O_TRUNC are *not* set.
	if (fmode & O_CREAT) {
		 * CONDITIONAL CREATE FILE CASE
		 * Setting NLC_CREATE causes a negative hit to store
		 * the negative hit ncp and not return an error.  Then
		 * nc_error or nc_vp may be checked to see if the ncp
		 * represents a negative hit.  NLC_CREATE also requires
		 * write permission on the governing directory or EPERM
		if ((fmode & O_EXCL) == 0 && (fmode & O_NOFOLLOW) == 0)
			nd->nl_flags |= NLC_FOLLOW;
		nd->nl_flags |= NLC_CREATE;
	 * NORMAL OPEN FILE CASE
	 * split case to allow us to re-resolve and retry the ncp in case
	if (fmode & O_CREAT) {
		if (ncp->nc_vp == NULL) {
			vap->va_mode = cmode;
			vap->va_vaflags |= VA_EXCLUSIVE;
			error = VOP_NCREATE(ncp, &vp, nd->nl_cred, vap);
			ASSERT_VOP_LOCKED(vp, "create");
			/* locked vnode is returned */
		if (fmode & O_EXCL) {
			error = cache_vget(ncp, cred,
			error = cache_vget(ncp, cred, LK_EXCLUSIVE, &vp);
	 * We have a locked vnode and ncp now.  Note that the ncp will
	 * be cleaned up by the caller if nd->nl_ncp is left intact.
	if (vp->v_type == VLNK) {
	if (vp->v_type == VSOCK) {
	if ((fmode & O_CREAT) == 0) {
		if (fmode & (FWRITE | O_TRUNC)) {
			if (vp->v_type == VDIR) {
			error = vn_writechk(vp);
			 * Special stale handling, re-resolve the
			if (error == ESTALE) {
				cache_setunresolved(ncp);
				error = cache_resolve(ncp, cred);
		error = VOP_ACCESS(vp, mode, cred, td);
		 * Special stale handling, re-resolve the
		if (error == ESTALE) {
			cache_setunresolved(ncp);
			error = cache_resolve(ncp, cred);
	if (fmode & O_TRUNC) {
		VOP_UNLOCK(vp, 0);			/* XXX */
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);	/* XXX */
		error = VOP_SETATTR(vp, vap, cred, td);
	 * Setup the fp so VOP_OPEN can override it.  No descriptor has been
	 * associated with the fp yet so we own it clean.  f_ncp inherits
	if (vp->v_type == VDIR) {
		fp->f_ncp = nd->nl_ncp;
		cache_unlock(fp->f_ncp);
	 * Get rid of nl_ncp.  vn_open does not return it (it returns the
	 * vnode or the file pointer).  Note: we can't leave nl_ncp locked
	 * through the VOP_OPEN anyway since the VOP_OPEN may block, e.g.
	cache_put(nd->nl_ncp);
	error = VOP_OPEN(vp, fmode, cred, fp, td);
	 * setting f_ops to &badfileops will prevent the descriptor
	 * code from trying to close and release the vnode, since
	 * the open failed we do not want to call close.
	fp->f_ops = &badfileops;
	 * Assert that VREG files have been setup for vmio.
	KASSERT(vp->v_type != VREG || vp->v_object != NULL,
		("vn_open: regular file was not VMIO enabled!"));
	 * Return the vnode.  XXX needs some cleaning up.  The vnode is
	 * only returned in the fp == NULL case.
	nd->nl_vp_fmode = fmode;
	if ((nd->nl_flags & NLC_LOCKVP) == 0)
 * Check for write permissions on the specified vnode.
 * Prototype text segments cannot be written.
	 * If there's shared text associated with
	 * the vnode, try to free it up once.  If
	 * we fail, we can't allow writing.
	if (vp->v_flag & VTEXT)
vn_close(struct vnode *vp, int flags, struct thread *td)
	if ((error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY)) == 0) {
		error = VOP_CLOSE(vp, flags, td);
sequential_heuristic(struct uio *uio, struct file *fp)
	 * Sequential heuristic - detect sequential operation
	if ((uio->uio_offset == 0 && fp->f_seqcount > 0) ||
	    uio->uio_offset == fp->f_nextoff) {
		int tmpseq = fp->f_seqcount;
		 * XXX we assume that the filesystem block size is
		 * the default.  Not true, but still gives us a pretty
		 * good indicator of how sequential the read operations are.
		tmpseq += (uio->uio_resid + BKVASIZE - 1) / BKVASIZE;
		if (tmpseq > IO_SEQMAX)
			tmpseq = IO_SEQMAX;
		fp->f_seqcount = tmpseq;
		return(fp->f_seqcount << IO_SEQSHIFT);
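		/*
		 * Note: the shifted value is OR'd into ioflag by
		 * vn_read()/vn_write() below, so the sequential score
		 * (capped at IO_SEQMAX above) rides in the upper bits of
		 * ioflag as a read-ahead hint for VOP_READ/VOP_WRITE.
		 */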
	 * Not sequential, quick draw-down of seqcount
	if (fp->f_seqcount > 1)
 * Package up an I/O request on a vnode into a uio and do it.
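/*
 * Illustrative call (a sketch, not taken from this file; buf, cred, td
 * and resid are assumed caller-side names): read the first 512 bytes of
 * a vnode into a kernel buffer, letting vn_rdwr() do the locking:
 *
 *	int resid;
 *
 *	error = vn_rdwr(UIO_READ, vp, (caddr_t)buf, 512, (off_t)0,
 *			UIO_SYSSPACE, 0, cred, &resid, td);
 */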
vn_rdwr(rw, vp, base, len, offset, segflg, ioflg, cred, aresid, td)
	if ((ioflg & IO_NODELOCKED) == 0)
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	auio.uio_iov = &aiov;
	aiov.iov_base = base;
	auio.uio_resid = len;
	auio.uio_offset = offset;
	auio.uio_segflg = segflg;
	if (rw == UIO_READ) {
		error = VOP_READ(vp, &auio, ioflg, cred);
	} else {
		error = VOP_WRITE(vp, &auio, ioflg, cred);
	*aresid = auio.uio_resid;
	if (auio.uio_resid && error == 0)
	if ((ioflg & IO_NODELOCKED) == 0)
 * Package up an I/O request on a vnode into a uio and do it.  The I/O
 * request is split up into smaller chunks and we try to avoid saturating
 * the buffer cache while potentially holding a vnode locked, so we
 * check bwillwrite() before calling vn_rdwr().  We also call uio_yield()
 * to give other processes a chance to lock the vnode (either other processes
 * core'ing the same binary, or unrelated processes scanning the directory).
vn_rdwr_inchunks(rw, vp, base, len, offset, segflg, ioflg, cred, aresid, td)
	 * Force `offset' to a multiple of MAXBSIZE except possibly
	 * for the first chunk, so that filesystems only need to
	 * write full blocks except possibly for the first and last
	chunk = MAXBSIZE - (uoff_t)offset % MAXBSIZE;
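	/*
	 * Worked example: with the usual MAXBSIZE of 64KB and an initial
	 * offset of 1000, the first chunk is 65536 - 1000 = 64536 bytes,
	 * after which every subsequent chunk starts on a 64KB boundary.
	 */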
	if (rw != UIO_READ && vp->v_type == VREG)
		bwillwrite();
	error = vn_rdwr(rw, vp, base, chunk, offset, segflg,
			ioflg, cred, aresid, td);
	len -= chunk;	/* aresid calc already includes length */
 * File table vnode read routine.
vn_read(fp, uio, cred, flags, td)
	KASSERT(uio->uio_td == td, ("uio_td %p is not td %p", uio->uio_td, td));
	vp = (struct vnode *)fp->f_data;
	if (fp->f_flag & FNONBLOCK)
		ioflag |= IO_NDELAY;
	if (fp->f_flag & O_DIRECT)
		ioflag |= IO_DIRECT;
	vn_lock(vp, LK_SHARED | LK_NOPAUSE | LK_RETRY);
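	/*
	 * If the caller did not supply an explicit offset (FOF_OFFSET
	 * clear, i.e. a plain read(2)-style call rather than pread(2)),
	 * the file descriptor's implicit offset is used below and written
	 * back after the I/O completes.
	 */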
	if ((flags & FOF_OFFSET) == 0)
		uio->uio_offset = fp->f_offset;
	ioflag |= sequential_heuristic(uio, fp);
	error = VOP_READ(vp, uio, ioflag, cred);
	if ((flags & FOF_OFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;
 * Device-optimized file table vnode read routine.
 * This bypasses the VOP table and talks directly to the device.  Most
 * filesystems just route to specfs and can make this optimization.
svn_read(fp, uio, cred, flags, td)
	KASSERT(uio->uio_td == td, ("uio_td %p is not td %p", uio->uio_td, td));
	vp = (struct vnode *)fp->f_data;
	if (vp == NULL || vp->v_type == VBAD)
	if ((dev = vp->v_rdev) == NULL)
	if (uio->uio_resid == 0)
	if ((flags & FOF_OFFSET) == 0)
		uio->uio_offset = fp->f_offset;
	if (fp->f_flag & FNONBLOCK)
		ioflag |= IO_NDELAY;
	if (fp->f_flag & O_DIRECT)
		ioflag |= IO_DIRECT;
	ioflag |= sequential_heuristic(uio, fp);
	error = dev_dread(dev, uio, ioflag);
	if ((flags & FOF_OFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;
 * File table vnode write routine.
vn_write(fp, uio, cred, flags, td)
	KASSERT(uio->uio_td == td, ("uio_td %p is not td %p",
	    uio->uio_td, td));
	vp = (struct vnode *)fp->f_data;
	if (vp->v_type == VREG)
		bwillwrite();
	vp = (struct vnode *)fp->f_data;	/* XXX needed? */
	if (vp->v_type == VREG && (fp->f_flag & O_APPEND))
		ioflag |= IO_APPEND;
	if (fp->f_flag & FNONBLOCK)
		ioflag |= IO_NDELAY;
	if (fp->f_flag & O_DIRECT)
		ioflag |= IO_DIRECT;
	if ((fp->f_flag & O_FSYNC) ||
	    (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS)))
		ioflag |= IO_SYNC;
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	if ((flags & FOF_OFFSET) == 0)
		uio->uio_offset = fp->f_offset;
	ioflag |= sequential_heuristic(uio, fp);
	error = VOP_WRITE(vp, uio, ioflag, cred);
	if ((flags & FOF_OFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;
 * Device-optimized file table vnode write routine.
 * This bypasses the VOP table and talks directly to the device.  Most
 * filesystems just route to specfs and can make this optimization.
svn_write(fp, uio, cred, flags, td)
	KASSERT(uio->uio_td == td, ("uio_td %p is not td %p",
	    uio->uio_td, td));
	vp = (struct vnode *)fp->f_data;
	if (vp == NULL || vp->v_type == VBAD)
	if (vp->v_type == VREG)
		bwillwrite();
	vp = (struct vnode *)fp->f_data;	/* XXX needed? */
	if ((dev = vp->v_rdev) == NULL)
	if ((flags & FOF_OFFSET) == 0)
		uio->uio_offset = fp->f_offset;
	if (vp->v_type == VREG && (fp->f_flag & O_APPEND))
		ioflag |= IO_APPEND;
	if (fp->f_flag & FNONBLOCK)
		ioflag |= IO_NDELAY;
	if (fp->f_flag & O_DIRECT)
		ioflag |= IO_DIRECT;
	if ((fp->f_flag & O_FSYNC) ||
	    (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS)))
		ioflag |= IO_SYNC;
	ioflag |= sequential_heuristic(uio, fp);
	error = dev_dwrite(dev, uio, ioflag);
	if ((flags & FOF_OFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;
 * File table vnode stat routine.
vn_statfile(struct file *fp, struct stat *sb, struct thread *td)
	struct vnode *vp = (struct vnode *)fp->f_data;
	return vn_stat(vp, sb, td);
vn_stat(struct vnode *vp, struct stat *sb, struct thread *td)
	error = VOP_GETATTR(vp, vap, td);
	 * Zero the spare stat fields
	 * Copy from vattr table
	if (vap->va_fsid != VNOVAL)
		sb->st_dev = vap->va_fsid;
	else
		sb->st_dev = vp->v_mount->mnt_stat.f_fsid.val[0];
	sb->st_ino = vap->va_fileid;
	switch (vap->va_type) {
		/* This is a cosmetic change, symlinks do not have a mode. */
		if (vp->v_mount->mnt_flag & MNT_NOSYMFOLLOW)
			sb->st_mode &= ~ACCESSPERMS;	/* 0000 */
		else
			sb->st_mode |= ACCESSPERMS;	/* 0777 */
	sb->st_nlink = vap->va_nlink;
	sb->st_uid = vap->va_uid;
	sb->st_gid = vap->va_gid;
	sb->st_rdev = vap->va_rdev;
	sb->st_size = vap->va_size;
	sb->st_atimespec = vap->va_atime;
	sb->st_mtimespec = vap->va_mtime;
	sb->st_ctimespec = vap->va_ctime;
	 * A VCHR and VBLK device may track the last access and last modified
	 * time independently of the filesystem.  This is particularly true
	 * because device read and write calls may bypass the filesystem.
	if (vp->v_type == VCHR || vp->v_type == VBLK) {
		if ((dev = vp->v_rdev) != NULL) {
			if (dev->si_lastread) {
				sb->st_atimespec.tv_sec = dev->si_lastread;
				sb->st_atimespec.tv_nsec = 0;
			if (dev->si_lastwrite) {
				sb->st_mtimespec.tv_sec = dev->si_lastwrite;
				sb->st_mtimespec.tv_nsec = 0;
	 * According to www.opengroup.org, the meaning of st_blksize is
	 * "a filesystem-specific preferred I/O block size for this
	 * object.  In some filesystem types, this may vary from file
	 * to file."
	 * Default to PAGE_SIZE after much discussion.
	if (vap->va_type == VREG) {
		sb->st_blksize = vap->va_blocksize;
	} else if (vn_isdisk(vp, NULL)) {
		 * XXX this is broken.  If the device is not yet open (aka
		 * stat() call, aka v_rdev == NULL), how are we supposed
		 * to get a valid block size out of it?
		if ((dev = vp->v_rdev) == NULL)
			dev = udev2dev(vp->v_udev, vp->v_type == VBLK);
		sb->st_blksize = dev->si_bsize_best;
		if (sb->st_blksize < dev->si_bsize_phys)
			sb->st_blksize = dev->si_bsize_phys;
		if (sb->st_blksize < BLKDEV_IOSIZE)
			sb->st_blksize = BLKDEV_IOSIZE;
	} else {
		sb->st_blksize = PAGE_SIZE;
	}
	sb->st_flags = vap->va_flags;
	sb->st_gen = vap->va_gen;
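	/*
	 * va_bytes counts bytes of storage actually allocated to the
	 * object, while st_blocks is expressed in S_BLKSIZE (512-byte)
	 * units, hence the conversion below.
	 */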
#if (S_BLKSIZE == 512)
	/* Optimize this case */
	sb->st_blocks = vap->va_bytes >> 9;
#else
	sb->st_blocks = vap->va_bytes / S_BLKSIZE;
#endif
	sb->st_fsmid = vap->va_fsmid;
 * File table vnode ioctl routine.
vn_ioctl(struct file *fp, u_long com, caddr_t data, struct thread *td)
	struct vnode *vp = ((struct vnode *)fp->f_data);
	KKASSERT(td->td_proc != NULL);
	ucred = td->td_proc->p_ucred;
	switch (vp->v_type) {
		if (com == FIONREAD) {
			error = VOP_GETATTR(vp, &vattr, td);
			*(int *)data = vattr.va_size - fp->f_offset;
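			/*
			 * FIONREAD reports the number of bytes still
			 * readable at the descriptor's current offset,
			 * i.e. file size minus f_offset.
			 */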
		if (com == FIONBIO || com == FIOASYNC)	/* XXX */
			return (0);			/* XXX */
		if (com == FIODTYPE) {
			if (vp->v_type != VCHR && vp->v_type != VBLK)
				return (ENOTTY);
			*(int *)data = dev_dflags(vp->v_rdev) & D_TYPEMASK;
		error = VOP_IOCTL(vp, com, data, fp->f_flag, ucred, td);
		if (error == 0 && com == TIOCSCTTY) {
			struct session *sess = td->td_proc->p_session;
			/* Do nothing if reassigning same control tty */
			if (sess->s_ttyvp == vp)
			/* Get rid of reference to old control tty */
 * File table vnode poll routine.
vn_poll(struct file *fp, int events, struct ucred *cred, struct thread *td)
	return (VOP_POLL(((struct vnode *)fp->f_data), events, cred, td));
 * Check that the vnode is still valid, and if so
 * acquire the requested lock.
vn_lock(struct vnode *vp, int flags)
debug_vn_lock(struct vnode *vp, int flags, const char *filename, int line)
	vp->filename = filename;
	error = VOP_LOCK(vp, flags | LK_NOPAUSE);
	} while (flags & LK_RETRY);
	 * Because we (had better!) have a ref on the vnode, once it
	 * goes to VRECLAIMED state it will not be recycled until all
	 * refs go away.  So we can just check the flag.
	if (error == 0 && (vp->v_flag & VRECLAIMED)) {
 * File table vnode close routine.
vn_closefile(struct file *fp, struct thread *td)
	fp->f_ops = &badfileops;
	err = vn_close(((struct vnode *)fp->f_data), fp->f_flag, td);

vn_kqfilter(struct file *fp, struct knote *kn)
	return (VOP_KQFILTER(((struct vnode *)fp->f_data), kn));