/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_vnops.c	8.2 (Berkeley) 1/21/94
 * $FreeBSD: src/sys/kern/vfs_vnops.c,v 1.87.2.13 2002/12/29 18:19:53 dillon Exp $
 * $DragonFly: src/sys/kern/vfs_vnops.c,v 1.48 2006/09/18 18:19:33 dillon Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/nlookup.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/filio.h>
#include <sys/ttycom.h>
#include <sys/conf.h>
#include <sys/syslog.h>
static int vn_closefile (struct file *fp);
static int vn_ioctl (struct file *fp, u_long com, caddr_t data,
		struct ucred *cred);
static int vn_read (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags);
static int svn_read (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags);
static int vn_poll (struct file *fp, int events, struct ucred *cred);
static int vn_kqfilter (struct file *fp, struct knote *kn);
static int vn_statfile (struct file *fp, struct stat *sb, struct ucred *cred);
static int vn_write (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags);
static int svn_write (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags);
struct fileops vnode_fileops = {
	.fo_read = vn_read,
	.fo_write = vn_write,
	.fo_ioctl = vn_ioctl,
	.fo_poll = vn_poll,
	.fo_kqfilter = vn_kqfilter,
	.fo_stat = vn_statfile,
	.fo_close = vn_closefile,
	.fo_shutdown = nofo_shutdown
};

struct fileops specvnode_fileops = {
	.fo_read = svn_read,
	.fo_write = svn_write,
	.fo_ioctl = vn_ioctl,
	.fo_poll = vn_poll,
	.fo_kqfilter = vn_kqfilter,
	.fo_stat = vn_statfile,
	.fo_close = vn_closefile,
	.fo_shutdown = nofo_shutdown
};
/*
 * Shortcut the device read/write.  This avoids a lot of vnode junk.
 * Basically the specfs vnops for read and write take the locked vnode,
 * unlock it (because we can't hold the vnode locked while reading or writing
 * a device which may block indefinitely), issue the device operation, then
 * relock the vnode before returning, plus other junk.  This bypasses all
 * of that and just does the device operation.
 */
void
vn_setspecops(struct file *fp)
{
	if (vfs_fastdev && fp->f_ops == &vnode_fileops) {
		fp->f_ops = &specvnode_fileops;
	}
}
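/*
 * Example (illustrative sketch, hypothetical caller): once a device
 * vnode has been installed in a file pointer by the open path, the
 * fast device ops can be switched in.  The helper below is an
 * assumption for illustration only, not a caller from this file.
 */
#if 0
static void
example_open_fastpath(struct file *fp, struct vnode *vp)
{
	if (vp->v_type == VCHR || vp->v_type == VBLK)
		vn_setspecops(fp);	/* may swap in specvnode_fileops */
}
#endif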
/*
 * Common code for vnode open operations.  Check permissions, and call
 * the VOP_NOPEN or VOP_NCREATE routine.
 *
 * The caller is responsible for setting up nd with nlookup_init() and
 * for cleaning it up with nlookup_done(), whether we return an error
 * or not.
 *
 * On success nd->nl_open_vp will hold a referenced and, if requested,
 * locked vnode.  A locked vnode is requested via NLC_LOCKVP.  If fp
 * is non-NULL the vnode will be installed in the file pointer.
 *
 * NOTE: The vnode is referenced just once on return whether or not it
 * is also installed in the file pointer.
 */
int
vn_open(struct nlookupdata *nd, struct file *fp, int fmode, int cmode)
{
	struct vnode *vp;
	struct ucred *cred = nd->nl_cred;
	struct vattr vat;
	struct vattr *vap = &vat;
	struct namecache *ncp;
	int mode, error;
	/*
	 * Lookup the path and create or obtain the vnode.  After a
	 * successful lookup a locked nd->nl_ncp will be returned.
	 *
	 * The result of this section should be a locked vnode.
	 *
	 * XXX with only a little work we should be able to avoid locking
	 * the vnode if FWRITE, O_CREAT, and O_TRUNC are *not* set.
	 */
	if (fmode & O_CREAT) {
		/*
		 * CONDITIONAL CREATE FILE CASE
		 *
		 * Setting NLC_CREATE causes a negative hit to store
		 * the negative hit ncp and not return an error.  Then
		 * nc_error or nc_vp may be checked to see if the ncp
		 * represents a negative hit.  NLC_CREATE also requires
		 * write permission on the governing directory or EPERM
		 * is returned.
		 */
		if ((fmode & O_EXCL) == 0 && (fmode & O_NOFOLLOW) == 0)
			nd->nl_flags |= NLC_FOLLOW;
		nd->nl_flags |= NLC_CREATE;
	} else {
		/*
		 * NORMAL OPEN FILE CASE
		 */
	}
	error = nlookup(nd);
	if (error)
		return (error);
	ncp = nd->nl_ncp;

	/*
	 * split case to allow us to re-resolve and retry the ncp in case
	 * we get ESTALE.
	 */
again:
	if (fmode & O_CREAT) {
		if (ncp->nc_vp == NULL) {
			if ((error = ncp_writechk(ncp)) != 0)
				return (error);
			VATTR_NULL(vap);
			vap->va_type = VREG;
			vap->va_mode = cmode;
			if (fmode & O_EXCL)
				vap->va_vaflags |= VA_EXCLUSIVE;
			error = VOP_NCREATE(ncp, &vp, nd->nl_cred, vap);
			if (error)
				return (error);
			fmode &= ~O_TRUNC;
			/* locked vnode is returned */
		} else {
			if (fmode & O_EXCL) {
				error = EEXIST;
			} else {
				error = cache_vget(ncp, cred,
						   LK_EXCLUSIVE, &vp);
			}
			if (error)
				return (error);
			fmode &= ~O_CREAT;
		}
	} else {
		error = cache_vget(ncp, cred, LK_EXCLUSIVE, &vp);
		if (error)
			return (error);
	}
	/*
	 * We have a locked vnode and ncp now.  Note that the ncp will
	 * be cleaned up by the caller if nd->nl_ncp is left intact.
	 */
	if (vp->v_type == VLNK) {
		error = EMLINK;
		goto bad;
	}
	if (vp->v_type == VSOCK) {
		error = EOPNOTSUPP;
		goto bad;
	}
	if ((fmode & O_CREAT) == 0) {
		mode = 0;
		if (fmode & (FWRITE | O_TRUNC)) {
			if (vp->v_type == VDIR) {
				error = EISDIR;
				goto bad;
			}
			error = vn_writechk(vp, ncp);
			if (error) {
				/*
				 * Special stale handling, re-resolve the
				 * vnode.
				 */
				if (error == ESTALE) {
					vput(vp);
					vp = NULL;
					cache_setunresolved(ncp);
					error = cache_resolve(ncp, cred);
					if (error == 0)
						goto again;
				}
				goto bad;
			}
			mode |= VWRITE;
		}
		if (fmode & FREAD)
			mode |= VREAD;
		if (mode) {
			error = VOP_ACCESS(vp, mode, cred);
			if (error) {
				/*
				 * Special stale handling, re-resolve the
				 * vnode.
				 */
				if (error == ESTALE) {
					vput(vp);
					vp = NULL;
					cache_setunresolved(ncp);
					error = cache_resolve(ncp, cred);
					if (error == 0)
						goto again;
				}
				goto bad;
			}
		}
	}
	if (fmode & O_TRUNC) {
		vn_unlock(vp);				/* XXX */
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);	/* XXX */
		VATTR_NULL(vap);
		vap->va_size = 0;
		error = VOP_SETATTR(vp, vap, cred);
		if (error)
			goto bad;
	}
	/*
	 * Setup the fp so VOP_OPEN can override it.  No descriptor has been
	 * associated with the fp yet so we own it clean.
	 *
	 * f_ncp inherits nl_ncp.  This used to be necessary only for
	 * directories but now we do it unconditionally so f*() ops
	 * such as fchmod() can access the actual namespace that was
	 * used to open the file.
	 */
	if (fp) {
		fp->f_ncp = nd->nl_ncp;
		nd->nl_ncp = NULL;
		cache_unlock(fp->f_ncp);
	}
	/*
	 * Get rid of nl_ncp.  vn_open does not return it (it returns the
	 * vnode or the file pointer).  Note: we can't leave nl_ncp locked
	 * through the VOP_OPEN anyway since the VOP_OPEN may block, e.g.
	 * on a tty open.
	 */
	if (nd->nl_ncp) {
		cache_put(nd->nl_ncp);
		nd->nl_ncp = NULL;
	}
	error = VOP_OPEN(vp, fmode, cred, fp);
	if (error) {
		/*
		 * Setting f_ops to &badfileops will prevent the descriptor
		 * code from trying to close and release the vnode.  Since
		 * the open failed we do not want to call close.
		 */
		if (fp) {
			fp->f_data = NULL;
			fp->f_ops = &badfileops;
		}
		goto bad;
	}

	/*
	 * Assert that VREG files have been setup for vmio.
	 */
	KASSERT(vp->v_type != VREG || vp->v_object != NULL,
		("vn_open: regular file was not VMIO enabled!"));
	/*
	 * Return the vnode.  XXX needs some cleaning up.  The vnode is
	 * only returned in the fp == NULL case.
	 */
	if (fp == NULL) {
		nd->nl_open_vp = vp;
		nd->nl_vp_fmode = fmode;
		if ((nd->nl_flags & NLC_LOCKVP) == 0)
			vn_unlock(vp);
	}
	return (0);
bad:
	if (vp)
		vput(vp);
	return (error);
}
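/*
 * Example (illustrative sketch) of the vn_open() calling convention
 * documented above.  The path, flags, and mode are hypothetical; note
 * that nlookup_done() is called whether or not vn_open() succeeds.
 */
#if 0
	struct nlookupdata nd;
	int error;

	error = nlookup_init(&nd, "/tmp/example", UIO_SYSSPACE, NLC_FOLLOW);
	if (error == 0)
		error = vn_open(&nd, fp, FREAD | FWRITE | O_CREAT, 0644);
	nlookup_done(&nd);	/* always clean up, error or not */
#endif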
/*
 * Check for write permissions on the specified vnode.
 */
int
vn_writechk(struct vnode *vp, struct namecache *ncp)
{
	/*
	 * If there's shared text associated with
	 * the vnode, try to free it up once.  If
	 * we fail, we can't allow writing.
	 */
	if (vp->v_flag & VTEXT)
		return (ETXTBSY);

	/*
	 * If the vnode represents a regular file, check the mount
	 * point via the ncp.  This may be a different mount point
	 * than the one embedded in the vnode (e.g. nullfs).
	 *
	 * We can still write to non-regular files (e.g. devices)
	 * via read-only mounts.
	 */
	if (ncp && vp->v_type == VREG)
		return (ncp_writechk(ncp));
	return (0);
}
/*
 * Check whether the underlying mount is read-only.  The mount point
 * referenced by the namecache may be different from the mount point
 * used by the underlying vnode in the case of NULLFS, so a separate
 * check is needed.
 */
int
ncp_writechk(struct namecache *ncp)
{
	if (ncp->nc_mount && (ncp->nc_mount->mnt_flag & MNT_RDONLY))
		return (EROFS);
	return (0);
}
/*
 * Vnode close call
 */
int
vn_close(struct vnode *vp, int flags)
{
	int error;

	if ((error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY)) == 0) {
		error = VOP_CLOSE(vp, flags);
		vn_unlock(vp);
	}
	vrele(vp);
	return (error);
}
static int
sequential_heuristic(struct uio *uio, struct file *fp)
{
	/*
	 * Sequential heuristic - detect sequential operation
	 */
	if ((uio->uio_offset == 0 && fp->f_seqcount > 0) ||
	    uio->uio_offset == fp->f_nextoff) {
		int tmpseq = fp->f_seqcount;

		/*
		 * XXX we assume that the filesystem block size is
		 * the default.  Not true, but still gives us a pretty
		 * good indicator of how sequential the read operations
		 * are.
		 */
		tmpseq += (uio->uio_resid + BKVASIZE - 1) / BKVASIZE;
		if (tmpseq > IO_SEQMAX)
			tmpseq = IO_SEQMAX;
		fp->f_seqcount = tmpseq;
		return(fp->f_seqcount << IO_SEQSHIFT);
	}

	/*
	 * Not sequential, quick draw-down of seqcount
	 */
	if (fp->f_seqcount > 1)
		fp->f_seqcount = 1;
	else
		fp->f_seqcount = 0;
	return(0);
}
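/*
 * Example (illustrative): the heuristic's return value is a hint word,
 * not a byte count.  Callers such as vn_read() below simply fold it
 * into the ioflag passed to the VOP:
 */
#if 0
	ioflag |= sequential_heuristic(uio, fp);
	error = VOP_READ(vp, uio, ioflag, cred);
#endif
/*
 * A fully sequential stream saturates f_seqcount at IO_SEQMAX, so the
 * hint bits max out at IO_SEQMAX << IO_SEQSHIFT.
 */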
/*
 * Package up an I/O request on a vnode into a uio and do it.
 */
int
vn_rdwr(enum uio_rw rw, struct vnode *vp, caddr_t base, int len,
	off_t offset, enum uio_seg segflg, int ioflg,
	struct ucred *cred, int *aresid)
{
	struct uio auio;
	struct iovec aiov;
	struct ccms_lock ccms_lock;
	int error;

	if ((ioflg & IO_NODELOCKED) == 0)
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_base = base;
	aiov.iov_len = len;
	auio.uio_resid = len;
	auio.uio_offset = offset;
	auio.uio_segflg = segflg;
	auio.uio_rw = rw;
	auio.uio_td = curthread;
	ccms_lock_get_uio(&vp->v_ccms, &ccms_lock, &auio);
	if (rw == UIO_READ) {
		error = VOP_READ(vp, &auio, ioflg, cred);
	} else {
		error = VOP_WRITE(vp, &auio, ioflg, cred);
	}
	ccms_lock_put(&vp->v_ccms, &ccms_lock);
	if (aresid)
		*aresid = auio.uio_resid;
	else if (auio.uio_resid && error == 0)
		error = EIO;
	if ((ioflg & IO_NODELOCKED) == 0)
		vn_unlock(vp);
	return (error);
}
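/*
 * Example (illustrative sketch): reading a header from a referenced
 * vnode via vn_rdwr().  The buffer and vnode are hypothetical;
 * IO_NODELOCKED is not passed, so vn_rdwr() handles the vnode lock.
 */
#if 0
	char buf[512];
	int resid;
	int error;

	error = vn_rdwr(UIO_READ, vp, (caddr_t)buf, sizeof(buf), (off_t)0,
			UIO_SYSSPACE, 0, cred, &resid);
	if (error == 0 && resid > 0)
		;	/* short read: only sizeof(buf) - resid bytes valid */
#endif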
/*
 * Package up an I/O request on a vnode into a uio and do it.  The I/O
 * request is split up into smaller chunks and we try to avoid saturating
 * the buffer cache while potentially holding a vnode locked, so we
 * check bwillwrite() before calling vn_rdwr().  We also call uio_yield()
 * to give other processes a chance to lock the vnode (either other processes
 * core'ing the same binary, or unrelated processes scanning the directory).
 */
int
vn_rdwr_inchunks(enum uio_rw rw, struct vnode *vp, caddr_t base, int len,
		 off_t offset, enum uio_seg segflg, int ioflg,
		 struct ucred *cred, int *aresid)
{
	int error = 0;

	do {
		int chunk;

		/*
		 * Force `offset' to a multiple of MAXBSIZE except possibly
		 * for the first chunk, so that filesystems only need to
		 * write full blocks except possibly for the first and last
		 * chunks.
		 */
		chunk = MAXBSIZE - (uoff_t)offset % MAXBSIZE;

		if (chunk > len)
			chunk = len;
		if (rw != UIO_READ && vp->v_type == VREG)
			bwillwrite();
		error = vn_rdwr(rw, vp, base, chunk, offset, segflg,
				ioflg, cred, aresid);
		len -= chunk;	/* aresid calc already includes length */
		if (error)
			break;
		offset += chunk;
		base += chunk;
		uio_yield();
	} while (len);
	if (aresid)
		*aresid += len;
	return (error);
}
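/*
 * Worked example of the chunking arithmetic above, assuming MAXBSIZE
 * is 65536: for offset 70000 the first chunk is
 * 65536 - (70000 % 65536) = 65536 - 4464 = 61072 bytes, which advances
 * offset to 131072, a MAXBSIZE multiple.  Every subsequent chunk is a
 * full MAXBSIZE except possibly the last.
 */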
/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
vn_read(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	struct ccms_lock ccms_lock;
	struct vnode *vp;
	int error, ioflag;

	KASSERT(uio->uio_td == curthread,
		("uio_td %p is not td %p", uio->uio_td, curthread));
	vp = (struct vnode *)fp->f_data;

	get_mplock();
	ioflag = 0;
	if (flags & O_FBLOCKING) {
		/* ioflag &= ~IO_NDELAY; */
	} else if (flags & O_FNONBLOCKING) {
		ioflag |= IO_NDELAY;
	} else if (fp->f_flag & FNONBLOCK) {
		ioflag |= IO_NDELAY;
	}
	if (flags & O_FBUFFERED) {
		/* ioflag &= ~IO_DIRECT; */
	} else if (flags & O_FUNBUFFERED) {
		ioflag |= IO_DIRECT;
	} else if (fp->f_flag & O_DIRECT) {
		ioflag |= IO_DIRECT;
	}
	vn_lock(vp, LK_SHARED | LK_RETRY);
	if ((flags & O_FOFFSET) == 0)
		uio->uio_offset = fp->f_offset;
	ioflag |= sequential_heuristic(uio, fp);

	ccms_lock_get_uio(&vp->v_ccms, &ccms_lock, uio);
	error = VOP_READ(vp, uio, ioflag, cred);
	ccms_lock_put(&vp->v_ccms, &ccms_lock);
	if ((flags & O_FOFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;
	vn_unlock(vp);
	rel_mplock();
	return (error);
}
/*
 * Device-optimized file table vnode read routine.
 *
 * This bypasses the VOP table and talks directly to the device.  Most
 * filesystems just route to specfs and can make this optimization.
 *
 * MPALMOSTSAFE - acquires mplock
 */
static int
svn_read(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	struct vnode *vp;
	int ioflag;
	int error;
	dev_t dev;

	get_mplock();
	KASSERT(uio->uio_td == curthread,
		("uio_td %p is not td %p", uio->uio_td, curthread));

	vp = (struct vnode *)fp->f_data;
	if (vp == NULL || vp->v_type == VBAD) {
		error = EBADF;
		goto done;
	}

	if ((dev = vp->v_rdev) == NULL) {
		error = EBADF;
		goto done;
	}
	reference_dev(dev);

	if (uio->uio_resid == 0) {
		error = 0;
		goto release;
	}
	if ((flags & O_FOFFSET) == 0)
		uio->uio_offset = fp->f_offset;

	ioflag = 0;
	if (flags & O_FBLOCKING) {
		/* ioflag &= ~IO_NDELAY; */
	} else if (flags & O_FNONBLOCKING) {
		ioflag |= IO_NDELAY;
	} else if (fp->f_flag & FNONBLOCK) {
		ioflag |= IO_NDELAY;
	}
	if (flags & O_FBUFFERED) {
		/* ioflag &= ~IO_DIRECT; */
	} else if (flags & O_FUNBUFFERED) {
		ioflag |= IO_DIRECT;
	} else if (fp->f_flag & O_DIRECT) {
		ioflag |= IO_DIRECT;
	}
	ioflag |= sequential_heuristic(uio, fp);

	error = dev_dread(dev, uio, ioflag);

release:
	release_dev(dev);
	if ((flags & O_FOFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;
done:
	rel_mplock();
	return (error);
}
/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
vn_write(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	struct ccms_lock ccms_lock;
	struct vnode *vp;
	int error, ioflag;

	KASSERT(uio->uio_td == curthread,
		("uio_td %p is not td %p", uio->uio_td, curthread));
	vp = (struct vnode *)fp->f_data;
	if (vp->v_type == VREG)
		bwillwrite();
	vp = (struct vnode *)fp->f_data;	/* XXX needed? */

	get_mplock();
	ioflag = IO_UNIT;
	if (vp->v_type == VREG &&
	    ((fp->f_flag & O_APPEND) || (flags & O_FAPPEND))) {
		ioflag |= IO_APPEND;
	}

	if (flags & O_FBLOCKING) {
		/* ioflag &= ~IO_NDELAY; */
	} else if (flags & O_FNONBLOCKING) {
		ioflag |= IO_NDELAY;
	} else if (fp->f_flag & FNONBLOCK) {
		ioflag |= IO_NDELAY;
	}
	if (flags & O_FBUFFERED) {
		/* ioflag &= ~IO_DIRECT; */
	} else if (flags & O_FUNBUFFERED) {
		ioflag |= IO_DIRECT;
	} else if (fp->f_flag & O_DIRECT) {
		ioflag |= IO_DIRECT;
	}
	if (flags & O_FASYNCWRITE) {
		/* ioflag &= ~IO_SYNC; */
	} else if (flags & O_FSYNCWRITE) {
		ioflag |= IO_SYNC;
	} else if (fp->f_flag & O_FSYNC) {
		ioflag |= IO_SYNC;
	}
	if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS))
		ioflag |= IO_SYNC;
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	if ((flags & O_FOFFSET) == 0)
		uio->uio_offset = fp->f_offset;
	ioflag |= sequential_heuristic(uio, fp);
	ccms_lock_get_uio(&vp->v_ccms, &ccms_lock, uio);
	error = VOP_WRITE(vp, uio, ioflag, cred);
	ccms_lock_put(&vp->v_ccms, &ccms_lock);
	if ((flags & O_FOFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;
	vn_unlock(vp);
	rel_mplock();
	return (error);
}
/*
 * Device-optimized file table vnode write routine.
 *
 * This bypasses the VOP table and talks directly to the device.  Most
 * filesystems just route to specfs and can make this optimization.
 *
 * MPALMOSTSAFE - acquires mplock
 */
static int
svn_write(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	struct vnode *vp;
	int ioflag;
	int error;
	dev_t dev;

	get_mplock();
	KASSERT(uio->uio_td == curthread,
		("uio_td %p is not td %p", uio->uio_td, curthread));

	vp = (struct vnode *)fp->f_data;
	if (vp == NULL || vp->v_type == VBAD) {
		error = EBADF;
		goto done;
	}
	if (vp->v_type == VREG)
		bwillwrite();
	vp = (struct vnode *)fp->f_data;	/* XXX needed? */

	if ((dev = vp->v_rdev) == NULL) {
		error = EBADF;
		goto done;
	}
	reference_dev(dev);

	if ((flags & O_FOFFSET) == 0)
		uio->uio_offset = fp->f_offset;

	ioflag = IO_UNIT;
	if (vp->v_type == VREG &&
	    ((fp->f_flag & O_APPEND) || (flags & O_FAPPEND))) {
		ioflag |= IO_APPEND;
	}

	if (flags & O_FBLOCKING) {
		/* ioflag &= ~IO_NDELAY; */
	} else if (flags & O_FNONBLOCKING) {
		ioflag |= IO_NDELAY;
	} else if (fp->f_flag & FNONBLOCK) {
		ioflag |= IO_NDELAY;
	}
	if (flags & O_FBUFFERED) {
		/* ioflag &= ~IO_DIRECT; */
	} else if (flags & O_FUNBUFFERED) {
		ioflag |= IO_DIRECT;
	} else if (fp->f_flag & O_DIRECT) {
		ioflag |= IO_DIRECT;
	}
	if (flags & O_FASYNCWRITE) {
		/* ioflag &= ~IO_SYNC; */
	} else if (flags & O_FSYNCWRITE) {
		ioflag |= IO_SYNC;
	} else if (fp->f_flag & O_FSYNC) {
		ioflag |= IO_SYNC;
	}
	if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS))
		ioflag |= IO_SYNC;
	ioflag |= sequential_heuristic(uio, fp);

	error = dev_dwrite(dev, uio, ioflag);

	release_dev(dev);
	if ((flags & O_FOFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;
done:
	rel_mplock();
	return (error);
}
/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
vn_statfile(struct file *fp, struct stat *sb, struct ucred *cred)
{
	struct vnode *vp;
	int error;

	get_mplock();
	vp = (struct vnode *)fp->f_data;
	error = vn_stat(vp, sb, cred);
	rel_mplock();
	return (error);
}
int
vn_stat(struct vnode *vp, struct stat *sb, struct ucred *cred)
{
	struct vattr vattr;
	struct vattr *vap = &vattr;
	int error;
	dev_t dev;

	error = VOP_GETATTR(vp, vap);
	if (error)
		return (error);

	/*
	 * Zero the spare stat fields
	 */

	/*
	 * Copy from vattr table
	 */
	if (vap->va_fsid != VNOVAL)
		sb->st_dev = vap->va_fsid;
	else
		sb->st_dev = vp->v_mount->mnt_stat.f_fsid.val[0];
	sb->st_ino = vap->va_fileid;
	sb->st_mode = vap->va_mode;
	switch (vap->va_type) {
	case VREG:
		sb->st_mode |= S_IFREG;
		break;
	case VDIR:
		sb->st_mode |= S_IFDIR;
		break;
	case VBLK:
		sb->st_mode |= S_IFBLK;
		break;
	case VCHR:
		sb->st_mode |= S_IFCHR;
		break;
	case VLNK:
		sb->st_mode |= S_IFLNK;
		/* This is a cosmetic change, symlinks do not have a mode. */
		if (vp->v_mount->mnt_flag & MNT_NOSYMFOLLOW)
			sb->st_mode &= ~ACCESSPERMS;	/* 0000 */
		else
			sb->st_mode |= ACCESSPERMS;	/* 0777 */
		break;
	case VSOCK:
		sb->st_mode |= S_IFSOCK;
		break;
	case VFIFO:
		sb->st_mode |= S_IFIFO;
		break;
	default:
		return (EBADF);
	}
	sb->st_nlink = vap->va_nlink;
	sb->st_uid = vap->va_uid;
	sb->st_gid = vap->va_gid;
	sb->st_rdev = vap->va_rdev;
	sb->st_size = vap->va_size;
	sb->st_atimespec = vap->va_atime;
	sb->st_mtimespec = vap->va_mtime;
	sb->st_ctimespec = vap->va_ctime;
	/*
	 * A VCHR and VBLK device may track the last access and last modified
	 * time independently of the filesystem.  This is particularly true
	 * because device read and write calls may bypass the filesystem.
	 */
	if (vp->v_type == VCHR || vp->v_type == VBLK) {
		if ((dev = vp->v_rdev) != NULL) {
			if (dev->si_lastread) {
				sb->st_atimespec.tv_sec = dev->si_lastread;
				sb->st_atimespec.tv_nsec = 0;
			}
			if (dev->si_lastwrite) {
				sb->st_mtimespec.tv_sec = dev->si_lastwrite;
				sb->st_mtimespec.tv_nsec = 0;
			}
		}
	}
	/*
	 * According to www.opengroup.org, the meaning of st_blksize is
	 *   "a filesystem-specific preferred I/O block size for this
	 *    object.  In some filesystem types, this may vary from file
	 *    to file."
	 * Default to PAGE_SIZE after much discussion.
	 */
	if (vap->va_type == VREG) {
		sb->st_blksize = vap->va_blocksize;
	} else if (vn_isdisk(vp, NULL)) {
		/*
		 * XXX this is broken.  If the device is not yet open (aka
		 * stat() call, aka v_rdev == NULL), how are we supposed
		 * to get a valid block size out of it?
		 */
		if ((dev = vp->v_rdev) == NULL)
			dev = udev2dev(vp->v_udev, vp->v_type == VBLK);
		sb->st_blksize = dev->si_bsize_best;
		if (sb->st_blksize < dev->si_bsize_phys)
			sb->st_blksize = dev->si_bsize_phys;
		if (sb->st_blksize < BLKDEV_IOSIZE)
			sb->st_blksize = BLKDEV_IOSIZE;
	} else {
		sb->st_blksize = PAGE_SIZE;
	}
	sb->st_flags = vap->va_flags;
	if (suser_cred(cred, 0))
		sb->st_gen = 0;
	else
		sb->st_gen = vap->va_gen;

#if (S_BLKSIZE == 512)
	/* Optimize this case */
	sb->st_blocks = vap->va_bytes >> 9;
#else
	sb->st_blocks = vap->va_bytes / S_BLKSIZE;
#endif
	sb->st_fsmid = vap->va_fsmid;
	return (0);
}
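/*
 * Worked example for the st_blocks computation above: st_blocks counts
 * S_BLKSIZE-byte (512-byte) units, so a 10000-byte file reports
 * 10000 >> 9 = 19 blocks.  The shift and the S_BLKSIZE division are
 * equivalent when S_BLKSIZE == 512.
 */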
/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
vn_ioctl(struct file *fp, u_long com, caddr_t data, struct ucred *ucred)
{
	struct vnode *vp = ((struct vnode *)fp->f_data);
	struct vattr vattr;
	int error;

	get_mplock();

	switch (vp->v_type) {
	case VREG:
	case VDIR:
		if (com == FIONREAD) {
			if ((error = VOP_GETATTR(vp, &vattr)) != 0)
				break;
			*(int *)data = vattr.va_size - fp->f_offset;
			error = 0;
			break;
		}
		if (com == FIOASYNC) {	/* XXX */
			error = 0;	/* XXX */
			break;
		}
		/* FALLTHROUGH */
	default:
	case VFIFO:
	case VCHR:
	case VBLK:
		if (com == FIODTYPE) {
			if (vp->v_type != VCHR && vp->v_type != VBLK) {
				error = ENOTTY;
				break;
			}
			*(int *)data = dev_dflags(vp->v_rdev) & D_TYPEMASK;
			error = 0;
			break;
		}
		error = VOP_IOCTL(vp, com, data, fp->f_flag, ucred);
		if (error == 0 && com == TIOCSCTTY) {
			struct proc *p = curthread->td_proc;
			struct session *sess;
			struct vnode *ovp;

			if (p == NULL) {
				error = ENOTTY;
				break;
			}
			sess = p->p_session;
			/* Do nothing if reassigning same control tty */
			if (sess->s_ttyvp == vp) {
				error = 0;
				break;
			}

			/* Get rid of reference to old control tty */
			ovp = sess->s_ttyvp;
			vref(vp);
			sess->s_ttyvp = vp;
			if (ovp)
				vrele(ovp);
		}
		break;
	}
	rel_mplock();
	return (error);
}
/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
vn_poll(struct file *fp, int events, struct ucred *cred)
{
	int error;

	get_mplock();
	error = VOP_POLL(((struct vnode *)fp->f_data), events, cred);
	rel_mplock();
	return (error);
}
/*
 * Check that the vnode is still valid, and if so
 * acquire requested lock.
 */
int
#ifndef	DEBUG_LOCKS
vn_lock(struct vnode *vp, int flags)
#else
debug_vn_lock(struct vnode *vp, int flags, const char *filename, int line)
#endif
{
	int error;

	do {
#ifdef	DEBUG_LOCKS
		vp->filename = filename;
		vp->line = line;
		error = debuglockmgr(&vp->v_lock, flags,
				     "vn_lock", filename, line);
#else
		error = lockmgr(&vp->v_lock, flags);
#endif
		if (error == 0)
			break;
	} while (flags & LK_RETRY);

	/*
	 * Because we (had better!) have a ref on the vnode, once it
	 * goes to VRECLAIMED state it will not be recycled until all
	 * refs go away.  So we can just check the flag.
	 */
	if (error == 0 && (vp->v_flag & VRECLAIMED)) {
		lockmgr(&vp->v_lock, LK_RELEASE);
		error = ENOENT;
	}
	return (error);
}
void
vn_unlock(struct vnode *vp)
{
	lockmgr(&vp->v_lock, LK_RELEASE);
}

int
vn_islocked(struct vnode *vp)
{
	return (lockstatus(&vp->v_lock, curthread));
}
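/*
 * Example (illustrative sketch): the usual pairing of vn_lock() and
 * vn_unlock() around a VOP call, assuming the caller holds a ref on
 * vp.  An ENOENT return from vn_lock() means the vnode was reclaimed.
 */
#if 0
	if ((error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY)) == 0) {
		error = VOP_GETATTR(vp, &vattr);
		vn_unlock(vp);
	}
#endif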
/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
vn_closefile(struct file *fp)
{
	int error;

	get_mplock();
	fp->f_ops = &badfileops;
	error = vn_close(((struct vnode *)fp->f_data), fp->f_flag);
	rel_mplock();
	return (error);
}
/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
vn_kqfilter(struct file *fp, struct knote *kn)
{
	int error;

	get_mplock();
	error = VOP_KQFILTER(((struct vnode *)fp->f_data), kn);
	rel_mplock();
	return (error);
}