/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_vnops.c	8.2 (Berkeley) 1/21/94
 * $FreeBSD: src/sys/kern/vfs_vnops.c,v 1.87.2.13 2002/12/29 18:19:53 dillon Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/proc.h>
#include <sys/priv.h>
#include <sys/mount.h>
#include <sys/nlookup.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/filio.h>
#include <sys/ttycom.h>
#include <sys/conf.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>

#include <sys/thread2.h>
#include <sys/mplock2.h>
static int vn_closefile (struct file *fp);
static int vn_ioctl (struct file *fp, u_long com, caddr_t data,
		struct ucred *cred, struct sysmsg *msg);
static int vn_read (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags);
static int vn_kqfilter (struct file *fp, struct knote *kn);
static int vn_statfile (struct file *fp, struct stat *sb, struct ucred *cred);
static int vn_write (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags);
struct fileops vnode_fileops = {
	.fo_read = vn_read,
	.fo_write = vn_write,
	.fo_ioctl = vn_ioctl,
	.fo_kqfilter = vn_kqfilter,
	.fo_stat = vn_statfile,
	.fo_close = vn_closefile,
	.fo_shutdown = nofo_shutdown
};
/*
 * Common code for vnode open operations.  Check permissions, and call
 * the VOP_NOPEN or VOP_NCREATE routine.
 *
 * The caller is responsible for setting up nd with nlookup_init() and
 * for cleaning it up with nlookup_done(), whether we return an error
 * or not.
 *
 * On success nd->nl_open_vp will hold a referenced and, if requested,
 * locked vnode.  A locked vnode is requested via NLC_LOCKVP.  If fp
 * is non-NULL the vnode will be installed in the file pointer.
 *
 * NOTE: The vnode is referenced just once on return whether or not it
 * is also installed in the file pointer.
 */
int
vn_open(struct nlookupdata *nd, struct file *fp, int fmode, int cmode)
{
	struct vnode *vp;
	struct ucred *cred = nd->nl_cred;
	struct vattr vat;
	struct vattr *vap = &vat;
	struct mount *mp;
	off_t osize;
	u_int flags;
	int error;

	/*
	 * Certain combinations are illegal
	 */
	if ((fmode & (FWRITE | O_TRUNC)) == O_TRUNC)
		return(EACCES);
	/*
	 * Lookup the path and create or obtain the vnode.  After a
	 * successful lookup a locked nd->nl_nch will be returned.
	 *
	 * The result of this section should be a locked vnode.
	 *
	 * XXX with only a little work we should be able to avoid locking
	 * the vnode if FWRITE, O_CREAT, and O_TRUNC are *not* set.
	 */
	nd->nl_flags |= NLC_OPEN;
	if (fmode & O_APPEND)
		nd->nl_flags |= NLC_APPEND;
	if (fmode & O_TRUNC)
		nd->nl_flags |= NLC_TRUNCATE;
	if (fmode & FREAD)
		nd->nl_flags |= NLC_READ;
	if (fmode & FWRITE)
		nd->nl_flags |= NLC_WRITE;
	if ((fmode & O_EXCL) == 0 && (fmode & O_NOFOLLOW) == 0)
		nd->nl_flags |= NLC_FOLLOW;
	if (fmode & O_CREAT) {
		/*
		 * CONDITIONAL CREATE FILE CASE
		 *
		 * Setting NLC_CREATE causes a negative hit to store
		 * the negative hit ncp and not return an error.  Then
		 * nc_error or nc_vp may be checked to see if the ncp
		 * represents a negative hit.  NLC_CREATE also requires
		 * write permission on the governing directory or EPERM
		 * is returned.
		 */
		nd->nl_flags |= NLC_CREATE;
		nd->nl_flags |= NLC_REFDVP;
		error = nlookup(nd);
	} else {
		/*
		 * NORMAL OPEN FILE CASE
		 */
		error = nlookup(nd);
	}
	if (error)
		return (error);

	/*
	 * split case to allow us to re-resolve and retry the ncp in case
	 * we get ESTALE.
	 */
again:
	if (fmode & O_CREAT) {
		if (nd->nl_nch.ncp->nc_vp == NULL) {
			if ((error = ncp_writechk(&nd->nl_nch)) != 0)
				return (error);
			VATTR_NULL(vap);
			vap->va_type = VREG;
			vap->va_mode = cmode;
			if (fmode & O_EXCL)
				vap->va_vaflags |= VA_EXCLUSIVE;
			error = VOP_NCREATE(&nd->nl_nch, nd->nl_dvp, &vp,
					    nd->nl_cred, vap);
			if (error)
				return (error);
			fmode &= ~O_TRUNC;
			/* locked vnode is returned */
		} else {
			if (fmode & O_EXCL) {
				error = EEXIST;
			} else {
				error = cache_vget(&nd->nl_nch, cred,
						   LK_EXCLUSIVE, &vp);
			}
			if (error)
				return (error);
			fmode &= ~O_CREAT;
		}
	} else {
		error = cache_vget(&nd->nl_nch, cred, LK_EXCLUSIVE, &vp);
		if (error)
			return (error);
	}
	/*
	 * We have a locked vnode and ncp now.  Note that the ncp will
	 * be cleaned up by the caller if nd->nl_nch is left intact.
	 */
	if (vp->v_type == VLNK) {
		error = EMLINK;
		goto bad;
	}
	if (vp->v_type == VSOCK) {
		error = EOPNOTSUPP;
		goto bad;
	}
	if (vp->v_type != VDIR && (fmode & O_DIRECTORY)) {
		error = ENOTDIR;
		goto bad;
	}
	if ((fmode & O_CREAT) == 0) {
		if (fmode & (FWRITE | O_TRUNC)) {
			if (vp->v_type == VDIR) {
				error = EISDIR;
				goto bad;
			}
			error = vn_writechk(vp, &nd->nl_nch);
			if (error) {
				/*
				 * Special stale handling, re-resolve the
				 * vnode.
				 */
				if (error == ESTALE) {
					vput(vp);
					vp = NULL;
					cache_setunresolved(&nd->nl_nch);
					error = cache_resolve(&nd->nl_nch,
							      cred);
					if (error == 0)
						goto again;
				}
				goto bad;
			}
		}
	}
	if (fmode & O_TRUNC) {
		vn_unlock(vp);				/* XXX */
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);	/* XXX */
		osize = vp->v_filesize;
		VATTR_NULL(vap);
		vap->va_size = 0;
		error = VOP_SETATTR(vp, vap, cred);
		if (error)
			goto bad;
		error = VOP_GETATTR(vp, vap);
		if (error)
			goto bad;
		mp = vp->v_mount;
		VFS_ACCOUNT(mp, vap->va_uid, vap->va_gid, -osize);
	}
	/*
	 * Set or clear VSWAPCACHE on the vp based on nd->nl_nch.ncp->nc_flag.
	 * These particular bits are tracked all the way from the root.
	 *
	 * NOTE: Might not work properly on NFS servers due to the
	 * disconnected namecache.
	 */
	flags = nd->nl_nch.ncp->nc_flag;
	if ((flags & (NCF_UF_CACHE | NCF_UF_PCACHE)) &&
	    (flags & (NCF_SF_NOCACHE | NCF_SF_PNOCACHE)) == 0) {
		vsetflags(vp, VSWAPCACHE);
	} else {
		vclrflags(vp, VSWAPCACHE);
	}
	/*
	 * Setup the fp so VOP_OPEN can override it.  No descriptor has been
	 * associated with the fp yet so we own it clean.
	 *
	 * f_nchandle inherits nl_nch.  This used to be necessary only for
	 * directories but now we do it unconditionally so f*() ops
	 * such as fchmod() can access the actual namespace that was
	 * used to open the file.
	 */
	if (fp) {
		if (nd->nl_flags & NLC_APPENDONLY)
			fmode |= FAPPENDONLY;
		fp->f_nchandle = nd->nl_nch;
		cache_zero(&nd->nl_nch);
		cache_unlock(&fp->f_nchandle);
	}
	/*
	 * Get rid of nl_nch.  vn_open does not return it (it returns the
	 * vnode or the file pointer).  Note: we can't leave nl_nch locked
	 * through the VOP_OPEN anyway since the VOP_OPEN may block, e.g.
	 * on /dev/ttyd0
	 */
	cache_put(&nd->nl_nch);

	error = VOP_OPEN(vp, fmode, cred, fp);
	if (error) {
		/*
		 * setting f_ops to &badfileops will prevent the descriptor
		 * code from trying to close and release the vnode, since
		 * the open failed we do not want to call close.
		 */
		if (fp)
			fp->f_ops = &badfileops;
		goto bad;
	}
	/*
	 * Assert that VREG files have been setup for vmio.
	 */
	KASSERT(vp->v_type != VREG || vp->v_object != NULL,
		("vn_open: regular file was not VMIO enabled!"));

	/*
	 * Return the vnode.  XXX needs some cleaning up.  The vnode is
	 * only returned in the fp == NULL case.
	 */
	if (fp == NULL) {
		nd->nl_open_vp = vp;
		nd->nl_vp_fmode = fmode;
		if ((nd->nl_flags & NLC_LOCKVP) == 0)
			vn_unlock(vp);
	} else {
		vput(vp);
	}
	return (0);
bad:
	if (vp)
		vput(vp);
	return (error);
}
int
vn_opendisk(const char *devname, int fmode, struct vnode **vpp)
{
	struct vnode *vp;
	int error;

	if (strncmp(devname, "/dev/", 5) == 0)
		devname += 5;
	if ((vp = getsynthvnode(devname)) == NULL) {
		error = ENODEV;
	} else {
		error = VOP_OPEN(vp, fmode, proc0.p_ucred, NULL);
	}
/*
 * Check for write permissions on the specified vnode.  nch may be NULL.
 */
int
vn_writechk(struct vnode *vp, struct nchandle *nch)
{
	/*
	 * If there's shared text associated with
	 * the vnode, try to free it up once.  If
	 * we fail, we can't allow writing.
	 */
	if (vp->v_flag & VTEXT)
		return (ETXTBSY);

	/*
	 * If the vnode represents a regular file, check the mount
	 * point via the nch.  This may be a different mount point
	 * than the one embedded in the vnode (e.g. nullfs).
	 *
	 * We can still write to non-regular files (e.g. devices)
	 * via read-only mounts.
	 */
	if (nch && nch->ncp && vp->v_type == VREG)
		return (ncp_writechk(nch));
	return (0);
}
/*
 * Check whether the underlying mount is read-only.  The mount point
 * referenced by the namecache may be different from the mount point
 * used by the underlying vnode in the case of NULLFS, so a separate
 * check is needed.
 */
int
ncp_writechk(struct nchandle *nch)
{
	if (nch->mount && (nch->mount->mnt_flag & MNT_RDONLY))
		return (EROFS);
	return (0);
}
/*
 * Vnode close call
 */
int
vn_close(struct vnode *vp, int flags)
{
	int error;

	error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	if (error == 0) {
		error = VOP_CLOSE(vp, flags);
		vn_unlock(vp);
	}
	vrele(vp);
	return (error);
}
/*
 * Sequential heuristic.
 *
 * MPSAFE (f_seqcount and f_nextoff are allowed to race)
 */
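/*
 * Illustrative numbers (editor's sketch, assuming the usual BKVASIZE of
 * 16KB, IO_SEQMAX of 0x7F and IO_SEQSHIFT of 16; see the system headers
 * for the authoritative values): a caller issuing back-to-back 64KB reads
 * advances f_seqcount by 4 per call, so after a few dozen calls the
 * heuristic saturates at IO_SEQMAX and the returned hint becomes
 * IO_SEQMAX << IO_SEQSHIFT, i.e. maximum read-ahead encouragement for
 * the filesystem.
 */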
static int
sequential_heuristic(struct uio *uio, struct file *fp)
{
	/*
	 * Sequential heuristic - detect sequential operation
	 *
	 * NOTE: SMP: We allow f_seqcount updates to race.
	 */
	if ((uio->uio_offset == 0 && fp->f_seqcount > 0) ||
	    uio->uio_offset == fp->f_nextoff) {
		int tmpseq = fp->f_seqcount;

		tmpseq += (uio->uio_resid + BKVASIZE - 1) / BKVASIZE;
		if (tmpseq > IO_SEQMAX)
			tmpseq = IO_SEQMAX;
		fp->f_seqcount = tmpseq;
		return(fp->f_seqcount << IO_SEQSHIFT);
	}

	/*
	 * Not sequential, quick draw-down of seqcount
	 *
	 * NOTE: SMP: We allow f_seqcount updates to race.
	 */
	if (fp->f_seqcount > 1)
		fp->f_seqcount = 1;
	else
		fp->f_seqcount = 0;
	return(0);
}
/*
 * get - lock and return the f_offset field.
 * set - set and unlock the f_offset field.
 *
 * These routines serve the dual purpose of serializing access to the
 * f_offset field (at least on i386) and guaranteeing operational integrity
 * when multiple read()ers and write()ers are present on the same fp.
 */
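/*
 * Typical usage (editor's sketch; this is the pattern vn_read() and
 * vn_write() below follow): callers that were not given an explicit
 * offset bracket the I/O with the get/set pair so concurrent readers and
 * writers on the same fp see a consistent f_offset:
 *
 *	uio->uio_offset = vn_get_fpf_offset(fp);
 *	... VOP_READ() or VOP_WRITE() ...
 *	vn_set_fpf_offset(fp, uio->uio_offset);
 */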
static __inline off_t
vn_get_fpf_offset(struct file *fp)
{
	u_int	flags;
	u_int	nflags;

	/*
	 * Shortcut critical path.
	 */
	flags = fp->f_flag & ~FOFFSETLOCK;
	if (atomic_cmpset_int(&fp->f_flag, flags, flags | FOFFSETLOCK))
		return(fp->f_offset);

	/*
	 * The hard way
	 */
	for (;;) {
		flags = fp->f_flag;
		if (flags & FOFFSETLOCK) {
			nflags = flags | FOFFSETWAKE;
			tsleep_interlock(&fp->f_flag, 0);
			if (atomic_cmpset_int(&fp->f_flag, flags, nflags))
				tsleep(&fp->f_flag, PINTERLOCKED, "fpoff", 0);
		} else {
			nflags = flags | FOFFSETLOCK;
			if (atomic_cmpset_int(&fp->f_flag, flags, nflags))
				break;
		}
	}
	return(fp->f_offset);
}
static __inline void
vn_set_fpf_offset(struct file *fp, off_t offset)
{
	u_int	flags;
	u_int	nflags;

	/*
	 * We hold the lock so we can set the offset without interference.
	 */
	fp->f_offset = offset;

	/*
	 * Normal release is already a reasonably critical path.
	 */
	for (;;) {
		flags = fp->f_flag;
		nflags = flags & ~(FOFFSETLOCK | FOFFSETWAKE);
		if (atomic_cmpset_int(&fp->f_flag, flags, nflags)) {
			if (flags & FOFFSETWAKE)
				wakeup(&fp->f_flag);
			break;
		}
	}
}
static __inline off_t
vn_poll_fpf_offset(struct file *fp)
{
#if defined(__x86_64__)
	return(fp->f_offset);
#else
	off_t off = vn_get_fpf_offset(fp);
	vn_set_fpf_offset(fp, off);
	return(off);
#endif
}
/*
 * Package up an I/O request on a vnode into a uio and do it.
 */
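/*
 * Illustrative caller sketch (editor's example, not from the original
 * sources): read the first 512 bytes of a referenced vnode into a kernel
 * buffer.  "buf", "resid" and "cred" are hypothetical locals; with ioflg 0
 * the routine takes the vnode lock itself, pass IO_NODELOCKED if the
 * caller already holds it.
 *
 *	char buf[512];
 *	int resid;
 *
 *	error = vn_rdwr(UIO_READ, vp, (caddr_t)buf, sizeof(buf), 0,
 *			UIO_SYSSPACE, 0, cred, &resid);
 */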
int
vn_rdwr(enum uio_rw rw, struct vnode *vp, caddr_t base, int len,
	off_t offset, enum uio_seg segflg, int ioflg,
	struct ucred *cred, int *aresid)
{
	struct uio auio;
	struct iovec aiov;
	int error;

	if ((ioflg & IO_NODELOCKED) == 0)
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_base = base;
	aiov.iov_len = len;
	auio.uio_resid = len;
	auio.uio_offset = offset;
	auio.uio_segflg = segflg;
	auio.uio_rw = rw;
	auio.uio_td = curthread;
	if (rw == UIO_READ) {
		error = VOP_READ(vp, &auio, ioflg, cred);
	} else {
		error = VOP_WRITE(vp, &auio, ioflg, cred);
	}
	if (aresid)
		*aresid = auio.uio_resid;
	else if (auio.uio_resid && error == 0)
		error = EIO;
	if ((ioflg & IO_NODELOCKED) == 0)
		vn_unlock(vp);
	return (error);
}
/*
 * Package up an I/O request on a vnode into a uio and do it.  The I/O
 * request is split up into smaller chunks and we try to avoid saturating
 * the buffer cache while potentially holding a vnode locked, so we
 * check bwillwrite() before calling vn_rdwr().  We also call lwkt_user_yield()
 * to give other processes a chance to lock the vnode (either other processes
 * core'ing the same binary, or unrelated processes scanning the directory).
 */
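/*
 * Usage note (editor's sketch): the call signature is identical to
 * vn_rdwr(), so a caller writing a very large kernel buffer, e.g. a
 * process core dump, can simply substitute vn_rdwr_inchunks() to get the
 * MAXBSIZE-aligned chunking and the bwillwrite()/lwkt_user_yield() pacing
 * described above.
 */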
int
vn_rdwr_inchunks(enum uio_rw rw, struct vnode *vp, caddr_t base, int len,
		 off_t offset, enum uio_seg segflg, int ioflg,
		 struct ucred *cred, int *aresid)
{
	int error = 0;

	do {
		int chunk;

		/*
		 * Force `offset' to a multiple of MAXBSIZE except possibly
		 * for the first chunk, so that filesystems only need to
		 * write full blocks except possibly for the first and last
		 * blocks.
		 */
		chunk = MAXBSIZE - (uoff_t)offset % MAXBSIZE;

		if (chunk > len)
			chunk = len;
		if (vp->v_type == VREG) {
			switch(rw) {
			case UIO_READ:
				bwillread(chunk);
				break;
			case UIO_WRITE:
				bwillwrite(chunk);
				break;
			}
		}
		error = vn_rdwr(rw, vp, base, chunk, offset, segflg,
				ioflg, cred, aresid);
		len -= chunk;	/* aresid calc already includes length */
		if (error)
			break;
		offset += chunk;
		base += chunk;
		lwkt_user_yield();
	} while (len);
	if (aresid)
		*aresid += len;
	return (error);
}
/*
 * File pointers can no longer get ripped up by revoke so
 * we don't need to lock access to the vp.
 *
 * f_offset updates are not guaranteed against multiple readers
 */
static int
vn_read(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	struct vnode *vp;
	int error, ioflag;

	KASSERT(uio->uio_td == curthread,
		("uio_td %p is not td %p", uio->uio_td, curthread));
	vp = (struct vnode *)fp->f_data;

	ioflag = 0;
	if (flags & O_FBLOCKING) {
		/* ioflag &= ~IO_NDELAY; */
	} else if (flags & O_FNONBLOCKING) {
		ioflag |= IO_NDELAY;
	} else if (fp->f_flag & FNONBLOCK) {
		ioflag |= IO_NDELAY;
	}
	if (flags & O_FBUFFERED) {
		/* ioflag &= ~IO_DIRECT; */
	} else if (flags & O_FUNBUFFERED) {
		ioflag |= IO_DIRECT;
	} else if (fp->f_flag & O_DIRECT) {
		ioflag |= IO_DIRECT;
	}
	if ((flags & O_FOFFSET) == 0 && (vp->v_flag & VNOTSEEKABLE) == 0)
		uio->uio_offset = vn_get_fpf_offset(fp);
	vn_lock(vp, LK_SHARED | LK_RETRY);
	ioflag |= sequential_heuristic(uio, fp);

	error = VOP_READ(vp, uio, ioflag, cred);
	fp->f_nextoff = uio->uio_offset;
	vn_unlock(vp);
	if ((flags & O_FOFFSET) == 0 && (vp->v_flag & VNOTSEEKABLE) == 0)
		vn_set_fpf_offset(fp, uio->uio_offset);
	return (error);
}
static int
vn_write(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	struct vnode *vp;
	int error, ioflag;

	KASSERT(uio->uio_td == curthread,
		("uio_td %p is not td %p", uio->uio_td, curthread));
	vp = (struct vnode *)fp->f_data;

	ioflag = IO_UNIT;
	if (vp->v_type == VREG &&
	   ((fp->f_flag & O_APPEND) || (flags & O_FAPPEND))) {
		ioflag |= IO_APPEND;
	}

	if (flags & O_FBLOCKING) {
		/* ioflag &= ~IO_NDELAY; */
	} else if (flags & O_FNONBLOCKING) {
		ioflag |= IO_NDELAY;
	} else if (fp->f_flag & FNONBLOCK) {
		ioflag |= IO_NDELAY;
	}
	if (flags & O_FBUFFERED) {
		/* ioflag &= ~IO_DIRECT; */
	} else if (flags & O_FUNBUFFERED) {
		ioflag |= IO_DIRECT;
	} else if (fp->f_flag & O_DIRECT) {
		ioflag |= IO_DIRECT;
	}
	if (flags & O_FASYNCWRITE) {
		/* ioflag &= ~IO_SYNC; */
	} else if (flags & O_FSYNCWRITE) {
		ioflag |= IO_SYNC;
	} else if (fp->f_flag & O_FSYNC) {
		ioflag |= IO_SYNC;
	}

	if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS))
		ioflag |= IO_SYNC;
	if ((flags & O_FOFFSET) == 0)
		uio->uio_offset = vn_get_fpf_offset(fp);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	ioflag |= sequential_heuristic(uio, fp);
	error = VOP_WRITE(vp, uio, ioflag, cred);
	fp->f_nextoff = uio->uio_offset;
	vn_unlock(vp);
	if ((flags & O_FOFFSET) == 0)
		vn_set_fpf_offset(fp, uio->uio_offset);
	return (error);
}
static int
vn_statfile(struct file *fp, struct stat *sb, struct ucred *cred)
{
	struct vnode *vp;
	int error;

	vp = (struct vnode *)fp->f_data;
	error = vn_stat(vp, sb, cred);
	return (error);
}
int
vn_stat(struct vnode *vp, struct stat *sb, struct ucred *cred)
{
	struct vattr vattr;
	struct vattr *vap;
	cdev_t dev;
	u_short mode;
	int error;

	vap = &vattr;
	error = VOP_GETATTR(vp, vap);
	if (error)
		return (error);

	/*
	 * Zero the spare stat fields
	 */

	/*
	 * Copy from vattr table
	 */
	if (vap->va_fsid != VNOVAL)
		sb->st_dev = vap->va_fsid;
	else
		sb->st_dev = vp->v_mount->mnt_stat.f_fsid.val[0];
	sb->st_ino = vap->va_fileid;
	mode = vap->va_mode;
	switch (vap->va_type) {
	case VREG:
		mode |= S_IFREG;
		break;
	case VDIR:
		mode |= S_IFDIR;
		break;
	case VBLK:
		mode |= S_IFBLK;
		break;
	case VCHR:
		mode |= S_IFCHR;
		break;
	case VLNK:
		mode |= S_IFLNK;
		/* This is a cosmetic change, symlinks do not have a mode. */
		if (vp->v_mount->mnt_flag & MNT_NOSYMFOLLOW)
			sb->st_mode &= ~ACCESSPERMS;	/* 0000 */
		else
			sb->st_mode |= ACCESSPERMS;	/* 0777 */
		break;
	case VSOCK:
		mode |= S_IFSOCK;
		break;
	case VFIFO:
		mode |= S_IFIFO;
		break;
	default:
		return (EBADF);
	}
	sb->st_mode = mode;
	if (vap->va_nlink > (nlink_t)-1)
		sb->st_nlink = (nlink_t)-1;
	else
		sb->st_nlink = vap->va_nlink;
	sb->st_uid = vap->va_uid;
	sb->st_gid = vap->va_gid;
	sb->st_rdev = dev2udev(vp->v_rdev);
	sb->st_size = vap->va_size;
	sb->st_atimespec = vap->va_atime;
	sb->st_mtimespec = vap->va_mtime;
	sb->st_ctimespec = vap->va_ctime;
	/*
	 * A VCHR and VBLK device may track the last access and last modified
	 * time independently of the filesystem.  This is particularly true
	 * because device read and write calls may bypass the filesystem.
	 */
	if (vp->v_type == VCHR || vp->v_type == VBLK) {
		dev = vp->v_rdev;
		if (dev) {
			if (dev->si_lastread) {
				sb->st_atimespec.tv_sec = time_second +
							  (dev->si_lastread -
							   time_uptime);
				sb->st_atimespec.tv_nsec = 0;
			}
			if (dev->si_lastwrite) {
				sb->st_mtimespec.tv_sec = time_second +
							  (dev->si_lastwrite -
							   time_uptime);
				sb->st_mtimespec.tv_nsec = 0;
			}
		}
	}
	/*
	 * According to www.opengroup.org, the meaning of st_blksize is
	 *   "a filesystem-specific preferred I/O block size for this
	 *    object.  In some filesystem types, this may vary from file
	 *    to file."
	 * Default to PAGE_SIZE after much discussion.
	 */
	if (vap->va_type == VREG) {
		sb->st_blksize = vap->va_blocksize;
	} else if (vn_isdisk(vp, NULL)) {
		/*
		 * XXX this is broken.  If the device is not yet open (aka
		 * stat() call, aka v_rdev == NULL), how are we supposed
		 * to get a valid block size out of it?
		 */
		dev = vp->v_rdev;

		sb->st_blksize = dev->si_bsize_best;
		if (sb->st_blksize < dev->si_bsize_phys)
			sb->st_blksize = dev->si_bsize_phys;
		if (sb->st_blksize < BLKDEV_IOSIZE)
			sb->st_blksize = BLKDEV_IOSIZE;
	} else {
		sb->st_blksize = PAGE_SIZE;
	}
	sb->st_flags = vap->va_flags;

	error = priv_check_cred(cred, PRIV_VFS_GENERATION, 0);
	if (error)
		sb->st_gen = 0;
	else
		sb->st_gen = (u_int32_t)vap->va_gen;

	sb->st_blocks = vap->va_bytes / S_BLKSIZE;
	return (0);
}
/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
vn_ioctl(struct file *fp, u_long com, caddr_t data, struct ucred *ucred,
	 struct sysmsg *msg)
{
	struct vnode *vp = ((struct vnode *)fp->f_data);
	struct vattr vattr;
	off_t size;
	int error;

	switch (vp->v_type) {
	case VREG:
	case VDIR:
		if (com == FIONREAD) {
			error = VOP_GETATTR(vp, &vattr);
			if (error)
				break;
			size = vattr.va_size;
			if ((vp->v_flag & VNOTSEEKABLE) == 0)
				size -= vn_poll_fpf_offset(fp);
			if (size > 0x7FFFFFFF)
				size = 0x7FFFFFFF;
			*(int *)data = size;
			error = 0;
			break;
		}
		if (com == FIOASYNC) {				/* XXX */
			error = 0;				/* XXX */
			break;
		}
		/* fall into ... */
	default:
		if (com == FIODTYPE) {
			if (vp->v_type != VCHR && vp->v_type != VBLK) {
				error = ENOTTY;
				break;
			}
			*(int *)data = dev_dflags(vp->v_rdev) & D_TYPEMASK;
			error = 0;
			break;
		}
		error = VOP_IOCTL(vp, com, data, fp->f_flag, ucred, msg);
		if (error == 0 && com == TIOCSCTTY) {
			struct proc *p = curthread->td_proc;
			struct session *sess;
			struct vnode *ovp;

			if (p == NULL) {
				error = ENOTTY;
				break;
			}
			sess = p->p_session;

			/* Do nothing if reassigning same control tty */
			if (sess->s_ttyvp == vp)
				break;

			/* Get rid of reference to old control tty */
			ovp = sess->s_ttyvp;
			vref(vp);
			sess->s_ttyvp = vp;
			if (ovp)
				vrele(ovp);
		}
		break;
	}
	return (error);
}
/*
 * Check that the vnode is still valid, and if so
 * acquire requested lock.
 */
int
#ifndef	DEBUG_LOCKS
vn_lock(struct vnode *vp, int flags)
#else
debug_vn_lock(struct vnode *vp, int flags, const char *filename, int line)
#endif
{
	int error;

	do {
#ifdef	DEBUG_LOCKS
		vp->filename = filename;
		vp->line = line;
		error = debuglockmgr(&vp->v_lock, flags,
				     "vn_lock", filename, line);
#else
		error = lockmgr(&vp->v_lock, flags);
#endif
		if (error == 0)
			break;
	} while (flags & LK_RETRY);

	/*
	 * Because we (had better!) have a ref on the vnode, once it
	 * goes to VRECLAIMED state it will not be recycled until all
	 * refs go away.  So we can just check the flag.
	 */
	if (error == 0 && (vp->v_flag & VRECLAIMED)) {
		lockmgr(&vp->v_lock, LK_RELEASE);
		error = ENOENT;
	}
	return (error);
}
#ifdef DEBUG_VN_UNLOCK

void
debug_vn_unlock(struct vnode *vp, const char *filename, int line)
{
	kprintf("vn_unlock from %s:%d\n", filename, line);
	lockmgr(&vp->v_lock, LK_RELEASE);
}

#else

void
vn_unlock(struct vnode *vp)
{
	lockmgr(&vp->v_lock, LK_RELEASE);
}

#endif
int
vn_islocked(struct vnode *vp)
{
	return (lockstatus(&vp->v_lock, curthread));
}
/*
 * Return the lock status of a vnode and unlock the vnode
 * if we owned the lock.  This is not a boolean, if the
 * caller cares what the lock status is the caller must
 * check the various possible values.
 *
 * This only unlocks exclusive locks held by the caller,
 * it will NOT unlock shared locks (there is no way to
 * tell who the shared lock belongs to).
 */
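/*
 * Typical pairing (editor's sketch): save the returned status, run code
 * that must not hold the vnode lock, then restore it.
 *
 *	int vpls = vn_islocked_unlock(vp);
 *	... operate without the vnode lock held ...
 *	vn_islocked_relock(vp, vpls);
 */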
int
vn_islocked_unlock(struct vnode *vp)
{
	int vpls;

	vpls = lockstatus(&vp->v_lock, curthread);
	if (vpls == LK_EXCLUSIVE)
		lockmgr(&vp->v_lock, LK_RELEASE);
	return(vpls);
}
/*
 * Restore a vnode lock that we previously released via
 * vn_islocked_unlock().  This is a NOP if we did not
 * own the original lock.
 */
void
vn_islocked_relock(struct vnode *vp, int vpls)
{
	int error;

	if (vpls == LK_EXCLUSIVE)
		error = lockmgr(&vp->v_lock, vpls);
}
static int
vn_closefile(struct file *fp)
{
	int error;

	fp->f_ops = &badfileops;
	error = vn_close(((struct vnode *)fp->f_data), fp->f_flag);
	return (error);
}
static int
vn_kqfilter(struct file *fp, struct knote *kn)
{
	int error;

	error = VOP_KQFILTER(((struct vnode *)fp->f_data), kn);
	return (error);
}