2 * Copyright (c) 2005, 2006 The NetBSD Foundation, Inc.
5 * This code is derived from software contributed to The NetBSD Foundation
6 * by Julio M. Merino Vidal, developed as part of Google's Summer of Code
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
18 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
19 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
20 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
21 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
22 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28 * POSSIBILITY OF SUCH DAMAGE.
30 * $NetBSD: tmpfs_vnops.c,v 1.39 2007/07/23 15:41:01 jmmv Exp $
34 * tmpfs vnode interface.
37 #include <sys/kernel.h>
38 #include <sys/kern_syscall.h>
39 #include <sys/param.h>
40 #include <sys/fcntl.h>
41 #include <sys/lockf.h>
44 #include <sys/resourcevar.h>
45 #include <sys/sched.h>
47 #include <sys/systm.h>
48 #include <sys/unistd.h>
49 #include <sys/vfsops.h>
50 #include <sys/vnode.h>
51 #include <sys/mountctl.h>
54 #include <vm/vm_object.h>
55 #include <vm/vm_page.h>
56 #include <vm/vm_pager.h>
57 #include <vm/swap_pager.h>
61 #include <vfs/fifofs/fifo.h>
62 #include <vfs/tmpfs/tmpfs_vnops.h>
64 #include <vfs/tmpfs/tmpfs.h>
68 static void tmpfs_strategy_done(struct bio *bio);
/*
 * Post a kqueue event (NOTE_WRITE, NOTE_ATTRIB, ...) on the vnode's
 * knote list so registered kevent() watchers see the change.
 */
72 tmpfs_knote(struct vnode *vp, int flags)
75 KNOTE(&vp->v_pollinfo.vpi_kqinfo.ki_note, flags);
79 /* --------------------------------------------------------------------- */
/*
 * Resolve a name within a tmpfs directory: look up the namecache entry's
 * name in the directory's dirent list, allocate/attach a vnode for a hit,
 * and record the (positive or negative) result in the namecache.
 * Serialized against other tmpfs operations by the per-mount lwkt token.
 */
82 tmpfs_nresolve(struct vop_nresolve_args *v)
84 struct vnode *dvp = v->a_dvp;
85 struct vnode *vp = NULL;
86 struct namecache *ncp = v->a_nch->ncp;
87 struct tmpfs_node *tnode;
91 struct tmpfs_dirent *de;
92 struct tmpfs_node *dnode;
95 lwkt_gettoken(&mp->mnt_token);
97 dnode = VP_TO_TMPFS_DIR(dvp);
/* Search the directory for the requested name. */
99 de = tmpfs_dir_lookup(dnode, NULL, ncp);
104 * Allocate a vnode for the node we found.
107 error = tmpfs_alloc_vp(dvp->v_mount, tnode,
108 LK_EXCLUSIVE | LK_RETRY, &vp);
116 * Store the result of this lookup in the cache. Avoid this if the
117 * request was for creation, as it does not improve timings on
/* Positive hit: associate the vnode with the namecache entry. */
122 cache_setvp(v->a_nch, vp);
124 } else if (error == ENOENT) {
/* Negative hit: cache the non-existence of the name. */
125 cache_setvp(v->a_nch, NULL);
128 lwkt_reltoken(&mp->mnt_token);
133 tmpfs_nlookupdotdot(struct vop_nlookupdotdot_args *v)
135 struct vnode *dvp = v->a_dvp;
136 struct vnode **vpp = v->a_vpp;
137 struct tmpfs_node *dnode = VP_TO_TMPFS_NODE(dvp);
138 struct ucred *cred = v->a_cred;
145 lwkt_gettoken(&mp->mnt_token);
147 /* Check accessibility of requested node as a first step. */
148 error = VOP_ACCESS(dvp, VEXEC, cred);
150 lwkt_reltoken(&mp->mnt_token);
154 if (dnode->tn_dir.tn_parent != NULL) {
155 /* Allocate a new vnode on the matching entry. */
156 error = tmpfs_alloc_vp(dvp->v_mount, dnode->tn_dir.tn_parent,
157 LK_EXCLUSIVE | LK_RETRY, vpp);
163 lwkt_reltoken(&mp->mnt_token);
165 return (*vpp == NULL) ? ENOENT : 0;
168 /* --------------------------------------------------------------------- */
/*
 * Create a regular file (or socket) in a tmpfs directory. On success the
 * namecache entry is resolved to the new vnode and a NOTE_WRITE kqueue
 * event is posted on the parent directory.
 */
171 tmpfs_ncreate(struct vop_ncreate_args *v)
173 struct vnode *dvp = v->a_dvp;
174 struct vnode **vpp = v->a_vpp;
175 struct namecache *ncp = v->a_nch->ncp;
176 struct vattr *vap = v->a_vap;
177 struct ucred *cred = v->a_cred;
182 lwkt_gettoken(&mp->mnt_token);
/* Only VREG/VSOCK are legal here; other types go through other VOPs. */
184 KKASSERT(vap->va_type == VREG || vap->va_type == VSOCK);
186 error = tmpfs_alloc_file(dvp, vpp, vap, ncp, cred, NULL);
188 cache_setunresolved(v->a_nch);
189 cache_setvp(v->a_nch, *vpp);
190 tmpfs_knote(dvp, NOTE_WRITE);
193 lwkt_reltoken(&mp->mnt_token);
197 /* --------------------------------------------------------------------- */
/*
 * Create a device node or FIFO in a tmpfs directory. Rejects any type
 * other than VBLK/VCHR/VFIFO; otherwise mirrors tmpfs_ncreate().
 */
200 tmpfs_nmknod(struct vop_nmknod_args *v)
202 struct vnode *dvp = v->a_dvp;
203 struct vnode **vpp = v->a_vpp;
204 struct namecache *ncp = v->a_nch->ncp;
205 struct vattr *vap = v->a_vap;
206 struct ucred *cred = v->a_cred;
207 struct mount *mp = dvp->v_mount;
210 lwkt_gettoken(&mp->mnt_token);
212 if (vap->va_type != VBLK && vap->va_type != VCHR &&
213 vap->va_type != VFIFO) {
214 lwkt_reltoken(&mp->mnt_token);
218 error = tmpfs_alloc_file(dvp, vpp, vap, ncp, cred, NULL);
220 cache_setunresolved(v->a_nch);
221 cache_setvp(v->a_nch, *vpp);
222 tmpfs_knote(dvp, NOTE_WRITE);
225 lwkt_reltoken(&mp->mnt_token);
230 /* --------------------------------------------------------------------- */
/*
 * Open a tmpfs vnode. Refuses to open a node whose last name has been
 * removed (link count < 1) and denies plain write opens of append-only
 * files; otherwise defers to vop_stdopen().
 */
233 tmpfs_open(struct vop_open_args *v)
235 struct vnode *vp = v->a_vp;
236 int mode = v->a_mode;
237 struct mount *mp = vp->v_mount;
238 struct tmpfs_node *node;
241 lwkt_gettoken(&mp->mnt_token);
242 node = VP_TO_TMPFS_NODE(vp);
245 /* The file is still active but all its names have been removed
246 * (e.g. by a "rmdir $(pwd)"). It cannot be opened any more as
247 * it is about to die. */
248 if (node->tn_links < 1)
252 /* If the file is marked append-only, deny write requests. */
/* FWRITE without O_APPEND on an APPEND-flagged node is refused. */
253 if ((node->tn_flags & APPEND) &&
254 (mode & (FWRITE | O_APPEND)) == FWRITE) {
257 error = (vop_stdopen(v));
260 lwkt_reltoken(&mp->mnt_token);
264 /* --------------------------------------------------------------------- */
/*
 * Close a tmpfs vnode. Updates node times only when the node still has
 * links (a fully-unlinked node will vanish on reclaim anyway), then
 * defers to vop_stdclose().
 */
267 tmpfs_close(struct vop_close_args *v)
269 struct vnode *vp = v->a_vp;
270 struct tmpfs_node *node;
273 lwkt_gettoken(&vp->v_mount->mnt_token);
274 node = VP_TO_TMPFS_NODE(vp);
276 if (node->tn_links > 0) {
278 * Update node times. No need to do it if the node has
279 * been deleted, because it will vanish after we return.
284 error = vop_stdclose(v);
286 lwkt_reltoken(&vp->v_mount->mnt_token);
291 /* --------------------------------------------------------------------- */
/*
 * Check access permissions on a tmpfs vnode. Write access is denied on
 * read-only mounts (for appropriate vnode types) and on IMMUTABLE nodes;
 * the remaining uid/gid/mode check is delegated to vop_helper_access().
 */
294 tmpfs_access(struct vop_access_args *v)
296 struct vnode *vp = v->a_vp;
298 struct tmpfs_node *node;
300 lwkt_gettoken(&vp->v_mount->mnt_token);
301 node = VP_TO_TMPFS_NODE(vp);
303 switch (vp->v_type) {
/* Deny writes on a read-only mount. */
309 if ((v->a_mode & VWRITE) &&
310 (vp->v_mount->mnt_flag & MNT_RDONLY)) {
/* Deny writes to files flagged immutable. */
330 if ((v->a_mode & VWRITE) && (node->tn_flags & IMMUTABLE)) {
335 error = vop_helper_access(v, node->tn_uid, node->tn_gid,
339 lwkt_reltoken(&vp->v_mount->mnt_token);
343 /* --------------------------------------------------------------------- */
/*
 * Fill in a vattr from the in-memory tmpfs node. All attributes come
 * straight from the tmpfs_node; va_bytes is the node size rounded up to
 * a page since tmpfs backs file data with whole VM pages.
 */
346 tmpfs_getattr(struct vop_getattr_args *v)
348 struct vnode *vp = v->a_vp;
349 struct vattr *vap = v->a_vap;
350 struct tmpfs_node *node;
352 lwkt_gettoken(&vp->v_mount->mnt_token);
353 node = VP_TO_TMPFS_NODE(vp);
357 vap->va_type = vp->v_type;
358 vap->va_mode = node->tn_mode;
359 vap->va_nlink = node->tn_links;
360 vap->va_uid = node->tn_uid;
361 vap->va_gid = node->tn_gid;
362 vap->va_fsid = vp->v_mount->mnt_stat.f_fsid.val[0];
363 vap->va_fileid = node->tn_id;
364 vap->va_size = node->tn_size;
365 vap->va_blocksize = PAGE_SIZE;
366 vap->va_atime.tv_sec = node->tn_atime;
367 vap->va_atime.tv_nsec = node->tn_atimensec;
368 vap->va_mtime.tv_sec = node->tn_mtime;
369 vap->va_mtime.tv_nsec = node->tn_mtimensec;
370 vap->va_ctime.tv_sec = node->tn_ctime;
371 vap->va_ctime.tv_nsec = node->tn_ctimensec;
372 vap->va_gen = node->tn_gen;
373 vap->va_flags = node->tn_flags;
/* Device nodes also report their major/minor numbers. */
374 if (vp->v_type == VBLK || vp->v_type == VCHR)
376 vap->va_rmajor = umajor(node->tn_rdev);
377 vap->va_rminor = uminor(node->tn_rdev);
379 vap->va_bytes = round_page(node->tn_size);
382 lwkt_reltoken(&vp->v_mount->mnt_token);
387 /* --------------------------------------------------------------------- */
/*
 * Set attributes on a tmpfs node. Each sub-change (flags, size, owner,
 * mode, times) is applied only while no prior error occurred, and each
 * accumulates kqueue flags which are posted once at the end.
 */
390 tmpfs_setattr(struct vop_setattr_args *v)
392 struct vnode *vp = v->a_vp;
393 struct vattr *vap = v->a_vap;
394 struct ucred *cred = v->a_cred;
395 struct tmpfs_node *node = VP_TO_TMPFS_NODE(vp);
399 lwkt_gettoken(&vp->v_mount->mnt_token);
400 if (error == 0 && (vap->va_flags != VNOVAL)) {
401 error = tmpfs_chflags(vp, vap->va_flags, cred);
402 kflags |= NOTE_ATTRIB;
405 if (error == 0 && (vap->va_size != VNOVAL)) {
/* Growing the file is also an EXTEND event for watchers. */
406 if (vap->va_size > node->tn_size)
407 kflags |= NOTE_WRITE | NOTE_EXTEND;
409 kflags |= NOTE_WRITE;
410 error = tmpfs_chsize(vp, vap->va_size, cred);
413 if (error == 0 && (vap->va_uid != (uid_t)VNOVAL ||
414 vap->va_gid != (gid_t)VNOVAL)) {
415 error = tmpfs_chown(vp, vap->va_uid, vap->va_gid, cred);
416 kflags |= NOTE_ATTRIB;
419 if (error == 0 && (vap->va_mode != (mode_t)VNOVAL)) {
420 error = tmpfs_chmod(vp, vap->va_mode, cred);
421 kflags |= NOTE_ATTRIB;
424 if (error == 0 && ((vap->va_atime.tv_sec != VNOVAL &&
425 vap->va_atime.tv_nsec != VNOVAL) ||
426 (vap->va_mtime.tv_sec != VNOVAL &&
427 vap->va_mtime.tv_nsec != VNOVAL) )) {
428 error = tmpfs_chtimes(vp, &vap->va_atime, &vap->va_mtime,
429 vap->va_vaflags, cred);
430 kflags |= NOTE_ATTRIB;
433 /* Update the node times. We give preference to the error codes
434 * generated by this function rather than the ones that may arise
435 * from tmpfs_update. */
437 tmpfs_knote(vp, kflags);
439 lwkt_reltoken(&vp->v_mount->mnt_token);
444 /* --------------------------------------------------------------------- */
447 * fsync is usually a NOP, but we must take action when unmounting or
/*
 * fsync for tmpfs. For regular files being reclaimed with no remaining
 * links the data is simply truncated away (nothing to persist);
 * otherwise buffers are flushed via vfsync().
 */
451 tmpfs_fsync(struct vop_fsync_args *v)
453 struct tmpfs_node *node;
454 struct vnode *vp = v->a_vp;
456 lwkt_gettoken(&vp->v_mount->mnt_token);
457 node = VP_TO_TMPFS_NODE(vp);
460 if (vp->v_type == VREG) {
461 if (vp->v_flag & VRECLAIMED) {
462 if (node->tn_links == 0)
463 tmpfs_truncate(vp, 0);
465 vfsync(v->a_vp, v->a_waitfor, 1, NULL, NULL);
469 lwkt_reltoken(&vp->v_mount->mnt_token);
473 /* --------------------------------------------------------------------- */
/*
 * Read from a regular tmpfs file. First tries the VM page cache
 * shortcut (vop_helper_read_shortcut); any remainder is copied a
 * buffer-cache block at a time via bread()/uiomovebp().
 * Marks the node ACCESSED on completion.
 */
476 tmpfs_read (struct vop_read_args *ap)
479 struct vnode *vp = ap->a_vp;
480 struct uio *uio = ap->a_uio;
481 struct tmpfs_node *node;
/* Reject negative offsets and non-regular files up front. */
491 if (uio->uio_offset < 0)
493 if (vp->v_type != VREG)
497 * Extract node, try to shortcut the operation through
498 * the VM page cache, allowing us to avoid buffer cache
501 node = VP_TO_TMPFS_NODE(vp);
502 resid = uio->uio_resid;
503 error = vop_helper_read_shortcut(ap);
506 if (uio->uio_resid == 0) {
513 * Fall-through to our normal read code.
515 while (uio->uio_resid > 0 && uio->uio_offset < node->tn_size) {
517 * Use buffer cache I/O (via tmpfs_strategy)
519 offset = (size_t)uio->uio_offset & BMASK;
520 base_offset = (off_t)uio->uio_offset - offset;
/* Try a cached block first; fall back to bread() under the token. */
521 bp = getcacheblk(vp, base_offset, BSIZE, 0);
523 lwkt_gettoken(&vp->v_mount->mnt_token);
524 error = bread(vp, base_offset, BSIZE, &bp);
527 lwkt_reltoken(&vp->v_mount->mnt_token);
528 kprintf("tmpfs_read bread error %d\n", error);
531 lwkt_reltoken(&vp->v_mount->mnt_token);
535 * Figure out how many bytes we can actually copy this loop.
537 len = BSIZE - offset;
538 if (len > uio->uio_resid)
539 len = uio->uio_resid;
540 if (len > node->tn_size - uio->uio_offset)
541 len = (size_t)(node->tn_size - uio->uio_offset);
543 error = uiomovebp(bp, (char *)bp->b_data + offset, len, uio);
546 kprintf("tmpfs_read uiomove error %d\n", error);
552 TMPFS_NODE_LOCK(node);
553 node->tn_status |= TMPFS_NODE_ACCESSED;
554 TMPFS_NODE_UNLOCK(node);
/*
 * Write to a regular tmpfs file. Handles IO_APPEND, enforces the mount's
 * maximum file size and the process RLIMIT_FSIZE (raising SIGXFSZ),
 * extends the file as needed, and copies data one buffer-cache block at
 * a time. Dirty buffers are normally disposed of via buwrite() so the
 * underlying VM pages (not the buffer cache) hold the dirty data.
 */
560 tmpfs_write (struct vop_write_args *ap)
563 struct vnode *vp = ap->a_vp;
564 struct uio *uio = ap->a_uio;
565 struct thread *td = uio->uio_td;
566 struct tmpfs_node *node;
578 if (uio->uio_resid == 0) {
582 node = VP_TO_TMPFS_NODE(vp);
584 if (vp->v_type != VREG)
587 lwkt_gettoken(&vp->v_mount->mnt_token);
/* Remember the original size so a failed write can be rolled back. */
589 oldsize = node->tn_size;
590 if (ap->a_ioflag & IO_APPEND)
591 uio->uio_offset = node->tn_size;
594 * Check for illegal write offsets.
596 if (uio->uio_offset + uio->uio_resid >
597 VFS_TO_TMPFS(vp->v_mount)->tm_maxfilesize) {
598 lwkt_reltoken(&vp->v_mount->mnt_token);
603 * NOTE: Ignore if UIO does not come from a user thread (e.g. VN).
605 if (vp->v_type == VREG && td != NULL && td->td_lwp != NULL) {
606 error = kern_getrlimit(RLIMIT_FSIZE, &limit);
608 lwkt_reltoken(&vp->v_mount->mnt_token);
611 if (uio->uio_offset + uio->uio_resid > limit.rlim_cur) {
/* Exceeding RLIMIT_FSIZE delivers SIGXFSZ, per POSIX. */
612 ksignal(td->td_proc, SIGXFSZ);
613 lwkt_reltoken(&vp->v_mount->mnt_token);
620 * Extend the file's size if necessary
622 extended = ((uio->uio_offset + uio->uio_resid) > node->tn_size);
624 while (uio->uio_resid > 0) {
626 * Use buffer cache I/O (via tmpfs_strategy)
628 offset = (size_t)uio->uio_offset & BMASK;
629 base_offset = (off_t)uio->uio_offset - offset;
630 len = BSIZE - offset;
631 if (len > uio->uio_resid)
632 len = uio->uio_resid;
634 if ((uio->uio_offset + len) > node->tn_size) {
/* "trivial" resize: no hole is created by this extension. */
635 trivial = (uio->uio_offset <= node->tn_size);
636 error = tmpfs_reg_resize(vp, uio->uio_offset + len, trivial);
642 * Read to fill in any gaps. Theoretically we could
643 * optimize this if the write covers the entire buffer
644 * and is not a UIO_NOCOPY write, however this can lead
645 * to a security violation exposing random kernel memory
646 * (whatever junk was in the backing VM pages before).
648 * So just use bread() to do the right thing.
650 error = bread(vp, base_offset, BSIZE, &bp);
651 error = uiomovebp(bp, (char *)bp->b_data + offset, len, uio);
653 kprintf("tmpfs_write uiomove error %d\n", error);
658 if (uio->uio_offset > node->tn_size) {
659 node->tn_size = uio->uio_offset;
660 kflags |= NOTE_EXTEND;
662 kflags |= NOTE_WRITE;
665 * Always try to flush the page in the UIO_NOCOPY case. This
666 * can come from the pageout daemon or during vnode eviction.
667 * It is not necessarily going to be marked IO_ASYNC/IO_SYNC.
669 * For the normal case we buwrite(), dirtying the underlying
670 * VM pages instead of dirtying the buffer and releasing the
671 * buffer as a clean buffer. This allows tmpfs to use
672 * essentially all available memory to cache file data.
673 * If we used bdwrite() the buffer cache would wind up
674 * flushing the data to swap too quickly.
676 bp->b_flags |= B_AGE;
677 if (uio->uio_segflg == UIO_NOCOPY) {
684 kprintf("tmpfs_write bwrite error %d\n", bp->b_error);
/* On error, restore the original file size and drop EXTEND. */
691 (void)tmpfs_reg_resize(vp, oldsize, trivial);
692 kflags &= ~NOTE_EXTEND;
698 * Currently we don't set the mtime on files modified via mmap()
699 * because we can't tell the difference between those modifications
700 * and an attempt by the pageout daemon to flush tmpfs pages to
703 * This is because in order to defer flushes as long as possible
704 * buwrite() works by marking the underlying VM pages dirty in
705 * order to be able to dispose of the buffer cache buffer without
708 TMPFS_NODE_LOCK(node);
709 if (uio->uio_segflg != UIO_NOCOPY)
710 node->tn_status |= TMPFS_NODE_ACCESSED | TMPFS_NODE_MODIFIED;
712 node->tn_status |= TMPFS_NODE_CHANGED;
/* Writing by a non-owner clears setuid/setgid (unless privileged). */
714 if (node->tn_mode & (S_ISUID | S_ISGID)) {
715 if (priv_check_cred(ap->a_cred, PRIV_VFS_RETAINSUGID, 0))
716 node->tn_mode &= ~(S_ISUID | S_ISGID);
718 TMPFS_NODE_UNLOCK(node);
721 tmpfs_knote(vp, kflags);
723 lwkt_reltoken(&vp->v_mount->mnt_token);
/*
 * Advisory (POSIX/flock) byte-range locking, delegated to the generic
 * lf_advlock() machinery using the node's tn_advlock state.
 */
728 tmpfs_advlock (struct vop_advlock_args *ap)
730 struct tmpfs_node *node;
731 struct vnode *vp = ap->a_vp;
734 lwkt_gettoken(&vp->v_mount->mnt_token);
735 node = VP_TO_TMPFS_NODE(vp);
737 error = (lf_advlock(ap, &node->tn_advlock, node->tn_size));
738 lwkt_reltoken(&vp->v_mount->mnt_token);
744 * The strategy function is typically only called when memory pressure
745 * forces the system to attempt to pageout pages. It can also be called
746 * by [n]vtruncbuf() when a truncation cuts a page in half. Normal write
/*
 * Buffer I/O strategy: pushes buffer-cache pages to/from the node's
 * backing swap object via swap_pager_strategy(). If there is no swap
 * space, writes just re-mark the pages as needing a commit instead of
 * failing. Completion is intercepted by tmpfs_strategy_done().
 */
750 tmpfs_strategy(struct vop_strategy_args *ap)
752 struct bio *bio = ap->a_bio;
754 struct buf *bp = bio->bio_buf;
755 struct vnode *vp = ap->a_vp;
756 struct tmpfs_node *node;
/* Only regular files have backing store; fail anything else. */
761 if (vp->v_type != VREG) {
762 bp->b_resid = bp->b_bcount;
763 bp->b_flags |= B_ERROR | B_INVAL;
764 bp->b_error = EINVAL;
769 lwkt_gettoken(&vp->v_mount->mnt_token);
770 node = VP_TO_TMPFS_NODE(vp);
772 uobj = node->tn_reg.tn_aobj;
775 * Don't bother flushing to swap if there is no swap, just
776 * ensure that the pages are marked as needing a commit (still).
778 if (bp->b_cmd == BUF_CMD_WRITE && vm_swap_size == 0) {
779 for (i = 0; i < bp->b_xio.xio_npages; ++i) {
780 m = bp->b_xio.xio_pages[i];
781 vm_page_need_commit(m);
/* Push a new bio layer so our done callback runs on completion. */
787 nbio = push_bio(bio);
788 nbio->bio_done = tmpfs_strategy_done;
789 nbio->bio_offset = bio->bio_offset;
790 swap_pager_strategy(uobj, nbio);
793 lwkt_reltoken(&vp->v_mount->mnt_token);
798 * If we were unable to commit the pages to swap make sure they are marked
799 * as needing a commit (again). If we were, clear the flag to allow the
/*
 * I/O completion callback for tmpfs_strategy(). On error the pages are
 * re-marked as needing a commit (the write to swap failed); on success
 * the commit requirement is cleared.
 */
803 tmpfs_strategy_done(struct bio *bio)
811 if (bp->b_flags & B_ERROR) {
812 bp->b_flags &= ~B_ERROR;
815 for (i = 0; i < bp->b_xio.xio_npages; ++i) {
816 m = bp->b_xio.xio_pages[i];
817 vm_page_need_commit(m);
820 for (i = 0; i < bp->b_xio.xio_npages; ++i) {
821 m = bp->b_xio.xio_pages[i];
822 vm_page_clear_commit(m);
/*
 * Logical-to-"physical" block mapping. tmpfs stores data at the logical
 * offset itself, so the mapping is the identity.
 */
830 tmpfs_bmap(struct vop_bmap_args *ap)
832 if (ap->a_doffsetp != NULL)
833 *ap->a_doffsetp = ap->a_loffset;
834 if (ap->a_runp != NULL)
836 if (ap->a_runb != NULL)
842 /* --------------------------------------------------------------------- */
/*
 * Remove (unlink) a non-directory entry. The target vnode is vget'd via
 * the namecache so inactive state is cleared and tmpfs_inactive /
 * tmpfs_reclaim will run when the last reference goes away. Directories
 * are rejected here (they go through nrmdir), as are IMMUTABLE/APPEND/
 * NOUNLINK-flagged files.
 */
845 tmpfs_nremove(struct vop_nremove_args *v)
847 struct vnode *dvp = v->a_dvp;
848 struct namecache *ncp = v->a_nch->ncp;
851 struct tmpfs_dirent *de;
852 struct tmpfs_mount *tmp;
853 struct tmpfs_node *dnode;
854 struct tmpfs_node *node;
859 lwkt_gettoken(&mp->mnt_token);
862 * We have to acquire the vp from v->a_nch because we will likely
863 * unresolve the namecache entry, and a vrele/vput is needed to
864 * trigger the tmpfs_inactive/tmpfs_reclaim sequence.
866 * We have to use vget to clear any inactive state on the vnode,
867 * otherwise the vnode may remain inactive and thus tmpfs_inactive
868 * will not get called when we release it.
870 error = cache_vget(v->a_nch, v->a_cred, LK_SHARED, &vp);
871 KKASSERT(vp->v_mount == dvp->v_mount);
872 KKASSERT(error == 0);
875 if (vp->v_type == VDIR) {
880 dnode = VP_TO_TMPFS_DIR(dvp);
881 node = VP_TO_TMPFS_NODE(vp);
882 tmp = VFS_TO_TMPFS(vp->v_mount);
883 de = tmpfs_dir_lookup(dnode, node, ncp);
889 /* Files marked as immutable or append-only cannot be deleted. */
890 if ((node->tn_flags & (IMMUTABLE | APPEND | NOUNLINK)) ||
891 (dnode->tn_flags & APPEND)) {
896 /* Remove the entry from the directory; as it is a file, we do not
897 * have to change the number of hard links of the directory. */
898 tmpfs_dir_detach(dnode, de);
900 /* Free the directory entry we just deleted. Note that the node
901 * referred by it will not be removed until the vnode is really
903 tmpfs_free_dirent(tmp, de);
905 if (node->tn_links > 0) {
906 TMPFS_NODE_LOCK(node);
907 node->tn_status |= TMPFS_NODE_ACCESSED | TMPFS_NODE_CHANGED | \
909 TMPFS_NODE_UNLOCK(node);
912 cache_unlink(v->a_nch);
913 tmpfs_knote(vp, NOTE_DELETE);
914 tmpfs_knote(dvp, NOTE_WRITE);
919 lwkt_reltoken(&mp->mnt_token);
924 /* --------------------------------------------------------------------- */
/*
 * Create a hard link to an existing tmpfs node. Rejects cross-mount
 * links, directory links, LINK_MAX overflow and IMMUTABLE/APPEND nodes;
 * otherwise allocates and attaches a new dirent and marks the node
 * CHANGED.
 */
927 tmpfs_nlink(struct vop_nlink_args *v)
929 struct vnode *dvp = v->a_dvp;
930 struct vnode *vp = v->a_vp;
931 struct namecache *ncp = v->a_nch->ncp;
932 struct tmpfs_dirent *de;
933 struct tmpfs_node *node;
934 struct tmpfs_node *dnode;
938 if (dvp->v_mount != vp->v_mount)
942 lwkt_gettoken(&mp->mnt_token);
943 KKASSERT(dvp != vp); /* XXX When can this be false? */
945 node = VP_TO_TMPFS_NODE(vp);
946 dnode = VP_TO_TMPFS_NODE(dvp);
948 /* XXX: Why aren't the following two tests done by the caller? */
950 /* Hard links of directories are forbidden. */
951 if (vp->v_type == VDIR) {
956 /* Cannot create cross-device links. */
957 if (dvp->v_mount != vp->v_mount) {
962 /* Ensure that we do not overflow the maximum number of links imposed
964 KKASSERT(node->tn_links <= LINK_MAX);
965 if (node->tn_links == LINK_MAX) {
970 /* We cannot create links of files marked immutable or append-only. */
971 if (node->tn_flags & (IMMUTABLE | APPEND)) {
976 /* Allocate a new directory entry to represent the node. */
977 error = tmpfs_alloc_dirent(VFS_TO_TMPFS(vp->v_mount), node,
978 ncp->nc_name, ncp->nc_nlen, &de);
982 /* Insert the new directory entry into the appropriate directory. */
983 tmpfs_dir_attach(dnode, de);
985 /* vp link count has changed, so update node times. */
987 TMPFS_NODE_LOCK(node);
988 node->tn_status |= TMPFS_NODE_CHANGED;
989 TMPFS_NODE_UNLOCK(node);
992 tmpfs_knote(vp, NOTE_LINK);
993 cache_setunresolved(v->a_nch);
994 cache_setvp(v->a_nch, vp);
995 tmpfs_knote(dvp, NOTE_WRITE);
999 lwkt_reltoken(&mp->mnt_token);
1003 /* --------------------------------------------------------------------- */
/*
 * Rename a tmpfs entry, possibly across directories and possibly
 * replacing an existing target. Steps: vget the target (if any) so it
 * can be properly reclaimed, validate flags and directory-emptiness
 * rules, allocate a new name if it differs, detach the dirent from the
 * source directory, free any replaced target dirent, and attach the
 * dirent to the target directory. kqueue events are posted for both
 * directories and the renamed node.
 */
1006 tmpfs_nrename(struct vop_nrename_args *v)
1008 struct vnode *fdvp = v->a_fdvp;
1009 struct namecache *fncp = v->a_fnch->ncp;
1010 struct vnode *fvp = fncp->nc_vp;
1011 struct vnode *tdvp = v->a_tdvp;
1012 struct namecache *tncp = v->a_tnch->ncp;
1014 struct tmpfs_dirent *de, *tde;
1015 struct tmpfs_mount *tmp;
1016 struct tmpfs_node *fdnode;
1017 struct tmpfs_node *fnode;
1018 struct tmpfs_node *tnode;
1019 struct tmpfs_node *tdnode;
1026 KKASSERT(fdvp->v_mount == fvp->v_mount);
1028 lwkt_gettoken(&mp->mnt_token);
1030 * Because tvp can get overwritten we have to vget it instead of
1031 * just vref or use it, otherwise it's VINACTIVE flag may not get
1032 * cleared and the node won't get destroyed.
1034 error = cache_vget(v->a_tnch, v->a_cred, LK_SHARED, &tvp);
1036 tnode = VP_TO_TMPFS_NODE(tvp);
1042 /* Disallow cross-device renames.
1043 * XXX Why isn't this done by the caller? */
1044 if (fvp->v_mount != tdvp->v_mount ||
1045 (tvp != NULL && fvp->v_mount != tvp->v_mount)) {
1050 tmp = VFS_TO_TMPFS(tdvp->v_mount);
1051 tdnode = VP_TO_TMPFS_DIR(tdvp);
1053 /* If source and target are the same file, there is nothing to do. */
1059 fdnode = VP_TO_TMPFS_DIR(fdvp);
1060 fnode = VP_TO_TMPFS_NODE(fvp);
1061 de = tmpfs_dir_lookup(fdnode, fnode, fncp);
1063 /* Avoid manipulating '.' and '..' entries. */
1068 KKASSERT(de->td_node == fnode);
1071 * If replacing an entry in the target directory and that entry
1072 * is a directory, it must be empty.
1074 * Kern_rename guarantees the destination to be a directory
1075 * if the source is one (it does?).
1078 KKASSERT(tnode != NULL);
1080 if ((tnode->tn_flags & (NOUNLINK | IMMUTABLE | APPEND)) ||
1081 (tdnode->tn_flags & (APPEND | IMMUTABLE))) {
/* dir-over-dir requires an empty target; mixed types are errors. */
1086 if (fnode->tn_type == VDIR && tnode->tn_type == VDIR) {
1087 if (tnode->tn_size > 0) {
1091 } else if (fnode->tn_type == VDIR && tnode->tn_type != VDIR) {
1094 } else if (fnode->tn_type != VDIR && tnode->tn_type == VDIR) {
1098 KKASSERT(fnode->tn_type != VDIR &&
1099 tnode->tn_type != VDIR);
1103 if ((fnode->tn_flags & (NOUNLINK | IMMUTABLE | APPEND)) ||
1104 (fdnode->tn_flags & (APPEND | IMMUTABLE))) {
1110 * Ensure that we have enough memory to hold the new name, if it
1111 * has to be changed.
1113 if (fncp->nc_nlen != tncp->nc_nlen ||
1114 bcmp(fncp->nc_name, tncp->nc_name, fncp->nc_nlen) != 0) {
1115 newname = kmalloc(tncp->nc_nlen + 1, tmp->tm_name_zone,
1116 M_WAITOK | M_NULLOK);
1117 if (newname == NULL) {
1121 bcopy(tncp->nc_name, newname, tncp->nc_nlen);
1122 newname[tncp->nc_nlen] = '\0';
1128 * Unlink entry from source directory. Note that the kernel has
1129 * already checked for illegal recursion cases (renaming a directory
1130 * into a subdirectory of itself).
1132 if (fdnode != tdnode) {
1133 tmpfs_dir_detach(fdnode, de);
1135 RB_REMOVE(tmpfs_dirtree, &fdnode->tn_dir.tn_dirtree, de);
1139 * Handle any name change. Swap with newname, we will
1140 * deallocate it at the end.
1142 if (newname != NULL) {
1144 TMPFS_NODE_LOCK(fnode);
1145 fnode->tn_status |= TMPFS_NODE_CHANGED;
1146 TMPFS_NODE_UNLOCK(fnode);
1148 oldname = de->td_name;
1149 de->td_name = newname;
1150 de->td_namelen = (uint16_t)tncp->nc_nlen;
1155 * If we are overwriting an entry, we have to remove the old one
1156 * from the target directory.
1159 /* Remove the old entry from the target directory. */
1160 tde = tmpfs_dir_lookup(tdnode, tnode, tncp);
1161 tmpfs_dir_detach(tdnode, tde);
1162 tmpfs_knote(tdnode->tn_vnode, NOTE_DELETE);
1165 * Free the directory entry we just deleted. Note that the
1166 * node referred by it will not be removed until the vnode is
1169 tmpfs_free_dirent(VFS_TO_TMPFS(tvp->v_mount), tde);
1170 /*cache_inval_vp(tvp, CINV_DESTROY);*/
1174 * Link entry to target directory. If the entry
1175 * represents a directory move the parent linkage
1178 if (fdnode != tdnode) {
1179 if (de->td_node->tn_type == VDIR) {
1180 TMPFS_VALIDATE_DIR(fnode);
1182 tmpfs_dir_attach(tdnode, de);
1184 TMPFS_NODE_LOCK(tdnode);
1185 tdnode->tn_status |= TMPFS_NODE_MODIFIED;
1186 RB_INSERT(tmpfs_dirtree, &tdnode->tn_dir.tn_dirtree, de);
1187 TMPFS_NODE_UNLOCK(tdnode);
1194 kfree(newname, tmp->tm_name_zone);
1197 cache_rename(v->a_fnch, v->a_tnch);
1198 tmpfs_knote(v->a_fdvp, NOTE_WRITE);
1199 tmpfs_knote(v->a_tdvp, NOTE_WRITE);
1200 if (fnode->tn_vnode)
1201 tmpfs_knote(fnode->tn_vnode, NOTE_RENAME);
1211 lwkt_reltoken(&mp->mnt_token);
1216 /* --------------------------------------------------------------------- */
/*
 * Create a directory. Delegates the allocation to tmpfs_alloc_file()
 * (asserting VDIR), resolves the namecache entry to the new vnode, and
 * posts NOTE_WRITE|NOTE_LINK on the parent.
 */
1219 tmpfs_nmkdir(struct vop_nmkdir_args *v)
1221 struct vnode *dvp = v->a_dvp;
1222 struct vnode **vpp = v->a_vpp;
1223 struct namecache *ncp = v->a_nch->ncp;
1224 struct vattr *vap = v->a_vap;
1225 struct ucred *cred = v->a_cred;
1231 lwkt_gettoken(&mp->mnt_token);
1232 KKASSERT(vap->va_type == VDIR);
1234 error = tmpfs_alloc_file(dvp, vpp, vap, ncp, cred, NULL);
1236 cache_setunresolved(v->a_nch);
1237 cache_setvp(v->a_nch, *vpp);
1238 tmpfs_knote(dvp, NOTE_WRITE | NOTE_LINK);
1241 lwkt_reltoken(&mp->mnt_token);
1246 /* --------------------------------------------------------------------- */
/*
 * Remove a directory. The victim is vget'd via the namecache (so
 * inactive/reclaim fire on release), must actually be a VDIR, must be
 * empty (tn_size == 0), and neither it nor its parent may carry
 * APPEND/IMMUTABLE/NOUNLINK flags. The dirent is detached, the parent
 * linkage cleared (blocks new creations in the dead dir), times updated
 * on both nodes, and the dirent freed.
 */
1249 tmpfs_nrmdir(struct vop_nrmdir_args *v)
1251 struct vnode *dvp = v->a_dvp;
1252 struct namecache *ncp = v->a_nch->ncp;
1254 struct tmpfs_dirent *de;
1255 struct tmpfs_mount *tmp;
1256 struct tmpfs_node *dnode;
1257 struct tmpfs_node *node;
1262 lwkt_gettoken(&mp->mnt_token);
1265 * We have to acquire the vp from v->a_nch because we will likely
1266 * unresolve the namecache entry, and a vrele/vput is needed to
1267 * trigger the tmpfs_inactive/tmpfs_reclaim sequence.
1269 * We have to use vget to clear any inactive state on the vnode,
1270 * otherwise the vnode may remain inactive and thus tmpfs_inactive
1271 * will not get called when we release it.
1273 error = cache_vget(v->a_nch, v->a_cred, LK_SHARED, &vp);
1274 KKASSERT(error == 0);
1278 * Prevalidate so we don't hit an assertion later
1280 if (vp->v_type != VDIR) {
1285 tmp = VFS_TO_TMPFS(dvp->v_mount);
1286 dnode = VP_TO_TMPFS_DIR(dvp);
1287 node = VP_TO_TMPFS_DIR(vp);
1289 /* Directories with more than two entries ('.' and '..') cannot be
1291 if (node->tn_size > 0) {
1296 if ((dnode->tn_flags & APPEND)
1297 || (node->tn_flags & (NOUNLINK | IMMUTABLE | APPEND))) {
1302 /* This invariant holds only if we are not trying to remove "..".
1303 * We checked for that above so this is safe now. */
1304 KKASSERT(node->tn_dir.tn_parent == dnode);
1306 /* Get the directory entry associated with node (vp). This was
1307 * filled by tmpfs_lookup while looking up the entry. */
1308 de = tmpfs_dir_lookup(dnode, node, ncp);
1309 KKASSERT(TMPFS_DIRENT_MATCHES(de,
1313 /* Check flags to see if we are allowed to remove the directory. */
1314 if ((dnode->tn_flags & APPEND) ||
1315 node->tn_flags & (NOUNLINK | IMMUTABLE | APPEND)) {
1321 /* Detach the directory entry from the directory (dnode). */
1322 tmpfs_dir_detach(dnode, de);
1324 /* No vnode should be allocated for this entry from this point */
1325 TMPFS_NODE_LOCK(node);
1326 TMPFS_ASSERT_ELOCKED(node);
1327 TMPFS_NODE_LOCK(dnode);
1328 TMPFS_ASSERT_ELOCKED(dnode);
1331 * Must set parent linkage to NULL (tested by ncreate to disallow
1332 * the creation of new files/dirs in a deleted directory)
1334 node->tn_status |= TMPFS_NODE_ACCESSED | TMPFS_NODE_CHANGED | \
1335 TMPFS_NODE_MODIFIED;
1337 dnode->tn_status |= TMPFS_NODE_ACCESSED | \
1338 TMPFS_NODE_CHANGED | TMPFS_NODE_MODIFIED;
1340 TMPFS_NODE_UNLOCK(dnode);
1341 TMPFS_NODE_UNLOCK(node);
1343 /* Free the directory entry we just deleted. Note that the node
1344 * referred by it will not be removed until the vnode is really
1346 tmpfs_free_dirent(tmp, de);
1348 /* Release the deleted vnode (will destroy the node, notify
1349 * interested parties and clean it from the cache). */
1351 TMPFS_NODE_LOCK(dnode);
1352 dnode->tn_status |= TMPFS_NODE_CHANGED;
1353 TMPFS_NODE_UNLOCK(dnode);
1356 cache_unlink(v->a_nch);
1357 tmpfs_knote(dvp, NOTE_WRITE | NOTE_LINK);
1363 lwkt_reltoken(&mp->mnt_token);
1368 /* --------------------------------------------------------------------- */
/*
 * Create a symbolic link. Forces va_type to VLNK and hands the target
 * string to tmpfs_alloc_file(), which stores it as the link contents.
 */
1371 tmpfs_nsymlink(struct vop_nsymlink_args *v)
1373 struct vnode *dvp = v->a_dvp;
1374 struct vnode **vpp = v->a_vpp;
1375 struct namecache *ncp = v->a_nch->ncp;
1376 struct vattr *vap = v->a_vap;
1377 struct ucred *cred = v->a_cred;
1378 char *target = v->a_target;
1379 struct mount *mp = dvp->v_mount;
1382 lwkt_gettoken(&mp->mnt_token);
1383 vap->va_type = VLNK;
1384 error = tmpfs_alloc_file(dvp, vpp, vap, ncp, cred, target);
1386 tmpfs_knote(*vpp, NOTE_WRITE);
1387 cache_setunresolved(v->a_nch);
1388 cache_setvp(v->a_nch, *vpp);
1391 lwkt_reltoken(&mp->mnt_token);
1396 /* --------------------------------------------------------------------- */
/*
 * Read directory entries. Emits "." and ".." via the special DOT/DOTDOT
 * cookies, then walks the dirent red-black tree with
 * tmpfs_dir_getdents(). When the caller (NFS) requests cookies, the
 * cookie array is reconstructed by re-walking the tree from the starting
 * offset so each returned entry has its tmpfs_dircookie().
 */
1399 tmpfs_readdir(struct vop_readdir_args *v)
1401 struct vnode *vp = v->a_vp;
1402 struct uio *uio = v->a_uio;
1403 int *eofflag = v->a_eofflag;
1404 off_t **cookies = v->a_cookies;
1405 int *ncookies = v->a_ncookies;
1406 struct tmpfs_mount *tmp;
1410 struct tmpfs_node *node;
1411 struct mount *mp = vp->v_mount;
1413 lwkt_gettoken(&mp->mnt_token);
1415 /* This operation only makes sense on directory nodes. */
1416 if (vp->v_type != VDIR) {
1417 lwkt_reltoken(&mp->mnt_token);
1421 tmp = VFS_TO_TMPFS(vp->v_mount);
1422 node = VP_TO_TMPFS_DIR(vp);
1423 startoff = uio->uio_offset;
1425 if (uio->uio_offset == TMPFS_DIRCOOKIE_DOT) {
1426 error = tmpfs_dir_getdotdent(node, uio);
1432 if (uio->uio_offset == TMPFS_DIRCOOKIE_DOTDOT) {
1433 error = tmpfs_dir_getdotdotdent(tmp, node, uio);
1439 error = tmpfs_dir_getdents(node, uio, &cnt);
1442 KKASSERT(error >= -1);
1447 if (eofflag != NULL)
1449 (error == 0 && uio->uio_offset == TMPFS_DIRCOOKIE_EOF);
1451 /* Update NFS-related variables. */
1452 if (error == 0 && cookies != NULL && ncookies != NULL) {
1454 off_t off = startoff;
1455 struct tmpfs_dirent *de = NULL;
1458 *cookies = kmalloc(cnt * sizeof(off_t), M_TEMP, M_WAITOK);
1460 for (i = 0; i < cnt; i++) {
1461 KKASSERT(off != TMPFS_DIRCOOKIE_EOF);
1462 if (off == TMPFS_DIRCOOKIE_DOT) {
1463 off = TMPFS_DIRCOOKIE_DOTDOT;
1465 if (off == TMPFS_DIRCOOKIE_DOTDOT) {
1466 de = RB_MIN(tmpfs_dirtree, &node->tn_dir.tn_dirtree);
1467 } else if (de != NULL) {
1468 de = RB_NEXT(tmpfs_dirtree, &node->tn_dir.tn_dirtree, de);
/* Re-locate the dirent for an arbitrary resume cookie. */
1470 de = tmpfs_dir_lookupbycookie(node,
1472 KKASSERT(de != NULL);
1473 de = RB_NEXT(tmpfs_dirtree, &node->tn_dir.tn_dirtree, de);
1476 off = TMPFS_DIRCOOKIE_EOF;
1478 off = tmpfs_dircookie(de);
1481 (*cookies)[i] = off;
1483 KKASSERT(uio->uio_offset == off);
1486 lwkt_reltoken(&mp->mnt_token);
1491 /* --------------------------------------------------------------------- */
/*
 * Read a symlink's target. Copies the stored link string (tn_link,
 * tn_size bytes) into the caller's uio and marks the node ACCESSED.
 */
1494 tmpfs_readlink(struct vop_readlink_args *v)
1496 struct vnode *vp = v->a_vp;
1497 struct uio *uio = v->a_uio;
1498 struct mount *mp = vp->v_mount;
1500 struct tmpfs_node *node;
1502 lwkt_gettoken(&mp->mnt_token);
1504 KKASSERT(uio->uio_offset == 0);
1505 KKASSERT(vp->v_type == VLNK);
1507 node = VP_TO_TMPFS_NODE(vp);
1509 error = uiomove(node->tn_link, MIN(node->tn_size, uio->uio_resid),
1511 TMPFS_NODE_LOCK(node);
1512 node->tn_status |= TMPFS_NODE_ACCESSED;
1513 TMPFS_NODE_UNLOCK(node);
1515 lwkt_reltoken(&mp->mnt_token);
1520 /* --------------------------------------------------------------------- */
/*
 * tmpfs_inactive: VOP_INACTIVE for tmpfs.  Called when the last reference
 * to an unlocked vnode goes away.  If the underlying node was removed
 * (tn_links == 0) and is not mid-allocation, mark it DOOMED and truncate
 * regular files to 0 so their page/data memory is released immediately.
 *
 * NOTE(review): elided listing — the declaration/assignment of `mp`, the
 * braces around the if/else arms, and an early-return path between the
 * first lwkt_reltoken() and the comment block are not shown.  Presumably
 * the vgone/recycle request also lives in the elided lines — confirm
 * against the full file.
 */
1523 tmpfs_inactive(struct vop_inactive_args *v)
1525 struct vnode *vp = v->a_vp;
1526 struct tmpfs_node *node;
1530 lwkt_gettoken(&mp->mnt_token);
1531 node = VP_TO_TMPFS_NODE(vp);
/* (elided early-exit path releases the token here) */
1538 lwkt_reltoken(&mp->mnt_token);
1543 * Get rid of unreferenced deleted vnodes sooner rather than
1544 * later so the data memory can be recovered immediately.
1546 * We must truncate the vnode to prevent the normal reclamation
1547 * path from flushing the data for the removed file to disk.
1549 TMPFS_NODE_LOCK(node);
/* Only doom nodes that are fully constructed and have no remaining links. */
1550 if ((node->tn_vpstate & TMPFS_VNODE_ALLOCATING) == 0 &&
1551 node->tn_links == 0)
1553 node->tn_vpstate = TMPFS_VNODE_DOOMED;
1554 TMPFS_NODE_UNLOCK(node);
/* Drop the node lock before truncating; only regular files hold data. */
1555 if (node->tn_type == VREG)
1556 tmpfs_truncate(vp, 0);
/* (else arm, elided) node survives: just unlock it. */
1559 TMPFS_NODE_UNLOCK(node);
1561 lwkt_reltoken(&mp->mnt_token);
1566 /* --------------------------------------------------------------------- */
/*
 * tmpfs_reclaim: VOP_RECLAIM for tmpfs.  Detaches the tmpfs node from the
 * vnode being reclaimed and, if the node was deleted by the user (no links
 * remain), frees the node's data structures entirely.
 *
 * NOTE(review): elided listing — the declaration/assignment of `mp`, the
 * vnode/node dissociation call between the KKASSERT and the comment block,
 * the else arm of the if, and the final return are not shown.
 */
1569 tmpfs_reclaim(struct vop_reclaim_args *v)
1571 struct vnode *vp = v->a_vp;
1572 struct tmpfs_mount *tmp;
1573 struct tmpfs_node *node;
1577 lwkt_gettoken(&mp->mnt_token);
1579 node = VP_TO_TMPFS_NODE(vp);
1580 tmp = VFS_TO_TMPFS(vp->v_mount);
/* Sanity: the vnode's mount must be the tmpfs mount we resolved. */
1581 KKASSERT(mp == tmp->tm_mount);
1586 * If the node referenced by this vnode was deleted by the
1587 * user, we must free its associated data structures now that
1588 * the vnode is being reclaimed.
1590 * Directories have an extra link ref.
1592 TMPFS_NODE_LOCK(node);
1593 if ((node->tn_vpstate & TMPFS_VNODE_ALLOCATING) == 0 &&
1594 node->tn_links == 0) {
/* Mark doomed, then free; tmpfs_free_node consumes the node. */
1595 node->tn_vpstate = TMPFS_VNODE_DOOMED;
1596 tmpfs_free_node(tmp, node);
/* (else arm, elided) node still linked: just unlock it. */
1599 TMPFS_NODE_UNLOCK(node);
1601 lwkt_reltoken(&mp->mnt_token);
/* After reclaim the vnode must no longer reference any tmpfs node. */
1603 KKASSERT(vp->v_data == NULL);
1607 /* --------------------------------------------------------------------- */
/*
 * tmpfs_mountctl: VOP_MOUNTCTL for tmpfs.  Handles MOUNTCTL_SET_EXPORT by
 * updating the mount's NFS export list via vfs_export(); every other op is
 * forwarded to the standard handler.
 *
 * NOTE(review): elided listing — the declarations of `mp` and `rc`, the
 * switch statement head, braces, the EINVAL error path for a bad a_ctllen,
 * the `default:` label and the final return are not shown.
 */
1610 tmpfs_mountctl(struct vop_mountctl_args *ap)
1612 struct tmpfs_mount *tmp;
/* Resolve the mount point from the vop vector rather than a vnode. */
1616 mp = ap->a_head.a_ops->head.vv_mount;
1617 lwkt_gettoken(&mp->mnt_token);
1620 case (MOUNTCTL_SET_EXPORT):
1621 tmp = (struct tmpfs_mount *) mp->mnt_data;
/* Reject a control buffer that is not an export_args structure. */
1623 if (ap->a_ctllen != sizeof(struct export_args))
1626 rc = vfs_export(mp, &tmp->tm_export,
1627 (const struct export_args *) ap->a_ctl);
/* (default case, elided) everything else goes to the stock handler. */
1630 rc = vop_stdmountctl(ap);
1634 lwkt_reltoken(&mp->mnt_token);
1638 /* --------------------------------------------------------------------- */
/*
 * tmpfs_print: VOP_PRINT for tmpfs.  Dumps the node's identity, flags,
 * link count, mode, ownership, size and status to the console for
 * debugging (e.g. from ddb or `pstat`-style inspection).
 *
 * NOTE(review): elided listing — the VFIFO branch body (presumably the
 * fifo print hook) and the final return are not shown.
 */
1641 tmpfs_print(struct vop_print_args *v)
1643 struct vnode *vp = v->a_vp;
1645 struct tmpfs_node *node;
1647 node = VP_TO_TMPFS_NODE(vp);
1649 kprintf("tag VT_TMPFS, tmpfs_node %p, flags 0x%x, links %d\n",
1650 node, node->tn_flags, node->tn_links);
/* %ju with a uintmax_t cast keeps tn_size printing portable. */
1651 kprintf("\tmode 0%o, owner %d, group %d, size %ju, status 0x%x\n",
1652 node->tn_mode, node->tn_uid, node->tn_gid,
1653 (uintmax_t)node->tn_size, node->tn_status);
1655 if (vp->v_type == VFIFO)
1663 /* --------------------------------------------------------------------- */
/*
 * tmpfs_pathconf: VOP_PATHCONF for tmpfs.  Reports per-filesystem limits
 * and options for pathconf(2)/fpathconf(2).
 *
 * NOTE(review): elided listing — the error declaration, the switch head,
 * all the other _PC_* cases (LINK_MAX, NAME_MAX, PIPE_BUF, ...), the
 * default EINVAL case and the final return are not shown.
 */
1666 tmpfs_pathconf(struct vop_pathconf_args *v)
1668 int name = v->a_name;
1669 register_t *retval = v->a_retval;
1692 case _PC_CHOWN_RESTRICTED:
1704 case _PC_FILESIZEBITS:
/* Inherited XXX from upstream: no meaningful bit-width is reported. */
1705 *retval = 0; /* XXX Don't know which value should I return. */
1715 /************************************************************************
1717 ************************************************************************/
/*
 * kqueue filter plumbing for tmpfs vnodes: forward declarations plus one
 * filterops table per supported filter (EVFILT_READ, EVFILT_WRITE,
 * EVFILT_VNODE).  All three share filt_tmpfsdetach for teardown and are
 * FILTEROP_ISFD (the knote identifier is a file descriptor).
 */
1719 static void filt_tmpfsdetach(struct knote *kn);
1720 static int filt_tmpfsread(struct knote *kn, long hint);
1721 static int filt_tmpfswrite(struct knote *kn, long hint);
1722 static int filt_tmpfsvnode(struct knote *kn, long hint);
1724 static struct filterops tmpfsread_filtops =
1725 { FILTEROP_ISFD, NULL, filt_tmpfsdetach, filt_tmpfsread };
1726 static struct filterops tmpfswrite_filtops =
1727 { FILTEROP_ISFD, NULL, filt_tmpfsdetach, filt_tmpfswrite };
1728 static struct filterops tmpfsvnode_filtops =
1729 { FILTEROP_ISFD, NULL, filt_tmpfsdetach, filt_tmpfsvnode };
/*
 * tmpfs_kqfilter: VOP_KQFILTER for tmpfs.  Attaches a knote to the vnode:
 * selects the filterops table matching the requested filter, stores the
 * vnode in kn_hook, and inserts the knote on the vnode's poll/kq list.
 * Unsupported filters get EOPNOTSUPP.
 *
 * NOTE(review): elided listing — the case labels (presumably EVFILT_READ,
 * EVFILT_WRITE, EVFILT_VNODE), break statements, the default label and
 * the final `return (0);` are not shown.
 */
1732 tmpfs_kqfilter (struct vop_kqfilter_args *ap)
1734 struct vnode *vp = ap->a_vp;
1735 struct knote *kn = ap->a_kn;
1737 switch (kn->kn_filter) {
1739 kn->kn_fop = &tmpfsread_filtops;
1742 kn->kn_fop = &tmpfswrite_filtops;
1745 kn->kn_fop = &tmpfsvnode_filtops;
1748 return (EOPNOTSUPP);
/* Remember the vnode so the filter callbacks can find it via kn_hook. */
1751 kn->kn_hook = (caddr_t)vp;
1753 knote_insert(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn);
/*
 * filt_tmpfsdetach: knote detach callback shared by all tmpfs filters.
 * Recovers the vnode stashed in kn_hook and removes the knote from the
 * vnode's poll/kq note list.
 */
1759 filt_tmpfsdetach(struct knote *kn)
1761 struct vnode *vp = (void *)kn->kn_hook;
1763 knote_remove(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn);
/*
 * filt_tmpfsread: EVFILT_READ event test.  Reports how many bytes are
 * readable past the file position (tn_size - f_offset), clamped to
 * INTPTR_MAX, and fires when that count is nonzero.  NOTE_REVOKE marks
 * the knote EOF/NODATA/ONESHOT (revoked vnode).
 *
 * NOTE(review): elided listing — the `off_t off;` declaration, the early
 * `return (1);` after the revoke branch, the NOTE_OLDAPI early return
 * value, and the braces/else structure around the kn_data==0 recheck are
 * not shown; the exact OLDAPI semantics cannot be confirmed from here.
 */
1767 filt_tmpfsread(struct knote *kn, long hint)
1769 struct vnode *vp = (void *)kn->kn_hook;
1770 struct tmpfs_node *node = VP_TO_TMPFS_NODE(vp);
/* Vnode revoked: report EOF and fire exactly once. */
1773 if (hint == NOTE_REVOKE) {
1774 kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
1779 * Interlock against MP races when performing this function.
1781 lwkt_gettoken(&vp->v_mount->mnt_token);
/* Bytes remaining between the fd's offset and end of file. */
1782 off = node->tn_size - kn->kn_fp->f_offset;
1783 kn->kn_data = (off < INTPTR_MAX) ? off : INTPTR_MAX;
/* Legacy (pre-kq poll emulation) callers take an early exit here. */
1784 if (kn->kn_sfflags & NOTE_OLDAPI) {
1785 lwkt_reltoken(&vp->v_mount->mnt_token);
1789 if (kn->kn_data == 0) {
1790 kn->kn_data = (off < INTPTR_MAX) ? off : INTPTR_MAX;
1792 lwkt_reltoken(&vp->v_mount->mnt_token);
1793 return (kn->kn_data != 0);
/*
 * filt_tmpfswrite: EVFILT_WRITE event test.  tmpfs files are always
 * writable; NOTE_REVOKE marks the knote EOF/NODATA/ONESHOT.
 *
 * NOTE(review): elided listing — the kn_data assignment and the
 * `return (1);` (always-writable result) are not shown.
 */
1797 filt_tmpfswrite(struct knote *kn, long hint)
1799 if (hint == NOTE_REVOKE)
1800 kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
/*
 * filt_tmpfsvnode: EVFILT_VNODE event test.  Accumulates any hinted
 * events the caller subscribed to (kn_sfflags) into kn_fflags and fires
 * when at least one has been recorded.  NOTE_REVOKE additionally marks
 * the knote EOF/NODATA.
 *
 * NOTE(review): elided listing — the body of the revoke branch between
 * the flag update and the return (presumably forcing NOTE_REVOKE into
 * kn_fflags) is not shown.
 */
1806 filt_tmpfsvnode(struct knote *kn, long hint)
1808 if (kn->kn_sfflags & hint)
1809 kn->kn_fflags |= hint;
1810 if (hint == NOTE_REVOKE) {
1811 kn->kn_flags |= (EV_EOF | EV_NODATA);
1814 return (kn->kn_fflags != 0);
1818 /* --------------------------------------------------------------------- */
1821 * vnode operations vector used for files stored in a tmpfs file system.
1823 struct vop_ops tmpfs_vnode_vops = {
1824 .vop_default = vop_defaultop,
1825 .vop_getpages = vop_stdgetpages,
1826 .vop_putpages = vop_stdputpages,
1827 .vop_ncreate = tmpfs_ncreate,
1828 .vop_nresolve = tmpfs_nresolve,
1829 .vop_nlookupdotdot = tmpfs_nlookupdotdot,
1830 .vop_nmknod = tmpfs_nmknod,
1831 .vop_open = tmpfs_open,
1832 .vop_close = tmpfs_close,
1833 .vop_access = tmpfs_access,
1834 .vop_getattr = tmpfs_getattr,
1835 .vop_setattr = tmpfs_setattr,
1836 .vop_read = tmpfs_read,
1837 .vop_write = tmpfs_write,
1838 .vop_fsync = tmpfs_fsync,
1839 .vop_mountctl = tmpfs_mountctl,
1840 .vop_nremove = tmpfs_nremove,
1841 .vop_nlink = tmpfs_nlink,
1842 .vop_nrename = tmpfs_nrename,
1843 .vop_nmkdir = tmpfs_nmkdir,
1844 .vop_nrmdir = tmpfs_nrmdir,
1845 .vop_nsymlink = tmpfs_nsymlink,
1846 .vop_readdir = tmpfs_readdir,
1847 .vop_readlink = tmpfs_readlink,
1848 .vop_inactive = tmpfs_inactive,
1849 .vop_reclaim = tmpfs_reclaim,
1850 .vop_print = tmpfs_print,
1851 .vop_pathconf = tmpfs_pathconf,
1852 .vop_bmap = tmpfs_bmap,
1853 .vop_strategy = tmpfs_strategy,
1854 .vop_advlock = tmpfs_advlock,
1855 .vop_kqfilter = tmpfs_kqfilter