kernel - Introduce lightweight buffers
[dragonfly.git] / sys / vfs / tmpfs / tmpfs_vnops.c
1/* $NetBSD: tmpfs_vnops.c,v 1.39 2007/07/23 15:41:01 jmmv Exp $ */
2
3/*-
4 * Copyright (c) 2005, 2006 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Julio M. Merino Vidal, developed as part of Google's Summer of Code
9 * 2005 program.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 * POSSIBILITY OF SUCH DAMAGE.
31 */
32
33/*
34 * tmpfs vnode interface.
35 */
36#include <sys/cdefs.h>
37
38#include <sys/kernel.h>
39#include <sys/kern_syscall.h>
40#include <sys/param.h>
41#include <sys/fcntl.h>
42#include <sys/lockf.h>
43#include <sys/priv.h>
44#include <sys/proc.h>
45#include <sys/resourcevar.h>
46#include <sys/sched.h>
47#include <sys/stat.h>
48#include <sys/systm.h>
49#include <sys/unistd.h>
50#include <sys/vfsops.h>
51#include <sys/vnode.h>
52
53#include <sys/mplock2.h>
54
55#include <vm/vm.h>
56#include <vm/vm_object.h>
57#include <vm/vm_page.h>
58#include <vm/vm_pager.h>
59
60#include <vfs/fifofs/fifo.h>
61#include <vfs/tmpfs/tmpfs_vnops.h>
62#include <vfs/tmpfs/tmpfs.h>
63
64MALLOC_DECLARE(M_TMPFS);
65
66/* --------------------------------------------------------------------- */
67
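/*
 * Resolve a namecache entry within the directory dvp.  On a hit a
 * referenced, unlocked vnode is recorded in the namecache; a miss is
 * recorded as a negative entry so later lookups can short-cut.
 */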
68static int
69tmpfs_nresolve(struct vop_nresolve_args *v)
70{
71 struct vnode *dvp = v->a_dvp;
72 struct vnode *vp = NULL;
73 struct namecache *ncp = v->a_nch->ncp;
74 struct tmpfs_node *tnode;
75
76 int error;
77 struct tmpfs_dirent *de;
78 struct tmpfs_node *dnode;
79
80 dnode = VP_TO_TMPFS_DIR(dvp);
81
82 de = tmpfs_dir_lookup(dnode, NULL, ncp);
83 if (de == NULL) {
84 error = ENOENT;
85 } else {
86 /*
87 * Allocate a vnode for the node we found.
88 */
89 tnode = de->td_node;
90 error = tmpfs_alloc_vp(dvp->v_mount, tnode,
91 LK_EXCLUSIVE | LK_RETRY, &vp);
92 if (error)
93 goto out;
94 KKASSERT(vp);
95 }
96
97out:
98 /*
99 * Store the result of this lookup in the cache. Avoid this if the
100 * request was for creation, as it does not improve timings on
101 * emprical tests.
102 */
103 if (vp) {
104 vn_unlock(vp);
105 cache_setvp(v->a_nch, vp);
106 vrele(vp);
107 } else if (error == ENOENT) {
108 cache_setvp(v->a_nch, NULL);
109 }
110 return error;
111}
112
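/*
 * Resolve "..": return an unlocked vnode for the parent of dvp, or
 * ENOENT if the directory no longer has a parent (e.g. it was removed).
 */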
113static int
114tmpfs_nlookupdotdot(struct vop_nlookupdotdot_args *v)
115{
116 struct vnode *dvp = v->a_dvp;
117 struct vnode **vpp = v->a_vpp;
118 struct tmpfs_node *dnode = VP_TO_TMPFS_NODE(dvp);
119 struct ucred *cred = v->a_cred;
120 int error;
121
122 *vpp = NULL;
123 /* Check accessibility of requested node as a first step. */
124 error = VOP_ACCESS(dvp, VEXEC, cred);
125 if (error != 0)
126 return error;
127
128 if (dnode->tn_dir.tn_parent != NULL) {
129 /* Allocate a new vnode on the matching entry. */
130 error = tmpfs_alloc_vp(dvp->v_mount, dnode->tn_dir.tn_parent,
131 LK_EXCLUSIVE | LK_RETRY, vpp);
132
133 if (*vpp)
134 vn_unlock(*vpp);
135 }
136
137 return (*vpp == NULL) ? ENOENT : 0;
138}
139
140/* --------------------------------------------------------------------- */
141
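/*
 * Create a regular file (or socket) in the directory dvp and hook the
 * new vnode into the namecache.
 */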
142static int
143tmpfs_ncreate(struct vop_ncreate_args *v)
144{
145 struct vnode *dvp = v->a_dvp;
146 struct vnode **vpp = v->a_vpp;
147 struct namecache *ncp = v->a_nch->ncp;
148 struct vattr *vap = v->a_vap;
149 struct ucred *cred = v->a_cred;
150 int error;
151
152 KKASSERT(vap->va_type == VREG || vap->va_type == VSOCK);
153
154 error = tmpfs_alloc_file(dvp, vpp, vap, ncp, cred, NULL);
155 if (error == 0) {
156 cache_setunresolved(v->a_nch);
157 cache_setvp(v->a_nch, *vpp);
158 }
159
160 return error;
161}
162/* --------------------------------------------------------------------- */
163
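/*
 * Create a device node or fifo.  Only VBLK, VCHR and VFIFO types are
 * accepted here; anything else is rejected with EINVAL.
 */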
164static int
165tmpfs_nmknod(struct vop_nmknod_args *v)
166{
167 struct vnode *dvp = v->a_dvp;
168 struct vnode **vpp = v->a_vpp;
169 struct namecache *ncp = v->a_nch->ncp;
170 struct vattr *vap = v->a_vap;
171 struct ucred *cred = v->a_cred;
172 int error;
173
174 if (vap->va_type != VBLK && vap->va_type != VCHR &&
175 vap->va_type != VFIFO)
176 return EINVAL;
177
178 error = tmpfs_alloc_file(dvp, vpp, vap, ncp, cred, NULL);
179 if (error == 0) {
180 cache_setunresolved(v->a_nch);
181 cache_setvp(v->a_nch, *vpp);
182 }
183
184 return error;
185}
186
187/* --------------------------------------------------------------------- */
188
189static int
190tmpfs_open(struct vop_open_args *v)
191{
192 struct vnode *vp = v->a_vp;
193 int mode = v->a_mode;
194
195 int error;
196 struct tmpfs_node *node;
197
198 node = VP_TO_TMPFS_NODE(vp);
199
200 /* The file is still active but all its names have been removed
201 * (e.g. by a "rmdir $(pwd)"). It cannot be opened any more as
202 * it is about to die. */
203 if (node->tn_links < 1)
204 return (ENOENT);
205
206 /* If the file is marked append-only, deny write requests. */
207 if ((node->tn_flags & APPEND) &&
208 (mode & (FWRITE | O_APPEND)) == FWRITE) {
209 error = EPERM;
210 } else {
211 return (vop_stdopen(v));
212 }
213 return error;
214}
215
216/* --------------------------------------------------------------------- */
217
218static int
219tmpfs_close(struct vop_close_args *v)
220{
221 struct vnode *vp = v->a_vp;
222 struct tmpfs_node *node;
223
224 node = VP_TO_TMPFS_NODE(vp);
225
226 if (node->tn_links > 0) {
227 /* Update node times. No need to do it if the node has
228 * been deleted, because it will vanish after we return. */
229 tmpfs_update(vp);
230 }
231
232 return vop_stdclose(v);
233}
234
235/* --------------------------------------------------------------------- */
236
237int
238tmpfs_access(struct vop_access_args *v)
239{
240 struct vnode *vp = v->a_vp;
241 int error;
242 struct tmpfs_node *node;
243
244 node = VP_TO_TMPFS_NODE(vp);
245
246 switch (vp->v_type) {
247 case VDIR:
248 /* FALLTHROUGH */
249 case VLNK:
250 /* FALLTHROUGH */
251 case VREG:
 252 if ((v->a_mode & VWRITE) && (vp->v_mount->mnt_flag & MNT_RDONLY)) {
253 error = EROFS;
254 goto out;
255 }
256 break;
257
258 case VBLK:
259 /* FALLTHROUGH */
260 case VCHR:
261 /* FALLTHROUGH */
262 case VSOCK:
263 /* FALLTHROUGH */
264 case VFIFO:
265 break;
266
267 default:
268 error = EINVAL;
269 goto out;
270 }
271
 272 if ((v->a_mode & VWRITE) && (node->tn_flags & IMMUTABLE)) {
273 error = EPERM;
274 goto out;
275 }
276
277 error = vop_helper_access(v, node->tn_uid, node->tn_gid, node->tn_mode, 0);
278
279out:
280
281 return error;
282}
283
284/* --------------------------------------------------------------------- */
285
286int
287tmpfs_getattr(struct vop_getattr_args *v)
288{
289 struct vnode *vp = v->a_vp;
290 struct vattr *vap = v->a_vap;
291 struct tmpfs_node *node;
292
293 node = VP_TO_TMPFS_NODE(vp);
294
295 tmpfs_update(vp);
296
297 vap->va_type = vp->v_type;
298 vap->va_mode = node->tn_mode;
299 vap->va_nlink = node->tn_links;
300 vap->va_uid = node->tn_uid;
301 vap->va_gid = node->tn_gid;
302 vap->va_fsid = vp->v_mount->mnt_stat.f_fsid.val[0];
303 vap->va_fileid = node->tn_id;
304 vap->va_size = node->tn_size;
305 vap->va_blocksize = PAGE_SIZE;
306 vap->va_atime.tv_sec = node->tn_atime;
307 vap->va_atime.tv_nsec = node->tn_atimensec;
308 vap->va_mtime.tv_sec = node->tn_mtime;
309 vap->va_mtime.tv_nsec = node->tn_mtimensec;
310 vap->va_ctime.tv_sec = node->tn_ctime;
311 vap->va_ctime.tv_nsec = node->tn_ctimensec;
312 vap->va_gen = node->tn_gen;
313 vap->va_flags = node->tn_flags;
314 if (vp->v_type == VBLK || vp->v_type == VCHR)
315 {
316 vap->va_rmajor = umajor(node->tn_rdev);
317 vap->va_rminor = uminor(node->tn_rdev);
318 }
319 vap->va_bytes = round_page(node->tn_size);
320 vap->va_filerev = 0;
321
322 return 0;
323}
324
325/* --------------------------------------------------------------------- */
326
327int
328tmpfs_setattr(struct vop_setattr_args *v)
329{
330 struct vnode *vp = v->a_vp;
331 struct vattr *vap = v->a_vap;
332 struct ucred *cred = v->a_cred;
333 int error = 0;
334
335 if (error == 0 && (vap->va_flags != VNOVAL))
336 error = tmpfs_chflags(vp, vap->va_flags, cred);
337
338 if (error == 0 && (vap->va_size != VNOVAL))
339 error = tmpfs_chsize(vp, vap->va_size, cred);
340
341 if (error == 0 && (vap->va_uid != (uid_t)VNOVAL ||
342 vap->va_gid != (gid_t)VNOVAL)) {
343 error = tmpfs_chown(vp, vap->va_uid, vap->va_gid, cred);
344 }
345
346 if (error == 0 && (vap->va_mode != (mode_t)VNOVAL))
347 error = tmpfs_chmod(vp, vap->va_mode, cred);
348
349 if (error == 0 && ((vap->va_atime.tv_sec != VNOVAL &&
350 vap->va_atime.tv_nsec != VNOVAL) ||
351 (vap->va_mtime.tv_sec != VNOVAL &&
352 vap->va_mtime.tv_nsec != VNOVAL) )) {
353 error = tmpfs_chtimes(vp, &vap->va_atime, &vap->va_mtime,
354 vap->va_vaflags, cred);
355 }
356
357 /* Update the node times. We give preference to the error codes
358 * generated by this function rather than the ones that may arise
359 * from tmpfs_update. */
360 tmpfs_update(vp);
361
362 return error;
363}
364
365/* --------------------------------------------------------------------- */
366
367/*
368 * fsync is usually a NOP, but we must take action when unmounting or
369 * when recycling.
370 */
371static int
372tmpfs_fsync(struct vop_fsync_args *v)
373{
374 struct tmpfs_mount *tmp;
375 struct tmpfs_node *node;
376 struct vnode *vp = v->a_vp;
377
378 tmp = VFS_TO_TMPFS(vp->v_mount);
379 node = VP_TO_TMPFS_NODE(vp);
380
381 tmpfs_update(vp);
382 if (vp->v_type == VREG) {
383 if (vp->v_flag & VRECLAIMED) {
384 if (node->tn_links == 0)
385 tmpfs_truncate(vp, 0);
386 else
387 vfsync(v->a_vp, v->a_waitfor, 1, NULL, NULL);
388 }
389 }
390 return 0;
391}
392
393/* --------------------------------------------------------------------- */
394
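/*
 * Read from a regular file through the buffer cache.  getcacheblk() is
 * tried first; on a miss the block is brought in with bread(), which is
 * backed by tmpfs_strategy and the swap pager.  The MP lock is taken
 * lazily, only once there is actually data to copy out.
 */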
395static int
396tmpfs_read (struct vop_read_args *ap)
397{
398 struct buf *bp;
399 struct vnode *vp = ap->a_vp;
400 struct uio *uio = ap->a_uio;
401 struct tmpfs_node *node;
402 off_t base_offset;
403 size_t offset;
404 size_t len;
405 int got_mplock;
406 int error;
407
408 error = 0;
409 if (uio->uio_resid == 0) {
410 return error;
411 }
412
413 node = VP_TO_TMPFS_NODE(vp);
414
415 if (uio->uio_offset < 0)
416 return (EINVAL);
417 if (vp->v_type != VREG)
418 return (EINVAL);
419
420#ifdef SMP
 421 if (curthread->td_mpcount)
422 got_mplock = -1;
423 else
424 got_mplock = 0;
425#else
426 got_mplock = -1;
427#endif
428
429 while (uio->uio_resid > 0 && uio->uio_offset < node->tn_size) {
430 /*
431 * Use buffer cache I/O (via tmpfs_strategy)
432 */
433 offset = (size_t)uio->uio_offset & BMASK;
434 base_offset = (off_t)uio->uio_offset - offset;
435 bp = getcacheblk(vp, base_offset);
436 if (bp == NULL)
437 {
438 if (got_mplock == 0) {
439 got_mplock = 1;
440 get_mplock();
441 }
442
443 error = bread(vp, base_offset, BSIZE, &bp);
444 if (error) {
445 brelse(bp);
446 kprintf("tmpfs_read bread error %d\n", error);
447 break;
448 }
449 }
450
451 if (got_mplock == 0) {
452 got_mplock = 1;
453 get_mplock();
454 }
455
456 /*
457 * Figure out how many bytes we can actually copy this loop.
458 */
459 len = BSIZE - offset;
460 if (len > uio->uio_resid)
461 len = uio->uio_resid;
462 if (len > node->tn_size - uio->uio_offset)
463 len = (size_t)(node->tn_size - uio->uio_offset);
464
465 error = uiomove((char *)bp->b_data + offset, len, uio);
466 bqrelse(bp);
467 if (error) {
468 kprintf("tmpfs_read uiomove error %d\n", error);
469 break;
470 }
471 }
472
473 if (got_mplock > 0)
474 rel_mplock();
475
476 TMPFS_NODE_LOCK(node);
477 node->tn_status |= TMPFS_NODE_ACCESSED;
478 TMPFS_NODE_UNLOCK(node);
479
480 return(error);
481}
482
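/*
 * Write to a regular file through the buffer cache.  The file is grown
 * as needed with tmpfs_reg_resize(), partially covered buffers are
 * pre-filled with bread(), and buffers are normally left clean via
 * buwrite() so the data lives in VM pages rather than the buffer cache.
 * Synchronous or pageout-driven writes go to swap via bwrite()/bawrite().
 */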
483static int
484tmpfs_write (struct vop_write_args *ap)
485{
486 struct buf *bp;
487 struct vnode *vp = ap->a_vp;
488 struct uio *uio = ap->a_uio;
489 struct thread *td = uio->uio_td;
490 struct tmpfs_node *node;
491 boolean_t extended;
492 off_t oldsize;
493 int error;
494 off_t base_offset;
495 size_t offset;
496 size_t len;
497 struct rlimit limit;
498 int got_mplock;
499 int trivial = 0;
500
501 error = 0;
502 if (uio->uio_resid == 0) {
503 return error;
504 }
505
506 node = VP_TO_TMPFS_NODE(vp);
507
508 if (vp->v_type != VREG)
509 return (EINVAL);
510
511 oldsize = node->tn_size;
512 if (ap->a_ioflag & IO_APPEND)
513 uio->uio_offset = node->tn_size;
514
515 /*
516 * Check for illegal write offsets.
517 */
518 if (uio->uio_offset + uio->uio_resid >
519 VFS_TO_TMPFS(vp->v_mount)->tm_maxfilesize)
520 return (EFBIG);
521
522 if (vp->v_type == VREG && td != NULL) {
523 error = kern_getrlimit(RLIMIT_FSIZE, &limit);
524 if (error != 0)
525 return error;
526 if (uio->uio_offset + uio->uio_resid > limit.rlim_cur) {
527 ksignal(td->td_proc, SIGXFSZ);
528 return (EFBIG);
529 }
530 }
531
532
533 /*
534 * Extend the file's size if necessary
535 */
536 extended = ((uio->uio_offset + uio->uio_resid) > node->tn_size);
537
538#ifdef SMP
539 if (curthread->td_mpcount) {
540 got_mplock = -1;
541 } else {
542 got_mplock = 1;
543 get_mplock();
544 }
545#else
546 got_mplock = -1;
547#endif
548 while (uio->uio_resid > 0) {
549 /*
550 * Use buffer cache I/O (via tmpfs_strategy)
551 */
552 offset = (size_t)uio->uio_offset & BMASK;
553 base_offset = (off_t)uio->uio_offset - offset;
554 len = BSIZE - offset;
555 if (len > uio->uio_resid)
556 len = uio->uio_resid;
557
558 if ((uio->uio_offset + len) > node->tn_size) {
559 trivial = (uio->uio_offset <= node->tn_size);
560 error = tmpfs_reg_resize(vp, uio->uio_offset + len, trivial);
561 if (error)
562 break;
563 }
564
565 /*
566 * Read to fill in any gaps. Theoretically we could
567 * optimize this if the write covers the entire buffer
568 * and is not a UIO_NOCOPY write, however this can lead
569 * to a security violation exposing random kernel memory
570 * (whatever junk was in the backing VM pages before).
571 *
572 * So just use bread() to do the right thing.
573 */
 574 error = bread(vp, base_offset, BSIZE, &bp);
 if (error) {
 brelse(bp);
 kprintf("tmpfs_write bread error %d\n", error);
 break;
 }
 575 error = uiomove((char *)bp->b_data + offset, len, uio);
576 if (error) {
577 kprintf("tmpfs_write uiomove error %d\n", error);
578 brelse(bp);
579 break;
580 }
581
582 if (uio->uio_offset > node->tn_size)
583 node->tn_size = uio->uio_offset;
584
585 /*
586 * The data has been loaded into the buffer, write it out.
587 *
588 * We want tmpfs to be able to use all available ram, not
589 * just the buffer cache, so if not explicitly paging we
590 * use buwrite() to leave the buffer clean but mark all the
591 * VM pages valid+dirty.
592 *
593 * When the kernel is paging, either via normal pageout
594 * operation or when cleaning the object during a recycle,
595 * the underlying VM pages are going to get thrown away
596 * so we MUST write them to swap.
597 *
598 * XXX unfortunately this catches msync() system calls too
599 * for the moment.
600 */
601 if (ap->a_ioflag & IO_SYNC) {
602 bwrite(bp);
603 } else if ((ap->a_ioflag & IO_ASYNC) ||
604 (uio->uio_segflg == UIO_NOCOPY)) {
605 bawrite(bp);
606 } else {
607 buwrite(bp);
608 }
609
 610 if (bp->b_error) {
 error = bp->b_error;
 611 kprintf("tmpfs_write bwrite error %d\n", error);
 612 break;
613 }
614 }
615
616 if (got_mplock > 0)
617 rel_mplock();
618
619 if (error) {
620 if (extended)
621 (void)tmpfs_reg_resize(vp, oldsize, trivial);
622 return error;
623 }
624
625 TMPFS_NODE_LOCK(node);
626 node->tn_status |= TMPFS_NODE_ACCESSED | TMPFS_NODE_MODIFIED |
627 (extended? TMPFS_NODE_CHANGED : 0);
628
629 if (node->tn_mode & (S_ISUID | S_ISGID)) {
630 if (priv_check_cred(ap->a_cred, PRIV_VFS_RETAINSUGID, 0))
631 node->tn_mode &= ~(S_ISUID | S_ISGID);
632 }
633 TMPFS_NODE_UNLOCK(node);
634
635 return(error);
636}
637
638static int
639tmpfs_advlock (struct vop_advlock_args *ap)
640{
641 struct tmpfs_node *node;
642 struct vnode *vp = ap->a_vp;
643
644 node = VP_TO_TMPFS_NODE(vp);
645
646 return (lf_advlock(ap, &node->tn_advlock, node->tn_size));
647}
648
649
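/*
 * Strategy routine for the buffer cache.  Regular-file I/O is handed
 * straight to the swap pager, which moves data between the node's
 * anonymous VM object and swap.
 */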
650static int
651tmpfs_strategy(struct vop_strategy_args *ap)
652{
653 struct bio *bio = ap->a_bio;
654 struct buf *bp = bio->bio_buf;
655 struct vnode *vp = ap->a_vp;
656 struct tmpfs_node *node;
657 vm_object_t uobj;
658
659 if (vp->v_type != VREG) {
660 bp->b_resid = bp->b_bcount;
661 bp->b_flags |= B_ERROR | B_INVAL;
662 bp->b_error = EINVAL;
663 biodone(bio);
664 return(0);
665 }
666
667 node = VP_TO_TMPFS_NODE(vp);
668
669 uobj = node->tn_reg.tn_aobj;
670
671 /*
672 * Call swap_pager_strategy to read or write between the VM
673 * object and the buffer cache.
674 */
675 swap_pager_strategy(uobj, bio);
676
677 return 0;
678}
679
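/*
 * Logical-to-physical block mapping is the identity mapping here; the
 * backing object is addressed with the same offsets as the file itself.
 */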
680static int
681tmpfs_bmap(struct vop_bmap_args *ap)
682{
683 if (ap->a_doffsetp != NULL)
684 *ap->a_doffsetp = ap->a_loffset;
685 if (ap->a_runp != NULL)
686 *ap->a_runp = 0;
687 if (ap->a_runb != NULL)
688 *ap->a_runb = 0;
689
690 return 0;
691}
692
693/* --------------------------------------------------------------------- */
694
695static int
696tmpfs_nremove(struct vop_nremove_args *v)
697{
698 struct vnode *dvp = v->a_dvp;
699 struct namecache *ncp = v->a_nch->ncp;
700 struct vnode *vp;
701 int error;
702 struct tmpfs_dirent *de;
703 struct tmpfs_mount *tmp;
704 struct tmpfs_node *dnode;
705 struct tmpfs_node *node;
706
707 /*
708 * We have to acquire the vp from v->a_nch because
709 * we will likely unresolve the namecache entry, and
710 * a vrele is needed to trigger the tmpfs_inactive/tmpfs_reclaim
711 * sequence to recover space from the file.
712 */
713 error = cache_vref(v->a_nch, v->a_cred, &vp);
714 KKASSERT(error == 0);
715
716 if (vp->v_type == VDIR) {
717 error = EISDIR;
718 goto out;
719 }
720
721 dnode = VP_TO_TMPFS_DIR(dvp);
722 node = VP_TO_TMPFS_NODE(vp);
723 tmp = VFS_TO_TMPFS(vp->v_mount);
724 de = tmpfs_dir_lookup(dnode, node, ncp);
725 if (de == NULL) {
726 error = ENOENT;
727 goto out;
728 }
729
730 /* Files marked as immutable or append-only cannot be deleted. */
731 if ((node->tn_flags & (IMMUTABLE | APPEND | NOUNLINK)) ||
732 (dnode->tn_flags & APPEND)) {
733 error = EPERM;
734 goto out;
735 }
736
737 /* Remove the entry from the directory; as it is a file, we do not
738 * have to change the number of hard links of the directory. */
739 tmpfs_dir_detach(dnode, de);
740
741 /* Free the directory entry we just deleted. Note that the node
742 * referred by it will not be removed until the vnode is really
743 * reclaimed. */
744 tmpfs_free_dirent(tmp, de);
745
746 if (node->tn_links > 0) {
747 TMPFS_NODE_LOCK(node);
748 node->tn_status |= TMPFS_NODE_ACCESSED | TMPFS_NODE_CHANGED | \
749 TMPFS_NODE_MODIFIED;
750 TMPFS_NODE_UNLOCK(node);
751 }
752
753 cache_setunresolved(v->a_nch);
754 cache_setvp(v->a_nch, NULL);
755 /*cache_inval_vp(vp, CINV_DESTROY);*/
756 error = 0;
757
758out:
759 vrele(vp);
760
761 return error;
762}
763
764/* --------------------------------------------------------------------- */
765
766static int
767tmpfs_nlink(struct vop_nlink_args *v)
768{
769 struct vnode *dvp = v->a_dvp;
770 struct vnode *vp = v->a_vp;
771 struct namecache *ncp = v->a_nch->ncp;
772 struct tmpfs_dirent *de;
773 struct tmpfs_node *node;
774 struct tmpfs_node *dnode;
775 int error;
776
777 KKASSERT(dvp != vp); /* XXX When can this be false? */
778
779 node = VP_TO_TMPFS_NODE(vp);
780 dnode = VP_TO_TMPFS_NODE(dvp);
781
782 /* XXX: Why aren't the following two tests done by the caller? */
783
784 /* Hard links of directories are forbidden. */
785 if (vp->v_type == VDIR) {
786 error = EPERM;
787 goto out;
788 }
789
790 /* Cannot create cross-device links. */
791 if (dvp->v_mount != vp->v_mount) {
792 error = EXDEV;
793 goto out;
794 }
795
796 /* Ensure that we do not overflow the maximum number of links imposed
797 * by the system. */
798 KKASSERT(node->tn_links <= LINK_MAX);
799 if (node->tn_links == LINK_MAX) {
800 error = EMLINK;
801 goto out;
802 }
803
804 /* We cannot create links of files marked immutable or append-only. */
805 if (node->tn_flags & (IMMUTABLE | APPEND)) {
806 error = EPERM;
807 goto out;
808 }
809
810 /* Allocate a new directory entry to represent the node. */
811 error = tmpfs_alloc_dirent(VFS_TO_TMPFS(vp->v_mount), node,
812 ncp->nc_name, ncp->nc_nlen, &de);
813 if (error != 0)
814 goto out;
815
816 /* Insert the new directory entry into the appropriate directory. */
817 tmpfs_dir_attach(dnode, de);
818
819 /* vp link count has changed, so update node times. */
820
821 TMPFS_NODE_LOCK(node);
822 node->tn_status |= TMPFS_NODE_CHANGED;
823 TMPFS_NODE_UNLOCK(node);
824 tmpfs_update(vp);
825
826 cache_setunresolved(v->a_nch);
827 cache_setvp(v->a_nch, vp);
828 error = 0;
829
830out:
831 return error;
832}
833
834/* --------------------------------------------------------------------- */
835
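/*
 * Rename fncp in fdvp to tncp in tdvp.  The entry is detached from the
 * source directory, renamed if necessary, attached to the target
 * directory (fixing up parent linkage for directories), and any entry
 * being overwritten is removed.
 */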
836static int
837tmpfs_nrename(struct vop_nrename_args *v)
838{
839 struct vnode *fdvp = v->a_fdvp;
840 struct namecache *fncp = v->a_fnch->ncp;
841 struct vnode *fvp = fncp->nc_vp;
842 struct vnode *tdvp = v->a_tdvp;
843 struct namecache *tncp = v->a_tnch->ncp;
844 struct vnode *tvp = tncp->nc_vp;
845 struct tmpfs_dirent *de;
846 struct tmpfs_mount *tmp;
847 struct tmpfs_node *fdnode;
848 struct tmpfs_node *fnode;
849 struct tmpfs_node *tnode;
850 struct tmpfs_node *tdnode;
851 char *newname;
852 char *oldname;
853 int error;
854
855 tnode = (tvp == NULL) ? NULL : VP_TO_TMPFS_NODE(tvp);
856
857 /* Disallow cross-device renames.
858 * XXX Why isn't this done by the caller? */
859 if (fvp->v_mount != tdvp->v_mount ||
860 (tvp != NULL && fvp->v_mount != tvp->v_mount)) {
861 error = EXDEV;
862 goto out;
863 }
864
865 tmp = VFS_TO_TMPFS(tdvp->v_mount);
866 tdnode = VP_TO_TMPFS_DIR(tdvp);
867
868 /* If source and target are the same file, there is nothing to do. */
869 if (fvp == tvp) {
870 error = 0;
871 goto out;
872 }
873
874 fdnode = VP_TO_TMPFS_DIR(fdvp);
875 fnode = VP_TO_TMPFS_NODE(fvp);
876 de = tmpfs_dir_lookup(fdnode, fnode, fncp);
877
878 /* Avoid manipulating '.' and '..' entries. */
879 if (de == NULL) {
880 error = ENOENT;
881 goto out_locked;
882 }
883 KKASSERT(de->td_node == fnode);
884
885 /*
886 * If replacing an entry in the target directory and that entry
887 * is a directory, it must be empty.
888 *
 889 * Kern_rename guarantees that the destination is a directory
890 * if the source is one (it does?).
891 */
892 if (tvp != NULL) {
893 KKASSERT(tnode != NULL);
894
895 if ((tnode->tn_flags & (NOUNLINK | IMMUTABLE | APPEND)) ||
896 (tdnode->tn_flags & (APPEND | IMMUTABLE))) {
897 error = EPERM;
898 goto out_locked;
899 }
900
901 if (fnode->tn_type == VDIR && tnode->tn_type == VDIR) {
902 if (tnode->tn_size > 0) {
903 error = ENOTEMPTY;
904 goto out_locked;
905 }
906 } else if (fnode->tn_type == VDIR && tnode->tn_type != VDIR) {
907 error = ENOTDIR;
908 goto out_locked;
909 } else if (fnode->tn_type != VDIR && tnode->tn_type == VDIR) {
910 error = EISDIR;
911 goto out_locked;
912 } else {
913 KKASSERT(fnode->tn_type != VDIR &&
914 tnode->tn_type != VDIR);
915 }
916 }
917
918 if ((fnode->tn_flags & (NOUNLINK | IMMUTABLE | APPEND)) ||
919 (fdnode->tn_flags & (APPEND | IMMUTABLE))) {
920 error = EPERM;
921 goto out_locked;
922 }
923
924 /*
925 * Ensure that we have enough memory to hold the new name, if it
926 * has to be changed.
927 */
928 if (fncp->nc_nlen != tncp->nc_nlen ||
929 bcmp(fncp->nc_name, tncp->nc_name, fncp->nc_nlen) != 0) {
930 newname = kmalloc(tncp->nc_nlen + 1, M_TMPFSNAME, M_WAITOK);
931 bcopy(tncp->nc_name, newname, tncp->nc_nlen);
932 newname[tncp->nc_nlen] = '\0';
933 } else {
934 newname = NULL;
935 }
936
937 /*
938 * Unlink entry from source directory. Note that the kernel has
939 * already checked for illegal recursion cases (renaming a directory
940 * into a subdirectory of itself).
941 */
942 if (fdnode != tdnode)
943 tmpfs_dir_detach(fdnode, de);
944
945 /*
946 * Handle any name change. Swap with newname, we will
947 * deallocate it at the end.
948 */
949 if (newname != NULL) {
950#if 0
951 TMPFS_NODE_LOCK(fnode);
952 fnode->tn_status |= TMPFS_NODE_CHANGED;
953 TMPFS_NODE_UNLOCK(fnode);
954#endif
955 oldname = de->td_name;
956 de->td_name = newname;
957 de->td_namelen = (uint16_t)tncp->nc_nlen;
958 newname = oldname;
959 }
960
961 /*
962 * Link entry to target directory. If the entry
963 * represents a directory move the parent linkage
964 * as well.
965 */
966 if (fdnode != tdnode) {
967 if (de->td_node->tn_type == VDIR) {
968 TMPFS_VALIDATE_DIR(fnode);
969
970 TMPFS_NODE_LOCK(tdnode);
971 tdnode->tn_links++;
972 tdnode->tn_status |= TMPFS_NODE_MODIFIED;
973 TMPFS_NODE_UNLOCK(tdnode);
974
975 TMPFS_NODE_LOCK(fnode);
976 fnode->tn_dir.tn_parent = tdnode;
977 fnode->tn_status |= TMPFS_NODE_CHANGED;
978 TMPFS_NODE_UNLOCK(fnode);
979
980 TMPFS_NODE_LOCK(fdnode);
981 fdnode->tn_links--;
982 fdnode->tn_status |= TMPFS_NODE_MODIFIED;
983 TMPFS_NODE_UNLOCK(fdnode);
984 }
985 tmpfs_dir_attach(tdnode, de);
986 } else {
987 TMPFS_NODE_LOCK(tdnode);
988 tdnode->tn_status |= TMPFS_NODE_MODIFIED;
989 TMPFS_NODE_UNLOCK(tdnode);
990 }
991
992 /*
993 * If we are overwriting an entry, we have to remove the old one
994 * from the target directory.
995 */
996 if (tvp != NULL) {
997 /* Remove the old entry from the target directory. */
998 de = tmpfs_dir_lookup(tdnode, tnode, tncp);
999 tmpfs_dir_detach(tdnode, de);
1000
1001 /*
1002 * Free the directory entry we just deleted. Note that the
1003 * node referred by it will not be removed until the vnode is
1004 * really reclaimed.
1005 */
1006 tmpfs_free_dirent(VFS_TO_TMPFS(tvp->v_mount), de);
1007 /*cache_inval_vp(tvp, CINV_DESTROY);*/
1008 }
1009
1010 /*
1011 * Finish up
1012 */
1013 if (newname) {
1014 kfree(newname, M_TMPFSNAME);
1015 newname = NULL;
1016 }
1017 cache_rename(v->a_fnch, v->a_tnch);
1018 error = 0;
1019
1020out_locked:
1021 ;
1022
1023out:
1024 /* Release target nodes. */
1025 /* XXX: I don't understand when tdvp can be the same as tvp, but
1026 * other code takes care of this... */
1027 if (tdvp == tvp)
1028 vrele(tdvp);
1029
1030 return error;
1031}
1032
1033/* --------------------------------------------------------------------- */
1034
1035static int
1036tmpfs_nmkdir(struct vop_nmkdir_args *v)
1037{
1038 struct vnode *dvp = v->a_dvp;
1039 struct vnode **vpp = v->a_vpp;
1040 struct namecache *ncp = v->a_nch->ncp;
1041 struct vattr *vap = v->a_vap;
1042 struct ucred *cred = v->a_cred;
1043 int error;
1044
1045 KKASSERT(vap->va_type == VDIR);
1046
1047 error = tmpfs_alloc_file(dvp, vpp, vap, ncp, cred, NULL);
1048 if (error == 0) {
1049 cache_setunresolved(v->a_nch);
1050 cache_setvp(v->a_nch, *vpp);
1051 }
1052
1053 return error;
1054}
1055
1056/* --------------------------------------------------------------------- */
1057
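/*
 * Remove an empty directory.  The directory entry is detached and freed;
 * the node itself is not destroyed until the vnode is reclaimed.
 */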
1058static int
1059tmpfs_nrmdir(struct vop_nrmdir_args *v)
1060{
1061 struct vnode *dvp = v->a_dvp;
1062 struct namecache *ncp = v->a_nch->ncp;
1063 struct vnode *vp;
1064 struct tmpfs_dirent *de;
1065 struct tmpfs_mount *tmp;
1066 struct tmpfs_node *dnode;
1067 struct tmpfs_node *node;
1068 int error;
1069
1070 /*
1071 * We have to acquire the vp from v->a_nch because
1072 * we will likely unresolve the namecache entry, and
1073 * a vrele is needed to trigger the tmpfs_inactive/tmpfs_reclaim
1074 * sequence.
1075 */
1076 error = cache_vref(v->a_nch, v->a_cred, &vp);
1077 KKASSERT(error == 0);
1078
1079 /*
1080 * Prevalidate so we don't hit an assertion later
1081 */
1082 if (vp->v_type != VDIR) {
1083 error = ENOTDIR;
1084 goto out;
1085 }
1086
1087 tmp = VFS_TO_TMPFS(dvp->v_mount);
1088 dnode = VP_TO_TMPFS_DIR(dvp);
1089 node = VP_TO_TMPFS_DIR(vp);
1090
1091 /* Directories with more than two entries ('.' and '..') cannot be
1092 * removed. */
1093 if (node->tn_size > 0) {
1094 error = ENOTEMPTY;
1095 goto out;
1096 }
1097
1098 if ((dnode->tn_flags & APPEND)
1099 || (node->tn_flags & (NOUNLINK | IMMUTABLE | APPEND))) {
1100 error = EPERM;
1101 goto out;
1102 }
1103
1104 /* This invariant holds only if we are not trying to remove "..".
1105 * We checked for that above so this is safe now. */
1106 KKASSERT(node->tn_dir.tn_parent == dnode);
1107
1108 /* Get the directory entry associated with node (vp). This was
1109 * filled by tmpfs_lookup while looking up the entry. */
1110 de = tmpfs_dir_lookup(dnode, node, ncp);
1111 KKASSERT(TMPFS_DIRENT_MATCHES(de,
1112 ncp->nc_name,
1113 ncp->nc_nlen));
1114
1115 /* Check flags to see if we are allowed to remove the directory. */
1116 if ((dnode->tn_flags & APPEND) ||
1117 node->tn_flags & (NOUNLINK | IMMUTABLE | APPEND)) {
1118 error = EPERM;
1119 goto out;
1120 }
1121
1122
1123 /* Detach the directory entry from the directory (dnode). */
1124 tmpfs_dir_detach(dnode, de);
1125
1126 /* No vnode should be allocated for this entry from this point */
1127 TMPFS_NODE_LOCK(node);
1128 TMPFS_ASSERT_ELOCKED(node);
1129 TMPFS_NODE_LOCK(dnode);
1130 TMPFS_ASSERT_ELOCKED(dnode);
1131
1132#if 0
1133 /* handled by tmpfs_free_node */
1134 KKASSERT(node->tn_links > 0);
1135 node->tn_links--;
1136 node->tn_dir.tn_parent = NULL;
1137#endif
1138 node->tn_status |= TMPFS_NODE_ACCESSED | TMPFS_NODE_CHANGED | \
1139 TMPFS_NODE_MODIFIED;
1140
1141#if 0
1142 /* handled by tmpfs_free_node */
1143 KKASSERT(dnode->tn_links > 0);
1144 dnode->tn_links--;
1145#endif
1146 dnode->tn_status |= TMPFS_NODE_ACCESSED | \
1147 TMPFS_NODE_CHANGED | TMPFS_NODE_MODIFIED;
1148
1149 TMPFS_NODE_UNLOCK(dnode);
1150 TMPFS_NODE_UNLOCK(node);
1151
1152 /* Free the directory entry we just deleted. Note that the node
1153 * referred by it will not be removed until the vnode is really
1154 * reclaimed. */
1155 tmpfs_free_dirent(tmp, de);
1156
1157 /* Release the deleted vnode (will destroy the node, notify
1158 * interested parties and clean it from the cache). */
1159
1160 TMPFS_NODE_LOCK(dnode);
1161 dnode->tn_status |= TMPFS_NODE_CHANGED;
1162 TMPFS_NODE_UNLOCK(dnode);
1163 tmpfs_update(dvp);
1164
1165 cache_setunresolved(v->a_nch);
1166 cache_setvp(v->a_nch, NULL);
1167 /*cache_inval_vp(vp, CINV_DESTROY);*/
1168 error = 0;
1169
1170out:
1171 vrele(vp);
1172
1173 return error;
1174}
1175
1176/* --------------------------------------------------------------------- */
1177
1178static int
1179tmpfs_nsymlink(struct vop_nsymlink_args *v)
1180{
1181 struct vnode *dvp = v->a_dvp;
1182 struct vnode **vpp = v->a_vpp;
1183 struct namecache *ncp = v->a_nch->ncp;
1184 struct vattr *vap = v->a_vap;
1185 struct ucred *cred = v->a_cred;
1186 char *target = v->a_target;
1187 int error;
1188
1189 vap->va_type = VLNK;
1190 error = tmpfs_alloc_file(dvp, vpp, vap, ncp, cred, target);
1191 if (error == 0) {
1192 cache_setunresolved(v->a_nch);
1193 cache_setvp(v->a_nch, *vpp);
1194 }
1195
1196 return error;
1197}
1198
1199/* --------------------------------------------------------------------- */
1200
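/*
 * Read directory entries.  "." and ".." are synthesized from the special
 * dot cookies, the remaining entries come from the dirent list, and the
 * cookie array for NFS is rebuilt by walking the same entries again.
 */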
1201static int
1202tmpfs_readdir(struct vop_readdir_args *v)
1203{
1204 struct vnode *vp = v->a_vp;
1205 struct uio *uio = v->a_uio;
1206 int *eofflag = v->a_eofflag;
1207 off_t **cookies = v->a_cookies;
1208 int *ncookies = v->a_ncookies;
1209 struct tmpfs_mount *tmp;
1210 int error;
1211 off_t startoff;
1212 off_t cnt = 0;
1213 struct tmpfs_node *node;
1214
1215 /* This operation only makes sense on directory nodes. */
1216 if (vp->v_type != VDIR)
1217 return ENOTDIR;
1218
1219 tmp = VFS_TO_TMPFS(vp->v_mount);
1220 node = VP_TO_TMPFS_DIR(vp);
1221 startoff = uio->uio_offset;
1222
1223 if (uio->uio_offset == TMPFS_DIRCOOKIE_DOT) {
1224 error = tmpfs_dir_getdotdent(node, uio);
1225 if (error != 0)
1226 goto outok;
1227 cnt++;
1228 }
1229
1230 if (uio->uio_offset == TMPFS_DIRCOOKIE_DOTDOT) {
1231 error = tmpfs_dir_getdotdotdent(tmp, node, uio);
1232 if (error != 0)
1233 goto outok;
1234 cnt++;
1235 }
1236
1237 error = tmpfs_dir_getdents(node, uio, &cnt);
1238
1239outok:
1240 KKASSERT(error >= -1);
1241
1242 if (error == -1)
1243 error = 0;
1244
1245 if (eofflag != NULL)
1246 *eofflag =
1247 (error == 0 && uio->uio_offset == TMPFS_DIRCOOKIE_EOF);
1248
1249 /* Update NFS-related variables. */
1250 if (error == 0 && cookies != NULL && ncookies != NULL) {
1251 off_t i;
1252 off_t off = startoff;
1253 struct tmpfs_dirent *de = NULL;
1254
1255 *ncookies = cnt;
1256 *cookies = kmalloc(cnt * sizeof(off_t), M_TEMP, M_WAITOK);
1257
1258 for (i = 0; i < cnt; i++) {
1259 KKASSERT(off != TMPFS_DIRCOOKIE_EOF);
1260 if (off == TMPFS_DIRCOOKIE_DOT) {
1261 off = TMPFS_DIRCOOKIE_DOTDOT;
1262 } else {
1263 if (off == TMPFS_DIRCOOKIE_DOTDOT) {
1264 de = TAILQ_FIRST(&node->tn_dir.tn_dirhead);
1265 } else if (de != NULL) {
1266 de = TAILQ_NEXT(de, td_entries);
1267 } else {
1268 de = tmpfs_dir_lookupbycookie(node,
1269 off);
1270 KKASSERT(de != NULL);
1271 de = TAILQ_NEXT(de, td_entries);
1272 }
1273 if (de == NULL)
1274 off = TMPFS_DIRCOOKIE_EOF;
1275 else
1276 off = tmpfs_dircookie(de);
1277 }
1278
1279 (*cookies)[i] = off;
1280 }
1281 KKASSERT(uio->uio_offset == off);
1282 }
1283
1284 return error;
1285}
1286
1287/* --------------------------------------------------------------------- */
1288
1289static int
1290tmpfs_readlink(struct vop_readlink_args *v)
1291{
1292 struct vnode *vp = v->a_vp;
1293 struct uio *uio = v->a_uio;
1294
1295 int error;
1296 struct tmpfs_node *node;
1297
1298 KKASSERT(uio->uio_offset == 0);
1299 KKASSERT(vp->v_type == VLNK);
1300
1301 node = VP_TO_TMPFS_NODE(vp);
1302
1303 error = uiomove(node->tn_link, MIN(node->tn_size, uio->uio_resid),
1304 uio);
1305 TMPFS_NODE_LOCK(node);
1306 node->tn_status |= TMPFS_NODE_ACCESSED;
1307 TMPFS_NODE_UNLOCK(node);
1308
1309 return error;
1310}
1311
1312/* --------------------------------------------------------------------- */
1313
1314static int
1315tmpfs_inactive(struct vop_inactive_args *v)
1316{
1317 struct vnode *vp = v->a_vp;
1318
1319 struct tmpfs_node *node;
1320
1321 node = VP_TO_TMPFS_NODE(vp);
1322
1323 /*
1324 * Get rid of unreferenced deleted vnodes sooner rather than
1325 * later so the data memory can be recovered immediately.
1326 *
1327 * We must truncate the vnode to prevent the normal reclamation
1328 * path from flushing the data for the removed file to disk.
1329 */
1330 TMPFS_NODE_LOCK(node);
1331 if ((node->tn_vpstate & TMPFS_VNODE_ALLOCATING) == 0 &&
1332 (node->tn_links == 0 ||
1333 (node->tn_links == 1 && node->tn_type == VDIR &&
1334 node->tn_dir.tn_parent)))
1335 {
1336 node->tn_vpstate = TMPFS_VNODE_DOOMED;
1337 TMPFS_NODE_UNLOCK(node);
1338 if (node->tn_type == VREG)
1339 tmpfs_truncate(vp, 0);
1340 vrecycle(vp);
1341 } else {
1342 TMPFS_NODE_UNLOCK(node);
1343 }
1344
1345 return 0;
1346}
1347
1348/* --------------------------------------------------------------------- */
1349
1350int
1351tmpfs_reclaim(struct vop_reclaim_args *v)
1352{
1353 struct vnode *vp = v->a_vp;
1354 struct tmpfs_mount *tmp;
1355 struct tmpfs_node *node;
1356
1357 node = VP_TO_TMPFS_NODE(vp);
1358 tmp = VFS_TO_TMPFS(vp->v_mount);
1359
1360 tmpfs_free_vp(vp);
1361
1362 /*
1363 * If the node referenced by this vnode was deleted by the
1364 * user, we must free its associated data structures now that
1365 * the vnode is being reclaimed.
1366 *
1367 * Directories have an extra link ref.
1368 */
1369 TMPFS_NODE_LOCK(node);
1370 if ((node->tn_vpstate & TMPFS_VNODE_ALLOCATING) == 0 &&
1371 (node->tn_links == 0 ||
1372 (node->tn_links == 1 && node->tn_type == VDIR &&
1373 node->tn_dir.tn_parent)))
1374 {
1375 node->tn_vpstate = TMPFS_VNODE_DOOMED;
1376 tmpfs_free_node(tmp, node);
1377 /* eats the lock */
1378 } else {
1379 TMPFS_NODE_UNLOCK(node);
1380 }
1381
1382 KKASSERT(vp->v_data == NULL);
1383 return 0;
1384}
1385
1386/* --------------------------------------------------------------------- */
1387
1388static int
1389tmpfs_print(struct vop_print_args *v)
1390{
1391 struct vnode *vp = v->a_vp;
1392
1393 struct tmpfs_node *node;
1394
1395 node = VP_TO_TMPFS_NODE(vp);
1396
1397 kprintf("tag VT_TMPFS, tmpfs_node %p, flags 0x%x, links %d\n",
1398 node, node->tn_flags, node->tn_links);
1399 kprintf("\tmode 0%o, owner %d, group %d, size %ju, status 0x%x\n",
1400 node->tn_mode, node->tn_uid, node->tn_gid,
1401 (uintmax_t)node->tn_size, node->tn_status);
1402
1403 if (vp->v_type == VFIFO)
1404 fifo_printinfo(vp);
1405
1406 kprintf("\n");
1407
1408 return 0;
1409}
1410
1411/* --------------------------------------------------------------------- */
1412
1413static int
1414tmpfs_pathconf(struct vop_pathconf_args *v)
1415{
1416 int name = v->a_name;
1417 register_t *retval = v->a_retval;
1418
1419 int error;
1420
1421 error = 0;
1422
1423 switch (name) {
1424 case _PC_LINK_MAX:
1425 *retval = LINK_MAX;
1426 break;
1427
1428 case _PC_NAME_MAX:
1429 *retval = NAME_MAX;
1430 break;
1431
1432 case _PC_PATH_MAX:
1433 *retval = PATH_MAX;
1434 break;
1435
1436 case _PC_PIPE_BUF:
1437 *retval = PIPE_BUF;
1438 break;
1439
1440 case _PC_CHOWN_RESTRICTED:
1441 *retval = 1;
1442 break;
1443
1444 case _PC_NO_TRUNC:
1445 *retval = 1;
1446 break;
1447
1448 case _PC_SYNC_IO:
1449 *retval = 1;
1450 break;
1451
1452 case _PC_FILESIZEBITS:
 1453 *retval = 0; /* XXX Don't know which value I should return. */
1454 break;
1455
1456 default:
1457 error = EINVAL;
1458 }
1459
1460 return error;
1461}
1462
1463/* --------------------------------------------------------------------- */
1464
1465/*
1466 * vnode operations vector used for files stored in a tmpfs file system.
1467 */
1468struct vop_ops tmpfs_vnode_vops = {
1469 .vop_default = vop_defaultop,
1470 .vop_getpages = vop_stdgetpages,
1471 .vop_putpages = vop_stdputpages,
1472 .vop_ncreate = tmpfs_ncreate,
1473 .vop_nresolve = tmpfs_nresolve,
1474 .vop_nlookupdotdot = tmpfs_nlookupdotdot,
1475 .vop_nmknod = tmpfs_nmknod,
1476 .vop_open = tmpfs_open,
1477 .vop_close = tmpfs_close,
1478 .vop_access = tmpfs_access,
1479 .vop_getattr = tmpfs_getattr,
1480 .vop_setattr = tmpfs_setattr,
1481 .vop_read = tmpfs_read,
1482 .vop_write = tmpfs_write,
1483 .vop_fsync = tmpfs_fsync,
1484 .vop_nremove = tmpfs_nremove,
1485 .vop_nlink = tmpfs_nlink,
1486 .vop_nrename = tmpfs_nrename,
1487 .vop_nmkdir = tmpfs_nmkdir,
1488 .vop_nrmdir = tmpfs_nrmdir,
1489 .vop_nsymlink = tmpfs_nsymlink,
1490 .vop_readdir = tmpfs_readdir,
1491 .vop_readlink = tmpfs_readlink,
1492 .vop_inactive = tmpfs_inactive,
1493 .vop_reclaim = tmpfs_reclaim,
1494 .vop_print = tmpfs_print,
1495 .vop_pathconf = tmpfs_pathconf,
1496 .vop_bmap = tmpfs_bmap,
1497 .vop_strategy = tmpfs_strategy,
1498 .vop_advlock = tmpfs_advlock,
1499};