1/*-
2 * Copyright (c) 2005, 2006 The NetBSD Foundation, Inc.
3 * All rights reserved.
4 *
5 * This code is derived from software contributed to The NetBSD Foundation
6 * by Julio M. Merino Vidal, developed as part of Google's Summer of Code
7 * 2005 program.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
19 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
20 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
21 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
22 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28 * POSSIBILITY OF SUCH DAMAGE.
29 *
30 * $NetBSD: tmpfs_vnops.c,v 1.39 2007/07/23 15:41:01 jmmv Exp $
31 */
32
33/*
34 * tmpfs vnode interface.
35 */
36
37#include <sys/kernel.h>
38#include <sys/kern_syscall.h>
39#include <sys/param.h>
40#include <sys/fcntl.h>
41#include <sys/lockf.h>
42#include <sys/priv.h>
43#include <sys/proc.h>
44#include <sys/resourcevar.h>
45#include <sys/sched.h>
46#include <sys/stat.h>
47#include <sys/systm.h>
48#include <sys/unistd.h>
49#include <sys/vfsops.h>
50#include <sys/vnode.h>
51
52#include <vm/vm.h>
53#include <vm/vm_object.h>
54#include <vm/vm_page.h>
55#include <vm/vm_pager.h>
56#include <vm/swap_pager.h>
57
58#include <vfs/fifofs/fifo.h>
59#include <vfs/tmpfs/tmpfs_vnops.h>
60#include <vfs/tmpfs/tmpfs.h>
61
62MALLOC_DECLARE(M_TMPFS);
63
64static __inline
65void
66tmpfs_knote(struct vnode *vp, int flags)
67{
68 if (flags)
69 KNOTE(&vp->v_pollinfo.vpi_kqinfo.ki_note, flags);
70}
71
72
73/* --------------------------------------------------------------------- */
74
75static int
76tmpfs_nresolve(struct vop_nresolve_args *v)
77{
78 struct vnode *dvp = v->a_dvp;
79 struct vnode *vp = NULL;
80 struct namecache *ncp = v->a_nch->ncp;
81 struct tmpfs_node *tnode;
82
83 int error;
84 struct tmpfs_dirent *de;
85 struct tmpfs_node *dnode;
86
87 dnode = VP_TO_TMPFS_DIR(dvp);
88
89 de = tmpfs_dir_lookup(dnode, NULL, ncp);
90 if (de == NULL) {
91 error = ENOENT;
92 } else {
93 /*
94 * Allocate a vnode for the node we found.
95 */
96 tnode = de->td_node;
97 error = tmpfs_alloc_vp(dvp->v_mount, tnode,
98 LK_EXCLUSIVE | LK_RETRY, &vp);
99 if (error)
100 goto out;
101 KKASSERT(vp);
102 }
103
104out:
105 /*
106 * Store the result of this lookup in the cache. Avoid this if the
107 * request was for creation, as it does not improve timings in
108 * empirical tests.
109 */
110 if (vp) {
111 vn_unlock(vp);
112 cache_setvp(v->a_nch, vp);
113 vrele(vp);
114 } else if (error == ENOENT) {
115 cache_setvp(v->a_nch, NULL);
116 }
117 return error;
118}
119
120static int
121tmpfs_nlookupdotdot(struct vop_nlookupdotdot_args *v)
122{
123 struct vnode *dvp = v->a_dvp;
124 struct vnode **vpp = v->a_vpp;
125 struct tmpfs_node *dnode = VP_TO_TMPFS_NODE(dvp);
126 struct ucred *cred = v->a_cred;
127 int error;
128
129 *vpp = NULL;
130 /* Check accessibility of requested node as a first step. */
131 error = VOP_ACCESS(dvp, VEXEC, cred);
132 if (error != 0)
133 return error;
134
135 if (dnode->tn_dir.tn_parent != NULL) {
136 /* Allocate a new vnode on the matching entry. */
137 error = tmpfs_alloc_vp(dvp->v_mount, dnode->tn_dir.tn_parent,
138 LK_EXCLUSIVE | LK_RETRY, vpp);
139
140 if (*vpp)
141 vn_unlock(*vpp);
142 }
143
144 return (*vpp == NULL) ? ENOENT : 0;
145}
146
147/* --------------------------------------------------------------------- */
148
149static int
150tmpfs_ncreate(struct vop_ncreate_args *v)
151{
152 struct vnode *dvp = v->a_dvp;
153 struct vnode **vpp = v->a_vpp;
154 struct namecache *ncp = v->a_nch->ncp;
155 struct vattr *vap = v->a_vap;
156 struct ucred *cred = v->a_cred;
157 int error;
158
159 KKASSERT(vap->va_type == VREG || vap->va_type == VSOCK);
160
161 error = tmpfs_alloc_file(dvp, vpp, vap, ncp, cred, NULL);
162 if (error == 0) {
163 cache_setunresolved(v->a_nch);
164 cache_setvp(v->a_nch, *vpp);
165 tmpfs_knote(dvp, NOTE_WRITE);
166 }
167
168 return error;
169}
170/* --------------------------------------------------------------------- */
171
172static int
173tmpfs_nmknod(struct vop_nmknod_args *v)
174{
175 struct vnode *dvp = v->a_dvp;
176 struct vnode **vpp = v->a_vpp;
177 struct namecache *ncp = v->a_nch->ncp;
178 struct vattr *vap = v->a_vap;
179 struct ucred *cred = v->a_cred;
180 int error;
181
182 if (vap->va_type != VBLK && vap->va_type != VCHR &&
183 vap->va_type != VFIFO)
184 return EINVAL;
185
186 error = tmpfs_alloc_file(dvp, vpp, vap, ncp, cred, NULL);
187 if (error == 0) {
188 cache_setunresolved(v->a_nch);
189 cache_setvp(v->a_nch, *vpp);
190 tmpfs_knote(dvp, NOTE_WRITE);
191 }
192
193 return error;
194}
195
196/* --------------------------------------------------------------------- */
197
198static int
199tmpfs_open(struct vop_open_args *v)
200{
201 struct vnode *vp = v->a_vp;
202 int mode = v->a_mode;
203
204 int error;
205 struct tmpfs_node *node;
206
207 node = VP_TO_TMPFS_NODE(vp);
208
209 /* The file is still active but all its names have been removed
210 * (e.g. by a "rmdir $(pwd)"). It cannot be opened any more as
211 * it is about to die. */
212 if (node->tn_links < 1)
213 return (ENOENT);
214
215 /* If the file is marked append-only, deny write requests. */
216 if ((node->tn_flags & APPEND) &&
217 (mode & (FWRITE | O_APPEND)) == FWRITE) {
218 error = EPERM;
219 } else {
220 return (vop_stdopen(v));
221 }
222 return error;
223}
224
225/* --------------------------------------------------------------------- */
226
227static int
228tmpfs_close(struct vop_close_args *v)
229{
230 struct vnode *vp = v->a_vp;
231 struct tmpfs_node *node;
232
233 node = VP_TO_TMPFS_NODE(vp);
234
235 if (node->tn_links > 0) {
236 /* Update node times. No need to do it if the node has
237 * been deleted, because it will vanish after we return. */
238 tmpfs_update(vp);
239 }
240
241 return vop_stdclose(v);
242}
243
244/* --------------------------------------------------------------------- */
245
246int
247tmpfs_access(struct vop_access_args *v)
248{
249 struct vnode *vp = v->a_vp;
250 int error;
251 struct tmpfs_node *node;
252
253 node = VP_TO_TMPFS_NODE(vp);
254
255 switch (vp->v_type) {
256 case VDIR:
257 /* FALLTHROUGH */
258 case VLNK:
259 /* FALLTHROUGH */
260 case VREG:
261 if ((v->a_mode & VWRITE) && (vp->v_mount->mnt_flag & MNT_RDONLY)) {
262 error = EROFS;
263 goto out;
264 }
265 break;
266
267 case VBLK:
268 /* FALLTHROUGH */
269 case VCHR:
270 /* FALLTHROUGH */
271 case VSOCK:
272 /* FALLTHROUGH */
273 case VFIFO:
274 break;
275
276 default:
277 error = EINVAL;
278 goto out;
279 }
280
281 if ((v->a_mode & VWRITE) && (node->tn_flags & IMMUTABLE)) {
282 error = EPERM;
283 goto out;
284 }
285
286 error = vop_helper_access(v, node->tn_uid, node->tn_gid, node->tn_mode, 0);
287
288out:
289
290 return error;
291}
292
293/* --------------------------------------------------------------------- */
294
295int
296tmpfs_getattr(struct vop_getattr_args *v)
297{
298 struct vnode *vp = v->a_vp;
299 struct vattr *vap = v->a_vap;
300 struct tmpfs_node *node;
301
302 node = VP_TO_TMPFS_NODE(vp);
303
304 lwkt_gettoken(&vp->v_mount->mnt_token);
305 tmpfs_update(vp);
306
307 vap->va_type = vp->v_type;
308 vap->va_mode = node->tn_mode;
309 vap->va_nlink = node->tn_links;
310 vap->va_uid = node->tn_uid;
311 vap->va_gid = node->tn_gid;
312 vap->va_fsid = vp->v_mount->mnt_stat.f_fsid.val[0];
313 vap->va_fileid = node->tn_id;
314 vap->va_size = node->tn_size;
315 vap->va_blocksize = PAGE_SIZE;
316 vap->va_atime.tv_sec = node->tn_atime;
317 vap->va_atime.tv_nsec = node->tn_atimensec;
318 vap->va_mtime.tv_sec = node->tn_mtime;
319 vap->va_mtime.tv_nsec = node->tn_mtimensec;
320 vap->va_ctime.tv_sec = node->tn_ctime;
321 vap->va_ctime.tv_nsec = node->tn_ctimensec;
322 vap->va_gen = node->tn_gen;
323 vap->va_flags = node->tn_flags;
324 if (vp->v_type == VBLK || vp->v_type == VCHR)
325 {
326 vap->va_rmajor = umajor(node->tn_rdev);
327 vap->va_rminor = uminor(node->tn_rdev);
328 }
329 vap->va_bytes = round_page(node->tn_size);
330 vap->va_filerev = 0;
331
332 lwkt_reltoken(&vp->v_mount->mnt_token);
333
334 return 0;
335}
336
337/* --------------------------------------------------------------------- */
338
339int
340tmpfs_setattr(struct vop_setattr_args *v)
341{
342 struct vnode *vp = v->a_vp;
343 struct vattr *vap = v->a_vap;
344 struct ucred *cred = v->a_cred;
345 struct tmpfs_node *node = VP_TO_TMPFS_NODE(vp);
346 int error = 0;
347 int kflags = 0;
348
349 if (error == 0 && (vap->va_flags != VNOVAL)) {
350 error = tmpfs_chflags(vp, vap->va_flags, cred);
351 kflags |= NOTE_ATTRIB;
352 }
353
354 if (error == 0 && (vap->va_size != VNOVAL)) {
355 if (vap->va_size > node->tn_size)
356 kflags |= NOTE_WRITE | NOTE_EXTEND;
357 else
358 kflags |= NOTE_WRITE;
359 error = tmpfs_chsize(vp, vap->va_size, cred);
360 }
361
362 if (error == 0 && (vap->va_uid != (uid_t)VNOVAL ||
363 vap->va_gid != (gid_t)VNOVAL)) {
364 error = tmpfs_chown(vp, vap->va_uid, vap->va_gid, cred);
365 kflags |= NOTE_ATTRIB;
366 }
367
368 if (error == 0 && (vap->va_mode != (mode_t)VNOVAL)) {
369 error = tmpfs_chmod(vp, vap->va_mode, cred);
370 kflags |= NOTE_ATTRIB;
371 }
372
373 if (error == 0 && ((vap->va_atime.tv_sec != VNOVAL &&
374 vap->va_atime.tv_nsec != VNOVAL) ||
375 (vap->va_mtime.tv_sec != VNOVAL &&
376 vap->va_mtime.tv_nsec != VNOVAL) )) {
377 error = tmpfs_chtimes(vp, &vap->va_atime, &vap->va_mtime,
378 vap->va_vaflags, cred);
379 kflags |= NOTE_ATTRIB;
380 }
381
382 /* Update the node times. We give preference to the error codes
383 * generated by this function rather than the ones that may arise
384 * from tmpfs_update. */
385 tmpfs_update(vp);
386 tmpfs_knote(vp, kflags);
387
388 return error;
389}
390
391/* --------------------------------------------------------------------- */
392
393/*
394 * fsync is usually a NOP, but we must take action when unmounting or
395 * when recycling.
396 */
397static int
398tmpfs_fsync(struct vop_fsync_args *v)
399{
400 struct tmpfs_mount *tmp;
401 struct tmpfs_node *node;
402 struct vnode *vp = v->a_vp;
403
404 tmp = VFS_TO_TMPFS(vp->v_mount);
405 node = VP_TO_TMPFS_NODE(vp);
406
407 tmpfs_update(vp);
408 if (vp->v_type == VREG) {
409 if (vp->v_flag & VRECLAIMED) {
410 if (node->tn_links == 0)
411 tmpfs_truncate(vp, 0);
412 else
413 vfsync(v->a_vp, v->a_waitfor, 1, NULL, NULL);
414 }
415 }
416 return 0;
417}
418
419/* --------------------------------------------------------------------- */
420
421static int
422tmpfs_read (struct vop_read_args *ap)
423{
424 struct buf *bp;
425 struct vnode *vp = ap->a_vp;
426 struct uio *uio = ap->a_uio;
427 struct tmpfs_node *node;
428 off_t base_offset;
429 size_t offset;
430 size_t len;
431 int error;
432
433 error = 0;
434 if (uio->uio_resid == 0) {
435 return error;
436 }
437
438 node = VP_TO_TMPFS_NODE(vp);
439
440 if (uio->uio_offset < 0)
441 return (EINVAL);
442 if (vp->v_type != VREG)
443 return (EINVAL);
444
445 while (uio->uio_resid > 0 && uio->uio_offset < node->tn_size) {
446 /*
447 * Use buffer cache I/O (via tmpfs_strategy)
448 */
449 offset = (size_t)uio->uio_offset & BMASK;
450 base_offset = (off_t)uio->uio_offset - offset;
451 bp = getcacheblk(vp, base_offset, BSIZE);
452 if (bp == NULL)
453 {
454 lwkt_gettoken(&vp->v_mount->mnt_token);
455 error = bread(vp, base_offset, BSIZE, &bp);
456 if (error) {
457 brelse(bp);
458 lwkt_reltoken(&vp->v_mount->mnt_token);
459 kprintf("tmpfs_read bread error %d\n", error);
460 break;
461 }
462 lwkt_reltoken(&vp->v_mount->mnt_token);
463 }
464
465 /*
466 * Figure out how many bytes we can actually copy this loop.
467 */
468 len = BSIZE - offset;
469 if (len > uio->uio_resid)
470 len = uio->uio_resid;
471 if (len > node->tn_size - uio->uio_offset)
472 len = (size_t)(node->tn_size - uio->uio_offset);
473
474 error = uiomove((char *)bp->b_data + offset, len, uio);
475 bqrelse(bp);
476 if (error) {
477 kprintf("tmpfs_read uiomove error %d\n", error);
478 break;
479 }
480 }
481
482 TMPFS_NODE_LOCK(node);
483 node->tn_status |= TMPFS_NODE_ACCESSED;
484 TMPFS_NODE_UNLOCK(node);
485
486 return(error);
487}
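/*
 * Illustrative sketch (not part of the driver logic): tmpfs_read() and
 * tmpfs_write() address the buffer cache in fixed BSIZE chunks, with
 * BMASK == BSIZE - 1.  Assuming, purely for the example, a power-of-two
 * BSIZE of 16384, a uio_offset of 20000 yields:
 *
 *	offset      = 20000 & BMASK   = 3616   (byte position in the buffer)
 *	base_offset = 20000 - offset  = 16384  (buffer-aligned file offset)
 *	len         = BSIZE - offset  = 12768  (further clamped by uio_resid
 *						and node->tn_size - uio_offset)
 *
 * so the uiomove() copies out of byte 3616 of the 16K buffer backing file
 * offsets [16384, 32768).
 */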
488
489static int
490tmpfs_write (struct vop_write_args *ap)
491{
492 struct buf *bp;
493 struct vnode *vp = ap->a_vp;
494 struct uio *uio = ap->a_uio;
495 struct thread *td = uio->uio_td;
496 struct tmpfs_node *node;
497 boolean_t extended;
498 off_t oldsize;
499 int error;
500 off_t base_offset;
501 size_t offset;
502 size_t len;
503 struct rlimit limit;
504 int trivial = 0;
505 int kflags = 0;
506
507 error = 0;
508 if (uio->uio_resid == 0) {
509 return error;
510 }
511
512 node = VP_TO_TMPFS_NODE(vp);
513
514 if (vp->v_type != VREG)
515 return (EINVAL);
516
517 lwkt_gettoken(&vp->v_mount->mnt_token);
518
519 oldsize = node->tn_size;
520 if (ap->a_ioflag & IO_APPEND)
521 uio->uio_offset = node->tn_size;
522
523 /*
524 * Check for illegal write offsets.
525 */
526 if (uio->uio_offset + uio->uio_resid >
527 VFS_TO_TMPFS(vp->v_mount)->tm_maxfilesize) {
528 lwkt_reltoken(&vp->v_mount->mnt_token);
529 return (EFBIG);
530 }
531
532 if (vp->v_type == VREG && td != NULL) {
533 error = kern_getrlimit(RLIMIT_FSIZE, &limit);
534 if (error != 0) {
535 lwkt_reltoken(&vp->v_mount->mnt_token);
536 return error;
537 }
538 if (uio->uio_offset + uio->uio_resid > limit.rlim_cur) {
539 ksignal(td->td_proc, SIGXFSZ);
540 lwkt_reltoken(&vp->v_mount->mnt_token);
541 return (EFBIG);
542 }
543 }
544
545
546 /*
547 * Extend the file's size if necessary
548 */
549 extended = ((uio->uio_offset + uio->uio_resid) > node->tn_size);
550
551 while (uio->uio_resid > 0) {
552 /*
553 * Use buffer cache I/O (via tmpfs_strategy)
554 */
555 offset = (size_t)uio->uio_offset & BMASK;
556 base_offset = (off_t)uio->uio_offset - offset;
557 len = BSIZE - offset;
558 if (len > uio->uio_resid)
559 len = uio->uio_resid;
560
561 if ((uio->uio_offset + len) > node->tn_size) {
562 trivial = (uio->uio_offset <= node->tn_size);
563 error = tmpfs_reg_resize(vp, uio->uio_offset + len, trivial);
564 if (error)
565 break;
566 }
567
568 /*
569 * Read to fill in any gaps. Theoretically we could
570 * optimize this if the write covers the entire buffer
571 * and is not a UIO_NOCOPY write, however this can lead
572 * to a security violation exposing random kernel memory
573 * (whatever junk was in the backing VM pages before).
574 *
575 * So just use bread() to do the right thing.
576 */
577 error = bread(vp, base_offset, BSIZE, &bp);
578 error = uiomove((char *)bp->b_data + offset, len, uio);
579 if (error) {
580 kprintf("tmpfs_write uiomove error %d\n", error);
581 brelse(bp);
582 break;
583 }
584
585 if (uio->uio_offset > node->tn_size) {
586 node->tn_size = uio->uio_offset;
587 kflags |= NOTE_EXTEND;
588 }
589 kflags |= NOTE_WRITE;
590
591 /*
592 * The data has been loaded into the buffer, write it out.
593 *
594 * We want tmpfs to be able to use all available ram, not
595 * just the buffer cache, so if not explicitly paging we
596 * use buwrite() to leave the buffer clean but mark all the
597 * VM pages valid+dirty.
598 *
599 * When the kernel is paging, either via normal pageout
600 * operation or when cleaning the object during a recycle,
601 * the underlying VM pages are going to get thrown away
602 * so we MUST write them to swap.
603 *
604 * XXX unfortunately this catches msync() system calls too
605 * for the moment.
606 */
607 if (vm_swap_size == 0) {
608 /*
609 * if swap isn't configured yet, force a buwrite() to
610 * avoid problems further down the line, due to flushing
611 * to swap.
612 */
613 buwrite(bp);
614 } else {
615 if (ap->a_ioflag & IO_SYNC) {
616 bwrite(bp);
617 } else if ((ap->a_ioflag & IO_ASYNC) ||
618 (uio->uio_segflg == UIO_NOCOPY)) {
619 bawrite(bp);
620 } else {
621 buwrite(bp);
622 }
623 }
624
625 if (bp->b_error) {
626 kprintf("tmpfs_write bwrite error %d\n", bp->b_error);
627 break;
628 }
629 }
630
631 if (error) {
632 if (extended) {
633 (void)tmpfs_reg_resize(vp, oldsize, trivial);
634 kflags &= ~NOTE_EXTEND;
635 }
636 goto done;
637 }
638
639 TMPFS_NODE_LOCK(node);
640 node->tn_status |= TMPFS_NODE_ACCESSED | TMPFS_NODE_MODIFIED |
641 (extended? TMPFS_NODE_CHANGED : 0);
642
643 if (node->tn_mode & (S_ISUID | S_ISGID)) {
644 if (priv_check_cred(ap->a_cred, PRIV_VFS_RETAINSUGID, 0))
645 node->tn_mode &= ~(S_ISUID | S_ISGID);
646 }
647 TMPFS_NODE_UNLOCK(node);
648done:
649
650 tmpfs_knote(vp, kflags);
651
652
653 lwkt_reltoken(&vp->v_mount->mnt_token);
654 return(error);
655}
656
657static int
658tmpfs_advlock (struct vop_advlock_args *ap)
659{
660 struct tmpfs_node *node;
661 struct vnode *vp = ap->a_vp;
662
663 node = VP_TO_TMPFS_NODE(vp);
664
665 return (lf_advlock(ap, &node->tn_advlock, node->tn_size));
666}
667
668static int
669tmpfs_strategy(struct vop_strategy_args *ap)
670{
671 struct bio *bio = ap->a_bio;
672 struct buf *bp = bio->bio_buf;
673 struct vnode *vp = ap->a_vp;
674 struct tmpfs_node *node;
675 vm_object_t uobj;
676
677 if (vp->v_type != VREG) {
678 bp->b_resid = bp->b_bcount;
679 bp->b_flags |= B_ERROR | B_INVAL;
680 bp->b_error = EINVAL;
681 biodone(bio);
682 return(0);
683 }
684
685 lwkt_gettoken(&vp->v_mount->mnt_token);
686 node = VP_TO_TMPFS_NODE(vp);
687
688 uobj = node->tn_reg.tn_aobj;
689
690 /*
691 * Call swap_pager_strategy to read or write between the VM
692 * object and the buffer cache.
693 */
694 swap_pager_strategy(uobj, bio);
695
696 lwkt_reltoken(&vp->v_mount->mnt_token);
697 return 0;
698}
699
700static int
701tmpfs_bmap(struct vop_bmap_args *ap)
702{
703 if (ap->a_doffsetp != NULL)
704 *ap->a_doffsetp = ap->a_loffset;
705 if (ap->a_runp != NULL)
706 *ap->a_runp = 0;
707 if (ap->a_runb != NULL)
708 *ap->a_runb = 0;
709
710 return 0;
711}
712
713/* --------------------------------------------------------------------- */
714
715static int
716tmpfs_nremove(struct vop_nremove_args *v)
717{
718 struct vnode *dvp = v->a_dvp;
719 struct namecache *ncp = v->a_nch->ncp;
720 struct vnode *vp;
721 int error;
722 struct tmpfs_dirent *de;
723 struct tmpfs_mount *tmp;
724 struct tmpfs_node *dnode;
725 struct tmpfs_node *node;
726
727 /*
728 * We have to acquire the vp from v->a_nch because we will likely
729 * unresolve the namecache entry, and a vrele/vput is needed to
730 * trigger the tmpfs_inactive/tmpfs_reclaim sequence.
731 *
732 * We have to use vget to clear any inactive state on the vnode,
733 * otherwise the vnode may remain inactive and thus tmpfs_inactive
734 * will not get called when we release it.
735 */
736 error = cache_vget(v->a_nch, v->a_cred, LK_SHARED, &vp);
737 KKASSERT(error == 0);
738 vn_unlock(vp);
739
740 if (vp->v_type == VDIR) {
741 error = EISDIR;
742 goto out;
743 }
744
745 dnode = VP_TO_TMPFS_DIR(dvp);
746 node = VP_TO_TMPFS_NODE(vp);
747 tmp = VFS_TO_TMPFS(vp->v_mount);
748 de = tmpfs_dir_lookup(dnode, node, ncp);
749 if (de == NULL) {
750 error = ENOENT;
751 goto out;
752 }
753
754 /* Files marked as immutable or append-only cannot be deleted. */
755 if ((node->tn_flags & (IMMUTABLE | APPEND | NOUNLINK)) ||
756 (dnode->tn_flags & APPEND)) {
757 error = EPERM;
758 goto out;
759 }
760
761 /* Remove the entry from the directory; as it is a file, we do not
762 * have to change the number of hard links of the directory. */
763 tmpfs_dir_detach(dnode, de);
764
765 /* Free the directory entry we just deleted. Note that the node
766 * referred by it will not be removed until the vnode is really
767 * reclaimed. */
768 tmpfs_free_dirent(tmp, de);
769
770 if (node->tn_links > 0) {
771 TMPFS_NODE_LOCK(node);
772 node->tn_status |= TMPFS_NODE_ACCESSED | TMPFS_NODE_CHANGED | \
773 TMPFS_NODE_MODIFIED;
774 TMPFS_NODE_UNLOCK(node);
775 }
776
777 cache_setunresolved(v->a_nch);
778 cache_setvp(v->a_nch, NULL);
779 tmpfs_knote(vp, NOTE_DELETE);
780 /*cache_inval_vp(vp, CINV_DESTROY);*/
781 tmpfs_knote(dvp, NOTE_WRITE);
782 error = 0;
783
784out:
785 vrele(vp);
786
787 return error;
788}
789
790/* --------------------------------------------------------------------- */
791
792static int
793tmpfs_nlink(struct vop_nlink_args *v)
794{
795 struct vnode *dvp = v->a_dvp;
796 struct vnode *vp = v->a_vp;
797 struct namecache *ncp = v->a_nch->ncp;
798 struct tmpfs_dirent *de;
799 struct tmpfs_node *node;
800 struct tmpfs_node *dnode;
801 int error;
802
803 KKASSERT(dvp != vp); /* XXX When can this be false? */
804
805 node = VP_TO_TMPFS_NODE(vp);
806 dnode = VP_TO_TMPFS_NODE(dvp);
807
808 /* XXX: Why aren't the following two tests done by the caller? */
809
810 /* Hard links of directories are forbidden. */
811 if (vp->v_type == VDIR) {
812 error = EPERM;
813 goto out;
814 }
815
816 /* Cannot create cross-device links. */
817 if (dvp->v_mount != vp->v_mount) {
818 error = EXDEV;
819 goto out;
820 }
821
822 /* Ensure that we do not overflow the maximum number of links imposed
823 * by the system. */
824 KKASSERT(node->tn_links <= LINK_MAX);
825 if (node->tn_links == LINK_MAX) {
826 error = EMLINK;
827 goto out;
828 }
829
830 /* We cannot create links of files marked immutable or append-only. */
831 if (node->tn_flags & (IMMUTABLE | APPEND)) {
832 error = EPERM;
833 goto out;
834 }
835
836 /* Allocate a new directory entry to represent the node. */
837 error = tmpfs_alloc_dirent(VFS_TO_TMPFS(vp->v_mount), node,
838 ncp->nc_name, ncp->nc_nlen, &de);
839 if (error != 0)
840 goto out;
841
842 /* Insert the new directory entry into the appropriate directory. */
843 tmpfs_dir_attach(dnode, de);
844
845 /* vp link count has changed, so update node times. */
846
847 TMPFS_NODE_LOCK(node);
848 node->tn_status |= TMPFS_NODE_CHANGED;
849 TMPFS_NODE_UNLOCK(node);
850 tmpfs_update(vp);
851
852 tmpfs_knote(vp, NOTE_LINK);
853 cache_setunresolved(v->a_nch);
854 cache_setvp(v->a_nch, vp);
855 tmpfs_knote(dvp, NOTE_WRITE);
856 error = 0;
857
858out:
859 return error;
860}
861
862/* --------------------------------------------------------------------- */
863
864static int
865tmpfs_nrename(struct vop_nrename_args *v)
866{
867 struct vnode *fdvp = v->a_fdvp;
868 struct namecache *fncp = v->a_fnch->ncp;
869 struct vnode *fvp = fncp->nc_vp;
870 struct vnode *tdvp = v->a_tdvp;
871 struct namecache *tncp = v->a_tnch->ncp;
872 struct vnode *tvp;
873 struct tmpfs_dirent *de;
874 struct tmpfs_mount *tmp;
875 struct tmpfs_node *fdnode;
876 struct tmpfs_node *fnode;
877 struct tmpfs_node *tnode;
878 struct tmpfs_node *tdnode;
879 char *newname;
880 char *oldname;
881 int error;
882
883 /*
884 * Because tvp can get overwritten we have to vget it instead of
885 * just vref or use it, otherwise its VINACTIVE flag may not get
886 * cleared and the node won't get destroyed.
887 */
888 error = cache_vget(v->a_tnch, v->a_cred, LK_SHARED, &tvp);
889 if (error == 0) {
890 tnode = VP_TO_TMPFS_NODE(tvp);
891 vn_unlock(tvp);
892 } else {
893 tnode = NULL;
894 }
895
896 /* Disallow cross-device renames.
897 * XXX Why isn't this done by the caller? */
898 if (fvp->v_mount != tdvp->v_mount ||
899 (tvp != NULL && fvp->v_mount != tvp->v_mount)) {
900 error = EXDEV;
901 goto out;
902 }
903
904 tmp = VFS_TO_TMPFS(tdvp->v_mount);
905 tdnode = VP_TO_TMPFS_DIR(tdvp);
906
907 /* If source and target are the same file, there is nothing to do. */
908 if (fvp == tvp) {
909 error = 0;
910 goto out;
911 }
912
913 fdnode = VP_TO_TMPFS_DIR(fdvp);
914 fnode = VP_TO_TMPFS_NODE(fvp);
915 de = tmpfs_dir_lookup(fdnode, fnode, fncp);
916
917 /* Avoid manipulating '.' and '..' entries. */
918 if (de == NULL) {
919 error = ENOENT;
920 goto out_locked;
921 }
922 KKASSERT(de->td_node == fnode);
923
924 /*
925 * If replacing an entry in the target directory and that entry
926 * is a directory, it must be empty.
927 *
928 * Kern_rename guarantees the destination to be a directory
929 * if the source is one (it does?).
930 */
931 if (tvp != NULL) {
932 KKASSERT(tnode != NULL);
933
934 if ((tnode->tn_flags & (NOUNLINK | IMMUTABLE | APPEND)) ||
935 (tdnode->tn_flags & (APPEND | IMMUTABLE))) {
936 error = EPERM;
937 goto out_locked;
938 }
939
940 if (fnode->tn_type == VDIR && tnode->tn_type == VDIR) {
941 if (tnode->tn_size > 0) {
942 error = ENOTEMPTY;
943 goto out_locked;
944 }
945 } else if (fnode->tn_type == VDIR && tnode->tn_type != VDIR) {
946 error = ENOTDIR;
947 goto out_locked;
948 } else if (fnode->tn_type != VDIR && tnode->tn_type == VDIR) {
949 error = EISDIR;
950 goto out_locked;
951 } else {
952 KKASSERT(fnode->tn_type != VDIR &&
953 tnode->tn_type != VDIR);
954 }
955 }
956
957 if ((fnode->tn_flags & (NOUNLINK | IMMUTABLE | APPEND)) ||
958 (fdnode->tn_flags & (APPEND | IMMUTABLE))) {
959 error = EPERM;
960 goto out_locked;
961 }
962
963 /*
964 * Ensure that we have enough memory to hold the new name, if it
965 * has to be changed.
966 */
967 if (fncp->nc_nlen != tncp->nc_nlen ||
968 bcmp(fncp->nc_name, tncp->nc_name, fncp->nc_nlen) != 0) {
969 newname = kmalloc(tncp->nc_nlen + 1, tmp->tm_name_zone,
970 M_WAITOK | M_NULLOK);
971 if (newname == NULL) {
972 error = ENOSPC;
973 goto out_locked;
974 }
975 bcopy(tncp->nc_name, newname, tncp->nc_nlen);
976 newname[tncp->nc_nlen] = '\0';
977 } else {
978 newname = NULL;
979 }
980
981 /*
982 * Unlink entry from source directory. Note that the kernel has
983 * already checked for illegal recursion cases (renaming a directory
984 * into a subdirectory of itself).
985 */
986 if (fdnode != tdnode)
987 tmpfs_dir_detach(fdnode, de);
988
989 /*
990 * Handle any name change. Swap with newname, we will
991 * deallocate it at the end.
992 */
993 if (newname != NULL) {
994#if 0
995 TMPFS_NODE_LOCK(fnode);
996 fnode->tn_status |= TMPFS_NODE_CHANGED;
997 TMPFS_NODE_UNLOCK(fnode);
998#endif
999 oldname = de->td_name;
1000 de->td_name = newname;
1001 de->td_namelen = (uint16_t)tncp->nc_nlen;
1002 newname = oldname;
1003 }
1004
1005 /*
1006 * Link entry to target directory. If the entry
1007 * represents a directory move the parent linkage
1008 * as well.
1009 */
1010 if (fdnode != tdnode) {
1011 if (de->td_node->tn_type == VDIR) {
1012 TMPFS_VALIDATE_DIR(fnode);
1013
1014 TMPFS_NODE_LOCK(tdnode);
1015 tdnode->tn_links++;
1016 tdnode->tn_status |= TMPFS_NODE_MODIFIED;
1017 TMPFS_NODE_UNLOCK(tdnode);
1018
1019 TMPFS_NODE_LOCK(fnode);
1020 fnode->tn_dir.tn_parent = tdnode;
1021 fnode->tn_status |= TMPFS_NODE_CHANGED;
1022 TMPFS_NODE_UNLOCK(fnode);
1023
1024 TMPFS_NODE_LOCK(fdnode);
1025 fdnode->tn_links--;
1026 fdnode->tn_status |= TMPFS_NODE_MODIFIED;
1027 TMPFS_NODE_UNLOCK(fdnode);
1028 }
1029 tmpfs_dir_attach(tdnode, de);
1030 } else {
1031 TMPFS_NODE_LOCK(tdnode);
1032 tdnode->tn_status |= TMPFS_NODE_MODIFIED;
1033 TMPFS_NODE_UNLOCK(tdnode);
1034 }
1035
1036 /*
1037 * If we are overwriting an entry, we have to remove the old one
1038 * from the target directory.
1039 */
1040 if (tvp != NULL) {
1041 /* Remove the old entry from the target directory. */
1042 de = tmpfs_dir_lookup(tdnode, tnode, tncp);
1043 tmpfs_dir_detach(tdnode, de);
1044 tmpfs_knote(tdnode->tn_vnode, NOTE_DELETE);
1045
1046 /*
1047 * Free the directory entry we just deleted. Note that the
1048 * node referred by it will not be removed until the vnode is
1049 * really reclaimed.
1050 */
1051 tmpfs_free_dirent(VFS_TO_TMPFS(tvp->v_mount), de);
1052 /*cache_inval_vp(tvp, CINV_DESTROY);*/
1053 }
1054
1055 /*
1056 * Finish up
1057 */
1058 if (newname) {
1059 kfree(newname, tmp->tm_name_zone);
1060 newname = NULL;
1061 }
1062 cache_rename(v->a_fnch, v->a_tnch);
1063 tmpfs_knote(v->a_fdvp, NOTE_WRITE);
1064 tmpfs_knote(v->a_tdvp, NOTE_WRITE);
1065 if (fnode->tn_vnode)
1066 tmpfs_knote(fnode->tn_vnode, NOTE_RENAME);
1067 error = 0;
1068
1069out_locked:
1070 ;
1071
1072out:
1073 if (tvp)
1074 vrele(tvp);
1075
1076 return error;
1077}
1078
1079/* --------------------------------------------------------------------- */
1080
1081static int
1082tmpfs_nmkdir(struct vop_nmkdir_args *v)
1083{
1084 struct vnode *dvp = v->a_dvp;
1085 struct vnode **vpp = v->a_vpp;
1086 struct namecache *ncp = v->a_nch->ncp;
1087 struct vattr *vap = v->a_vap;
1088 struct ucred *cred = v->a_cred;
1089 int error;
1090
1091 KKASSERT(vap->va_type == VDIR);
1092
1093 error = tmpfs_alloc_file(dvp, vpp, vap, ncp, cred, NULL);
1094 if (error == 0) {
1095 cache_setunresolved(v->a_nch);
1096 cache_setvp(v->a_nch, *vpp);
1097 tmpfs_knote(dvp, NOTE_WRITE | NOTE_LINK);
1098 }
1099
1100 return error;
1101}
1102
1103/* --------------------------------------------------------------------- */
1104
1105static int
1106tmpfs_nrmdir(struct vop_nrmdir_args *v)
1107{
1108 struct vnode *dvp = v->a_dvp;
1109 struct namecache *ncp = v->a_nch->ncp;
1110 struct vnode *vp;
1111 struct tmpfs_dirent *de;
1112 struct tmpfs_mount *tmp;
1113 struct tmpfs_node *dnode;
1114 struct tmpfs_node *node;
1115 int error;
1116
1117 /*
1118 * We have to acquire the vp from v->a_nch because we will likely
1119 * unresolve the namecache entry, and a vrele/vput is needed to
1120 * trigger the tmpfs_inactive/tmpfs_reclaim sequence.
1121 *
1122 * We have to use vget to clear any inactive state on the vnode,
1123 * otherwise the vnode may remain inactive and thus tmpfs_inactive
1124 * will not get called when we release it.
1125 */
1126 error = cache_vget(v->a_nch, v->a_cred, LK_SHARED, &vp);
1127 KKASSERT(error == 0);
1128 vn_unlock(vp);
1129
1130 /*
1131 * Prevalidate so we don't hit an assertion later
1132 */
1133 if (vp->v_type != VDIR) {
1134 error = ENOTDIR;
1135 goto out;
1136 }
1137
1138 tmp = VFS_TO_TMPFS(dvp->v_mount);
1139 dnode = VP_TO_TMPFS_DIR(dvp);
1140 node = VP_TO_TMPFS_DIR(vp);
1141
1142 /* Directories with more than two entries ('.' and '..') cannot be
1143 * removed. */
1144 if (node->tn_size > 0) {
1145 error = ENOTEMPTY;
1146 goto out;
1147 }
1148
1149 if ((dnode->tn_flags & APPEND)
1150 || (node->tn_flags & (NOUNLINK | IMMUTABLE | APPEND))) {
1151 error = EPERM;
1152 goto out;
1153 }
1154
1155 /* This invariant holds only if we are not trying to remove "..".
1156 * We checked for that above so this is safe now. */
1157 KKASSERT(node->tn_dir.tn_parent == dnode);
1158
1159 /* Get the directory entry associated with node (vp). This was
1160 * filled by tmpfs_lookup while looking up the entry. */
1161 de = tmpfs_dir_lookup(dnode, node, ncp);
1162 KKASSERT(TMPFS_DIRENT_MATCHES(de,
1163 ncp->nc_name,
1164 ncp->nc_nlen));
1165
1166 /* Check flags to see if we are allowed to remove the directory. */
1167 if ((dnode->tn_flags & APPEND) ||
1168 node->tn_flags & (NOUNLINK | IMMUTABLE | APPEND)) {
1169 error = EPERM;
1170 goto out;
1171 }
1172
1173
1174 /* Detach the directory entry from the directory (dnode). */
1175 tmpfs_dir_detach(dnode, de);
1176
1177 /* No vnode should be allocated for this entry from this point */
1178 TMPFS_NODE_LOCK(node);
1179 TMPFS_ASSERT_ELOCKED(node);
1180 TMPFS_NODE_LOCK(dnode);
1181 TMPFS_ASSERT_ELOCKED(dnode);
1182
1183#if 0
1184 /* handled by tmpfs_free_node */
1185 KKASSERT(node->tn_links > 0);
1186 node->tn_links--;
1187 node->tn_dir.tn_parent = NULL;
1188#endif
1189 node->tn_status |= TMPFS_NODE_ACCESSED | TMPFS_NODE_CHANGED | \
1190 TMPFS_NODE_MODIFIED;
1191
1192#if 0
1193 /* handled by tmpfs_free_node */
1194 KKASSERT(dnode->tn_links > 0);
1195 dnode->tn_links--;
1196#endif
1197 dnode->tn_status |= TMPFS_NODE_ACCESSED | \
1198 TMPFS_NODE_CHANGED | TMPFS_NODE_MODIFIED;
1199
1200 TMPFS_NODE_UNLOCK(dnode);
1201 TMPFS_NODE_UNLOCK(node);
1202
1203 /* Free the directory entry we just deleted. Note that the node
1204 * referred by it will not be removed until the vnode is really
1205 * reclaimed. */
1206 tmpfs_free_dirent(tmp, de);
1207
1208 /* Release the deleted vnode (will destroy the node, notify
1209 * interested parties and clean it from the cache). */
1210
1211 TMPFS_NODE_LOCK(dnode);
1212 dnode->tn_status |= TMPFS_NODE_CHANGED;
1213 TMPFS_NODE_UNLOCK(dnode);
1214 tmpfs_update(dvp);
1215
1216 cache_setunresolved(v->a_nch);
1217 cache_setvp(v->a_nch, NULL);
1218 /*cache_inval_vp(vp, CINV_DESTROY);*/
1219 tmpfs_knote(dvp, NOTE_WRITE | NOTE_LINK);
1220 error = 0;
1221
1222out:
1223 vrele(vp);
1224
1225 return error;
1226}
1227
1228/* --------------------------------------------------------------------- */
1229
1230static int
1231tmpfs_nsymlink(struct vop_nsymlink_args *v)
1232{
1233 struct vnode *dvp = v->a_dvp;
1234 struct vnode **vpp = v->a_vpp;
1235 struct namecache *ncp = v->a_nch->ncp;
1236 struct vattr *vap = v->a_vap;
1237 struct ucred *cred = v->a_cred;
1238 char *target = v->a_target;
1239 int error;
1240
1241 vap->va_type = VLNK;
1242 error = tmpfs_alloc_file(dvp, vpp, vap, ncp, cred, target);
1243 if (error == 0) {
1244 tmpfs_knote(*vpp, NOTE_WRITE);
1245 cache_setunresolved(v->a_nch);
1246 cache_setvp(v->a_nch, *vpp);
1247 }
1248
1249 return error;
1250}
1251
1252/* --------------------------------------------------------------------- */
1253
1254static int
1255tmpfs_readdir(struct vop_readdir_args *v)
1256{
1257 struct vnode *vp = v->a_vp;
1258 struct uio *uio = v->a_uio;
1259 int *eofflag = v->a_eofflag;
1260 off_t **cookies = v->a_cookies;
1261 int *ncookies = v->a_ncookies;
1262 struct tmpfs_mount *tmp;
1263 int error;
1264 off_t startoff;
1265 off_t cnt = 0;
1266 struct tmpfs_node *node;
1267
1268 /* This operation only makes sense on directory nodes. */
1269 if (vp->v_type != VDIR)
1270 return ENOTDIR;
1271
1272 tmp = VFS_TO_TMPFS(vp->v_mount);
1273 node = VP_TO_TMPFS_DIR(vp);
1274 startoff = uio->uio_offset;
1275
1276 if (uio->uio_offset == TMPFS_DIRCOOKIE_DOT) {
1277 error = tmpfs_dir_getdotdent(node, uio);
1278 if (error != 0)
1279 goto outok;
1280 cnt++;
1281 }
1282
1283 if (uio->uio_offset == TMPFS_DIRCOOKIE_DOTDOT) {
1284 error = tmpfs_dir_getdotdotdent(tmp, node, uio);
1285 if (error != 0)
1286 goto outok;
1287 cnt++;
1288 }
1289
1290 error = tmpfs_dir_getdents(node, uio, &cnt);
1291
1292outok:
1293 KKASSERT(error >= -1);
1294
1295 if (error == -1)
1296 error = 0;
1297
1298 if (eofflag != NULL)
1299 *eofflag =
1300 (error == 0 && uio->uio_offset == TMPFS_DIRCOOKIE_EOF);
1301
1302 /* Update NFS-related variables. */
1303 if (error == 0 && cookies != NULL && ncookies != NULL) {
1304 off_t i;
1305 off_t off = startoff;
1306 struct tmpfs_dirent *de = NULL;
1307
1308 *ncookies = cnt;
1309 *cookies = kmalloc(cnt * sizeof(off_t), M_TEMP, M_WAITOK);
1310
1311 for (i = 0; i < cnt; i++) {
1312 KKASSERT(off != TMPFS_DIRCOOKIE_EOF);
1313 if (off == TMPFS_DIRCOOKIE_DOT) {
1314 off = TMPFS_DIRCOOKIE_DOTDOT;
1315 } else {
1316 if (off == TMPFS_DIRCOOKIE_DOTDOT) {
1317 de = TAILQ_FIRST(&node->tn_dir.tn_dirhead);
1318 } else if (de != NULL) {
1319 de = TAILQ_NEXT(de, td_entries);
1320 } else {
1321 de = tmpfs_dir_lookupbycookie(node,
1322 off);
1323 KKASSERT(de != NULL);
1324 de = TAILQ_NEXT(de, td_entries);
1325 }
1326 if (de == NULL)
1327 off = TMPFS_DIRCOOKIE_EOF;
1328 else
1329 off = tmpfs_dircookie(de);
1330 }
1331
1332 (*cookies)[i] = off;
1333 }
1334 KKASSERT(uio->uio_offset == off);
1335 }
1336
1337 return error;
1338}
1339
1340/* --------------------------------------------------------------------- */
1341
1342static int
1343tmpfs_readlink(struct vop_readlink_args *v)
1344{
1345 struct vnode *vp = v->a_vp;
1346 struct uio *uio = v->a_uio;
1347
1348 int error;
1349 struct tmpfs_node *node;
1350
1351 KKASSERT(uio->uio_offset == 0);
1352 KKASSERT(vp->v_type == VLNK);
1353
1354 node = VP_TO_TMPFS_NODE(vp);
1355
1356 error = uiomove(node->tn_link, MIN(node->tn_size, uio->uio_resid),
1357 uio);
1358 TMPFS_NODE_LOCK(node);
1359 node->tn_status |= TMPFS_NODE_ACCESSED;
1360 TMPFS_NODE_UNLOCK(node);
1361
1362 return error;
1363}
1364
1365/* --------------------------------------------------------------------- */
1366
1367static int
1368tmpfs_inactive(struct vop_inactive_args *v)
1369{
1370 struct vnode *vp = v->a_vp;
1371 struct tmpfs_node *node;
1372
1373 node = VP_TO_TMPFS_NODE(vp);
1374
1375 /*
1376 * Degenerate case
1377 */
1378 if (node == NULL) {
1379 vrecycle(vp);
1380 return(0);
1381 }
1382
1383 /*
1384 * Get rid of unreferenced deleted vnodes sooner rather than
1385 * later so the data memory can be recovered immediately.
1386 *
1387 * We must truncate the vnode to prevent the normal reclamation
1388 * path from flushing the data for the removed file to disk.
1389 */
1390 TMPFS_NODE_LOCK(node);
1391 if ((node->tn_vpstate & TMPFS_VNODE_ALLOCATING) == 0 &&
1392 (node->tn_links == 0 ||
1393 (node->tn_links == 1 && node->tn_type == VDIR &&
1394 node->tn_dir.tn_parent)))
1395 {
1396 node->tn_vpstate = TMPFS_VNODE_DOOMED;
1397 TMPFS_NODE_UNLOCK(node);
1398 if (node->tn_type == VREG)
1399 tmpfs_truncate(vp, 0);
1400 vrecycle(vp);
1401 } else {
1402 TMPFS_NODE_UNLOCK(node);
1403 }
1404
1405 return 0;
1406}
1407
1408/* --------------------------------------------------------------------- */
1409
1410int
1411tmpfs_reclaim(struct vop_reclaim_args *v)
1412{
1413 struct vnode *vp = v->a_vp;
1414 struct tmpfs_mount *tmp;
1415 struct tmpfs_node *node;
1416
1417 node = VP_TO_TMPFS_NODE(vp);
1418 tmp = VFS_TO_TMPFS(vp->v_mount);
1419
1420 tmpfs_free_vp(vp);
1421
1422 /*
1423 * If the node referenced by this vnode was deleted by the
1424 * user, we must free its associated data structures now that
1425 * the vnode is being reclaimed.
1426 *
1427 * Directories have an extra link ref.
1428 */
1429 TMPFS_NODE_LOCK(node);
1430 if ((node->tn_vpstate & TMPFS_VNODE_ALLOCATING) == 0 &&
1431 (node->tn_links == 0 ||
1432 (node->tn_links == 1 && node->tn_type == VDIR &&
1433 node->tn_dir.tn_parent)))
1434 {
1435 node->tn_vpstate = TMPFS_VNODE_DOOMED;
1436 tmpfs_free_node(tmp, node);
1437 /* eats the lock */
1438 } else {
1439 TMPFS_NODE_UNLOCK(node);
1440 }
1441
1442 KKASSERT(vp->v_data == NULL);
1443 return 0;
1444}
1445
1446/* --------------------------------------------------------------------- */
1447
1448static int
1449tmpfs_print(struct vop_print_args *v)
1450{
1451 struct vnode *vp = v->a_vp;
1452
1453 struct tmpfs_node *node;
1454
1455 node = VP_TO_TMPFS_NODE(vp);
1456
1457 kprintf("tag VT_TMPFS, tmpfs_node %p, flags 0x%x, links %d\n",
1458 node, node->tn_flags, node->tn_links);
1459 kprintf("\tmode 0%o, owner %d, group %d, size %ju, status 0x%x\n",
1460 node->tn_mode, node->tn_uid, node->tn_gid,
1461 (uintmax_t)node->tn_size, node->tn_status);
1462
1463 if (vp->v_type == VFIFO)
1464 fifo_printinfo(vp);
1465
1466 kprintf("\n");
1467
1468 return 0;
1469}
1470
1471/* --------------------------------------------------------------------- */
1472
1473static int
1474tmpfs_pathconf(struct vop_pathconf_args *v)
1475{
1476 int name = v->a_name;
1477 register_t *retval = v->a_retval;
1478
1479 int error;
1480
1481 error = 0;
1482
1483 switch (name) {
1484 case _PC_LINK_MAX:
1485 *retval = LINK_MAX;
1486 break;
1487
1488 case _PC_NAME_MAX:
1489 *retval = NAME_MAX;
1490 break;
1491
1492 case _PC_PATH_MAX:
1493 *retval = PATH_MAX;
1494 break;
1495
1496 case _PC_PIPE_BUF:
1497 *retval = PIPE_BUF;
1498 break;
1499
1500 case _PC_CHOWN_RESTRICTED:
1501 *retval = 1;
1502 break;
1503
1504 case _PC_NO_TRUNC:
1505 *retval = 1;
1506 break;
1507
1508 case _PC_SYNC_IO:
1509 *retval = 1;
1510 break;
1511
1512 case _PC_FILESIZEBITS:
1513 *retval = 0; /* XXX Don't know which value I should return. */
1514 break;
1515
1516 default:
1517 error = EINVAL;
1518 }
1519
1520 return error;
1521}
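/*
 * Illustrative sketch (not part of the kernel build): the values above are
 * what a userland pathconf(2)/fpathconf(2) call reports for a file on a
 * tmpfs mount.  The mount point used below is an assumption of the example.
 *
 *	#include <unistd.h>
 *
 *	long name_max = pathconf("/tmp", _PC_NAME_MAX);    returns NAME_MAX
 *	long no_trunc = pathconf("/tmp", _PC_NO_TRUNC);    returns 1
 */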
1522
1523/************************************************************************
1524 * KQFILTER OPS *
1525 ************************************************************************/
1526
1527static void filt_tmpfsdetach(struct knote *kn);
1528static int filt_tmpfsread(struct knote *kn, long hint);
1529static int filt_tmpfswrite(struct knote *kn, long hint);
1530static int filt_tmpfsvnode(struct knote *kn, long hint);
1531
1532static struct filterops tmpfsread_filtops =
1533 { FILTEROP_ISFD, NULL, filt_tmpfsdetach, filt_tmpfsread };
1534static struct filterops tmpfswrite_filtops =
1535 { FILTEROP_ISFD, NULL, filt_tmpfsdetach, filt_tmpfswrite };
1536static struct filterops tmpfsvnode_filtops =
1537 { FILTEROP_ISFD, NULL, filt_tmpfsdetach, filt_tmpfsvnode };
1538
1539static int
1540tmpfs_kqfilter (struct vop_kqfilter_args *ap)
1541{
1542 struct vnode *vp = ap->a_vp;
1543 struct knote *kn = ap->a_kn;
1544
1545 switch (kn->kn_filter) {
1546 case EVFILT_READ:
1547 kn->kn_fop = &tmpfsread_filtops;
1548 break;
1549 case EVFILT_WRITE:
1550 kn->kn_fop = &tmpfswrite_filtops;
1551 break;
1552 case EVFILT_VNODE:
1553 kn->kn_fop = &tmpfsvnode_filtops;
1554 break;
1555 default:
1556 return (EOPNOTSUPP);
1557 }
1558
1559 kn->kn_hook = (caddr_t)vp;
1560
1561 knote_insert(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn);
1562
1563 return(0);
1564}
1565
1566static void
1567filt_tmpfsdetach(struct knote *kn)
1568{
1569 struct vnode *vp = (void *)kn->kn_hook;
1570
1571 knote_remove(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn);
1572}
1573
1574static int
1575filt_tmpfsread(struct knote *kn, long hint)
1576{
1577 struct vnode *vp = (void *)kn->kn_hook;
1578 struct tmpfs_node *node = VP_TO_TMPFS_NODE(vp);
1579 off_t off;
1580
1581 if (hint == NOTE_REVOKE) {
1582 kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
1583 return(1);
1584 }
1585
1586 /*
1587 * Interlock against MP races when performing this function.
1588 */
1589 lwkt_gettoken(&vp->v_mount->mnt_token);
1590 off = node->tn_size - kn->kn_fp->f_offset;
1591 kn->kn_data = (off < INTPTR_MAX) ? off : INTPTR_MAX;
1592 if (kn->kn_sfflags & NOTE_OLDAPI) {
1593 lwkt_reltoken(&vp->v_mount->mnt_token);
1594 return(1);
1595 }
1596
1597 if (kn->kn_data == 0) {
1598 kn->kn_data = (off < INTPTR_MAX) ? off : INTPTR_MAX;
1599 }
1600 lwkt_reltoken(&vp->v_mount->mnt_token);
1601 return (kn->kn_data != 0);
1602}
1603
1604static int
1605filt_tmpfswrite(struct knote *kn, long hint)
1606{
1607 if (hint == NOTE_REVOKE)
1608 kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
1609 kn->kn_data = 0;
1610 return (1);
1611}
1612
1613static int
1614filt_tmpfsvnode(struct knote *kn, long hint)
1615{
1616 if (kn->kn_sfflags & hint)
1617 kn->kn_fflags |= hint;
1618 if (hint == NOTE_REVOKE) {
1619 kn->kn_flags |= (EV_EOF | EV_NODATA);
1620 return (1);
1621 }
1622 return (kn->kn_fflags != 0);
1623}
1624
1625
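/*
 * Illustrative sketch (not part of the kernel build): how a userland
 * consumer sees the NOTE_* events posted via tmpfs_knote() and delivered
 * through the filters above.  The path and the omitted error handling are
 * assumptions of the example.
 *
 *	#include <sys/types.h>
 *	#include <sys/event.h>
 *	#include <sys/time.h>
 *	#include <fcntl.h>
 *
 *	int fd = open("/tmp/somefile", O_RDONLY);
 *	int kq = kqueue();
 *	struct kevent kev;
 *
 *	EV_SET(&kev, fd, EVFILT_VNODE, EV_ADD | EV_CLEAR,
 *	       NOTE_WRITE | NOTE_EXTEND | NOTE_ATTRIB |
 *	       NOTE_DELETE | NOTE_RENAME, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);	register the filter
 *	kevent(kq, NULL, 0, &kev, 1, NULL);	block; on return kev.fflags
 *						holds the accumulated NOTE_*
 *						bits, e.g. NOTE_WRITE after a
 *						write, NOTE_DELETE after an
 *						unlink
 *
 * On a revoked vnode the filters additionally set EV_EOF | EV_NODATA
 * before reporting the knote ready.
 */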
1626/* --------------------------------------------------------------------- */
1627
1628/*
1629 * vnode operations vector used for files stored in a tmpfs file system.
1630 */
1631struct vop_ops tmpfs_vnode_vops = {
1632 .vop_default = vop_defaultop,
1633 .vop_getpages = vop_stdgetpages,
1634 .vop_putpages = vop_stdputpages,
1635 .vop_ncreate = tmpfs_ncreate,
1636 .vop_nresolve = tmpfs_nresolve,
1637 .vop_nlookupdotdot = tmpfs_nlookupdotdot,
1638 .vop_nmknod = tmpfs_nmknod,
1639 .vop_open = tmpfs_open,
1640 .vop_close = tmpfs_close,
1641 .vop_access = tmpfs_access,
1642 .vop_getattr = tmpfs_getattr,
1643 .vop_setattr = tmpfs_setattr,
1644 .vop_read = tmpfs_read,
1645 .vop_write = tmpfs_write,
1646 .vop_fsync = tmpfs_fsync,
1647 .vop_nremove = tmpfs_nremove,
1648 .vop_nlink = tmpfs_nlink,
1649 .vop_nrename = tmpfs_nrename,
1650 .vop_nmkdir = tmpfs_nmkdir,
1651 .vop_nrmdir = tmpfs_nrmdir,
1652 .vop_nsymlink = tmpfs_nsymlink,
1653 .vop_readdir = tmpfs_readdir,
1654 .vop_readlink = tmpfs_readlink,
1655 .vop_inactive = tmpfs_inactive,
1656 .vop_reclaim = tmpfs_reclaim,
1657 .vop_print = tmpfs_print,
1658 .vop_pathconf = tmpfs_pathconf,
1659 .vop_bmap = tmpfs_bmap,
1660 .vop_strategy = tmpfs_strategy,
1661 .vop_advlock = tmpfs_advlock,
1662 .vop_kqfilter = tmpfs_kqfilter
1663};