kernel - Cluster fixes + Enable clustering for HAMMER1
[dragonfly.git] / sys / vfs / tmpfs / tmpfs_vnops.c
1/*-
2 * Copyright (c) 2005, 2006 The NetBSD Foundation, Inc.
3 * All rights reserved.
4 *
5 * This code is derived from software contributed to The NetBSD Foundation
6 * by Julio M. Merino Vidal, developed as part of Google's Summer of Code
7 * 2005 program.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
19 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
20 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
21 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
22 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28 * POSSIBILITY OF SUCH DAMAGE.
80ae59d7
MD
29 *
30 * $NetBSD: tmpfs_vnops.c,v 1.39 2007/07/23 15:41:01 jmmv Exp $
7a2de9a4
MD
31 */
32
33/*
34 * tmpfs vnode interface.
35 */
7a2de9a4
MD
36
37#include <sys/kernel.h>
38#include <sys/kern_syscall.h>
39#include <sys/param.h>
40#include <sys/fcntl.h>
41#include <sys/lockf.h>
42#include <sys/priv.h>
43#include <sys/proc.h>
44#include <sys/resourcevar.h>
45#include <sys/sched.h>
7a2de9a4
MD
46#include <sys/stat.h>
47#include <sys/systm.h>
48#include <sys/unistd.h>
49#include <sys/vfsops.h>
50#include <sys/vnode.h>
d22d7da4 51#include <sys/mountctl.h>
7a2de9a4 52
7a2de9a4
MD
53#include <vm/vm.h>
54#include <vm/vm_object.h>
55#include <vm/vm_page.h>
56#include <vm/vm_pager.h>
b7545cb3 57#include <vm/swap_pager.h>
7a2de9a4 58
54341a3b
MD
59#include <sys/buf2.h>
60
7a2de9a4
MD
61#include <vfs/fifofs/fifo.h>
62#include <vfs/tmpfs/tmpfs_vnops.h>
63#include <vfs/tmpfs/tmpfs.h>
64
65MALLOC_DECLARE(M_TMPFS);
66
8f9ba07b
MD
67static void tmpfs_strategy_done(struct bio *bio);
68
80ae59d7
MD
69static __inline
70void
71tmpfs_knote(struct vnode *vp, int flags)
72{
73 if (flags)
74 KNOTE(&vp->v_pollinfo.vpi_kqinfo.ki_note, flags);
75}
76
77
7a2de9a4
MD
78/* --------------------------------------------------------------------- */
79
80static int
81tmpfs_nresolve(struct vop_nresolve_args *v)
82{
83 struct vnode *dvp = v->a_dvp;
84 struct vnode *vp = NULL;
85 struct namecache *ncp = v->a_nch->ncp;
d89ce96a 86 struct tmpfs_node *tnode;
7a2de9a4
MD
87
88 int error;
89 struct tmpfs_dirent *de;
90 struct tmpfs_node *dnode;
91
92 dnode = VP_TO_TMPFS_DIR(dvp);
93
7a2de9a4
MD
94 de = tmpfs_dir_lookup(dnode, NULL, ncp);
95 if (de == NULL) {
d89ce96a 96 error = ENOENT;
7a2de9a4 97 } else {
d89ce96a
MD
98 /*
99 * Allocate a vnode for the node we found.
100 */
7a2de9a4 101 tnode = de->td_node;
7a2de9a4 102 error = tmpfs_alloc_vp(dvp->v_mount, tnode,
103 LK_EXCLUSIVE | LK_RETRY, &vp);
104 if (error)
7a2de9a4 105 goto out;
d89ce96a 106 KKASSERT(vp);
7a2de9a4
MD
107 }
108
7a2de9a4 109out:
d89ce96a
MD
110 /*
111 * Store the result of this lookup in the cache. Avoid this if the
112 * request was for creation, as it does not improve timings on
113 * empirical tests.
114 */
7a2de9a4
MD
115 if (vp) {
116 vn_unlock(vp);
117 cache_setvp(v->a_nch, vp);
118 vrele(vp);
d89ce96a 119 } else if (error == ENOENT) {
7a2de9a4
MD
120 cache_setvp(v->a_nch, NULL);
121 }
122 return error;
123}
124
125static int
126tmpfs_nlookupdotdot(struct vop_nlookupdotdot_args *v)
127{
128 struct vnode *dvp = v->a_dvp;
129 struct vnode **vpp = v->a_vpp;
130 struct tmpfs_node *dnode = VP_TO_TMPFS_NODE(dvp);
131 struct ucred *cred = v->a_cred;
132 int error;
133
134 *vpp = NULL;
135 /* Check accessibility of requested node as a first step. */
136 error = VOP_ACCESS(dvp, VEXEC, cred);
137 if (error != 0)
138 return error;
139
140 if (dnode->tn_dir.tn_parent != NULL) {
141 /* Allocate a new vnode on the matching entry. */
142 error = tmpfs_alloc_vp(dvp->v_mount, dnode->tn_dir.tn_parent,
143 LK_EXCLUSIVE | LK_RETRY, vpp);
144
145 if (*vpp)
146 vn_unlock(*vpp);
147 }
148
149 return (*vpp == NULL) ? ENOENT : 0;
150}
151
152/* --------------------------------------------------------------------- */
153
154static int
155tmpfs_ncreate(struct vop_ncreate_args *v)
156{
157 struct vnode *dvp = v->a_dvp;
158 struct vnode **vpp = v->a_vpp;
159 struct namecache *ncp = v->a_nch->ncp;
160 struct vattr *vap = v->a_vap;
161 struct ucred *cred = v->a_cred;
162 int error;
163
164 KKASSERT(vap->va_type == VREG || vap->va_type == VSOCK);
165
7a2de9a4
MD
166 error = tmpfs_alloc_file(dvp, vpp, vap, ncp, cred, NULL);
167 if (error == 0) {
168 cache_setunresolved(v->a_nch);
169 cache_setvp(v->a_nch, *vpp);
80ae59d7 170 tmpfs_knote(dvp, NOTE_WRITE);
7a2de9a4 171 }
7a2de9a4
MD
172
173 return error;
174}
175/* --------------------------------------------------------------------- */
176
177static int
178tmpfs_nmknod(struct vop_nmknod_args *v)
179{
180 struct vnode *dvp = v->a_dvp;
181 struct vnode **vpp = v->a_vpp;
182 struct namecache *ncp = v->a_nch->ncp;
183 struct vattr *vap = v->a_vap;
184 struct ucred *cred = v->a_cred;
185 int error;
186
187 if (vap->va_type != VBLK && vap->va_type != VCHR &&
188 vap->va_type != VFIFO)
189 return EINVAL;
190
7a2de9a4
MD
191 error = tmpfs_alloc_file(dvp, vpp, vap, ncp, cred, NULL);
192 if (error == 0) {
193 cache_setunresolved(v->a_nch);
194 cache_setvp(v->a_nch, *vpp);
80ae59d7 195 tmpfs_knote(dvp, NOTE_WRITE);
7a2de9a4 196 }
7a2de9a4
MD
197
198 return error;
199}
200
201/* --------------------------------------------------------------------- */
202
203static int
204tmpfs_open(struct vop_open_args *v)
205{
206 struct vnode *vp = v->a_vp;
207 int mode = v->a_mode;
208
209 int error;
210 struct tmpfs_node *node;
211
7a2de9a4
MD
212 node = VP_TO_TMPFS_NODE(vp);
213
214 /* The file is still active but all its names have been removed
215 * (e.g. by a "rmdir $(pwd)"). It cannot be opened any more as
216 * it is about to die. */
217 if (node->tn_links < 1)
218 return (ENOENT);
219
220 /* If the file is marked append-only, deny write requests. */
630e3a33
MD
221 if ((node->tn_flags & APPEND) &&
222 (mode & (FWRITE | O_APPEND)) == FWRITE) {
7a2de9a4 223 error = EPERM;
630e3a33 224 } else {
7a2de9a4
MD
225 return (vop_stdopen(v));
226 }
7a2de9a4
MD
227 return error;
228}
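/*
 * Illustrative sketch (userland; the path and the use of chflags(2)
 * with UF_APPEND are assumptions, not taken from this file): the
 * APPEND check above is what makes a plain write open of an
 * append-only tmpfs file fail, while an O_APPEND open is still allowed.
 *
 *	#include <sys/stat.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	chflags("/tmp/t/log", UF_APPEND);		mark append-only
 *	open("/tmp/t/log", O_WRONLY);			fails with EPERM
 *	open("/tmp/t/log", O_WRONLY | O_APPEND);	succeeds
 */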
229
230/* --------------------------------------------------------------------- */
231
232static int
233tmpfs_close(struct vop_close_args *v)
234{
235 struct vnode *vp = v->a_vp;
236 struct tmpfs_node *node;
237
238 node = VP_TO_TMPFS_NODE(vp);
239
240 if (node->tn_links > 0) {
241 /* Update node times. No need to do it if the node has
242 * been deleted, because it will vanish after we return. */
243 tmpfs_update(vp);
244 }
245
246 return vop_stdclose(v);
247}
248
249/* --------------------------------------------------------------------- */
250
251int
252tmpfs_access(struct vop_access_args *v)
253{
254 struct vnode *vp = v->a_vp;
255 int error;
256 struct tmpfs_node *node;
257
7a2de9a4
MD
258 node = VP_TO_TMPFS_NODE(vp);
259
260 switch (vp->v_type) {
261 case VDIR:
262 /* FALLTHROUGH */
263 case VLNK:
264 /* FALLTHROUGH */
265 case VREG:
5a9e9ac7 266 if ((v->a_mode & VWRITE) && (vp->v_mount->mnt_flag & MNT_RDONLY)) {
7a2de9a4
MD
267 error = EROFS;
268 goto out;
269 }
270 break;
271
272 case VBLK:
273 /* FALLTHROUGH */
274 case VCHR:
275 /* FALLTHROUGH */
276 case VSOCK:
277 /* FALLTHROUGH */
278 case VFIFO:
279 break;
280
281 default:
282 error = EINVAL;
283 goto out;
284 }
285
5a9e9ac7 286 if ((v->a_mode & VWRITE) && (node->tn_flags & IMMUTABLE)) {
7a2de9a4
MD
287 error = EPERM;
288 goto out;
289 }
290
291 error = vop_helper_access(v, node->tn_uid, node->tn_gid, node->tn_mode, 0);
292
293out:
294
295 return error;
296}
297
298/* --------------------------------------------------------------------- */
299
300int
301tmpfs_getattr(struct vop_getattr_args *v)
302{
303 struct vnode *vp = v->a_vp;
304 struct vattr *vap = v->a_vap;
7a2de9a4 305 struct tmpfs_node *node;
7a2de9a4
MD
306
307 node = VP_TO_TMPFS_NODE(vp);
308
e575e508 309 lwkt_gettoken(&vp->v_mount->mnt_token);
7a2de9a4
MD
310 tmpfs_update(vp);
311
312 vap->va_type = vp->v_type;
313 vap->va_mode = node->tn_mode;
314 vap->va_nlink = node->tn_links;
315 vap->va_uid = node->tn_uid;
316 vap->va_gid = node->tn_gid;
317 vap->va_fsid = vp->v_mount->mnt_stat.f_fsid.val[0];
318 vap->va_fileid = node->tn_id;
319 vap->va_size = node->tn_size;
320 vap->va_blocksize = PAGE_SIZE;
321 vap->va_atime.tv_sec = node->tn_atime;
322 vap->va_atime.tv_nsec = node->tn_atimensec;
323 vap->va_mtime.tv_sec = node->tn_mtime;
324 vap->va_mtime.tv_nsec = node->tn_mtimensec;
325 vap->va_ctime.tv_sec = node->tn_ctime;
326 vap->va_ctime.tv_nsec = node->tn_ctimensec;
327 vap->va_gen = node->tn_gen;
328 vap->va_flags = node->tn_flags;
329 if (vp->v_type == VBLK || vp->v_type == VCHR)
330 {
331 vap->va_rmajor = umajor(node->tn_rdev);
332 vap->va_rminor = uminor(node->tn_rdev);
333 }
334 vap->va_bytes = round_page(node->tn_size);
335 vap->va_filerev = 0;
336
e575e508
VS
337 lwkt_reltoken(&vp->v_mount->mnt_token);
338
7a2de9a4
MD
339 return 0;
340}
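/*
 * Illustrative sketch (userland, assumed path): the fields filled in
 * above are what stat(2) reports for a tmpfs file; for example
 * st_blksize comes from va_blocksize (PAGE_SIZE here).
 *
 *	#include <sys/stat.h>
 *	#include <stdint.h>
 *	#include <stdio.h>
 *
 *	struct stat st;
 *	if (stat("/tmp/t/file", &st) == 0)
 *		printf("size=%jd blksize=%ld\n",
 *		       (intmax_t)st.st_size, (long)st.st_blksize);
 */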
341
342/* --------------------------------------------------------------------- */
343
344int
345tmpfs_setattr(struct vop_setattr_args *v)
346{
347 struct vnode *vp = v->a_vp;
348 struct vattr *vap = v->a_vap;
349 struct ucred *cred = v->a_cred;
80ae59d7 350 struct tmpfs_node *node = VP_TO_TMPFS_NODE(vp);
7a2de9a4 351 int error = 0;
80ae59d7 352 int kflags = 0;
7a2de9a4 353
80ae59d7 354 if (error == 0 && (vap->va_flags != VNOVAL)) {
7a2de9a4 355 error = tmpfs_chflags(vp, vap->va_flags, cred);
80ae59d7
MD
356 kflags |= NOTE_ATTRIB;
357 }
7a2de9a4 358
80ae59d7
MD
359 if (error == 0 && (vap->va_size != VNOVAL)) {
360 if (vap->va_size > node->tn_size)
361 kflags |= NOTE_WRITE | NOTE_EXTEND;
362 else
363 kflags |= NOTE_WRITE;
7a2de9a4 364 error = tmpfs_chsize(vp, vap->va_size, cred);
80ae59d7 365 }
7a2de9a4 366
d89ce96a
MD
367 if (error == 0 && (vap->va_uid != (uid_t)VNOVAL ||
368 vap->va_gid != (gid_t)VNOVAL)) {
7a2de9a4 369 error = tmpfs_chown(vp, vap->va_uid, vap->va_gid, cred);
80ae59d7 370 kflags |= NOTE_ATTRIB;
d89ce96a 371 }
7a2de9a4 372
80ae59d7 373 if (error == 0 && (vap->va_mode != (mode_t)VNOVAL)) {
7a2de9a4 374 error = tmpfs_chmod(vp, vap->va_mode, cred);
80ae59d7
MD
375 kflags |= NOTE_ATTRIB;
376 }
7a2de9a4
MD
377
378 if (error == 0 && ((vap->va_atime.tv_sec != VNOVAL &&
379 vap->va_atime.tv_nsec != VNOVAL) ||
380 (vap->va_mtime.tv_sec != VNOVAL &&
d89ce96a 381 vap->va_mtime.tv_nsec != VNOVAL) )) {
7a2de9a4 382 error = tmpfs_chtimes(vp, &vap->va_atime, &vap->va_mtime,
d89ce96a 383 vap->va_vaflags, cred);
80ae59d7 384 kflags |= NOTE_ATTRIB;
d89ce96a 385 }
7a2de9a4
MD
386
387 /* Update the node times. We give preference to the error codes
388 * generated by this function rather than the ones that may arise
389 * from tmpfs_update. */
390 tmpfs_update(vp);
80ae59d7 391 tmpfs_knote(vp, kflags);
7a2de9a4 392
7a2de9a4
MD
393 return error;
394}
395
396/* --------------------------------------------------------------------- */
397
398/*
399 * fsync is usually a NOP, but we must take action when unmounting or
400 * when recycling.
401 */
7a2de9a4
MD
402static int
403tmpfs_fsync(struct vop_fsync_args *v)
404{
9fc94b5f 405 struct tmpfs_mount *tmp;
630e3a33 406 struct tmpfs_node *node;
7a2de9a4
MD
407 struct vnode *vp = v->a_vp;
408
9fc94b5f 409 tmp = VFS_TO_TMPFS(vp->v_mount);
630e3a33
MD
410 node = VP_TO_TMPFS_NODE(vp);
411
412 tmpfs_update(vp);
413 if (vp->v_type == VREG) {
d4623db3 414 if (vp->v_flag & VRECLAIMED) {
630e3a33
MD
415 if (node->tn_links == 0)
416 tmpfs_truncate(vp, 0);
417 else
418 vfsync(v->a_vp, v->a_waitfor, 1, NULL, NULL);
419 }
9fc94b5f 420 }
7a2de9a4
MD
421 return 0;
422}
423
424/* --------------------------------------------------------------------- */
425
426static int
427tmpfs_read (struct vop_read_args *ap)
428{
429 struct buf *bp;
430 struct vnode *vp = ap->a_vp;
431 struct uio *uio = ap->a_uio;
432 struct tmpfs_node *node;
7a2de9a4 433 off_t base_offset;
9fc94b5f 434 size_t offset;
7a2de9a4 435 size_t len;
9fc94b5f 436 int error;
7a2de9a4
MD
437
438 error = 0;
439 if (uio->uio_resid == 0) {
440 return error;
441 }
442
443 node = VP_TO_TMPFS_NODE(vp);
444
445 if (uio->uio_offset < 0)
446 return (EINVAL);
447 if (vp->v_type != VREG)
448 return (EINVAL);
449
7a2de9a4
MD
450 while (uio->uio_resid > 0 && uio->uio_offset < node->tn_size) {
451 /*
452 * Use buffer cache I/O (via tmpfs_strategy)
453 */
9fc94b5f 454 offset = (size_t)uio->uio_offset & BMASK;
7a2de9a4 455 base_offset = (off_t)uio->uio_offset - offset;
9d4e78c7
MD
456 bp = getcacheblk(vp, base_offset, BSIZE, 0);
457 if (bp == NULL) {
b403e861 458 lwkt_gettoken(&vp->v_mount->mnt_token);
7a2de9a4
MD
459 error = bread(vp, base_offset, BSIZE, &bp);
460 if (error) {
461 brelse(bp);
b403e861 462 lwkt_reltoken(&vp->v_mount->mnt_token);
7a2de9a4
MD
463 kprintf("tmpfs_read bread error %d\n", error);
464 break;
465 }
b403e861 466 lwkt_reltoken(&vp->v_mount->mnt_token);
7a2de9a4
MD
467 }
468
7a2de9a4
MD
469 /*
470 * Figure out how many bytes we can actually copy this loop.
471 */
472 len = BSIZE - offset;
473 if (len > uio->uio_resid)
474 len = uio->uio_resid;
475 if (len > node->tn_size - uio->uio_offset)
476 len = (size_t)(node->tn_size - uio->uio_offset);
477
478 error = uiomove((char *)bp->b_data + offset, len, uio);
479 bqrelse(bp);
480 if (error) {
481 kprintf("tmpfs_read uiomove error %d\n", error);
482 break;
483 }
484 }
485
7a2de9a4
MD
486 TMPFS_NODE_LOCK(node);
487 node->tn_status |= TMPFS_NODE_ACCESSED;
488 TMPFS_NODE_UNLOCK(node);
489
7a2de9a4
MD
490 return(error);
491}
492
493static int
494tmpfs_write (struct vop_write_args *ap)
495{
496 struct buf *bp;
497 struct vnode *vp = ap->a_vp;
498 struct uio *uio = ap->a_uio;
499 struct thread *td = uio->uio_td;
500 struct tmpfs_node *node;
501 boolean_t extended;
502 off_t oldsize;
503 int error;
7a2de9a4 504 off_t base_offset;
9fc94b5f 505 size_t offset;
7a2de9a4
MD
506 size_t len;
507 struct rlimit limit;
7a2de9a4 508 int trivial = 0;
80ae59d7 509 int kflags = 0;
7a2de9a4
MD
510
511 error = 0;
512 if (uio->uio_resid == 0) {
513 return error;
514 }
515
516 node = VP_TO_TMPFS_NODE(vp);
517
518 if (vp->v_type != VREG)
519 return (EINVAL);
520
1be4932c
VS
521 lwkt_gettoken(&vp->v_mount->mnt_token);
522
7a2de9a4
MD
523 oldsize = node->tn_size;
524 if (ap->a_ioflag & IO_APPEND)
525 uio->uio_offset = node->tn_size;
526
527 /*
528 * Check for illegal write offsets.
529 */
530 if (uio->uio_offset + uio->uio_resid >
531 VFS_TO_TMPFS(vp->v_mount)->tm_maxfilesize) {
532 lwkt_reltoken(&vp->v_mount->mnt_token);
7a2de9a4 533 return (EFBIG);
1be4932c 534 }
7a2de9a4
MD
535
536 if (vp->v_type == VREG && td != NULL) {
537 error = kern_getrlimit(RLIMIT_FSIZE, &limit);
1be4932c
VS
538 if (error != 0) {
539 lwkt_reltoken(&vp->v_mount->mnt_token);
7a2de9a4 540 return error;
1be4932c 541 }
7a2de9a4
MD
542 if (uio->uio_offset + uio->uio_resid > limit.rlim_cur) {
543 ksignal(td->td_proc, SIGXFSZ);
1be4932c 544 lwkt_reltoken(&vp->v_mount->mnt_token);
7a2de9a4
MD
545 return (EFBIG);
546 }
547 }
548
549
550 /*
551 * Extend the file's size if necessary
552 */
9fc94b5f 553 extended = ((uio->uio_offset + uio->uio_resid) > node->tn_size);
7a2de9a4 554
7a2de9a4
MD
555 while (uio->uio_resid > 0) {
556 /*
557 * Use buffer cache I/O (via tmpfs_strategy)
558 */
9fc94b5f 559 offset = (size_t)uio->uio_offset & BMASK;
7a2de9a4
MD
560 base_offset = (off_t)uio->uio_offset - offset;
561 len = BSIZE - offset;
562 if (len > uio->uio_resid)
563 len = uio->uio_resid;
564
565 if ((uio->uio_offset + len) > node->tn_size) {
9fc94b5f 566 trivial = (uio->uio_offset <= node->tn_size);
7a2de9a4
MD
567 error = tmpfs_reg_resize(vp, uio->uio_offset + len, trivial);
568 if (error)
569 break;
570 }
571
9fc94b5f
MD
572 /*
573 * Read to fill in any gaps. Theoretically we could
574 * optimize this if the write covers the entire buffer
575 * and is not a UIO_NOCOPY write, however this can lead
576 * to a security violation exposing random kernel memory
577 * (whatever junk was in the backing VM pages before).
578 *
579 * So just use bread() to do the right thing.
580 */
581 error = bread(vp, base_offset, BSIZE, &bp);
7a2de9a4
MD
582 error = uiomove((char *)bp->b_data + offset, len, uio);
583 if (error) {
584 kprintf("tmpfs_write uiomove error %d\n", error);
585 brelse(bp);
586 break;
587 }
588
80ae59d7 589 if (uio->uio_offset > node->tn_size) {
7a2de9a4 590 node->tn_size = uio->uio_offset;
80ae59d7
MD
591 kflags |= NOTE_EXTEND;
592 }
593 kflags |= NOTE_WRITE;
7a2de9a4
MD
594
595 /*
596 * Always try to flush the page if the request is coming
597 * from the pageout daemon (IO_ASYNC), else buwrite() the
598 * buffer.
599 *
600 * buwrite() dirties the underlying VM pages instead of
601 * dirtying the buffer, releasing the buffer as a clean
602 * buffer. This allows tmpfs to use essentially all
603 * available memory to cache file data. If we used bdwrite()
604 * the buffer cache would wind up flushing the data to
605 * swap too quickly.
606 */
b5c0b8b2
MD
607 bp->b_flags |= B_AGE;
608 if (ap->a_ioflag & IO_ASYNC) {
609 bawrite(bp);
b7545cb3 610 } else {
b5c0b8b2 611 buwrite(bp);
d89ce96a 612 }
9fc94b5f 613
7a2de9a4 614 if (bp->b_error) {
2cd8c774 615 kprintf("tmpfs_write bwrite error %d\n", bp->b_error);
7a2de9a4
MD
616 break;
617 }
618 }
7a2de9a4 619
7a2de9a4 620 if (error) {
80ae59d7 621 if (extended) {
7a2de9a4 622 (void)tmpfs_reg_resize(vp, oldsize, trivial);
80ae59d7
MD
623 kflags &= ~NOTE_EXTEND;
624 }
625 goto done;
7a2de9a4
MD
626 }
627
b5c0b8b2
MD
628 /*
629 * Currently we don't set the mtime on files modified via mmap()
630 * because we can't tell the difference between those modifications
631 * and an attempt by the pageout daemon to flush tmpfs pages to
632 * swap.
633 *
634 * This is because in order to defer flushes as long as possible
635 * buwrite() works by marking the underlying VM pages dirty in
636 * order to be able to dispose of the buffer cache buffer without
637 * flushing it.
638 */
7a2de9a4 639 TMPFS_NODE_LOCK(node);
b5c0b8b2
MD
640 if (uio->uio_segflg != UIO_NOCOPY)
641 node->tn_status |= TMPFS_NODE_ACCESSED | TMPFS_NODE_MODIFIED;
642 if (extended)
643 node->tn_status |= TMPFS_NODE_CHANGED;
7a2de9a4
MD
644
645 if (node->tn_mode & (S_ISUID | S_ISGID)) {
646 if (priv_check_cred(ap->a_cred, PRIV_VFS_RETAINSUGID, 0))
647 node->tn_mode &= ~(S_ISUID | S_ISGID);
648 }
649 TMPFS_NODE_UNLOCK(node);
80ae59d7 650done:
7a2de9a4 651
80ae59d7 652 tmpfs_knote(vp, kflags);
1be4932c
VS
653
654
655 lwkt_reltoken(&vp->v_mount->mnt_token);
7a2de9a4
MD
656 return(error);
657}
658
659static int
660tmpfs_advlock (struct vop_advlock_args *ap)
661{
662 struct tmpfs_node *node;
663 struct vnode *vp = ap->a_vp;
664
665 node = VP_TO_TMPFS_NODE(vp);
666
667 return (lf_advlock(ap, &node->tn_advlock, node->tn_size));
668}
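/*
 * Illustrative sketch (userland; fd is assumed to be an open tmpfs
 * file): advisory locks taken with fcntl(2) end up being serviced by
 * the lf_advlock() call above.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	struct flock fl;
 *	fl.l_type = F_WRLCK;
 *	fl.l_whence = SEEK_SET;
 *	fl.l_start = 0;
 *	fl.l_len = 0;			0 length locks the whole file
 *	fcntl(fd, F_SETLKW, &fl);	blocks until the lock is granted
 */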
669
b5c0b8b2
MD
670/*
671 * The strategy function is typically only called when memory pressure
672 * forces the system to attempt to pageout pages. It can also be called
673 * by [n]vtruncbuf() when a truncation cuts a page in half. Normal writes
674 * go through buwrite() and dirty the backing VM pages instead.
675 */
7a2de9a4
MD
676static int
677tmpfs_strategy(struct vop_strategy_args *ap)
678{
679 struct bio *bio = ap->a_bio;
8f9ba07b 680 struct bio *nbio;
9fc94b5f 681 struct buf *bp = bio->bio_buf;
7a2de9a4
MD
682 struct vnode *vp = ap->a_vp;
683 struct tmpfs_node *node;
684 vm_object_t uobj;
8f9ba07b
MD
685 vm_page_t m;
686 int i;
7a2de9a4 687
9fc94b5f
MD
688 if (vp->v_type != VREG) {
689 bp->b_resid = bp->b_bcount;
690 bp->b_flags |= B_ERROR | B_INVAL;
691 bp->b_error = EINVAL;
692 biodone(bio);
693 return(0);
694 }
7a2de9a4 695
d1f61aa2 696 lwkt_gettoken(&vp->v_mount->mnt_token);
7a2de9a4
MD
697 node = VP_TO_TMPFS_NODE(vp);
698
699 uobj = node->tn_reg.tn_aobj;
9fc94b5f 700
701 /*
702 * Don't bother flushing to swap if there is no swap, just
703 * ensure that the pages are marked as needing a commit (still).
704 */
8f9ba07b
MD
705 if (bp->b_cmd == BUF_CMD_WRITE && vm_swap_size == 0) {
706 for (i = 0; i < bp->b_xio.xio_npages; ++i) {
707 m = bp->b_xio.xio_pages[i];
b5c0b8b2 708 vm_page_need_commit(m);
8f9ba07b
MD
709 }
710 bp->b_resid = 0;
711 bp->b_error = 0;
712 biodone(bio);
713 } else {
714 nbio = push_bio(bio);
715 nbio->bio_done = tmpfs_strategy_done;
716 nbio->bio_offset = bio->bio_offset;
717 swap_pager_strategy(uobj, nbio);
718 }
7a2de9a4 719
d1f61aa2 720 lwkt_reltoken(&vp->v_mount->mnt_token);
7a2de9a4
MD
721 return 0;
722}
723
724/*
725 * If we were unable to commit the pages to swap, make sure they are marked
726 * as needing a commit (again). If we were able to, clear the flag to allow
727 * the pages to be freed.
728 */
729static void
730tmpfs_strategy_done(struct bio *bio)
731{
732 struct buf *bp;
733 vm_page_t m;
734 int i;
735
736 bp = bio->bio_buf;
737
b5c0b8b2 738 if (bp->b_flags & B_ERROR) {
8f9ba07b
MD
739 bp->b_flags &= ~B_ERROR;
740 bp->b_error = 0;
741 bp->b_resid = 0;
742 for (i = 0; i < bp->b_xio.xio_npages; ++i) {
743 m = bp->b_xio.xio_pages[i];
b5c0b8b2
MD
744 vm_page_need_commit(m);
745 }
746 } else {
747 for (i = 0; i < bp->b_xio.xio_npages; ++i) {
748 m = bp->b_xio.xio_pages[i];
749 vm_page_clear_commit(m);
8f9ba07b
MD
750 }
751 }
752 bio = pop_bio(bio);
753 biodone(bio);
754}
755
7a2de9a4
MD
756static int
757tmpfs_bmap(struct vop_bmap_args *ap)
758{
759 if (ap->a_doffsetp != NULL)
760 *ap->a_doffsetp = ap->a_loffset;
761 if (ap->a_runp != NULL)
762 *ap->a_runp = 0;
763 if (ap->a_runb != NULL)
764 *ap->a_runb = 0;
765
766 return 0;
767}
9fc94b5f 768
7a2de9a4
MD
769/* --------------------------------------------------------------------- */
770
771static int
772tmpfs_nremove(struct vop_nremove_args *v)
773{
774 struct vnode *dvp = v->a_dvp;
775 struct namecache *ncp = v->a_nch->ncp;
9fc94b5f 776 struct vnode *vp;
7a2de9a4
MD
777 int error;
778 struct tmpfs_dirent *de;
779 struct tmpfs_mount *tmp;
780 struct tmpfs_node *dnode;
781 struct tmpfs_node *node;
782
783 /*
784 * We have to acquire the vp from v->a_nch because we will likely
785 * unresolve the namecache entry, and a vrele/vput is needed to
786 * trigger the tmpfs_inactive/tmpfs_reclaim sequence.
787 *
788 * We have to use vget to clear any inactive state on the vnode,
789 * otherwise the vnode may remain inactive and thus tmpfs_inactive
790 * will not get called when we release it.
791 */
a1fa5d8d 792 error = cache_vget(v->a_nch, v->a_cred, LK_SHARED, &vp);
9fc94b5f 793 KKASSERT(error == 0);
a1fa5d8d 794 vn_unlock(vp);
7a2de9a4
MD
795
796 if (vp->v_type == VDIR) {
797 error = EISDIR;
798 goto out;
799 }
800
801 dnode = VP_TO_TMPFS_DIR(dvp);
802 node = VP_TO_TMPFS_NODE(vp);
803 tmp = VFS_TO_TMPFS(vp->v_mount);
804 de = tmpfs_dir_lookup(dnode, node, ncp);
805 if (de == NULL) {
806 error = ENOENT;
807 goto out;
808 }
809
810 /* Files marked as immutable or append-only cannot be deleted. */
811 if ((node->tn_flags & (IMMUTABLE | APPEND | NOUNLINK)) ||
812 (dnode->tn_flags & APPEND)) {
813 error = EPERM;
814 goto out;
815 }
816
817 /* Remove the entry from the directory; as it is a file, we do not
818 * have to change the number of hard links of the directory. */
22d3b394 819 tmpfs_dir_detach(dnode, de);
7a2de9a4
MD
820
821 /* Free the directory entry we just deleted. Note that the node
822 * referred by it will not be removed until the vnode is really
823 * reclaimed. */
0786baf1 824 tmpfs_free_dirent(tmp, de);
7a2de9a4
MD
825
826 if (node->tn_links > 0) {
827 TMPFS_NODE_LOCK(node);
828 node->tn_status |= TMPFS_NODE_ACCESSED | TMPFS_NODE_CHANGED | \
829 TMPFS_NODE_MODIFIED;
830 TMPFS_NODE_UNLOCK(node);
831 }
832
833 cache_setunresolved(v->a_nch);
834 cache_setvp(v->a_nch, NULL);
80ae59d7 835 tmpfs_knote(vp, NOTE_DELETE);
9fc94b5f 836 /*cache_inval_vp(vp, CINV_DESTROY);*/
80ae59d7 837 tmpfs_knote(dvp, NOTE_WRITE);
7a2de9a4
MD
838 error = 0;
839
7a2de9a4 840out:
9fc94b5f 841 vrele(vp);
7a2de9a4
MD
842
843 return error;
844}
845
846/* --------------------------------------------------------------------- */
847
848static int
849tmpfs_nlink(struct vop_nlink_args *v)
850{
851 struct vnode *dvp = v->a_dvp;
852 struct vnode *vp = v->a_vp;
853 struct namecache *ncp = v->a_nch->ncp;
7a2de9a4
MD
854 struct tmpfs_dirent *de;
855 struct tmpfs_node *node;
22d3b394
MD
856 struct tmpfs_node *dnode;
857 int error;
7a2de9a4 858
7a2de9a4
MD
859 KKASSERT(dvp != vp); /* XXX When can this be false? */
860
861 node = VP_TO_TMPFS_NODE(vp);
22d3b394 862 dnode = VP_TO_TMPFS_NODE(dvp);
7a2de9a4
MD
863
864 /* XXX: Why aren't the following two tests done by the caller? */
865
866 /* Hard links of directories are forbidden. */
867 if (vp->v_type == VDIR) {
868 error = EPERM;
869 goto out;
870 }
871
872 /* Cannot create cross-device links. */
873 if (dvp->v_mount != vp->v_mount) {
874 error = EXDEV;
875 goto out;
876 }
877
878 /* Ensure that we do not overflow the maximum number of links imposed
879 * by the system. */
880 KKASSERT(node->tn_links <= LINK_MAX);
881 if (node->tn_links == LINK_MAX) {
882 error = EMLINK;
883 goto out;
884 }
885
886 /* We cannot create links of files marked immutable or append-only. */
887 if (node->tn_flags & (IMMUTABLE | APPEND)) {
888 error = EPERM;
889 goto out;
890 }
891
892 /* Allocate a new directory entry to represent the node. */
893 error = tmpfs_alloc_dirent(VFS_TO_TMPFS(vp->v_mount), node,
894 ncp->nc_name, ncp->nc_nlen, &de);
895 if (error != 0)
896 goto out;
897
898 /* Insert the new directory entry into the appropriate directory. */
22d3b394 899 tmpfs_dir_attach(dnode, de);
7a2de9a4
MD
900
901 /* vp link count has changed, so update node times. */
902
903 TMPFS_NODE_LOCK(node);
904 node->tn_status |= TMPFS_NODE_CHANGED;
905 TMPFS_NODE_UNLOCK(node);
906 tmpfs_update(vp);
907
80ae59d7 908 tmpfs_knote(vp, NOTE_LINK);
7a2de9a4
MD
909 cache_setunresolved(v->a_nch);
910 cache_setvp(v->a_nch, vp);
80ae59d7 911 tmpfs_knote(dvp, NOTE_WRITE);
7a2de9a4
MD
912 error = 0;
913
914out:
7a2de9a4
MD
915 return error;
916}
917
918/* --------------------------------------------------------------------- */
919
920static int
921tmpfs_nrename(struct vop_nrename_args *v)
922{
923 struct vnode *fdvp = v->a_fdvp;
924 struct namecache *fncp = v->a_fnch->ncp;
925 struct vnode *fvp = fncp->nc_vp;
926 struct vnode *tdvp = v->a_tdvp;
927 struct namecache *tncp = v->a_tnch->ncp;
a1fa5d8d 928 struct vnode *tvp;
7a2de9a4
MD
929 struct tmpfs_dirent *de;
930 struct tmpfs_mount *tmp;
931 struct tmpfs_node *fdnode;
932 struct tmpfs_node *fnode;
933 struct tmpfs_node *tnode;
934 struct tmpfs_node *tdnode;
22d3b394 935 char *newname;
dca262fb 936 char *oldname;
22d3b394 937 int error;
7a2de9a4 938
a1fa5d8d
MD
939 /*
940 * Because tvp can get overwritten we have to vget it instead of
941 * just vref'ing or using it; otherwise its VINACTIVE flag may not get
942 * cleared and the node won't get destroyed.
943 */
944 error = cache_vget(v->a_tnch, v->a_cred, LK_SHARED, &tvp);
945 if (error == 0) {
946 tnode = VP_TO_TMPFS_NODE(tvp);
947 vn_unlock(tvp);
948 } else {
949 tnode = NULL;
950 }
7a2de9a4
MD
951
952 /* Disallow cross-device renames.
953 * XXX Why isn't this done by the caller? */
954 if (fvp->v_mount != tdvp->v_mount ||
955 (tvp != NULL && fvp->v_mount != tvp->v_mount)) {
956 error = EXDEV;
957 goto out;
958 }
959
960 tmp = VFS_TO_TMPFS(tdvp->v_mount);
961 tdnode = VP_TO_TMPFS_DIR(tdvp);
962
963 /* If source and target are the same file, there is nothing to do. */
964 if (fvp == tvp) {
965 error = 0;
966 goto out;
967 }
968
7a2de9a4
MD
969 fdnode = VP_TO_TMPFS_DIR(fdvp);
970 fnode = VP_TO_TMPFS_NODE(fvp);
971 de = tmpfs_dir_lookup(fdnode, fnode, fncp);
972
973 /* Avoid manipulating '.' and '..' entries. */
974 if (de == NULL) {
975 error = ENOENT;
976 goto out_locked;
977 }
978 KKASSERT(de->td_node == fnode);
979
dca262fb
MD
980 /*
981 * If replacing an entry in the target directory and that entry
982 * is a directory, it must be empty.
983 *
984 * Kern_rename guarantees the destination to be a directory
dca262fb
MD
985 * if the source is one (it does?).
986 */
7a2de9a4
MD
987 if (tvp != NULL) {
988 KKASSERT(tnode != NULL);
989
990 if ((tnode->tn_flags & (NOUNLINK | IMMUTABLE | APPEND)) ||
991 (tdnode->tn_flags & (APPEND | IMMUTABLE))) {
992 error = EPERM;
993 goto out_locked;
994 }
995
996 if (fnode->tn_type == VDIR && tnode->tn_type == VDIR) {
997 if (tnode->tn_size > 0) {
998 error = ENOTEMPTY;
999 goto out_locked;
1000 }
1001 } else if (fnode->tn_type == VDIR && tnode->tn_type != VDIR) {
1002 error = ENOTDIR;
1003 goto out_locked;
1004 } else if (fnode->tn_type != VDIR && tnode->tn_type == VDIR) {
1005 error = EISDIR;
1006 goto out_locked;
1007 } else {
1008 KKASSERT(fnode->tn_type != VDIR &&
1009 tnode->tn_type != VDIR);
1010 }
1011 }
1012
dca262fb
MD
1013 if ((fnode->tn_flags & (NOUNLINK | IMMUTABLE | APPEND)) ||
1014 (fdnode->tn_flags & (APPEND | IMMUTABLE))) {
7a2de9a4
MD
1015 error = EPERM;
1016 goto out_locked;
1017 }
1018
dca262fb
MD
1019 /*
1020 * Ensure that we have enough memory to hold the new name, if it
1021 * has to be changed.
1022 */
7a2de9a4
MD
1023 if (fncp->nc_nlen != tncp->nc_nlen ||
1024 bcmp(fncp->nc_name, tncp->nc_name, fncp->nc_nlen) != 0) {
d00cd01c 1025 newname = kmalloc(tncp->nc_nlen + 1, tmp->tm_name_zone,
1026 M_WAITOK | M_NULLOK);
1027 if (newname == NULL) {
1028 error = ENOSPC;
1029 goto out_locked;
1030 }
dca262fb
MD
1031 bcopy(tncp->nc_name, newname, tncp->nc_nlen);
1032 newname[tncp->nc_nlen] = '\0';
1033 } else {
7a2de9a4 1034 newname = NULL;
dca262fb 1035 }
7a2de9a4 1036
dca262fb
MD
1037 /*
1038 * Unlink entry from source directory. Note that the kernel has
1039 * already checked for illegal recursion cases (renaming a directory
1040 * into a subdirectory of itself).
1041 */
1042 if (fdnode != tdnode)
1043 tmpfs_dir_detach(fdnode, de);
1044
1045 /*
1046 * Handle any name change. Swap with newname, we will
1047 * deallocate it at the end.
1048 */
1049 if (newname != NULL) {
1050#if 0
1051 TMPFS_NODE_LOCK(fnode);
1052 fnode->tn_status |= TMPFS_NODE_CHANGED;
1053 TMPFS_NODE_UNLOCK(fnode);
1054#endif
1055 oldname = de->td_name;
1056 de->td_name = newname;
1057 de->td_namelen = (uint16_t)tncp->nc_nlen;
1058 newname = oldname;
1059 }
1060
1061 /*
1062 * Link entry to target directory. If the entry
1063 * represents a directory move the parent linkage
1064 * as well.
1065 */
7a2de9a4 1066 if (fdnode != tdnode) {
7a2de9a4 1067 if (de->td_node->tn_type == VDIR) {
7a2de9a4 1068 TMPFS_VALIDATE_DIR(fnode);
7a2de9a4 1069
7a2de9a4 1070 TMPFS_NODE_LOCK(tdnode);
7a2de9a4 1071 tdnode->tn_links++;
dca262fb
MD
1072 tdnode->tn_status |= TMPFS_NODE_MODIFIED;
1073 TMPFS_NODE_UNLOCK(tdnode);
7a2de9a4 1074
dca262fb
MD
1075 TMPFS_NODE_LOCK(fnode);
1076 fnode->tn_dir.tn_parent = tdnode;
1077 fnode->tn_status |= TMPFS_NODE_CHANGED;
1078 TMPFS_NODE_UNLOCK(fnode);
1079
1080 TMPFS_NODE_LOCK(fdnode);
1081 fdnode->tn_links--;
1082 fdnode->tn_status |= TMPFS_NODE_MODIFIED;
7a2de9a4 1083 TMPFS_NODE_UNLOCK(fdnode);
7a2de9a4 1084 }
22d3b394 1085 tmpfs_dir_attach(tdnode, de);
dca262fb 1086 } else {
7a2de9a4 1087 TMPFS_NODE_LOCK(tdnode);
7a2de9a4 1088 tdnode->tn_status |= TMPFS_NODE_MODIFIED;
7a2de9a4
MD
1089 TMPFS_NODE_UNLOCK(tdnode);
1090 }
1091
dca262fb
MD
1092 /*
1093 * If we are overwriting an entry, we have to remove the old one
1094 * from the target directory.
1095 */
7a2de9a4
MD
1096 if (tvp != NULL) {
1097 /* Remove the old entry from the target directory. */
1098 de = tmpfs_dir_lookup(tdnode, tnode, tncp);
22d3b394 1099 tmpfs_dir_detach(tdnode, de);
80ae59d7 1100 tmpfs_knote(tdnode->tn_vnode, NOTE_DELETE);
7a2de9a4 1101
dca262fb
MD
1102 /*
1103 * Free the directory entry we just deleted. Note that the
1104 * node referred by it will not be removed until the vnode is
1105 * really reclaimed.
1106 */
0786baf1 1107 tmpfs_free_dirent(VFS_TO_TMPFS(tvp->v_mount), de);
9fc94b5f 1108 /*cache_inval_vp(tvp, CINV_DESTROY);*/
7a2de9a4
MD
1109 }
1110
dca262fb
MD
1111 /*
1112 * Finish up
1113 */
1114 if (newname) {
d00cd01c 1115 kfree(newname, tmp->tm_name_zone);
dca262fb
MD
1116 newname = NULL;
1117 }
7a2de9a4 1118 cache_rename(v->a_fnch, v->a_tnch);
80ae59d7
MD
1119 tmpfs_knote(v->a_fdvp, NOTE_WRITE);
1120 tmpfs_knote(v->a_tdvp, NOTE_WRITE);
1121 if (fnode->tn_vnode)
1122 tmpfs_knote(fnode->tn_vnode, NOTE_RENAME);
7a2de9a4
MD
1123 error = 0;
1124
1125out_locked:
630e3a33 1126 ;
7a2de9a4
MD
1127
1128out:
a1fa5d8d
MD
1129 if (tvp)
1130 vrele(tvp);
7a2de9a4
MD
1131
1132 return error;
1133}
1134
1135/* --------------------------------------------------------------------- */
1136
1137static int
1138tmpfs_nmkdir(struct vop_nmkdir_args *v)
1139{
1140 struct vnode *dvp = v->a_dvp;
1141 struct vnode **vpp = v->a_vpp;
1142 struct namecache *ncp = v->a_nch->ncp;
1143 struct vattr *vap = v->a_vap;
1144 struct ucred *cred = v->a_cred;
1145 int error;
1146
1147 KKASSERT(vap->va_type == VDIR);
1148
7a2de9a4
MD
1149 error = tmpfs_alloc_file(dvp, vpp, vap, ncp, cred, NULL);
1150 if (error == 0) {
1151 cache_setunresolved(v->a_nch);
1152 cache_setvp(v->a_nch, *vpp);
80ae59d7 1153 tmpfs_knote(dvp, NOTE_WRITE | NOTE_LINK);
7a2de9a4 1154 }
7a2de9a4
MD
1155
1156 return error;
1157}
1158
1159/* --------------------------------------------------------------------- */
1160
1161static int
1162tmpfs_nrmdir(struct vop_nrmdir_args *v)
1163{
1164 struct vnode *dvp = v->a_dvp;
1165 struct namecache *ncp = v->a_nch->ncp;
9fc94b5f 1166 struct vnode *vp;
7a2de9a4
MD
1167 struct tmpfs_dirent *de;
1168 struct tmpfs_mount *tmp;
1169 struct tmpfs_node *dnode;
1170 struct tmpfs_node *node;
38e5e604
MD
1171 int error;
1172
1173 /*
1174 * We have to acquire the vp from v->a_nch because we will likely
1175 * unresolve the namecache entry, and a vrele/vput is needed to
1176 * trigger the tmpfs_inactive/tmpfs_reclaim sequence.
1177 *
1178 * We have to use vget to clear any inactive state on the vnode,
1179 * otherwise the vnode may remain inactive and thus tmpfs_inactive
1180 * will not get called when we release it.
1181 */
a1fa5d8d 1182 error = cache_vget(v->a_nch, v->a_cred, LK_SHARED, &vp);
9fc94b5f 1183 KKASSERT(error == 0);
a1fa5d8d 1184 vn_unlock(vp);
7a2de9a4 1185
e527fb6b
MD
1186 /*
1187 * Prevalidate so we don't hit an assertion later
1188 */
1189 if (vp->v_type != VDIR) {
1190 error = ENOTDIR;
1191 goto out;
1192 }
1193
7a2de9a4
MD
1194 tmp = VFS_TO_TMPFS(dvp->v_mount);
1195 dnode = VP_TO_TMPFS_DIR(dvp);
1196 node = VP_TO_TMPFS_DIR(vp);
1197
1198 /* Directories with more than two entries ('.' and '..') cannot be
1199 * removed. */
1200 if (node->tn_size > 0) {
1201 error = ENOTEMPTY;
1202 goto out;
1203 }
1204
1205 if ((dnode->tn_flags & APPEND)
1206 || (node->tn_flags & (NOUNLINK | IMMUTABLE | APPEND))) {
1207 error = EPERM;
1208 goto out;
1209 }
1210
1211 /* This invariant holds only if we are not trying to remove "..".
1212 * We checked for that above so this is safe now. */
1213 KKASSERT(node->tn_dir.tn_parent == dnode);
1214
1215 /* Get the directory entry associated with node (vp). This was
1216 * filled by tmpfs_lookup while looking up the entry. */
1217 de = tmpfs_dir_lookup(dnode, node, ncp);
1218 KKASSERT(TMPFS_DIRENT_MATCHES(de,
1219 ncp->nc_name,
1220 ncp->nc_nlen));
1221
1222 /* Check flags to see if we are allowed to remove the directory. */
b7fe63af
MD
1223 if ((dnode->tn_flags & APPEND) ||
1224 node->tn_flags & (NOUNLINK | IMMUTABLE | APPEND)) {
7a2de9a4
MD
1225 error = EPERM;
1226 goto out;
1227 }
1228
1229
1230 /* Detach the directory entry from the directory (dnode). */
22d3b394 1231 tmpfs_dir_detach(dnode, de);
7a2de9a4
MD
1232
1233 /* No vnode should be allocated for this entry from this point */
1234 TMPFS_NODE_LOCK(node);
1235 TMPFS_ASSERT_ELOCKED(node);
1236 TMPFS_NODE_LOCK(dnode);
1237 TMPFS_ASSERT_ELOCKED(dnode);
1238
0786baf1
MD
1239#if 0
1240 /* handled by tmpfs_free_node */
1241 KKASSERT(node->tn_links > 0);
7a2de9a4
MD
1242 node->tn_links--;
1243 node->tn_dir.tn_parent = NULL;
0786baf1 1244#endif
7a2de9a4
MD
1245 node->tn_status |= TMPFS_NODE_ACCESSED | TMPFS_NODE_CHANGED | \
1246 TMPFS_NODE_MODIFIED;
1247
0786baf1
MD
1248#if 0
1249 /* handled by tmpfs_free_node */
1250 KKASSERT(dnode->tn_links > 0);
7a2de9a4 1251 dnode->tn_links--;
0786baf1 1252#endif
7a2de9a4
MD
1253 dnode->tn_status |= TMPFS_NODE_ACCESSED | \
1254 TMPFS_NODE_CHANGED | TMPFS_NODE_MODIFIED;
1255
1256 TMPFS_NODE_UNLOCK(dnode);
1257 TMPFS_NODE_UNLOCK(node);
1258
1259 /* Free the directory entry we just deleted. Note that the node
1260 * referred by it will not be removed until the vnode is really
1261 * reclaimed. */
0786baf1 1262 tmpfs_free_dirent(tmp, de);
7a2de9a4
MD
1263
1264 /* Release the deleted vnode (will destroy the node, notify
1265 * interested parties and clean it from the cache). */
1266
1267 TMPFS_NODE_LOCK(dnode);
1268 dnode->tn_status |= TMPFS_NODE_CHANGED;
1269 TMPFS_NODE_UNLOCK(dnode);
1270 tmpfs_update(dvp);
1271
1272 cache_setunresolved(v->a_nch);
1273 cache_setvp(v->a_nch, NULL);
9fc94b5f 1274 /*cache_inval_vp(vp, CINV_DESTROY);*/
80ae59d7 1275 tmpfs_knote(dvp, NOTE_WRITE | NOTE_LINK);
7a2de9a4
MD
1276 error = 0;
1277
1278out:
9fc94b5f 1279 vrele(vp);
7a2de9a4
MD
1280
1281 return error;
1282}
1283
1284/* --------------------------------------------------------------------- */
1285
1286static int
1287tmpfs_nsymlink(struct vop_nsymlink_args *v)
1288{
1289 struct vnode *dvp = v->a_dvp;
1290 struct vnode **vpp = v->a_vpp;
1291 struct namecache *ncp = v->a_nch->ncp;
1292 struct vattr *vap = v->a_vap;
1293 struct ucred *cred = v->a_cred;
1294 char *target = v->a_target;
1295 int error;
1296
7a2de9a4
MD
1297 vap->va_type = VLNK;
1298 error = tmpfs_alloc_file(dvp, vpp, vap, ncp, cred, target);
1299 if (error == 0) {
80ae59d7 1300 tmpfs_knote(*vpp, NOTE_WRITE);
7a2de9a4
MD
1301 cache_setunresolved(v->a_nch);
1302 cache_setvp(v->a_nch, *vpp);
1303 }
7a2de9a4
MD
1304
1305 return error;
1306}
1307
1308/* --------------------------------------------------------------------- */
1309
1310static int
1311tmpfs_readdir(struct vop_readdir_args *v)
1312{
1313 struct vnode *vp = v->a_vp;
1314 struct uio *uio = v->a_uio;
1315 int *eofflag = v->a_eofflag;
1316 off_t **cookies = v->a_cookies;
1317 int *ncookies = v->a_ncookies;
22d3b394 1318 struct tmpfs_mount *tmp;
7a2de9a4
MD
1319 int error;
1320 off_t startoff;
1321 off_t cnt = 0;
1322 struct tmpfs_node *node;
1323
1324 /* This operation only makes sense on directory nodes. */
1325 if (vp->v_type != VDIR)
1326 return ENOTDIR;
1327
22d3b394 1328 tmp = VFS_TO_TMPFS(vp->v_mount);
7a2de9a4
MD
1329 node = VP_TO_TMPFS_DIR(vp);
1330 startoff = uio->uio_offset;
1331
1332 if (uio->uio_offset == TMPFS_DIRCOOKIE_DOT) {
1333 error = tmpfs_dir_getdotdent(node, uio);
1334 if (error != 0)
1335 goto outok;
1336 cnt++;
1337 }
1338
1339 if (uio->uio_offset == TMPFS_DIRCOOKIE_DOTDOT) {
22d3b394 1340 error = tmpfs_dir_getdotdotdent(tmp, node, uio);
7a2de9a4
MD
1341 if (error != 0)
1342 goto outok;
1343 cnt++;
1344 }
1345
1346 error = tmpfs_dir_getdents(node, uio, &cnt);
1347
1348outok:
1349 KKASSERT(error >= -1);
1350
1351 if (error == -1)
1352 error = 0;
1353
1354 if (eofflag != NULL)
1355 *eofflag =
1356 (error == 0 && uio->uio_offset == TMPFS_DIRCOOKIE_EOF);
1357
1358 /* Update NFS-related variables. */
1359 if (error == 0 && cookies != NULL && ncookies != NULL) {
1360 off_t i;
1361 off_t off = startoff;
1362 struct tmpfs_dirent *de = NULL;
1363
1364 *ncookies = cnt;
1365 *cookies = kmalloc(cnt * sizeof(off_t), M_TEMP, M_WAITOK);
1366
1367 for (i = 0; i < cnt; i++) {
1368 KKASSERT(off != TMPFS_DIRCOOKIE_EOF);
1369 if (off == TMPFS_DIRCOOKIE_DOT) {
1370 off = TMPFS_DIRCOOKIE_DOTDOT;
1371 } else {
1372 if (off == TMPFS_DIRCOOKIE_DOTDOT) {
1373 de = TAILQ_FIRST(&node->tn_dir.tn_dirhead);
1374 } else if (de != NULL) {
1375 de = TAILQ_NEXT(de, td_entries);
1376 } else {
1377 de = tmpfs_dir_lookupbycookie(node,
1378 off);
1379 KKASSERT(de != NULL);
1380 de = TAILQ_NEXT(de, td_entries);
1381 }
1382 if (de == NULL)
1383 off = TMPFS_DIRCOOKIE_EOF;
1384 else
1385 off = tmpfs_dircookie(de);
1386 }
1387
1388 (*cookies)[i] = off;
1389 }
1390 KKASSERT(uio->uio_offset == off);
1391 }
7a2de9a4
MD
1392
1393 return error;
1394}
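/*
 * Illustrative sketch (userland, assumed path): the '.'/'..' handling
 * and cookie generation above are what back an ordinary directory scan.
 *
 *	#include <dirent.h>
 *	#include <stdio.h>
 *
 *	DIR *dp = opendir("/tmp/t");
 *	struct dirent *d;
 *	while ((d = readdir(dp)) != NULL)
 *		puts(d->d_name);
 *	closedir(dp);
 */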
1395
1396/* --------------------------------------------------------------------- */
1397
1398static int
1399tmpfs_readlink(struct vop_readlink_args *v)
1400{
1401 struct vnode *vp = v->a_vp;
1402 struct uio *uio = v->a_uio;
1403
1404 int error;
1405 struct tmpfs_node *node;
1406
1407 KKASSERT(uio->uio_offset == 0);
1408 KKASSERT(vp->v_type == VLNK);
1409
1410 node = VP_TO_TMPFS_NODE(vp);
1411
1412 error = uiomove(node->tn_link, MIN(node->tn_size, uio->uio_resid),
1413 uio);
1414 TMPFS_NODE_LOCK(node);
1415 node->tn_status |= TMPFS_NODE_ACCESSED;
1416 TMPFS_NODE_UNLOCK(node);
1417
1418 return error;
1419}
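/*
 * Illustrative sketch (userland, assumed paths): the target stored by
 * tmpfs_nsymlink() (via tmpfs_alloc_file()) is copied back out of
 * tn_link by tmpfs_readlink() above, which is what a symlink(2)/
 * readlink(2) round trip exercises.
 *
 *	#include <unistd.h>
 *	#include <limits.h>
 *
 *	char buf[PATH_MAX];
 *	ssize_t n;
 *	symlink("target-file", "/tmp/t/link");
 *	n = readlink("/tmp/t/link", buf, sizeof(buf) - 1);
 *	if (n >= 0)
 *		buf[n] = '\0';
 */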
1420
1421/* --------------------------------------------------------------------- */
1422
1423static int
1424tmpfs_inactive(struct vop_inactive_args *v)
1425{
1426 struct vnode *vp = v->a_vp;
7a2de9a4
MD
1427 struct tmpfs_node *node;
1428
7a2de9a4
MD
1429 node = VP_TO_TMPFS_NODE(vp);
1430
1431 /*
1432 * Degenerate case
1433 */
1434 if (node == NULL) {
1435 vrecycle(vp);
1436 return(0);
1437 }
1438
1439 /*
1440 * Get rid of unreferenced deleted vnodes sooner rather than
1441 * later so the data memory can be recovered immediately.
1442 *
1443 * We must truncate the vnode to prevent the normal reclamation
1444 * path from flushing the data for the removed file to disk.
1445 */
7a2de9a4 1446 TMPFS_NODE_LOCK(node);
b7fe63af
MD
1447 if ((node->tn_vpstate & TMPFS_VNODE_ALLOCATING) == 0 &&
1448 (node->tn_links == 0 ||
1449 (node->tn_links == 1 && node->tn_type == VDIR &&
1450 node->tn_dir.tn_parent)))
1451 {
9fc94b5f 1452 node->tn_vpstate = TMPFS_VNODE_DOOMED;
7a2de9a4 1453 TMPFS_NODE_UNLOCK(node);
f96f2f39
MD
1454 if (node->tn_type == VREG)
1455 tmpfs_truncate(vp, 0);
7a2de9a4 1456 vrecycle(vp);
9fc94b5f 1457 } else {
7a2de9a4 1458 TMPFS_NODE_UNLOCK(node);
9fc94b5f 1459 }
7a2de9a4
MD
1460
1461 return 0;
1462}
1463
1464/* --------------------------------------------------------------------- */
1465
1466int
1467tmpfs_reclaim(struct vop_reclaim_args *v)
1468{
1469 struct vnode *vp = v->a_vp;
7a2de9a4
MD
1470 struct tmpfs_mount *tmp;
1471 struct tmpfs_node *node;
1472
1473 node = VP_TO_TMPFS_NODE(vp);
1474 tmp = VFS_TO_TMPFS(vp->v_mount);
1475
7a2de9a4
MD
1476 tmpfs_free_vp(vp);
1477
b7fe63af
MD
1478 /*
1479 * If the node referenced by this vnode was deleted by the
1480 * user, we must free its associated data structures now that
1481 * the vnode is being reclaimed.
1482 *
1483 * Directories have an extra link ref.
1484 */
7a2de9a4 1485 TMPFS_NODE_LOCK(node);
b7fe63af
MD
1486 if ((node->tn_vpstate & TMPFS_VNODE_ALLOCATING) == 0 &&
1487 (node->tn_links == 0 ||
1488 (node->tn_links == 1 && node->tn_type == VDIR &&
1489 node->tn_dir.tn_parent)))
1490 {
7a2de9a4 1491 node->tn_vpstate = TMPFS_VNODE_DOOMED;
7a2de9a4 1492 tmpfs_free_node(tmp, node);
0786baf1 1493 /* eats the lock */
9fc94b5f 1494 } else {
7a2de9a4 1495 TMPFS_NODE_UNLOCK(node);
9fc94b5f 1496 }
7a2de9a4
MD
1497
1498 KKASSERT(vp->v_data == NULL);
1499 return 0;
1500}
1501
d22d7da4
VS
1502/* --------------------------------------------------------------------- */
1503
1504static int
1505tmpfs_mountctl(struct vop_mountctl_args *ap)
1506{
1507 struct tmpfs_mount *tmp;
1508 struct mount *mp;
1509 int rc;
1510
1511 switch (ap->a_op) {
1512 case (MOUNTCTL_SET_EXPORT):
1513 mp = ap->a_head.a_ops->head.vv_mount;
1514 tmp = (struct tmpfs_mount *) mp->mnt_data;
1515
1516 if (ap->a_ctllen != sizeof(struct export_args))
1517 rc = (EINVAL);
1518 else
1519 rc = vfs_export(mp, &tmp->tm_export,
1520 (const struct export_args *) ap->a_ctl);
1521 break;
1522 default:
1523 rc = vop_stdmountctl(ap);
1524 break;
1525 }
1526 return (rc);
1527}
1528
7a2de9a4
MD
1529/* --------------------------------------------------------------------- */
1530
1531static int
1532tmpfs_print(struct vop_print_args *v)
1533{
1534 struct vnode *vp = v->a_vp;
1535
1536 struct tmpfs_node *node;
1537
1538 node = VP_TO_TMPFS_NODE(vp);
1539
1540 kprintf("tag VT_TMPFS, tmpfs_node %p, flags 0x%x, links %d\n",
1541 node, node->tn_flags, node->tn_links);
1542 kprintf("\tmode 0%o, owner %d, group %d, size %ju, status 0x%x\n",
1543 node->tn_mode, node->tn_uid, node->tn_gid,
1544 (uintmax_t)node->tn_size, node->tn_status);
1545
1546 if (vp->v_type == VFIFO)
1547 fifo_printinfo(vp);
1548
1549 kprintf("\n");
1550
1551 return 0;
1552}
1553
1554/* --------------------------------------------------------------------- */
1555
1556static int
1557tmpfs_pathconf(struct vop_pathconf_args *v)
1558{
1559 int name = v->a_name;
1560 register_t *retval = v->a_retval;
1561
1562 int error;
1563
1564 error = 0;
1565
1566 switch (name) {
1567 case _PC_LINK_MAX:
1568 *retval = LINK_MAX;
1569 break;
1570
1571 case _PC_NAME_MAX:
1572 *retval = NAME_MAX;
1573 break;
1574
1575 case _PC_PATH_MAX:
1576 *retval = PATH_MAX;
1577 break;
1578
1579 case _PC_PIPE_BUF:
1580 *retval = PIPE_BUF;
1581 break;
1582
1583 case _PC_CHOWN_RESTRICTED:
1584 *retval = 1;
1585 break;
1586
1587 case _PC_NO_TRUNC:
1588 *retval = 1;
1589 break;
1590
1591 case _PC_SYNC_IO:
1592 *retval = 1;
1593 break;
1594
1595 case _PC_FILESIZEBITS:
1596 *retval = 0; /* XXX Don't know which value I should return. */
1597 break;
1598
1599 default:
1600 error = EINVAL;
1601 }
1602
1603 return error;
1604}
1605
80ae59d7
MD
1606/************************************************************************
1607 * KQFILTER OPS *
1608 ************************************************************************/
1609
1610static void filt_tmpfsdetach(struct knote *kn);
1611static int filt_tmpfsread(struct knote *kn, long hint);
1612static int filt_tmpfswrite(struct knote *kn, long hint);
1613static int filt_tmpfsvnode(struct knote *kn, long hint);
1614
1615static struct filterops tmpfsread_filtops =
1616 { FILTEROP_ISFD, NULL, filt_tmpfsdetach, filt_tmpfsread };
1617static struct filterops tmpfswrite_filtops =
1618 { FILTEROP_ISFD, NULL, filt_tmpfsdetach, filt_tmpfswrite };
1619static struct filterops tmpfsvnode_filtops =
1620 { FILTEROP_ISFD, NULL, filt_tmpfsdetach, filt_tmpfsvnode };
1621
1622static int
1623tmpfs_kqfilter (struct vop_kqfilter_args *ap)
1624{
1625 struct vnode *vp = ap->a_vp;
1626 struct knote *kn = ap->a_kn;
1627
1628 switch (kn->kn_filter) {
1629 case EVFILT_READ:
1630 kn->kn_fop = &tmpfsread_filtops;
1631 break;
1632 case EVFILT_WRITE:
1633 kn->kn_fop = &tmpfswrite_filtops;
1634 break;
1635 case EVFILT_VNODE:
1636 kn->kn_fop = &tmpfsvnode_filtops;
1637 break;
1638 default:
1639 return (EOPNOTSUPP);
1640 }
1641
1642 kn->kn_hook = (caddr_t)vp;
1643
1644 knote_insert(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn);
1645
1646 return(0);
1647}
1648
1649static void
1650filt_tmpfsdetach(struct knote *kn)
1651{
1652 struct vnode *vp = (void *)kn->kn_hook;
1653
1654 knote_remove(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn);
1655}
1656
1657static int
1658filt_tmpfsread(struct knote *kn, long hint)
1659{
1660 struct vnode *vp = (void *)kn->kn_hook;
1661 struct tmpfs_node *node = VP_TO_TMPFS_NODE(vp);
1662 off_t off;
1663
1664 if (hint == NOTE_REVOKE) {
3bcb6e5e 1665 kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
80ae59d7
MD
1666 return(1);
1667 }
f79d9cc9
MD
1668
1669 /*
1670 * Interlock against MP races when performing this function.
1671 */
1672 lwkt_gettoken(&vp->v_mount->mnt_token);
80ae59d7
MD
1673 off = node->tn_size - kn->kn_fp->f_offset;
1674 kn->kn_data = (off < INTPTR_MAX) ? off : INTPTR_MAX;
f79d9cc9
MD
1675 if (kn->kn_sfflags & NOTE_OLDAPI) {
1676 lwkt_reltoken(&vp->v_mount->mnt_token);
80ae59d7 1677 return(1);
f79d9cc9 1678 }
80ae59d7 1679
80ae59d7 1680 if (kn->kn_data == 0) {
80ae59d7 1681 kn->kn_data = (off < INTPTR_MAX) ? off : INTPTR_MAX;
80ae59d7 1682 }
f79d9cc9 1683 lwkt_reltoken(&vp->v_mount->mnt_token);
80ae59d7
MD
1684 return (kn->kn_data != 0);
1685}
1686
1687static int
1688filt_tmpfswrite(struct knote *kn, long hint)
1689{
1690 if (hint == NOTE_REVOKE)
3bcb6e5e 1691 kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
80ae59d7
MD
1692 kn->kn_data = 0;
1693 return (1);
1694}
1695
1696static int
1697filt_tmpfsvnode(struct knote *kn, long hint)
1698{
1699 if (kn->kn_sfflags & hint)
1700 kn->kn_fflags |= hint;
1701 if (hint == NOTE_REVOKE) {
3bcb6e5e 1702 kn->kn_flags |= (EV_EOF | EV_NODATA);
80ae59d7
MD
1703 return (1);
1704 }
1705 return (kn->kn_fflags != 0);
1706}
1707
1708
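/*
 * Illustrative sketch (userland, assumed file name): the NOTE_* events
 * posted through tmpfs_knote() in this file are delivered to consumers
 * via the EVFILT_VNODE filter registered above.
 *
 *	#include <sys/types.h>
 *	#include <sys/event.h>
 *	#include <sys/time.h>
 *	#include <fcntl.h>
 *
 *	int kq = kqueue();
 *	int fd = open("/tmp/t/watched", O_RDONLY);
 *	struct kevent kev;
 *	EV_SET(&kev, fd, EVFILT_VNODE, EV_ADD | EV_CLEAR,
 *	       NOTE_WRITE | NOTE_DELETE | NOTE_RENAME, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);	register the watch
 *	kevent(kq, NULL, 0, &kev, 1, NULL);	wait for one event
 */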
7a2de9a4
MD
1709/* --------------------------------------------------------------------- */
1710
1711/*
1712 * vnode operations vector used for files stored in a tmpfs file system.
1713 */
1714struct vop_ops tmpfs_vnode_vops = {
1715 .vop_default = vop_defaultop,
1716 .vop_getpages = vop_stdgetpages,
1717 .vop_putpages = vop_stdputpages,
1718 .vop_ncreate = tmpfs_ncreate,
1719 .vop_nresolve = tmpfs_nresolve,
1720 .vop_nlookupdotdot = tmpfs_nlookupdotdot,
1721 .vop_nmknod = tmpfs_nmknod,
1722 .vop_open = tmpfs_open,
1723 .vop_close = tmpfs_close,
1724 .vop_access = tmpfs_access,
1725 .vop_getattr = tmpfs_getattr,
1726 .vop_setattr = tmpfs_setattr,
1727 .vop_read = tmpfs_read,
1728 .vop_write = tmpfs_write,
1729 .vop_fsync = tmpfs_fsync,
d22d7da4 1730 .vop_mountctl = tmpfs_mountctl,
7a2de9a4
MD
1731 .vop_nremove = tmpfs_nremove,
1732 .vop_nlink = tmpfs_nlink,
1733 .vop_nrename = tmpfs_nrename,
1734 .vop_nmkdir = tmpfs_nmkdir,
1735 .vop_nrmdir = tmpfs_nrmdir,
1736 .vop_nsymlink = tmpfs_nsymlink,
1737 .vop_readdir = tmpfs_readdir,
1738 .vop_readlink = tmpfs_readlink,
1739 .vop_inactive = tmpfs_inactive,
1740 .vop_reclaim = tmpfs_reclaim,
1741 .vop_print = tmpfs_print,
1742 .vop_pathconf = tmpfs_pathconf,
9fc94b5f 1743 .vop_bmap = tmpfs_bmap,
7a2de9a4
MD
1744 .vop_strategy = tmpfs_strategy,
1745 .vop_advlock = tmpfs_advlock,
80ae59d7 1746 .vop_kqfilter = tmpfs_kqfilter
7a2de9a4 1747};