kernel - Adjust UFS and HAMMER to use uiomovebp()
[dragonfly.git] sys/vfs/tmpfs/tmpfs_vnops.c
1/*-
2 * Copyright (c) 2005, 2006 The NetBSD Foundation, Inc.
3 * All rights reserved.
4 *
5 * This code is derived from software contributed to The NetBSD Foundation
6 * by Julio M. Merino Vidal, developed as part of Google's Summer of Code
7 * 2005 program.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
19 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
20 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
21 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
22 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28 * POSSIBILITY OF SUCH DAMAGE.
29 *
30 * $NetBSD: tmpfs_vnops.c,v 1.39 2007/07/23 15:41:01 jmmv Exp $
31 */
32
33/*
34 * tmpfs vnode interface.
35 */
36
37#include <sys/kernel.h>
38#include <sys/kern_syscall.h>
39#include <sys/param.h>
40#include <sys/fcntl.h>
41#include <sys/lockf.h>
42#include <sys/priv.h>
43#include <sys/proc.h>
44#include <sys/resourcevar.h>
45#include <sys/sched.h>
46#include <sys/stat.h>
47#include <sys/systm.h>
48#include <sys/unistd.h>
49#include <sys/vfsops.h>
50#include <sys/vnode.h>
51#include <sys/mountctl.h>
52
53#include <vm/vm.h>
54#include <vm/vm_object.h>
55#include <vm/vm_page.h>
56#include <vm/vm_pager.h>
57#include <vm/swap_pager.h>
58
59#include <sys/buf2.h>
60
61#include <vfs/fifofs/fifo.h>
62#include <vfs/tmpfs/tmpfs_vnops.h>
63#include <vfs/tmpfs/tmpfs.h>
64
65MALLOC_DECLARE(M_TMPFS);
66
67static void tmpfs_strategy_done(struct bio *bio);
68
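/*
 * Helper: post kqueue events against a vnode.  Callers accumulate
 * NOTE_* flags (NOTE_WRITE, NOTE_EXTEND, NOTE_ATTRIB, ...) and this
 * only issues the KNOTE when at least one flag is actually set.
 */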
69static __inline
70void
71tmpfs_knote(struct vnode *vp, int flags)
72{
73 if (flags)
74 KNOTE(&vp->v_pollinfo.vpi_kqinfo.ki_note, flags);
75}
76
77
78/* --------------------------------------------------------------------- */
79
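/*
 * tmpfs_nresolve()
 *
 * Resolve a name within a tmpfs directory.  On a hit the node's vnode
 * is allocated and attached to the namecache entry; a miss is recorded
 * as a negative cache entry so repeated lookups stay cheap.
 */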
80static int
81tmpfs_nresolve(struct vop_nresolve_args *v)
82{
83 struct vnode *dvp = v->a_dvp;
84 struct vnode *vp = NULL;
85 struct namecache *ncp = v->a_nch->ncp;
86 struct tmpfs_node *tnode;
87
88 int error;
89 struct tmpfs_dirent *de;
90 struct tmpfs_node *dnode;
91
92 dnode = VP_TO_TMPFS_DIR(dvp);
93
94 de = tmpfs_dir_lookup(dnode, NULL, ncp);
95 if (de == NULL) {
96 error = ENOENT;
97 } else {
98 /*
99 * Allocate a vnode for the node we found.
100 */
101 tnode = de->td_node;
102 error = tmpfs_alloc_vp(dvp->v_mount, tnode,
103 LK_EXCLUSIVE | LK_RETRY, &vp);
104 if (error)
105 goto out;
106 KKASSERT(vp);
107 }
108
109out:
110 /*
111 * Store the result of this lookup in the cache. Avoid this if the
112 * request was for creation, as it does not improve timings on
113 * empirical tests.
114 */
115 if (vp) {
116 vn_unlock(vp);
117 cache_setvp(v->a_nch, vp);
118 vrele(vp);
119 } else if (error == ENOENT) {
120 cache_setvp(v->a_nch, NULL);
121 }
122 return error;
123}
124
125static int
126tmpfs_nlookupdotdot(struct vop_nlookupdotdot_args *v)
127{
128 struct vnode *dvp = v->a_dvp;
129 struct vnode **vpp = v->a_vpp;
130 struct tmpfs_node *dnode = VP_TO_TMPFS_NODE(dvp);
131 struct ucred *cred = v->a_cred;
132 int error;
133
134 *vpp = NULL;
135 /* Check accessibility of requested node as a first step. */
136 error = VOP_ACCESS(dvp, VEXEC, cred);
137 if (error != 0)
138 return error;
139
140 if (dnode->tn_dir.tn_parent != NULL) {
141 /* Allocate a new vnode on the matching entry. */
142 error = tmpfs_alloc_vp(dvp->v_mount, dnode->tn_dir.tn_parent,
143 LK_EXCLUSIVE | LK_RETRY, vpp);
144
145 if (*vpp)
146 vn_unlock(*vpp);
147 }
148
149 return (*vpp == NULL) ? ENOENT : 0;
150}
151
152/* --------------------------------------------------------------------- */
153
154static int
155tmpfs_ncreate(struct vop_ncreate_args *v)
156{
157 struct vnode *dvp = v->a_dvp;
158 struct vnode **vpp = v->a_vpp;
159 struct namecache *ncp = v->a_nch->ncp;
160 struct vattr *vap = v->a_vap;
161 struct ucred *cred = v->a_cred;
162 int error;
163
164 KKASSERT(vap->va_type == VREG || vap->va_type == VSOCK);
165
166 error = tmpfs_alloc_file(dvp, vpp, vap, ncp, cred, NULL);
167 if (error == 0) {
168 cache_setunresolved(v->a_nch);
169 cache_setvp(v->a_nch, *vpp);
170 tmpfs_knote(dvp, NOTE_WRITE);
171 }
172
173 return error;
174}
175/* --------------------------------------------------------------------- */
176
177static int
178tmpfs_nmknod(struct vop_nmknod_args *v)
179{
180 struct vnode *dvp = v->a_dvp;
181 struct vnode **vpp = v->a_vpp;
182 struct namecache *ncp = v->a_nch->ncp;
183 struct vattr *vap = v->a_vap;
184 struct ucred *cred = v->a_cred;
185 int error;
186
187 if (vap->va_type != VBLK && vap->va_type != VCHR &&
188 vap->va_type != VFIFO)
189 return EINVAL;
190
191 error = tmpfs_alloc_file(dvp, vpp, vap, ncp, cred, NULL);
192 if (error == 0) {
193 cache_setunresolved(v->a_nch);
194 cache_setvp(v->a_nch, *vpp);
195 tmpfs_knote(dvp, NOTE_WRITE);
196 }
197
198 return error;
199}
200
201/* --------------------------------------------------------------------- */
202
203static int
204tmpfs_open(struct vop_open_args *v)
205{
206 struct vnode *vp = v->a_vp;
207 int mode = v->a_mode;
208
209 int error;
210 struct tmpfs_node *node;
211
212 node = VP_TO_TMPFS_NODE(vp);
213
214 /* The file is still active but all its names have been removed
215 * (e.g. by a "rmdir $(pwd)"). It cannot be opened any more as
216 * it is about to die. */
217 if (node->tn_links < 1)
218 return (ENOENT);
219
220 /* If the file is marked append-only, deny write requests. */
221 if ((node->tn_flags & APPEND) &&
222 (mode & (FWRITE | O_APPEND)) == FWRITE) {
223 error = EPERM;
224 } else {
225 return (vop_stdopen(v));
226 }
227 return error;
228}
229
230/* --------------------------------------------------------------------- */
231
232static int
233tmpfs_close(struct vop_close_args *v)
234{
235 struct vnode *vp = v->a_vp;
236 struct tmpfs_node *node;
237
238 node = VP_TO_TMPFS_NODE(vp);
239
240 if (node->tn_links > 0) {
241 /* Update node times. No need to do it if the node has
242 * been deleted, because it will vanish after we return. */
243 tmpfs_update(vp);
244 }
245
246 return vop_stdclose(v);
247}
248
249/* --------------------------------------------------------------------- */
250
251int
252tmpfs_access(struct vop_access_args *v)
253{
254 struct vnode *vp = v->a_vp;
255 int error;
256 struct tmpfs_node *node;
257
258 node = VP_TO_TMPFS_NODE(vp);
259
260 switch (vp->v_type) {
261 case VDIR:
262 /* FALLTHROUGH */
263 case VLNK:
264 /* FALLTHROUGH */
265 case VREG:
266 if ((v->a_mode & VWRITE) && (vp->v_mount->mnt_flag & MNT_RDONLY)) {
267 error = EROFS;
268 goto out;
269 }
270 break;
271
272 case VBLK:
273 /* FALLTHROUGH */
274 case VCHR:
275 /* FALLTHROUGH */
276 case VSOCK:
277 /* FALLTHROUGH */
278 case VFIFO:
279 break;
280
281 default:
282 error = EINVAL;
283 goto out;
284 }
285
286 if ((v->a_mode & VWRITE) && (node->tn_flags & IMMUTABLE)) {
287 error = EPERM;
288 goto out;
289 }
290
291 error = vop_helper_access(v, node->tn_uid, node->tn_gid, node->tn_mode, 0);
292
293out:
294
295 return error;
296}
297
298/* --------------------------------------------------------------------- */
299
300int
301tmpfs_getattr(struct vop_getattr_args *v)
302{
303 struct vnode *vp = v->a_vp;
304 struct vattr *vap = v->a_vap;
305 struct tmpfs_node *node;
306
307 node = VP_TO_TMPFS_NODE(vp);
308
309 lwkt_gettoken(&vp->v_mount->mnt_token);
310 tmpfs_update(vp);
311
312 vap->va_type = vp->v_type;
313 vap->va_mode = node->tn_mode;
314 vap->va_nlink = node->tn_links;
315 vap->va_uid = node->tn_uid;
316 vap->va_gid = node->tn_gid;
317 vap->va_fsid = vp->v_mount->mnt_stat.f_fsid.val[0];
318 vap->va_fileid = node->tn_id;
319 vap->va_size = node->tn_size;
320 vap->va_blocksize = PAGE_SIZE;
321 vap->va_atime.tv_sec = node->tn_atime;
322 vap->va_atime.tv_nsec = node->tn_atimensec;
323 vap->va_mtime.tv_sec = node->tn_mtime;
324 vap->va_mtime.tv_nsec = node->tn_mtimensec;
325 vap->va_ctime.tv_sec = node->tn_ctime;
326 vap->va_ctime.tv_nsec = node->tn_ctimensec;
327 vap->va_gen = node->tn_gen;
328 vap->va_flags = node->tn_flags;
329 if (vp->v_type == VBLK || vp->v_type == VCHR)
330 {
331 vap->va_rmajor = umajor(node->tn_rdev);
332 vap->va_rminor = uminor(node->tn_rdev);
333 }
334 vap->va_bytes = round_page(node->tn_size);
335 vap->va_filerev = 0;
336
337 lwkt_reltoken(&vp->v_mount->mnt_token);
338
339 return 0;
340}
341
342/* --------------------------------------------------------------------- */
343
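/*
 * tmpfs_setattr()
 *
 * Apply the attributes supplied in *vap one group at a time (flags,
 * size, ownership, mode, timestamps), stopping at the first error and
 * accumulating the matching kqueue NOTE_* flags along the way.
 */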
344int
345tmpfs_setattr(struct vop_setattr_args *v)
346{
347 struct vnode *vp = v->a_vp;
348 struct vattr *vap = v->a_vap;
349 struct ucred *cred = v->a_cred;
350 struct tmpfs_node *node = VP_TO_TMPFS_NODE(vp);
351 int error = 0;
352 int kflags = 0;
353
354 if (error == 0 && (vap->va_flags != VNOVAL)) {
355 error = tmpfs_chflags(vp, vap->va_flags, cred);
356 kflags |= NOTE_ATTRIB;
357 }
358
359 if (error == 0 && (vap->va_size != VNOVAL)) {
360 if (vap->va_size > node->tn_size)
361 kflags |= NOTE_WRITE | NOTE_EXTEND;
362 else
363 kflags |= NOTE_WRITE;
364 error = tmpfs_chsize(vp, vap->va_size, cred);
365 }
366
367 if (error == 0 && (vap->va_uid != (uid_t)VNOVAL ||
368 vap->va_gid != (gid_t)VNOVAL)) {
369 error = tmpfs_chown(vp, vap->va_uid, vap->va_gid, cred);
370 kflags |= NOTE_ATTRIB;
371 }
372
373 if (error == 0 && (vap->va_mode != (mode_t)VNOVAL)) {
374 error = tmpfs_chmod(vp, vap->va_mode, cred);
375 kflags |= NOTE_ATTRIB;
376 }
377
378 if (error == 0 && ((vap->va_atime.tv_sec != VNOVAL &&
379 vap->va_atime.tv_nsec != VNOVAL) ||
380 (vap->va_mtime.tv_sec != VNOVAL &&
381 vap->va_mtime.tv_nsec != VNOVAL) )) {
382 error = tmpfs_chtimes(vp, &vap->va_atime, &vap->va_mtime,
383 vap->va_vaflags, cred);
384 kflags |= NOTE_ATTRIB;
385 }
386
387 /* Update the node times. We give preference to the error codes
388 * generated by this function rather than the ones that may arise
389 * from tmpfs_update. */
390 tmpfs_update(vp);
391 tmpfs_knote(vp, kflags);
392
393 return error;
394}
395
396/* --------------------------------------------------------------------- */
397
398/*
399 * fsync is usually a NOP, but we must take action when unmounting or
400 * when recycling.
401 */
402static int
403tmpfs_fsync(struct vop_fsync_args *v)
404{
405 struct tmpfs_node *node;
406 struct vnode *vp = v->a_vp;
407
408 node = VP_TO_TMPFS_NODE(vp);
409
410 tmpfs_update(vp);
411 if (vp->v_type == VREG) {
412 if (vp->v_flag & VRECLAIMED) {
413 if (node->tn_links == 0)
414 tmpfs_truncate(vp, 0);
415 else
416 vfsync(v->a_vp, v->a_waitfor, 1, NULL, NULL);
417 }
418 }
419 return 0;
420}
421
422/* --------------------------------------------------------------------- */
423
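/*
 * tmpfs_read()
 *
 * Read through the buffer cache: each pass locates the block-aligned
 * buffer via getcacheblk() (falling back to bread()), then copies out
 * of it with uiomovebp().  BSIZE/BMASK are the tmpfs buffer-size
 * constants used throughout this file.
 */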
424static int
425tmpfs_read (struct vop_read_args *ap)
426{
427 struct buf *bp;
428 struct vnode *vp = ap->a_vp;
429 struct uio *uio = ap->a_uio;
430 struct tmpfs_node *node;
431 off_t base_offset;
432 size_t offset;
433 size_t len;
434 int error;
435
436 error = 0;
437 if (uio->uio_resid == 0) {
438 return error;
439 }
440
441 node = VP_TO_TMPFS_NODE(vp);
442
443 if (uio->uio_offset < 0)
444 return (EINVAL);
445 if (vp->v_type != VREG)
446 return (EINVAL);
447
448 while (uio->uio_resid > 0 && uio->uio_offset < node->tn_size) {
449 /*
450 * Use buffer cache I/O (via tmpfs_strategy)
451 */
452 offset = (size_t)uio->uio_offset & BMASK;
453 base_offset = (off_t)uio->uio_offset - offset;
454 bp = getcacheblk(vp, base_offset, BSIZE, 0);
455 if (bp == NULL) {
456 lwkt_gettoken(&vp->v_mount->mnt_token);
457 error = bread(vp, base_offset, BSIZE, &bp);
458 if (error) {
459 brelse(bp);
460 lwkt_reltoken(&vp->v_mount->mnt_token);
461 kprintf("tmpfs_read bread error %d\n", error);
462 break;
463 }
464 lwkt_reltoken(&vp->v_mount->mnt_token);
465 }
466
467 /*
468 * Figure out how many bytes we can actually copy this loop.
469 */
470 len = BSIZE - offset;
471 if (len > uio->uio_resid)
472 len = uio->uio_resid;
473 if (len > node->tn_size - uio->uio_offset)
474 len = (size_t)(node->tn_size - uio->uio_offset);
475
476 error = uiomovebp(bp, (char *)bp->b_data + offset, len, uio);
477 bqrelse(bp);
478 if (error) {
479 kprintf("tmpfs_read uiomove error %d\n", error);
480 break;
481 }
482 }
483
484 TMPFS_NODE_LOCK(node);
485 node->tn_status |= TMPFS_NODE_ACCESSED;
486 TMPFS_NODE_UNLOCK(node);
487
488 return(error);
489}
490
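/*
 * tmpfs_write()
 *
 * Write through the buffer cache.  The backing buffer is always
 * bread() in first (so stale kernel memory is never exposed), the uio
 * data is copied in with uiomovebp(), and the buffer is released with
 * buwrite() (or bawrite() for IO_ASYNC pageout requests) so the dirty
 * state lives on the underlying VM pages rather than in the buffer
 * cache.
 */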
491static int
492tmpfs_write (struct vop_write_args *ap)
493{
494 struct buf *bp;
495 struct vnode *vp = ap->a_vp;
496 struct uio *uio = ap->a_uio;
497 struct thread *td = uio->uio_td;
498 struct tmpfs_node *node;
499 boolean_t extended;
500 off_t oldsize;
501 int error;
502 off_t base_offset;
503 size_t offset;
504 size_t len;
505 struct rlimit limit;
506 int trivial = 0;
507 int kflags = 0;
508
509 error = 0;
510 if (uio->uio_resid == 0) {
511 return error;
512 }
513
514 node = VP_TO_TMPFS_NODE(vp);
515
516 if (vp->v_type != VREG)
517 return (EINVAL);
518
519 lwkt_gettoken(&vp->v_mount->mnt_token);
520
521 oldsize = node->tn_size;
522 if (ap->a_ioflag & IO_APPEND)
523 uio->uio_offset = node->tn_size;
524
525 /*
526 * Check for illegal write offsets.
527 */
528 if (uio->uio_offset + uio->uio_resid >
529 VFS_TO_TMPFS(vp->v_mount)->tm_maxfilesize) {
530 lwkt_reltoken(&vp->v_mount->mnt_token);
531 return (EFBIG);
532 }
533
534 if (vp->v_type == VREG && td != NULL) {
535 error = kern_getrlimit(RLIMIT_FSIZE, &limit);
536 if (error != 0) {
537 lwkt_reltoken(&vp->v_mount->mnt_token);
538 return error;
539 }
540 if (uio->uio_offset + uio->uio_resid > limit.rlim_cur) {
541 ksignal(td->td_proc, SIGXFSZ);
542 lwkt_reltoken(&vp->v_mount->mnt_token);
543 return (EFBIG);
544 }
545 }
546
547
548 /*
549 * Extend the file's size if necessary
550 */
551 extended = ((uio->uio_offset + uio->uio_resid) > node->tn_size);
552
553 while (uio->uio_resid > 0) {
554 /*
555 * Use buffer cache I/O (via tmpfs_strategy)
556 */
557 offset = (size_t)uio->uio_offset & BMASK;
558 base_offset = (off_t)uio->uio_offset - offset;
559 len = BSIZE - offset;
560 if (len > uio->uio_resid)
561 len = uio->uio_resid;
562
563 if ((uio->uio_offset + len) > node->tn_size) {
564 trivial = (uio->uio_offset <= node->tn_size);
565 error = tmpfs_reg_resize(vp, uio->uio_offset + len, trivial);
566 if (error)
567 break;
568 }
569
570 /*
571 * Read to fill in any gaps. Theoretically we could
572 * optimize this if the write covers the entire buffer
573 * and is not a UIO_NOCOPY write, however this can lead
574 * to a security violation exposing random kernel memory
575 * (whatever junk was in the backing VM pages before).
576 *
577 * So just use bread() to do the right thing.
578 */
579 error = bread(vp, base_offset, BSIZE, &bp);
580 error = uiomovebp(bp, (char *)bp->b_data + offset, len, uio);
581 if (error) {
582 kprintf("tmpfs_write uiomove error %d\n", error);
583 brelse(bp);
584 break;
585 }
586
587 if (uio->uio_offset > node->tn_size) {
588 node->tn_size = uio->uio_offset;
589 kflags |= NOTE_EXTEND;
590 }
591 kflags |= NOTE_WRITE;
592
593 /*
594 * Always try to flush the page if the request is coming
595 * from the pageout daemon (IO_ASYNC), else buwrite() the
596 * buffer.
597 *
598 * buwrite() dirties the underlying VM pages instead of
599 * dirtying the buffer, releasing the buffer as a clean
600 * buffer. This allows tmpfs to use essentially all
601 * available memory to cache file data. If we used bdwrite()
602 * the buffer cache would wind up flushing the data to
603 * swap too quickly.
604 */
605 bp->b_flags |= B_AGE;
606 if (ap->a_ioflag & IO_ASYNC) {
607 bawrite(bp);
608 } else {
609 buwrite(bp);
610 }
611
612 if (bp->b_error) {
613 kprintf("tmpfs_write bwrite error %d\n", bp->b_error);
614 break;
615 }
616 }
617
618 if (error) {
619 if (extended) {
620 (void)tmpfs_reg_resize(vp, oldsize, trivial);
621 kflags &= ~NOTE_EXTEND;
622 }
623 goto done;
624 }
625
626 /*
627 * Currently we don't set the mtime on files modified via mmap()
628 * because we can't tell the difference between those modifications
629 * and an attempt by the pageout daemon to flush tmpfs pages to
630 * swap.
631 *
632 * This is because in order to defer flushes as long as possible
633 * buwrite() works by marking the underlying VM pages dirty in
634 * order to be able to dispose of the buffer cache buffer without
635 * flushing it.
636 */
637 TMPFS_NODE_LOCK(node);
638 if (uio->uio_segflg != UIO_NOCOPY)
639 node->tn_status |= TMPFS_NODE_ACCESSED | TMPFS_NODE_MODIFIED;
640 if (extended)
641 node->tn_status |= TMPFS_NODE_CHANGED;
642
643 if (node->tn_mode & (S_ISUID | S_ISGID)) {
644 if (priv_check_cred(ap->a_cred, PRIV_VFS_RETAINSUGID, 0))
645 node->tn_mode &= ~(S_ISUID | S_ISGID);
646 }
647 TMPFS_NODE_UNLOCK(node);
648done:
649
650 tmpfs_knote(vp, kflags);
651
652
653 lwkt_reltoken(&vp->v_mount->mnt_token);
654 return(error);
655}
656
657static int
658tmpfs_advlock (struct vop_advlock_args *ap)
659{
660 struct tmpfs_node *node;
661 struct vnode *vp = ap->a_vp;
662
663 node = VP_TO_TMPFS_NODE(vp);
664
665 return (lf_advlock(ap, &node->tn_advlock, node->tn_size));
666}
667
668/*
669 * The strategy function is typically only called when memory pressure
670 * forces the system to attempt to pageout pages. It can also be called
671 * by [n]vtruncbuf() when a truncation cuts a page in half. Normal write
672 * operations are normally absorbed by buwrite() and do not pass through here.
673 */
674static int
675tmpfs_strategy(struct vop_strategy_args *ap)
676{
677 struct bio *bio = ap->a_bio;
678 struct bio *nbio;
679 struct buf *bp = bio->bio_buf;
680 struct vnode *vp = ap->a_vp;
681 struct tmpfs_node *node;
682 vm_object_t uobj;
683 vm_page_t m;
684 int i;
685
686 if (vp->v_type != VREG) {
687 bp->b_resid = bp->b_bcount;
688 bp->b_flags |= B_ERROR | B_INVAL;
689 bp->b_error = EINVAL;
690 biodone(bio);
691 return(0);
692 }
693
694 lwkt_gettoken(&vp->v_mount->mnt_token);
695 node = VP_TO_TMPFS_NODE(vp);
696
697 uobj = node->tn_reg.tn_aobj;
698
699 /*
700 * Don't bother flushing to swap if there is no swap, just
701 * ensure that the pages are marked as needing a commit (still).
702 */
703 if (bp->b_cmd == BUF_CMD_WRITE && vm_swap_size == 0) {
704 for (i = 0; i < bp->b_xio.xio_npages; ++i) {
705 m = bp->b_xio.xio_pages[i];
706 vm_page_need_commit(m);
707 }
708 bp->b_resid = 0;
709 bp->b_error = 0;
710 biodone(bio);
711 } else {
712 nbio = push_bio(bio);
713 nbio->bio_done = tmpfs_strategy_done;
714 nbio->bio_offset = bio->bio_offset;
715 swap_pager_strategy(uobj, nbio);
716 }
717
718 lwkt_reltoken(&vp->v_mount->mnt_token);
719 return 0;
720}
721
722/*
723 * If we were unable to commit the pages to swap make sure they are marked
724 * as needing a commit (again). If we were, clear the flag to allow the
725 * pages to be freed.
726 */
727static void
728tmpfs_strategy_done(struct bio *bio)
729{
730 struct buf *bp;
731 vm_page_t m;
732 int i;
733
734 bp = bio->bio_buf;
735
736 if (bp->b_flags & B_ERROR) {
737 bp->b_flags &= ~B_ERROR;
738 bp->b_error = 0;
739 bp->b_resid = 0;
740 for (i = 0; i < bp->b_xio.xio_npages; ++i) {
741 m = bp->b_xio.xio_pages[i];
742 vm_page_need_commit(m);
743 }
744 } else {
745 for (i = 0; i < bp->b_xio.xio_npages; ++i) {
746 m = bp->b_xio.xio_pages[i];
747 vm_page_clear_commit(m);
748 }
749 }
750 bio = pop_bio(bio);
751 biodone(bio);
752}
753
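/*
 * tmpfs_bmap()
 *
 * Logical offsets map 1:1 onto the backing object, so simply echo the
 * requested offset back and report no read-ahead/read-behind clustering.
 */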
754static int
755tmpfs_bmap(struct vop_bmap_args *ap)
756{
757 if (ap->a_doffsetp != NULL)
758 *ap->a_doffsetp = ap->a_loffset;
759 if (ap->a_runp != NULL)
760 *ap->a_runp = 0;
761 if (ap->a_runb != NULL)
762 *ap->a_runb = 0;
763
764 return 0;
765}
766
767/* --------------------------------------------------------------------- */
768
769static int
770tmpfs_nremove(struct vop_nremove_args *v)
771{
772 struct vnode *dvp = v->a_dvp;
773 struct namecache *ncp = v->a_nch->ncp;
774 struct vnode *vp;
775 int error;
776 struct tmpfs_dirent *de;
777 struct tmpfs_mount *tmp;
778 struct tmpfs_node *dnode;
779 struct tmpfs_node *node;
780
781 /*
782 * We have to acquire the vp from v->a_nch because we will likely
783 * unresolve the namecache entry, and a vrele/vput is needed to
784 * trigger the tmpfs_inactive/tmpfs_reclaim sequence.
785 *
786 * We have to use vget to clear any inactive state on the vnode,
787 * otherwise the vnode may remain inactive and thus tmpfs_inactive
788 * will not get called when we release it.
789 */
790 error = cache_vget(v->a_nch, v->a_cred, LK_SHARED, &vp);
791 KKASSERT(error == 0);
792 vn_unlock(vp);
793
794 if (vp->v_type == VDIR) {
795 error = EISDIR;
796 goto out;
797 }
798
799 dnode = VP_TO_TMPFS_DIR(dvp);
800 node = VP_TO_TMPFS_NODE(vp);
801 tmp = VFS_TO_TMPFS(vp->v_mount);
802 de = tmpfs_dir_lookup(dnode, node, ncp);
803 if (de == NULL) {
804 error = ENOENT;
805 goto out;
806 }
807
808 /* Files marked as immutable or append-only cannot be deleted. */
809 if ((node->tn_flags & (IMMUTABLE | APPEND | NOUNLINK)) ||
810 (dnode->tn_flags & APPEND)) {
811 error = EPERM;
812 goto out;
813 }
814
815 /* Remove the entry from the directory; as it is a file, we do not
816 * have to change the number of hard links of the directory. */
817 tmpfs_dir_detach(dnode, de);
818
819 /* Free the directory entry we just deleted. Note that the node
820 * referred by it will not be removed until the vnode is really
821 * reclaimed. */
822 tmpfs_free_dirent(tmp, de);
823
824 if (node->tn_links > 0) {
825 TMPFS_NODE_LOCK(node);
826 node->tn_status |= TMPFS_NODE_ACCESSED | TMPFS_NODE_CHANGED | \
827 TMPFS_NODE_MODIFIED;
828 TMPFS_NODE_UNLOCK(node);
829 }
830
831 cache_setunresolved(v->a_nch);
832 cache_setvp(v->a_nch, NULL);
833 tmpfs_knote(vp, NOTE_DELETE);
834 /*cache_inval_vp(vp, CINV_DESTROY);*/
835 tmpfs_knote(dvp, NOTE_WRITE);
836 error = 0;
837
838out:
839 vrele(vp);
840
841 return error;
842}
843
844/* --------------------------------------------------------------------- */
845
846static int
847tmpfs_nlink(struct vop_nlink_args *v)
848{
849 struct vnode *dvp = v->a_dvp;
850 struct vnode *vp = v->a_vp;
851 struct namecache *ncp = v->a_nch->ncp;
852 struct tmpfs_dirent *de;
853 struct tmpfs_node *node;
854 struct tmpfs_node *dnode;
855 int error;
856
857 KKASSERT(dvp != vp); /* XXX When can this be false? */
858
859 node = VP_TO_TMPFS_NODE(vp);
860 dnode = VP_TO_TMPFS_NODE(dvp);
861
862 /* XXX: Why aren't the following two tests done by the caller? */
863
864 /* Hard links of directories are forbidden. */
865 if (vp->v_type == VDIR) {
866 error = EPERM;
867 goto out;
868 }
869
870 /* Cannot create cross-device links. */
871 if (dvp->v_mount != vp->v_mount) {
872 error = EXDEV;
873 goto out;
874 }
875
876 /* Ensure that we do not overflow the maximum number of links imposed
877 * by the system. */
878 KKASSERT(node->tn_links <= LINK_MAX);
879 if (node->tn_links == LINK_MAX) {
880 error = EMLINK;
881 goto out;
882 }
883
884 /* We cannot create links of files marked immutable or append-only. */
885 if (node->tn_flags & (IMMUTABLE | APPEND)) {
886 error = EPERM;
887 goto out;
888 }
889
890 /* Allocate a new directory entry to represent the node. */
891 error = tmpfs_alloc_dirent(VFS_TO_TMPFS(vp->v_mount), node,
892 ncp->nc_name, ncp->nc_nlen, &de);
893 if (error != 0)
894 goto out;
895
896 /* Insert the new directory entry into the appropriate directory. */
897 tmpfs_dir_attach(dnode, de);
898
899 /* vp link count has changed, so update node times. */
900
901 TMPFS_NODE_LOCK(node);
902 node->tn_status |= TMPFS_NODE_CHANGED;
903 TMPFS_NODE_UNLOCK(node);
904 tmpfs_update(vp);
905
906 tmpfs_knote(vp, NOTE_LINK);
907 cache_setunresolved(v->a_nch);
908 cache_setvp(v->a_nch, vp);
909 tmpfs_knote(dvp, NOTE_WRITE);
910 error = 0;
911
912out:
913 return error;
914}
915
916/* --------------------------------------------------------------------- */
917
918static int
919tmpfs_nrename(struct vop_nrename_args *v)
920{
921 struct vnode *fdvp = v->a_fdvp;
922 struct namecache *fncp = v->a_fnch->ncp;
923 struct vnode *fvp = fncp->nc_vp;
924 struct vnode *tdvp = v->a_tdvp;
925 struct namecache *tncp = v->a_tnch->ncp;
926 struct vnode *tvp;
927 struct tmpfs_dirent *de, *tde;
928 struct tmpfs_mount *tmp;
929 struct tmpfs_node *fdnode;
930 struct tmpfs_node *fnode;
931 struct tmpfs_node *tnode;
932 struct tmpfs_node *tdnode;
933 char *newname;
934 char *oldname;
935 int error;
936
937 /*
938 * Because tvp can get overwritten we have to vget it instead of
939 * just vref or use it, otherwise its VINACTIVE flag may not get
940 * cleared and the node won't get destroyed.
941 */
942 error = cache_vget(v->a_tnch, v->a_cred, LK_SHARED, &tvp);
943 if (error == 0) {
944 tnode = VP_TO_TMPFS_NODE(tvp);
945 vn_unlock(tvp);
946 } else {
947 tnode = NULL;
948 }
949
950 /* Disallow cross-device renames.
951 * XXX Why isn't this done by the caller? */
952 if (fvp->v_mount != tdvp->v_mount ||
953 (tvp != NULL && fvp->v_mount != tvp->v_mount)) {
954 error = EXDEV;
955 goto out;
956 }
957
958 tmp = VFS_TO_TMPFS(tdvp->v_mount);
959 tdnode = VP_TO_TMPFS_DIR(tdvp);
960
961 /* If source and target are the same file, there is nothing to do. */
962 if (fvp == tvp) {
963 error = 0;
964 goto out;
965 }
966
967 fdnode = VP_TO_TMPFS_DIR(fdvp);
968 fnode = VP_TO_TMPFS_NODE(fvp);
969 de = tmpfs_dir_lookup(fdnode, fnode, fncp);
970
971 /* Avoid manipulating '.' and '..' entries. */
972 if (de == NULL) {
973 error = ENOENT;
974 goto out_locked;
975 }
976 KKASSERT(de->td_node == fnode);
977
978 /*
979 * If replacing an entry in the target directory and that entry
980 * is a directory, it must be empty.
981 *
982 * Kern_rename guarantees the destination to be a directory
983 * if the source is one (it does?).
984 */
985 if (tvp != NULL) {
986 KKASSERT(tnode != NULL);
987
988 if ((tnode->tn_flags & (NOUNLINK | IMMUTABLE | APPEND)) ||
989 (tdnode->tn_flags & (APPEND | IMMUTABLE))) {
990 error = EPERM;
991 goto out_locked;
992 }
993
994 if (fnode->tn_type == VDIR && tnode->tn_type == VDIR) {
995 if (tnode->tn_size > 0) {
996 error = ENOTEMPTY;
997 goto out_locked;
998 }
999 } else if (fnode->tn_type == VDIR && tnode->tn_type != VDIR) {
1000 error = ENOTDIR;
1001 goto out_locked;
1002 } else if (fnode->tn_type != VDIR && tnode->tn_type == VDIR) {
1003 error = EISDIR;
1004 goto out_locked;
1005 } else {
1006 KKASSERT(fnode->tn_type != VDIR &&
1007 tnode->tn_type != VDIR);
1008 }
1009 }
1010
1011 if ((fnode->tn_flags & (NOUNLINK | IMMUTABLE | APPEND)) ||
1012 (fdnode->tn_flags & (APPEND | IMMUTABLE))) {
1013 error = EPERM;
1014 goto out_locked;
1015 }
1016
1017 /*
1018 * Ensure that we have enough memory to hold the new name, if it
1019 * has to be changed.
1020 */
1021 if (fncp->nc_nlen != tncp->nc_nlen ||
1022 bcmp(fncp->nc_name, tncp->nc_name, fncp->nc_nlen) != 0) {
1023 newname = kmalloc(tncp->nc_nlen + 1, tmp->tm_name_zone,
1024 M_WAITOK | M_NULLOK);
1025 if (newname == NULL) {
1026 error = ENOSPC;
1027 goto out_locked;
1028 }
1029 bcopy(tncp->nc_name, newname, tncp->nc_nlen);
1030 newname[tncp->nc_nlen] = '\0';
1031 } else {
1032 newname = NULL;
1033 }
1034
1035 /*
1036 * Unlink entry from source directory. Note that the kernel has
1037 * already checked for illegal recursion cases (renaming a directory
1038 * into a subdirectory of itself).
1039 */
1040 if (fdnode != tdnode)
1041 tmpfs_dir_detach(fdnode, de);
1042 else {
1043 RB_REMOVE(tmpfs_dirtree, &fdnode->tn_dir.tn_dirtree, de);
1044 }
1045
1046 /*
1047 * Handle any name change. Swap with newname, we will
1048 * deallocate it at the end.
1049 */
1050 if (newname != NULL) {
1051#if 0
1052 TMPFS_NODE_LOCK(fnode);
1053 fnode->tn_status |= TMPFS_NODE_CHANGED;
1054 TMPFS_NODE_UNLOCK(fnode);
1055#endif
1056 oldname = de->td_name;
1057 de->td_name = newname;
1058 de->td_namelen = (uint16_t)tncp->nc_nlen;
1059 newname = oldname;
1060 }
1061
1062 /*
1063 * If we are overwriting an entry, we have to remove the old one
1064 * from the target directory.
1065 */
1066 if (tvp != NULL) {
1067 /* Remove the old entry from the target directory. */
1068 tde = tmpfs_dir_lookup(tdnode, tnode, tncp);
1069 tmpfs_dir_detach(tdnode, tde);
1070 tmpfs_knote(tdnode->tn_vnode, NOTE_DELETE);
1071
1072 /*
1073 * Free the directory entry we just deleted. Note that the
1074 * node referred by it will not be removed until the vnode is
1075 * really reclaimed.
1076 */
1077 tmpfs_free_dirent(VFS_TO_TMPFS(tvp->v_mount), tde);
1078 /*cache_inval_vp(tvp, CINV_DESTROY);*/
1079 }
1080
1081 /*
1082 * Link entry to target directory. If the entry
1083 * represents a directory move the parent linkage
1084 * as well.
1085 */
1086 if (fdnode != tdnode) {
1087 if (de->td_node->tn_type == VDIR) {
1088 TMPFS_VALIDATE_DIR(fnode);
1089
1090 TMPFS_NODE_LOCK(tdnode);
1091 tdnode->tn_links++;
1092 tdnode->tn_status |= TMPFS_NODE_MODIFIED;
1093 TMPFS_NODE_UNLOCK(tdnode);
1094
1095 TMPFS_NODE_LOCK(fnode);
1096 fnode->tn_dir.tn_parent = tdnode;
1097 fnode->tn_status |= TMPFS_NODE_CHANGED;
1098 TMPFS_NODE_UNLOCK(fnode);
1099
1100 TMPFS_NODE_LOCK(fdnode);
1101 fdnode->tn_links--;
1102 fdnode->tn_status |= TMPFS_NODE_MODIFIED;
1103 TMPFS_NODE_UNLOCK(fdnode);
1104 }
1105 tmpfs_dir_attach(tdnode, de);
1106 } else {
1107 TMPFS_NODE_LOCK(tdnode);
1108 tdnode->tn_status |= TMPFS_NODE_MODIFIED;
1109 RB_INSERT(tmpfs_dirtree, &tdnode->tn_dir.tn_dirtree, de);
1110 TMPFS_NODE_UNLOCK(tdnode);
1111 }
1112
1113 /*
1114 * Finish up
1115 */
1116 if (newname) {
1117 kfree(newname, tmp->tm_name_zone);
1118 newname = NULL;
1119 }
1120 cache_rename(v->a_fnch, v->a_tnch);
1121 tmpfs_knote(v->a_fdvp, NOTE_WRITE);
1122 tmpfs_knote(v->a_tdvp, NOTE_WRITE);
1123 if (fnode->tn_vnode)
1124 tmpfs_knote(fnode->tn_vnode, NOTE_RENAME);
1125 error = 0;
1126
1127out_locked:
1128 ;
1129
1130out:
1131 if (tvp)
1132 vrele(tvp);
1133
1134 return error;
1135}
1136
1137/* --------------------------------------------------------------------- */
1138
1139static int
1140tmpfs_nmkdir(struct vop_nmkdir_args *v)
1141{
1142 struct vnode *dvp = v->a_dvp;
1143 struct vnode **vpp = v->a_vpp;
1144 struct namecache *ncp = v->a_nch->ncp;
1145 struct vattr *vap = v->a_vap;
1146 struct ucred *cred = v->a_cred;
1147 int error;
1148
1149 KKASSERT(vap->va_type == VDIR);
1150
1151 error = tmpfs_alloc_file(dvp, vpp, vap, ncp, cred, NULL);
1152 if (error == 0) {
1153 cache_setunresolved(v->a_nch);
1154 cache_setvp(v->a_nch, *vpp);
1155 tmpfs_knote(dvp, NOTE_WRITE | NOTE_LINK);
1156 }
1157
1158 return error;
1159}
1160
1161/* --------------------------------------------------------------------- */
1162
1163static int
1164tmpfs_nrmdir(struct vop_nrmdir_args *v)
1165{
1166 struct vnode *dvp = v->a_dvp;
1167 struct namecache *ncp = v->a_nch->ncp;
1168 struct vnode *vp;
1169 struct tmpfs_dirent *de;
1170 struct tmpfs_mount *tmp;
1171 struct tmpfs_node *dnode;
1172 struct tmpfs_node *node;
1173 int error;
1174
1175 /*
1176 * We have to acquire the vp from v->a_nch because we will likely
1177 * unresolve the namecache entry, and a vrele/vput is needed to
1178 * trigger the tmpfs_inactive/tmpfs_reclaim sequence.
1179 *
1180 * We have to use vget to clear any inactive state on the vnode,
1181 * otherwise the vnode may remain inactive and thus tmpfs_inactive
1182 * will not get called when we release it.
1183 */
1184 error = cache_vget(v->a_nch, v->a_cred, LK_SHARED, &vp);
1185 KKASSERT(error == 0);
1186 vn_unlock(vp);
1187
1188 /*
1189 * Prevalidate so we don't hit an assertion later
1190 */
1191 if (vp->v_type != VDIR) {
1192 error = ENOTDIR;
1193 goto out;
1194 }
1195
1196 tmp = VFS_TO_TMPFS(dvp->v_mount);
1197 dnode = VP_TO_TMPFS_DIR(dvp);
1198 node = VP_TO_TMPFS_DIR(vp);
1199
1200 /* Directories with more than two entries ('.' and '..') cannot be
1201 * removed. */
1202 if (node->tn_size > 0) {
1203 error = ENOTEMPTY;
1204 goto out;
1205 }
1206
1207 if ((dnode->tn_flags & APPEND)
1208 || (node->tn_flags & (NOUNLINK | IMMUTABLE | APPEND))) {
1209 error = EPERM;
1210 goto out;
1211 }
1212
1213 /* This invariant holds only if we are not trying to remove "..".
1214 * We checked for that above so this is safe now. */
1215 KKASSERT(node->tn_dir.tn_parent == dnode);
1216
1217 /* Get the directory entry associated with node (vp). This was
1218 * filled by tmpfs_lookup while looking up the entry. */
1219 de = tmpfs_dir_lookup(dnode, node, ncp);
1220 KKASSERT(TMPFS_DIRENT_MATCHES(de,
1221 ncp->nc_name,
1222 ncp->nc_nlen));
1223
1224 /* Check flags to see if we are allowed to remove the directory. */
1225 if ((dnode->tn_flags & APPEND) ||
1226 node->tn_flags & (NOUNLINK | IMMUTABLE | APPEND)) {
1227 error = EPERM;
1228 goto out;
1229 }
1230
1231
1232 /* Detach the directory entry from the directory (dnode). */
1233 tmpfs_dir_detach(dnode, de);
1234
1235 /* No vnode should be allocated for this entry from this point */
1236 TMPFS_NODE_LOCK(node);
1237 TMPFS_ASSERT_ELOCKED(node);
1238 TMPFS_NODE_LOCK(dnode);
1239 TMPFS_ASSERT_ELOCKED(dnode);
1240
1241#if 0
1242 /* handled by tmpfs_free_node */
1243 KKASSERT(node->tn_links > 0);
1244 node->tn_links--;
1245 node->tn_dir.tn_parent = NULL;
1246#endif
1247 node->tn_status |= TMPFS_NODE_ACCESSED | TMPFS_NODE_CHANGED | \
1248 TMPFS_NODE_MODIFIED;
1249
1250#if 0
1251 /* handled by tmpfs_free_node */
1252 KKASSERT(dnode->tn_links > 0);
1253 dnode->tn_links--;
1254#endif
1255 dnode->tn_status |= TMPFS_NODE_ACCESSED | \
1256 TMPFS_NODE_CHANGED | TMPFS_NODE_MODIFIED;
1257
1258 TMPFS_NODE_UNLOCK(dnode);
1259 TMPFS_NODE_UNLOCK(node);
1260
1261 /* Free the directory entry we just deleted. Note that the node
1262 * referred by it will not be removed until the vnode is really
1263 * reclaimed. */
1264 tmpfs_free_dirent(tmp, de);
1265
1266 /* Release the deleted vnode (will destroy the node, notify
1267 * interested parties and clean it from the cache). */
1268
1269 TMPFS_NODE_LOCK(dnode);
1270 dnode->tn_status |= TMPFS_NODE_CHANGED;
1271 TMPFS_NODE_UNLOCK(dnode);
1272 tmpfs_update(dvp);
1273
1274 cache_setunresolved(v->a_nch);
1275 cache_setvp(v->a_nch, NULL);
1276 /*cache_inval_vp(vp, CINV_DESTROY);*/
1277 tmpfs_knote(dvp, NOTE_WRITE | NOTE_LINK);
1278 error = 0;
1279
1280out:
1281 vrele(vp);
1282
1283 return error;
1284}
1285
1286/* --------------------------------------------------------------------- */
1287
1288static int
1289tmpfs_nsymlink(struct vop_nsymlink_args *v)
1290{
1291 struct vnode *dvp = v->a_dvp;
1292 struct vnode **vpp = v->a_vpp;
1293 struct namecache *ncp = v->a_nch->ncp;
1294 struct vattr *vap = v->a_vap;
1295 struct ucred *cred = v->a_cred;
1296 char *target = v->a_target;
1297 int error;
1298
1299 vap->va_type = VLNK;
1300 error = tmpfs_alloc_file(dvp, vpp, vap, ncp, cred, target);
1301 if (error == 0) {
1302 tmpfs_knote(*vpp, NOTE_WRITE);
1303 cache_setunresolved(v->a_nch);
1304 cache_setvp(v->a_nch, *vpp);
1305 }
1306
1307 return error;
1308}
1309
1310/* --------------------------------------------------------------------- */
1311
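/*
 * tmpfs_readdir()
 *
 * Emit '.', '..' and then the regular entries from the per-directory
 * RB tree.  When the caller (e.g. NFS) asks for cookies, one cookie is
 * generated per returned entry by replaying the same traversal.
 */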
1312static int
1313tmpfs_readdir(struct vop_readdir_args *v)
1314{
1315 struct vnode *vp = v->a_vp;
1316 struct uio *uio = v->a_uio;
1317 int *eofflag = v->a_eofflag;
1318 off_t **cookies = v->a_cookies;
1319 int *ncookies = v->a_ncookies;
1320 struct tmpfs_mount *tmp;
1321 int error;
1322 off_t startoff;
1323 off_t cnt = 0;
1324 struct tmpfs_node *node;
1325
1326 /* This operation only makes sense on directory nodes. */
1327 if (vp->v_type != VDIR)
1328 return ENOTDIR;
1329
1330 tmp = VFS_TO_TMPFS(vp->v_mount);
1331 node = VP_TO_TMPFS_DIR(vp);
1332 startoff = uio->uio_offset;
1333
1334 if (uio->uio_offset == TMPFS_DIRCOOKIE_DOT) {
1335 error = tmpfs_dir_getdotdent(node, uio);
1336 if (error != 0)
1337 goto outok;
1338 cnt++;
1339 }
1340
1341 if (uio->uio_offset == TMPFS_DIRCOOKIE_DOTDOT) {
1342 error = tmpfs_dir_getdotdotdent(tmp, node, uio);
1343 if (error != 0)
1344 goto outok;
1345 cnt++;
1346 }
1347
1348 error = tmpfs_dir_getdents(node, uio, &cnt);
1349
1350outok:
1351 KKASSERT(error >= -1);
1352
1353 if (error == -1)
1354 error = 0;
1355
1356 if (eofflag != NULL)
1357 *eofflag =
1358 (error == 0 && uio->uio_offset == TMPFS_DIRCOOKIE_EOF);
1359
1360 /* Update NFS-related variables. */
1361 if (error == 0 && cookies != NULL && ncookies != NULL) {
1362 off_t i;
1363 off_t off = startoff;
1364 struct tmpfs_dirent *de = NULL;
1365
1366 *ncookies = cnt;
1367 *cookies = kmalloc(cnt * sizeof(off_t), M_TEMP, M_WAITOK);
1368
1369 for (i = 0; i < cnt; i++) {
1370 KKASSERT(off != TMPFS_DIRCOOKIE_EOF);
1371 if (off == TMPFS_DIRCOOKIE_DOT) {
1372 off = TMPFS_DIRCOOKIE_DOTDOT;
1373 } else {
1374 if (off == TMPFS_DIRCOOKIE_DOTDOT) {
1375 de = RB_MIN(tmpfs_dirtree, &node->tn_dir.tn_dirtree);
1376 } else if (de != NULL) {
1377 de = RB_NEXT(tmpfs_dirtree, &node->tn_dir.tn_dirtree, de);
1378 } else {
1379 de = tmpfs_dir_lookupbycookie(node,
1380 off);
1381 KKASSERT(de != NULL);
1382 de = RB_NEXT(tmpfs_dirtree, &node->tn_dir.tn_dirtree, de);
1383 }
1384 if (de == NULL)
1385 off = TMPFS_DIRCOOKIE_EOF;
1386 else
1387 off = tmpfs_dircookie(de);
1388 }
1389
1390 (*cookies)[i] = off;
1391 }
1392 KKASSERT(uio->uio_offset == off);
1393 }
1394
1395 return error;
1396}
1397
1398/* --------------------------------------------------------------------- */
1399
1400static int
1401tmpfs_readlink(struct vop_readlink_args *v)
1402{
1403 struct vnode *vp = v->a_vp;
1404 struct uio *uio = v->a_uio;
1405
1406 int error;
1407 struct tmpfs_node *node;
1408
1409 KKASSERT(uio->uio_offset == 0);
1410 KKASSERT(vp->v_type == VLNK);
1411
1412 node = VP_TO_TMPFS_NODE(vp);
1413
1414 error = uiomove(node->tn_link, MIN(node->tn_size, uio->uio_resid),
1415 uio);
1416 TMPFS_NODE_LOCK(node);
1417 node->tn_status |= TMPFS_NODE_ACCESSED;
1418 TMPFS_NODE_UNLOCK(node);
1419
1420 return error;
1421}
1422
1423/* --------------------------------------------------------------------- */
1424
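/*
 * tmpfs_inactive()
 *
 * Last reference on a vnode: deleted (unlinked) nodes are truncated and
 * recycled immediately so their memory and swap can be recovered
 * without waiting for the normal reclaim path.
 */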
1425static int
1426tmpfs_inactive(struct vop_inactive_args *v)
1427{
1428 struct vnode *vp = v->a_vp;
1429 struct tmpfs_node *node;
1430
1431 node = VP_TO_TMPFS_NODE(vp);
1432
1433 /*
1434 * Degenerate case
1435 */
1436 if (node == NULL) {
1437 vrecycle(vp);
1438 return(0);
1439 }
1440
1441 /*
1442 * Get rid of unreferenced deleted vnodes sooner rather than
1443 * later so the data memory can be recovered immediately.
1444 *
1445 * We must truncate the vnode to prevent the normal reclamation
1446 * path from flushing the data for the removed file to disk.
1447 */
1448 TMPFS_NODE_LOCK(node);
1449 if ((node->tn_vpstate & TMPFS_VNODE_ALLOCATING) == 0 &&
1450 (node->tn_links == 0 ||
1451 (node->tn_links == 1 && node->tn_type == VDIR &&
1452 node->tn_dir.tn_parent)))
1453 {
1454 node->tn_vpstate = TMPFS_VNODE_DOOMED;
1455 TMPFS_NODE_UNLOCK(node);
1456 if (node->tn_type == VREG)
1457 tmpfs_truncate(vp, 0);
1458 vrecycle(vp);
1459 } else {
1460 TMPFS_NODE_UNLOCK(node);
1461 }
1462
1463 return 0;
1464}
1465
1466/* --------------------------------------------------------------------- */
1467
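/*
 * tmpfs_reclaim()
 *
 * Disassociate the vnode from its tmpfs node and, if the node was
 * deleted by the user, free the node itself (tmpfs_free_node() consumes
 * the node lock).
 */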
1468int
1469tmpfs_reclaim(struct vop_reclaim_args *v)
1470{
1471 struct vnode *vp = v->a_vp;
1472 struct tmpfs_mount *tmp;
1473 struct tmpfs_node *node;
1474
1475 node = VP_TO_TMPFS_NODE(vp);
1476 tmp = VFS_TO_TMPFS(vp->v_mount);
1477
1478 tmpfs_free_vp(vp);
1479
1480 /*
1481 * If the node referenced by this vnode was deleted by the
1482 * user, we must free its associated data structures now that
1483 * the vnode is being reclaimed.
1484 *
1485 * Directories have an extra link ref.
1486 */
1487 TMPFS_NODE_LOCK(node);
1488 if ((node->tn_vpstate & TMPFS_VNODE_ALLOCATING) == 0 &&
1489 (node->tn_links == 0 ||
1490 (node->tn_links == 1 && node->tn_type == VDIR &&
1491 node->tn_dir.tn_parent)))
1492 {
1493 node->tn_vpstate = TMPFS_VNODE_DOOMED;
1494 tmpfs_free_node(tmp, node);
1495 /* eats the lock */
1496 } else {
1497 TMPFS_NODE_UNLOCK(node);
1498 }
1499
1500 KKASSERT(vp->v_data == NULL);
1501 return 0;
1502}
1503
1504/* --------------------------------------------------------------------- */
1505
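/*
 * tmpfs_mountctl()
 *
 * Mount-control entry point; only MOUNTCTL_SET_EXPORT is handled
 * locally (NFS export configuration via vfs_export()), everything else
 * is passed on to vop_stdmountctl().
 */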
1506static int
1507tmpfs_mountctl(struct vop_mountctl_args *ap)
1508{
1509 struct tmpfs_mount *tmp;
1510 struct mount *mp;
1511 int rc;
1512
1513 switch (ap->a_op) {
1514 case (MOUNTCTL_SET_EXPORT):
1515 mp = ap->a_head.a_ops->head.vv_mount;
1516 tmp = (struct tmpfs_mount *) mp->mnt_data;
1517
1518 if (ap->a_ctllen != sizeof(struct export_args))
1519 rc = (EINVAL);
1520 else
1521 rc = vfs_export(mp, &tmp->tm_export,
1522 (const struct export_args *) ap->a_ctl);
1523 break;
1524 default:
1525 rc = vop_stdmountctl(ap);
1526 break;
1527 }
1528 return (rc);
1529}
1530
1531/* --------------------------------------------------------------------- */
1532
1533static int
1534tmpfs_print(struct vop_print_args *v)
1535{
1536 struct vnode *vp = v->a_vp;
1537
1538 struct tmpfs_node *node;
1539
1540 node = VP_TO_TMPFS_NODE(vp);
1541
1542 kprintf("tag VT_TMPFS, tmpfs_node %p, flags 0x%x, links %d\n",
1543 node, node->tn_flags, node->tn_links);
1544 kprintf("\tmode 0%o, owner %d, group %d, size %ju, status 0x%x\n",
1545 node->tn_mode, node->tn_uid, node->tn_gid,
1546 (uintmax_t)node->tn_size, node->tn_status);
1547
1548 if (vp->v_type == VFIFO)
1549 fifo_printinfo(vp);
1550
1551 kprintf("\n");
1552
1553 return 0;
1554}
1555
1556/* --------------------------------------------------------------------- */
1557
1558static int
1559tmpfs_pathconf(struct vop_pathconf_args *v)
1560{
1561 int name = v->a_name;
1562 register_t *retval = v->a_retval;
1563
1564 int error;
1565
1566 error = 0;
1567
1568 switch (name) {
1569 case _PC_LINK_MAX:
1570 *retval = LINK_MAX;
1571 break;
1572
1573 case _PC_NAME_MAX:
1574 *retval = NAME_MAX;
1575 break;
1576
1577 case _PC_PATH_MAX:
1578 *retval = PATH_MAX;
1579 break;
1580
1581 case _PC_PIPE_BUF:
1582 *retval = PIPE_BUF;
1583 break;
1584
1585 case _PC_CHOWN_RESTRICTED:
1586 *retval = 1;
1587 break;
1588
1589 case _PC_NO_TRUNC:
1590 *retval = 1;
1591 break;
1592
1593 case _PC_SYNC_IO:
1594 *retval = 1;
1595 break;
1596
1597 case _PC_FILESIZEBITS:
1598 *retval = 0; /* XXX Don't know which value I should return. */
1599 break;
1600
1601 default:
1602 error = EINVAL;
1603 }
1604
1605 return error;
1606}
1607
1608/************************************************************************
1609 * KQFILTER OPS *
1610 ************************************************************************/
1611
1612static void filt_tmpfsdetach(struct knote *kn);
1613static int filt_tmpfsread(struct knote *kn, long hint);
1614static int filt_tmpfswrite(struct knote *kn, long hint);
1615static int filt_tmpfsvnode(struct knote *kn, long hint);
1616
1617static struct filterops tmpfsread_filtops =
1618 { FILTEROP_ISFD, NULL, filt_tmpfsdetach, filt_tmpfsread };
1619static struct filterops tmpfswrite_filtops =
1620 { FILTEROP_ISFD, NULL, filt_tmpfsdetach, filt_tmpfswrite };
1621static struct filterops tmpfsvnode_filtops =
1622 { FILTEROP_ISFD, NULL, filt_tmpfsdetach, filt_tmpfsvnode };
1623
1624static int
1625tmpfs_kqfilter (struct vop_kqfilter_args *ap)
1626{
1627 struct vnode *vp = ap->a_vp;
1628 struct knote *kn = ap->a_kn;
1629
1630 switch (kn->kn_filter) {
1631 case EVFILT_READ:
1632 kn->kn_fop = &tmpfsread_filtops;
1633 break;
1634 case EVFILT_WRITE:
1635 kn->kn_fop = &tmpfswrite_filtops;
1636 break;
1637 case EVFILT_VNODE:
1638 kn->kn_fop = &tmpfsvnode_filtops;
1639 break;
1640 default:
1641 return (EOPNOTSUPP);
1642 }
1643
1644 kn->kn_hook = (caddr_t)vp;
1645
1646 knote_insert(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn);
1647
1648 return(0);
1649}
1650
1651static void
1652filt_tmpfsdetach(struct knote *kn)
1653{
1654 struct vnode *vp = (void *)kn->kn_hook;
1655
1656 knote_remove(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn);
1657}
1658
1659static int
1660filt_tmpfsread(struct knote *kn, long hint)
1661{
1662 struct vnode *vp = (void *)kn->kn_hook;
1663 struct tmpfs_node *node = VP_TO_TMPFS_NODE(vp);
1664 off_t off;
1665
1666 if (hint == NOTE_REVOKE) {
1667 kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
1668 return(1);
1669 }
1670
1671 /*
1672 * Interlock against MP races when performing this function.
1673 */
1674 lwkt_gettoken(&vp->v_mount->mnt_token);
1675 off = node->tn_size - kn->kn_fp->f_offset;
1676 kn->kn_data = (off < INTPTR_MAX) ? off : INTPTR_MAX;
1677 if (kn->kn_sfflags & NOTE_OLDAPI) {
1678 lwkt_reltoken(&vp->v_mount->mnt_token);
1679 return(1);
1680 }
1681
1682 if (kn->kn_data == 0) {
1683 kn->kn_data = (off < INTPTR_MAX) ? off : INTPTR_MAX;
1684 }
1685 lwkt_reltoken(&vp->v_mount->mnt_token);
1686 return (kn->kn_data != 0);
1687}
1688
1689static int
1690filt_tmpfswrite(struct knote *kn, long hint)
1691{
1692 if (hint == NOTE_REVOKE)
1693 kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
1694 kn->kn_data = 0;
1695 return (1);
1696}
1697
1698static int
1699filt_tmpfsvnode(struct knote *kn, long hint)
1700{
1701 if (kn->kn_sfflags & hint)
1702 kn->kn_fflags |= hint;
1703 if (hint == NOTE_REVOKE) {
1704 kn->kn_flags |= (EV_EOF | EV_NODATA);
1705 return (1);
1706 }
1707 return (kn->kn_fflags != 0);
1708}
1709
1710
1711/* --------------------------------------------------------------------- */
1712
1713/*
1714 * vnode operations vector used for files stored in a tmpfs file system.
1715 */
1716struct vop_ops tmpfs_vnode_vops = {
1717 .vop_default = vop_defaultop,
1718 .vop_getpages = vop_stdgetpages,
1719 .vop_putpages = vop_stdputpages,
1720 .vop_ncreate = tmpfs_ncreate,
1721 .vop_nresolve = tmpfs_nresolve,
1722 .vop_nlookupdotdot = tmpfs_nlookupdotdot,
1723 .vop_nmknod = tmpfs_nmknod,
1724 .vop_open = tmpfs_open,
1725 .vop_close = tmpfs_close,
1726 .vop_access = tmpfs_access,
1727 .vop_getattr = tmpfs_getattr,
1728 .vop_setattr = tmpfs_setattr,
1729 .vop_read = tmpfs_read,
1730 .vop_write = tmpfs_write,
1731 .vop_fsync = tmpfs_fsync,
1732 .vop_mountctl = tmpfs_mountctl,
1733 .vop_nremove = tmpfs_nremove,
1734 .vop_nlink = tmpfs_nlink,
1735 .vop_nrename = tmpfs_nrename,
1736 .vop_nmkdir = tmpfs_nmkdir,
1737 .vop_nrmdir = tmpfs_nrmdir,
1738 .vop_nsymlink = tmpfs_nsymlink,
1739 .vop_readdir = tmpfs_readdir,
1740 .vop_readlink = tmpfs_readlink,
1741 .vop_inactive = tmpfs_inactive,
1742 .vop_reclaim = tmpfs_reclaim,
1743 .vop_print = tmpfs_print,
1744 .vop_pathconf = tmpfs_pathconf,
1745 .vop_bmap = tmpfs_bmap,
1746 .vop_strategy = tmpfs_strategy,
1747 .vop_advlock = tmpfs_advlock,
1748 .vop_kqfilter = tmpfs_kqfilter
1749};