use rb-tree for directory lookups
[dragonfly.git] / sys / vfs / tmpfs / tmpfs_subr.c
CommitLineData
7a2de9a4
MD
1/* $NetBSD: tmpfs_subr.c,v 1.35 2007/07/09 21:10:50 ad Exp $ */
2
3/*-
4 * Copyright (c) 2005 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Julio M. Merino Vidal, developed as part of Google's Summer of Code
9 * 2005 program.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 * POSSIBILITY OF SUCH DAMAGE.
31 */
32
33/*
34 * Efficient memory file system supporting functions.
35 */
7a2de9a4
MD
36
37#include <sys/kernel.h>
38#include <sys/param.h>
39#include <sys/namei.h>
40#include <sys/priv.h>
41#include <sys/proc.h>
42#include <sys/spinlock2.h>
43#include <sys/stat.h>
44#include <sys/systm.h>
45#include <sys/vnode.h>
46#include <sys/vmmeter.h>
47
7a2de9a4
MD
48#include <vm/vm.h>
49#include <vm/vm_object.h>
50#include <vm/vm_page.h>
51#include <vm/vm_pager.h>
52#include <vm/vm_extern.h>
53
54#include <vfs/tmpfs/tmpfs.h>
7a2de9a4
MD
55#include <vfs/tmpfs/tmpfs_vnops.h>
56
f7db522f 57static ino_t tmpfs_fetch_ino(struct tmpfs_mount *);
29ca4fd6
JH
58static int tmpfs_dirtree_compare(struct tmpfs_dirent *a,
59 struct tmpfs_dirent *b);
60
61RB_GENERATE(tmpfs_dirtree, tmpfs_dirent, rb_node, tmpfs_dirtree_compare);
62
7a2de9a4
MD
63
64/* --------------------------------------------------------------------- */
65
66/*
67 * Allocates a new node of type 'type' inside the 'tmp' mount point, with
68 * its owner set to 'uid', its group to 'gid' and its mode set to 'mode',
69 * using the credentials of the process 'p'.
70 *
71 * If the node type is set to 'VDIR', then the parent parameter must point
72 * to the parent directory of the node being created. It may only be NULL
73 * while allocating the root node.
74 *
75 * If the node type is set to 'VBLK' or 'VCHR', then the rdev parameter
76 * specifies the device the node represents.
77 *
78 * If the node type is set to 'VLNK', then the parameter target specifies
79 * the file name of the target file for the symbolic link that is being
80 * created.
81 *
82 * Note that new nodes are retrieved from the available list if it has
83 * items or, if it is empty, from the node pool as long as there is enough
84 * space to create them.
85 *
86 * Returns zero on success or an appropriate error code on failure.
87 */
int
tmpfs_alloc_node(struct tmpfs_mount *tmp, enum vtype type,
	uid_t uid, gid_t gid, mode_t mode, struct tmpfs_node *parent,
	char *target, int rmajor, int rminor, struct tmpfs_node **node)
{
	struct tmpfs_node *nnode;
	struct timespec ts;
	udev_t rdev;

	/* If the root directory of the 'tmp' file system is not yet
	 * allocated, this must be the request to do it. */
	KKASSERT(IMPLIES(tmp->tm_root == NULL, parent == NULL && type == VDIR));

	KKASSERT(IFF(type == VLNK, target != NULL));
	KKASSERT(IFF(type == VBLK || type == VCHR, rmajor != VNOVAL));

	/* Enforce the mount's configured node limit. */
	if (tmp->tm_nodes_inuse >= tmp->tm_nodes_max)
		return (ENOSPC);

	/* M_NULLOK: the objcache may return NULL instead of blocking forever. */
	nnode = objcache_get(tmp->tm_node_pool, M_WAITOK | M_NULLOK);
	if (nnode == NULL)
		return (ENOSPC);

	/* Generic initialization. */
	nnode->tn_type = type;
	vfs_timestamp(&ts);
	nnode->tn_ctime = nnode->tn_mtime = nnode->tn_atime
		= ts.tv_sec;
	nnode->tn_ctimensec = nnode->tn_mtimensec = nnode->tn_atimensec
		= ts.tv_nsec;
	nnode->tn_uid = uid;
	nnode->tn_gid = gid;
	nnode->tn_mode = mode;
	nnode->tn_id = tmpfs_fetch_ino(tmp);
	nnode->tn_advlock.init_done = 0;

	/* Type-specific initialization. */
	switch (nnode->tn_type) {
	case VBLK:
	case VCHR:
		rdev = makeudev(rmajor, rminor);
		if (rdev == NOUDEV) {
			/* Invalid device numbers: back out the allocation. */
			objcache_put(tmp->tm_node_pool, nnode);
			return(EINVAL);
		}
		nnode->tn_rdev = rdev;
		break;

	case VDIR:
		RB_INIT(&nnode->tn_dir.tn_dirtree);
		KKASSERT(parent != nnode);
		KKASSERT(IMPLIES(parent == NULL, tmp->tm_root == NULL));
		nnode->tn_dir.tn_parent = parent;
		nnode->tn_dir.tn_readdir_lastn = 0;
		nnode->tn_dir.tn_readdir_lastp = NULL;
		/* A directory always references itself via ".". */
		nnode->tn_links++;
		nnode->tn_size = 0;
		if (parent) {
			/* ".." in the new directory references the parent. */
			TMPFS_NODE_LOCK(parent);
			parent->tn_links++;
			TMPFS_NODE_UNLOCK(parent);
		}
		break;

	case VFIFO:
		/* FALLTHROUGH */
	case VSOCK:
		break;

	case VLNK:
		nnode->tn_size = strlen(target);
		/* Symlink target string lives in the mount's name zone. */
		nnode->tn_link = kmalloc(nnode->tn_size + 1, tmp->tm_name_zone,
					 M_WAITOK | M_NULLOK);
		if (nnode->tn_link == NULL) {
			objcache_put(tmp->tm_node_pool, nnode);
			return (ENOSPC);
		}
		bcopy(target, nnode->tn_link, nnode->tn_size);
		nnode->tn_link[nnode->tn_size] = '\0';
		break;

	case VREG:
		/* Regular file contents are backed by a swap pager object. */
		nnode->tn_reg.tn_aobj =
		    swap_pager_alloc(NULL, 0, VM_PROT_DEFAULT, 0);
		nnode->tn_reg.tn_aobj_pages = 0;
		nnode->tn_size = 0;
		break;

	default:
		panic("tmpfs_alloc_node: type %p %d", nnode, (int)nnode->tn_type);
	}

	/* Publish the node on the mount's in-use list. */
	TMPFS_NODE_LOCK(nnode);
	TMPFS_LOCK(tmp);
	LIST_INSERT_HEAD(&tmp->tm_nodes_used, nnode, tn_entries);
	tmp->tm_nodes_inuse++;
	TMPFS_UNLOCK(tmp);
	TMPFS_NODE_UNLOCK(nnode);

	*node = nnode;
	return 0;
}
190
191/* --------------------------------------------------------------------- */
192
193/*
194 * Destroys the node pointed to by node from the file system 'tmp'.
195 * If the node does not belong to the given mount point, the results are
196 * unpredicted.
197 *
198 * If the node references a directory; no entries are allowed because
199 * their removal could need a recursive algorithm, something forbidden in
200 * kernel space. Furthermore, there is not need to provide such
201 * functionality (recursive removal) because the only primitives offered
202 * to the user are the removal of empty directories and the deletion of
203 * individual files.
204 *
205 * Note that nodes are not really deleted; in fact, when a node has been
206 * allocated, it cannot be deleted during the whole life of the file
207 * system. Instead, they are moved to the available list and remain there
208 * until reused.
209 */
void
tmpfs_free_node(struct tmpfs_mount *tmp, struct tmpfs_node *node)
{
	vm_pindex_t pages = 0;

#ifdef INVARIANTS
	/* Caller must hold the node lock; node must be vnode-less. */
	TMPFS_ASSERT_ELOCKED(node);
	KKASSERT(node->tn_vnode == NULL);
	KKASSERT((node->tn_vpstate & TMPFS_VNODE_ALLOCATING) == 0);
#endif

	/* Unlink from the mount's in-use list, then drop the node lock. */
	TMPFS_LOCK(tmp);
	LIST_REMOVE(node, tn_entries);
	tmp->tm_nodes_inuse--;
	TMPFS_UNLOCK(tmp);
	TMPFS_NODE_UNLOCK(node);

	switch (node->tn_type) {
	case VNON:
		/* Do not do anything. VNON is provided to let the
		 * allocation routine clean itself easily by avoiding
		 * duplicating code in it. */
		/* FALLTHROUGH */
	case VBLK:
		/* FALLTHROUGH */
	case VCHR:
		/* FALLTHROUGH */
		break;
	case VDIR:
		/*
		 * The parent link can be NULL if this is the root
		 * node.
		 */
		node->tn_links--;
		node->tn_size = 0;
		KKASSERT(node->tn_dir.tn_parent || node == tmp->tm_root);
		if (node->tn_dir.tn_parent) {
			TMPFS_NODE_LOCK(node->tn_dir.tn_parent);
			node->tn_dir.tn_parent->tn_links--;

			/*
			 * If the parent directory has no more links and
			 * no vnode ref nothing is going to come along
			 * and clean it up unless we do it here.
			 */
			if (node->tn_dir.tn_parent->tn_links == 0 &&
			    node->tn_dir.tn_parent->tn_vnode == NULL) {
				tmpfs_free_node(tmp, node->tn_dir.tn_parent);
				/* eats parent lock */
			} else {
				TMPFS_NODE_UNLOCK(node->tn_dir.tn_parent);
			}
			node->tn_dir.tn_parent = NULL;
		}

		/*
		 * If the root node is being destroyed don't leave a
		 * dangling pointer in tmpfs_mount.
		 */
		if (node == tmp->tm_root)
			tmp->tm_root = NULL;
		break;
	case VFIFO:
		/* FALLTHROUGH */
	case VSOCK:
		break;

	case VLNK:
		/* Symlink target string was allocated from the name zone. */
		kfree(node->tn_link, tmp->tm_name_zone);
		node->tn_link = NULL;
		node->tn_size = 0;
		break;

	case VREG:
		/* Release the backing swap object; remember its page count
		 * so the mount-wide accounting below can be adjusted. */
		if (node->tn_reg.tn_aobj != NULL)
			vm_object_deallocate(node->tn_reg.tn_aobj);
		node->tn_reg.tn_aobj = NULL;
		pages = node->tn_reg.tn_aobj_pages;
		break;

	default:
		panic("tmpfs_free_node: type %p %d", node, (int)node->tn_type);
	}

	/*
	 * Clean up fields for the next allocation.  The objcache only ctors
	 * new allocations.
	 */
	tmpfs_node_ctor(node, NULL, 0);
	objcache_put(tmp->tm_node_pool, node);
	/* node is now invalid */

	TMPFS_LOCK(tmp);
	tmp->tm_pages_used -= pages;
	TMPFS_UNLOCK(tmp);
}
306
307/* --------------------------------------------------------------------- */
308
309/*
310 * Allocates a new directory entry for the node node with a name of name.
311 * The new directory entry is returned in *de.
312 *
313 * The link count of node is increased by one to reflect the new object
314 * referencing it.
315 *
316 * Returns zero on success or an appropriate error code on failure.
317 */
int
tmpfs_alloc_dirent(struct tmpfs_mount *tmp, struct tmpfs_node *node,
    const char *name, uint16_t len, struct tmpfs_dirent **de)
{
	struct tmpfs_dirent *nde;

	nde = objcache_get(tmp->tm_dirent_pool, M_WAITOK);
	/* Name storage may fail (M_NULLOK); back the dirent out on failure. */
	nde->td_name = kmalloc(len + 1, tmp->tm_name_zone, M_WAITOK | M_NULLOK);
	if (nde->td_name == NULL) {
		objcache_put(tmp->tm_dirent_pool, nde);
		*de = NULL;
		return (ENOSPC);
	}
	nde->td_namelen = len;
	bcopy(name, nde->td_name, len);
	nde->td_name[len] = '\0';

	nde->td_node = node;

	/* The new directory entry references the node: bump its link count. */
	TMPFS_NODE_LOCK(node);
	node->tn_links++;
	TMPFS_NODE_UNLOCK(node);

	*de = nde;

	return 0;
}
345
346/* --------------------------------------------------------------------- */
347
348/*
349 * Frees a directory entry. It is the caller's responsibility to destroy
350 * the node referenced by it if needed.
351 *
352 * The link count of node is decreased by one to reflect the removal of an
353 * object that referenced it. This only happens if 'node_exists' is true;
354 * otherwise the function will not access the node referred to by the
355 * directory entry, as it may already have been released from the outside.
356 */
void
tmpfs_free_dirent(struct tmpfs_mount *tmp, struct tmpfs_dirent *de)
{
	struct tmpfs_node *node;

	node = de->td_node;

	/* Drop the link the directory entry held on the node. */
	TMPFS_NODE_LOCK(node);
	TMPFS_ASSERT_ELOCKED(node);
	KKASSERT(node->tn_links > 0);
	node->tn_links--;
	TMPFS_NODE_UNLOCK(node);

	/* Free the name string (name zone), then recycle the dirent. */
	kfree(de->td_name, tmp->tm_name_zone);
	de->td_namelen = 0;
	de->td_name = NULL;
	de->td_node = NULL;
	objcache_put(tmp->tm_dirent_pool, de);
}
376
377/* --------------------------------------------------------------------- */
378
379/*
380 * Allocates a new vnode for the node node or returns a new reference to
381 * an existing one if the node had already a vnode referencing it. The
382 * resulting locked vnode is returned in *vpp.
383 *
384 * Returns zero on success or an appropriate error code on failure.
385 */
int
tmpfs_alloc_vp(struct mount *mp, struct tmpfs_node *node, int lkflag,
	       struct vnode **vpp)
{
	int error = 0;
	struct vnode *vp;

loop:
	/*
	 * Interlocked extraction from node.  This can race many things.
	 * We have to get a soft reference on the vnode while we hold
	 * the node locked, then acquire it properly and check for races.
	 */
	TMPFS_NODE_LOCK(node);
	if ((vp = node->tn_vnode) != NULL) {
		KKASSERT((node->tn_vpstate & TMPFS_VNODE_DOOMED) == 0);
		/* Soft-hold the vnode before dropping the node lock. */
		vhold_interlocked(vp);
		TMPFS_NODE_UNLOCK(node);

		if (vget(vp, lkflag | LK_EXCLUSIVE) != 0) {
			/* vget failed (e.g. vnode being reclaimed); retry. */
			vdrop(vp);
			goto loop;
		}
		if (node->tn_vnode != vp) {
			/* Lost a race: node was re-associated meanwhile. */
			vput(vp);
			vdrop(vp);
			goto loop;
		}
		vdrop(vp);
		goto out;
	}
	/* vp is NULL */

	/*
	 * This should never happen.
	 */
	if (node->tn_vpstate & TMPFS_VNODE_DOOMED) {
		TMPFS_NODE_UNLOCK(node);
		error = ENOENT;
		goto out;
	}

	/*
	 * Interlock against other calls to tmpfs_alloc_vp() trying to
	 * allocate and assign a vp to node.
	 */
	if (node->tn_vpstate & TMPFS_VNODE_ALLOCATING) {
		node->tn_vpstate |= TMPFS_VNODE_WANT;
		error = tsleep(&node->tn_vpstate, PINTERLOCKED | PCATCH,
			       "tmpfs_alloc_vp", 0);
		TMPFS_NODE_UNLOCK(node);
		if (error)
			return error;
		goto loop;
	}
	node->tn_vpstate |= TMPFS_VNODE_ALLOCATING;
	TMPFS_NODE_UNLOCK(node);

	/*
	 * Allocate a new vnode (may block).  The ALLOCATING flag should
	 * prevent a race against someone else assigning node->tn_vnode.
	 */
	error = getnewvnode(VT_TMPFS, mp, &vp, VLKTIMEOUT, LK_CANRECURSE);
	if (error != 0)
		goto unlock;

	KKASSERT(node->tn_vnode == NULL);
	KKASSERT(vp != NULL);
	vp->v_data = node;
	vp->v_type = node->tn_type;

	/* Type-specific initialization. */
	switch (node->tn_type) {
	case VBLK:
		/* FALLTHROUGH */
	case VCHR:
		/* FALLTHROUGH */
	case VSOCK:
		break;
	case VREG:
		/* Hook the vnode into the buffer cache / VM system. */
		vinitvmio(vp, node->tn_size, BMASK, -1);
		break;
	case VLNK:
		break;
	case VFIFO:
		vp->v_ops = &mp->mnt_vn_fifo_ops;
		break;
	case VDIR:
		break;

	default:
		panic("tmpfs_alloc_vp: type %p %d", node, (int)node->tn_type);
	}

	insmntque(vp, mp);

unlock:
	TMPFS_NODE_LOCK(node);

	/* Clear ALLOCATING and publish the vnode (NULL on getnewvnode error),
	 * waking anyone who blocked on the interlock above. */
	KKASSERT(node->tn_vpstate & TMPFS_VNODE_ALLOCATING);
	node->tn_vpstate &= ~TMPFS_VNODE_ALLOCATING;
	node->tn_vnode = vp;

	if (node->tn_vpstate & TMPFS_VNODE_WANT) {
		node->tn_vpstate &= ~TMPFS_VNODE_WANT;
		TMPFS_NODE_UNLOCK(node);
		wakeup(&node->tn_vpstate);
	} else {
		TMPFS_NODE_UNLOCK(node);
	}

out:
	*vpp = vp;

	/* On success *vpp is a locked vnode; on failure it is NULL. */
	KKASSERT(IFF(error == 0, *vpp != NULL && vn_islocked(*vpp)));
#ifdef INVARIANTS
	TMPFS_NODE_LOCK(node);
	KKASSERT(*vpp == node->tn_vnode);
	TMPFS_NODE_UNLOCK(node);
#endif

	return error;
}
509
510/* --------------------------------------------------------------------- */
511
512/*
513 * Destroys the association between the vnode vp and the node it
514 * references.
515 */
void
tmpfs_free_vp(struct vnode *vp)
{
	struct tmpfs_node *node;

	node = VP_TO_TMPFS_NODE(vp);

	/* Break the node->vnode link under the node lock, then the
	 * vnode->node link.  The node itself is not freed here. */
	TMPFS_NODE_LOCK(node);
	KKASSERT(lockcount(TMPFS_NODE_MTX(node)) > 0);
	node->tn_vnode = NULL;
	TMPFS_NODE_UNLOCK(node);
	vp->v_data = NULL;
}
529
530/* --------------------------------------------------------------------- */
531
532/*
533 * Allocates a new file of type 'type' and adds it to the parent directory
534 * 'dvp'; this addition is done using the component name given in 'cnp'.
535 * The ownership of the new file is automatically assigned based on the
536 * credentials of the caller (through 'cnp'), the group is set based on
537 * the parent directory and the mode is determined from the 'vap' argument.
538 * If successful, *vpp holds a vnode to the newly created file and zero
539 * is returned. Otherwise *vpp is NULL and the function returns an
540 * appropriate error code.
541 */
int
tmpfs_alloc_file(struct vnode *dvp, struct vnode **vpp, struct vattr *vap,
		 struct namecache *ncp, struct ucred *cred, char *target)
{
	int error;
	struct tmpfs_dirent *de;
	struct tmpfs_mount *tmp;
	struct tmpfs_node *dnode;
	struct tmpfs_node *node;
	struct tmpfs_node *parent;

	tmp = VFS_TO_TMPFS(dvp->v_mount);
	dnode = VP_TO_TMPFS_DIR(dvp);
	*vpp = NULL;

	/* If the entry we are creating is a directory, we cannot overflow
	 * the number of links of its parent, because it will get a new
	 * link. */
	if (vap->va_type == VDIR) {
		/* Ensure that we do not overflow the maximum number of links
		 * imposed by the system. */
		KKASSERT(dnode->tn_links <= LINK_MAX);
		if (dnode->tn_links == LINK_MAX) {
			return EMLINK;
		}

		parent = dnode;
		KKASSERT(parent != NULL);
	} else
		parent = NULL;

	/* Allocate a node that represents the new file. */
	error = tmpfs_alloc_node(tmp, vap->va_type, cred->cr_uid,
	    dnode->tn_gid, vap->va_mode, parent, target, vap->va_rmajor, vap->va_rminor, &node);
	if (error != 0)
		return error;
	TMPFS_NODE_LOCK(node);

	/* Allocate a directory entry that points to the new file. */
	error = tmpfs_alloc_dirent(tmp, node, ncp->nc_name, ncp->nc_nlen, &de);
	if (error != 0) {
		tmpfs_free_node(tmp, node);
		/* eats node lock */
		return error;
	}

	/* Allocate a vnode for the new file. */
	error = tmpfs_alloc_vp(dvp->v_mount, node, LK_EXCLUSIVE, vpp);
	if (error != 0) {
		tmpfs_free_dirent(tmp, de);
		tmpfs_free_node(tmp, node);
		/* eats node lock */
		return error;
	}

	/* Now that all required items are allocated, we can proceed to
	 * insert the new node into the directory, an operation that
	 * cannot fail. */
	tmpfs_dir_attach(dnode, de);
	TMPFS_NODE_UNLOCK(node);

	return error;
}
605
606/* --------------------------------------------------------------------- */
607
608/*
609 * Attaches the directory entry de to the directory represented by vp.
610 * Note that this does not change the link count of the node pointed by
611 * the directory entry, as this is done by tmpfs_alloc_dirent.
612 */
void
tmpfs_dir_attach(struct tmpfs_node *dnode, struct tmpfs_dirent *de)
{
	/* Insert the entry into the directory's RB-tree keyed by name. */
	TMPFS_NODE_LOCK(dnode);
	RB_INSERT(tmpfs_dirtree, &dnode->tn_dir.tn_dirtree, de);

	TMPFS_ASSERT_ELOCKED(dnode);
	/* Directory size is accounted in dirent-sized units. */
	dnode->tn_size += sizeof(struct tmpfs_dirent);
	dnode->tn_status |= TMPFS_NODE_ACCESSED | TMPFS_NODE_CHANGED |
			    TMPFS_NODE_MODIFIED;
	TMPFS_NODE_UNLOCK(dnode);
}
625
626/* --------------------------------------------------------------------- */
627
628/*
629 * Detaches the directory entry de from the directory represented by vp.
630 * Note that this does not change the link count of the node pointed by
631 * the directory entry, as this is done by tmpfs_free_dirent.
632 */
void
tmpfs_dir_detach(struct tmpfs_node *dnode, struct tmpfs_dirent *de)
{
	TMPFS_NODE_LOCK(dnode);
	/* Invalidate the cached readdir position if it points at the
	 * entry being removed. */
	if (dnode->tn_dir.tn_readdir_lastp == de) {
		dnode->tn_dir.tn_readdir_lastn = 0;
		dnode->tn_dir.tn_readdir_lastp = NULL;
	}
	RB_REMOVE(tmpfs_dirtree, &dnode->tn_dir.tn_dirtree, de);

	TMPFS_ASSERT_ELOCKED(dnode);
	/* Directory size is accounted in dirent-sized units. */
	dnode->tn_size -= sizeof(struct tmpfs_dirent);
	dnode->tn_status |= TMPFS_NODE_ACCESSED | TMPFS_NODE_CHANGED |
			    TMPFS_NODE_MODIFIED;
	TMPFS_NODE_UNLOCK(dnode);
}
649
650/* --------------------------------------------------------------------- */
651
652/*
653 * Looks for a directory entry in the directory represented by node.
654 * 'ncp' describes the name of the entry to look for. Note that the .
655 * and .. components are not allowed as they do not physically exist
656 * within directories.
657 *
658 * Returns a pointer to the entry when found, otherwise NULL.
659 */
660struct tmpfs_dirent *
661tmpfs_dir_lookup(struct tmpfs_node *node, struct tmpfs_node *f,
662 struct namecache *ncp)
663{
7a2de9a4
MD
664 struct tmpfs_dirent *de;
665 int len = ncp->nc_nlen;
29ca4fd6
JH
666 struct tmpfs_dirent wanted;
667
668 wanted.td_namelen = len;
669 wanted.td_name = ncp->nc_name;
7a2de9a4
MD
670
671 TMPFS_VALIDATE_DIR(node);
672
29ca4fd6
JH
673 de = RB_FIND(tmpfs_dirtree, &node->tn_dir.tn_dirtree, &wanted);
674
675 KKASSERT(f == NULL || f == de->td_node);
7a2de9a4
MD
676
677 TMPFS_NODE_LOCK(node);
678 node->tn_status |= TMPFS_NODE_ACCESSED;
679 TMPFS_NODE_UNLOCK(node);
680
9fc94b5f 681 return de;
7a2de9a4
MD
682}
683
684/* --------------------------------------------------------------------- */
685
686/*
687 * Helper function for tmpfs_readdir. Creates a '.' entry for the given
688 * directory and returns it in the uio space. The function returns 0
689 * on success, -1 if there was not enough space in the uio structure to
690 * hold the directory entry or an appropriate error code if another
691 * error happens.
692 */
int
tmpfs_dir_getdotdent(struct tmpfs_node *node, struct uio *uio)
{
	int error;
	struct dirent dent;
	int dirsize;

	TMPFS_VALIDATE_DIR(node);
	KKASSERT(uio->uio_offset == TMPFS_DIRCOOKIE_DOT);

	/* "." refers to the directory itself. */
	dent.d_ino = node->tn_id;
	dent.d_type = DT_DIR;
	dent.d_namlen = 1;
	dent.d_name[0] = '.';
	dent.d_name[1] = '\0';
	dirsize = _DIRENT_DIRSIZ(&dent);

	/* -1 signals "no room in uio" to the caller, per the convention
	 * documented above. */
	if (dirsize > uio->uio_resid)
		error = -1;
	else {
		error = uiomove((caddr_t)&dent, dirsize, uio);
		if (error == 0)
			uio->uio_offset = TMPFS_DIRCOOKIE_DOTDOT;
	}

	TMPFS_NODE_LOCK(node);
	node->tn_status |= TMPFS_NODE_ACCESSED;
	TMPFS_NODE_UNLOCK(node);

	return error;
}
724
725/* --------------------------------------------------------------------- */
726
727/*
728 * Helper function for tmpfs_readdir. Creates a '..' entry for the given
729 * directory and returns it in the uio space. The function returns 0
730 * on success, -1 if there was not enough space in the uio structure to
731 * hold the directory entry or an appropriate error code if another
732 * error happens.
733 */
int
tmpfs_dir_getdotdotdent(struct tmpfs_mount *tmp, struct tmpfs_node *node,
			struct uio *uio)
{
	int error;
	struct dirent dent;
	int dirsize;

	TMPFS_VALIDATE_DIR(node);
	KKASSERT(uio->uio_offset == TMPFS_DIRCOOKIE_DOTDOT);

	/* ".." refers to the parent; for the root (no parent) it refers
	 * to the root node itself. */
	if (node->tn_dir.tn_parent) {
		TMPFS_NODE_LOCK(node->tn_dir.tn_parent);
		dent.d_ino = node->tn_dir.tn_parent->tn_id;
		TMPFS_NODE_UNLOCK(node->tn_dir.tn_parent);
	} else {
		dent.d_ino = tmp->tm_root->tn_id;
	}

	dent.d_type = DT_DIR;
	dent.d_namlen = 2;
	dent.d_name[0] = '.';
	dent.d_name[1] = '.';
	dent.d_name[2] = '\0';
	dirsize = _DIRENT_DIRSIZ(&dent);

	/* -1 signals "no room in uio" to the caller. */
	if (dirsize > uio->uio_resid)
		error = -1;
	else {
		error = uiomove((caddr_t)&dent, dirsize, uio);
		if (error == 0) {
			struct tmpfs_dirent *de;

			/* Advance the cookie to the first real entry,
			 * or EOF if the directory is empty. */
			de = RB_MIN(tmpfs_dirtree, &node->tn_dir.tn_dirtree);
			if (de == NULL)
				uio->uio_offset = TMPFS_DIRCOOKIE_EOF;
			else
				uio->uio_offset = tmpfs_dircookie(de);
		}
	}

	TMPFS_NODE_LOCK(node);
	node->tn_status |= TMPFS_NODE_ACCESSED;
	TMPFS_NODE_UNLOCK(node);

	return error;
}
781
782/* --------------------------------------------------------------------- */
783
784/*
785 * Lookup a directory entry by its associated cookie.
786 */
787struct tmpfs_dirent *
788tmpfs_dir_lookupbycookie(struct tmpfs_node *node, off_t cookie)
789{
790 struct tmpfs_dirent *de;
791
792 if (cookie == node->tn_dir.tn_readdir_lastn &&
793 node->tn_dir.tn_readdir_lastp != NULL) {
794 return node->tn_dir.tn_readdir_lastp;
795 }
796
29ca4fd6 797 RB_FOREACH(de, tmpfs_dirtree, &node->tn_dir.tn_dirtree) {
7a2de9a4
MD
798 if (tmpfs_dircookie(de) == cookie) {
799 break;
800 }
801 }
802
803 return de;
804}
805
806/* --------------------------------------------------------------------- */
807
808/*
809 * Helper function for tmpfs_readdir. Returns as much directory entries
810 * as can fit in the uio space. The read starts at uio->uio_offset.
811 * The function returns 0 on success, -1 if there was not enough space
812 * in the uio structure to hold the directory entry or an appropriate
813 * error code if another error happens.
814 */
int
tmpfs_dir_getdents(struct tmpfs_node *node, struct uio *uio, off_t *cntp)
{
	int error;
	off_t startcookie;
	struct tmpfs_dirent *de;

	TMPFS_VALIDATE_DIR(node);

	/* Locate the first directory entry we have to return.  We have cached
	 * the last readdir in the node, so use those values if appropriate.
	 * Otherwise do a linear scan to find the requested entry. */
	startcookie = uio->uio_offset;
	KKASSERT(startcookie != TMPFS_DIRCOOKIE_DOT);
	KKASSERT(startcookie != TMPFS_DIRCOOKIE_DOTDOT);
	if (startcookie == TMPFS_DIRCOOKIE_EOF) {
		return 0;
	} else {
		de = tmpfs_dir_lookupbycookie(node, startcookie);
	}
	if (de == NULL) {
		return EINVAL;
	}

	/* Read as much entries as possible; i.e., until we reach the end of
	 * the directory or we exhaust uio space. */
	do {
		struct dirent d;
		int reclen;

		/* Create a dirent structure representing the current
		 * tmpfs_node and fill it. */
		d.d_ino = de->td_node->tn_id;
		switch (de->td_node->tn_type) {
		case VBLK:
			d.d_type = DT_BLK;
			break;

		case VCHR:
			d.d_type = DT_CHR;
			break;

		case VDIR:
			d.d_type = DT_DIR;
			break;

		case VFIFO:
			d.d_type = DT_FIFO;
			break;

		case VLNK:
			d.d_type = DT_LNK;
			break;

		case VREG:
			d.d_type = DT_REG;
			break;

		case VSOCK:
			d.d_type = DT_SOCK;
			break;

		default:
			panic("tmpfs_dir_getdents: type %p %d",
			    de->td_node, (int)de->td_node->tn_type);
		}
		d.d_namlen = de->td_namelen;
		KKASSERT(de->td_namelen < sizeof(d.d_name));
		bcopy(de->td_name, d.d_name, d.d_namlen);
		d.d_name[d.d_namlen] = '\0';
		reclen = _DIRENT_RECLEN(d.d_namlen);

		/* Stop reading if the directory entry we are treating is
		 * bigger than the amount of data that can be returned. */
		if (reclen > uio->uio_resid) {
			/* -1 signals "out of uio space" to the caller. */
			error = -1;
			break;
		}

		/* Copy the new dirent structure into the output buffer and
		 * advance pointers. */
		error = uiomove((caddr_t)&d, reclen, uio);

		(*cntp)++;
		de = RB_NEXT(tmpfs_dirtree, node->tn_dir.tn_dirtree, de);
	} while (error == 0 && uio->uio_resid > 0 && de != NULL);

	/* Update the offset and cache. */
	if (de == NULL) {
		uio->uio_offset = TMPFS_DIRCOOKIE_EOF;
		node->tn_dir.tn_readdir_lastn = 0;
		node->tn_dir.tn_readdir_lastp = NULL;
	} else {
		node->tn_dir.tn_readdir_lastn = uio->uio_offset = tmpfs_dircookie(de);
		node->tn_dir.tn_readdir_lastp = de;
	}
	/* NOTE(review): tn_status/readdir cache updated without the node
	 * lock here, unlike attach/detach — presumably the caller holds
	 * it; confirm against the vnops caller. */
	node->tn_status |= TMPFS_NODE_ACCESSED;

	return error;
}
915
916/* --------------------------------------------------------------------- */
917
918/*
919 * Resizes the aobj associated to the regular file pointed to by vp to
920 * the size newsize. 'vp' must point to a vnode that represents a regular
921 * file. 'newsize' must be positive.
922 *
923 * pass trivial as 1 when buf content will be overwritten, otherwise set 0
924 * to be zero filled.
925 *
926 * Returns zero on success or an appropriate error code on failure.
927 */
int
tmpfs_reg_resize(struct vnode *vp, off_t newsize, int trivial)
{
	int error;
	vm_pindex_t newpages, oldpages;
	struct tmpfs_mount *tmp;
	struct tmpfs_node *node;
	off_t oldsize;

#ifdef INVARIANTS
	KKASSERT(vp->v_type == VREG);
	KKASSERT(newsize >= 0);
#endif

	node = VP_TO_TMPFS_NODE(vp);
	tmp = VFS_TO_TMPFS(vp->v_mount);

	/* Convert the old and new sizes to the number of pages needed to
	 * store them.  It may happen that we do not need to do anything
	 * because the last allocated page can accommodate the change on
	 * its own. */
	oldsize = node->tn_size;
	oldpages = round_page64(oldsize) / PAGE_SIZE;
	KKASSERT(oldpages == node->tn_reg.tn_aobj_pages);
	newpages = round_page64(newsize) / PAGE_SIZE;

	/* Refuse growth that would exceed the mount's page quota. */
	if (newpages > oldpages &&
	    tmp->tm_pages_used + newpages - oldpages > tmp->tm_pages_max) {
		error = ENOSPC;
		goto out;
	}

	TMPFS_LOCK(tmp);
	tmp->tm_pages_used += (newpages - oldpages);
	TMPFS_UNLOCK(tmp);

	TMPFS_NODE_LOCK(node);
	node->tn_reg.tn_aobj_pages = newpages;
	node->tn_size = newsize;
	TMPFS_NODE_UNLOCK(node);

	/*
	 * When adjusting the vnode filesize and its VM object we must
	 * also adjust our backing VM object (aobj).  The blocksize
	 * used must match the block sized we use for the buffer cache.
	 *
	 * The backing VM object contains no VM pages, only swap
	 * assignments.
	 */
	if (newsize < oldsize) {
		vm_pindex_t osize;
		vm_pindex_t nsize;
		vm_object_t aobj;

		error = nvtruncbuf(vp, newsize, BSIZE, -1, 0);
		aobj = node->tn_reg.tn_aobj;
		if (aobj) {
			osize = aobj->size;
			nsize = vp->v_object->size;
			if (nsize < osize) {
				/* NOTE(review): this assignment is a no-op
				 * (osize was just read from aobj->size);
				 * 'nsize' looks intended — verify against
				 * upstream before changing. */
				aobj->size = osize;
				/* Release swap backing the truncated range. */
				swap_pager_freespace(aobj, nsize,
						     osize - nsize);
			}
		}
	} else {
		vm_object_t aobj;

		error = nvextendbuf(vp, oldsize, newsize, BSIZE, BSIZE,
				    -1, -1, trivial);
		aobj = node->tn_reg.tn_aobj;
		if (aobj)
			aobj->size = vp->v_object->size;
	}

out:
	return error;
}
1006
1007/* --------------------------------------------------------------------- */
1008
1009/*
1010 * Change flags of the given vnode.
1011 * Caller should execute tmpfs_update on vp after a successful execution.
1012 * The vnode must be locked on entry and remain locked on exit.
1013 */
int
tmpfs_chflags(struct vnode *vp, int vaflags, struct ucred *cred)
{
	int error;
	struct tmpfs_node *node;
	int flags;

	KKASSERT(vn_islocked(vp));

	node = VP_TO_TMPFS_NODE(vp);
	/* Work on a local copy; committed under the node lock below. */
	flags = node->tn_flags;

	/* Disallow this operation if the file system is mounted read-only. */
	if (vp->v_mount->mnt_flag & MNT_RDONLY)
		return EROFS;
	error = vop_helper_setattr_flags(&flags, vaflags, node->tn_uid, cred);

	/*
	 * Unprivileged processes are not permitted to unset system
	 * flags, or modify flags if any system flags are set.
	 *
	 * Silently enforce SF_NOCACHE on the root tmpfs vnode so
	 * tmpfs data is not double-cached by swapcache.
	 */
	if (error == 0) {
		TMPFS_NODE_LOCK(node);
		if (!priv_check_cred(cred, PRIV_VFS_SYSFLAGS, 0)) {
			/* Privileged: may set any flag. */
			if (vp->v_flag & VROOT)
				flags |= SF_NOCACHE;
			node->tn_flags = flags;
		} else {
			/* Unprivileged: only UF_* flags, and only when no
			 * restrictive system flags are already set. */
			if (node->tn_flags & (SF_NOUNLINK | SF_IMMUTABLE |
					      SF_APPEND) ||
			    (flags & UF_SETTABLE) != flags) {
				error = EPERM;
			} else {
				node->tn_flags &= SF_SETTABLE;
				node->tn_flags |= (flags & UF_SETTABLE);
			}
		}
		node->tn_status |= TMPFS_NODE_CHANGED;
		TMPFS_NODE_UNLOCK(node);
	}

	KKASSERT(vn_islocked(vp));

	return error;
}
1062
1063/* --------------------------------------------------------------------- */
1064
1065/*
1066 * Change access mode on the given vnode.
1067 * Caller should execute tmpfs_update on vp after a successful execution.
1068 * The vnode must be locked on entry and remain locked on exit.
1069 */
1070int
80ae59d7 1071tmpfs_chmod(struct vnode *vp, mode_t vamode, struct ucred *cred)
7a2de9a4 1072{
7a2de9a4 1073 struct tmpfs_node *node;
80ae59d7
MD
1074 mode_t cur_mode;
1075 int error;
7a2de9a4
MD
1076
1077 KKASSERT(vn_islocked(vp));
1078
1079 node = VP_TO_TMPFS_NODE(vp);
1080
1081 /* Disallow this operation if the file system is mounted read-only. */
1082 if (vp->v_mount->mnt_flag & MNT_RDONLY)
1083 return EROFS;
1084
1085 /* Immutable or append-only files cannot be modified, either. */
1086 if (node->tn_flags & (IMMUTABLE | APPEND))
1087 return EPERM;
1088
80ae59d7
MD
1089 cur_mode = node->tn_mode;
1090 error = vop_helper_chmod(vp, vamode, cred, node->tn_uid, node->tn_gid,
1091 &cur_mode);
7a2de9a4 1092
80ae59d7
MD
1093 if (error == 0 &&
1094 (node->tn_mode & ALLPERMS) != (cur_mode & ALLPERMS)) {
1095 TMPFS_NODE_LOCK(node);
1096 node->tn_mode &= ~ALLPERMS;
1097 node->tn_mode |= cur_mode & ALLPERMS;
7a2de9a4 1098
80ae59d7
MD
1099 node->tn_status |= TMPFS_NODE_CHANGED;
1100 TMPFS_NODE_UNLOCK(node);
7a2de9a4
MD
1101 }
1102
7a2de9a4
MD
1103 KKASSERT(vn_islocked(vp));
1104
1105 return 0;
1106}
1107
1108/* --------------------------------------------------------------------- */
1109
1110/*
1111 * Change ownership of the given vnode. At least one of uid or gid must
1112 * be different than VNOVAL. If one is set to that value, the attribute
1113 * is unchanged.
1114 * Caller should execute tmpfs_update on vp after a successful execution.
1115 * The vnode must be locked on entry and remain locked on exit.
1116 */
1117int
1118tmpfs_chown(struct vnode *vp, uid_t uid, gid_t gid, struct ucred *cred)
1119{
bd48f29c
MD
1120 mode_t cur_mode;
1121 uid_t cur_uid;
1122 gid_t cur_gid;
7a2de9a4 1123 struct tmpfs_node *node;
bd48f29c 1124 int error;
7a2de9a4
MD
1125
1126 KKASSERT(vn_islocked(vp));
7a2de9a4
MD
1127 node = VP_TO_TMPFS_NODE(vp);
1128
7a2de9a4
MD
1129 /* Disallow this operation if the file system is mounted read-only. */
1130 if (vp->v_mount->mnt_flag & MNT_RDONLY)
1131 return EROFS;
1132
1133 /* Immutable or append-only files cannot be modified, either. */
1134 if (node->tn_flags & (IMMUTABLE | APPEND))
1135 return EPERM;
1136
bd48f29c
MD
1137 cur_uid = node->tn_uid;
1138 cur_gid = node->tn_gid;
1139 cur_mode = node->tn_mode;
1140 error = vop_helper_chown(vp, uid, gid, cred,
1141 &cur_uid, &cur_gid, &cur_mode);
1142
1143 if (error == 0) {
1144 TMPFS_NODE_LOCK(node);
1145 if (cur_uid != node->tn_uid ||
1146 cur_gid != node->tn_gid ||
1147 cur_mode != node->tn_mode) {
3b9337bb
YT
1148 node->tn_uid = cur_uid;
1149 node->tn_gid = cur_gid;
bd48f29c
MD
1150 node->tn_mode = cur_mode;
1151 node->tn_status |= TMPFS_NODE_CHANGED;
7a2de9a4 1152 }
bd48f29c 1153 TMPFS_NODE_UNLOCK(node);
7a2de9a4 1154 }
7a2de9a4 1155
bd48f29c 1156 return error;
7a2de9a4
MD
1157}
1158
1159/* --------------------------------------------------------------------- */
1160
1161/*
1162 * Change size of the given vnode.
1163 * Caller should execute tmpfs_update on vp after a successful execution.
1164 * The vnode must be locked on entry and remain locked on exit.
1165 */
1166int
1167tmpfs_chsize(struct vnode *vp, u_quad_t size, struct ucred *cred)
1168{
1169 int error;
1170 struct tmpfs_node *node;
1171
1172 KKASSERT(vn_islocked(vp));
1173
1174 node = VP_TO_TMPFS_NODE(vp);
1175
1176 /* Decide whether this is a valid operation based on the file type. */
1177 error = 0;
1178 switch (vp->v_type) {
1179 case VDIR:
1180 return EISDIR;
1181
1182 case VREG:
1183 if (vp->v_mount->mnt_flag & MNT_RDONLY)
1184 return EROFS;
1185 break;
1186
1187 case VBLK:
1188 /* FALLTHROUGH */
1189 case VCHR:
1190 /* FALLTHROUGH */
1191 case VFIFO:
1192 /* Allow modifications of special files even if in the file
1193 * system is mounted read-only (we are not modifying the
1194 * files themselves, but the objects they represent). */
1195 return 0;
1196
1197 default:
1198 /* Anything else is unsupported. */
1199 return EOPNOTSUPP;
1200 }
1201
1202 /* Immutable or append-only files cannot be modified, either. */
1203 if (node->tn_flags & (IMMUTABLE | APPEND))
1204 return EPERM;
1205
1206 error = tmpfs_truncate(vp, size);
1207 /* tmpfs_truncate will raise the NOTE_EXTEND and NOTE_ATTRIB kevents
1208 * for us, as will update tn_status; no need to do that here. */
1209
1210 KKASSERT(vn_islocked(vp));
1211
1212 return error;
1213}
1214
1215/* --------------------------------------------------------------------- */
1216
1217/*
1218 * Change access and modification times of the given vnode.
1219 * Caller should execute tmpfs_update on vp after a successful execution.
1220 * The vnode must be locked on entry and remain locked on exit.
1221 */
int
tmpfs_chtimes(struct vnode *vp, struct timespec *atime, struct timespec *mtime,
	      int vaflags, struct ucred *cred)
{
	struct tmpfs_node *node;

	KKASSERT(vn_islocked(vp));

	node = VP_TO_TMPFS_NODE(vp);

	/* Disallow this operation if the file system is mounted read-only. */
	if (vp->v_mount->mnt_flag & MNT_RDONLY)
		return EROFS;

	/* Immutable or append-only files cannot be modified, either. */
	if (node->tn_flags & (IMMUTABLE | APPEND))
		return EPERM;

	/*
	 * Flag which timestamps need updating; the values themselves are
	 * applied by tmpfs_itimes() below.
	 *
	 * NOTE(review): a time is taken only when BOTH tv_sec and tv_nsec
	 * differ from VNOVAL; presumably callers always set the two fields
	 * together — confirm against the setattr path.
	 */
	TMPFS_NODE_LOCK(node);
	if (atime->tv_sec != VNOVAL && atime->tv_nsec != VNOVAL)
		node->tn_status |= TMPFS_NODE_ACCESSED;

	if (mtime->tv_sec != VNOVAL && mtime->tv_nsec != VNOVAL)
		node->tn_status |= TMPFS_NODE_MODIFIED;

	TMPFS_NODE_UNLOCK(node);

	tmpfs_itimes(vp, atime, mtime);

	KKASSERT(vn_islocked(vp));

	return 0;
}
1255
1256/* --------------------------------------------------------------------- */
1257/* Sync timestamps */
1258void
1259tmpfs_itimes(struct vnode *vp, const struct timespec *acc,
1260 const struct timespec *mod)
1261{
1262 struct tmpfs_node *node;
1263 struct timespec now;
1264
1265 node = VP_TO_TMPFS_NODE(vp);
1266
1267 if ((node->tn_status & (TMPFS_NODE_ACCESSED | TMPFS_NODE_MODIFIED |
1268 TMPFS_NODE_CHANGED)) == 0)
1269 return;
1270
1271 vfs_timestamp(&now);
1272
1273 TMPFS_NODE_LOCK(node);
1274 if (node->tn_status & TMPFS_NODE_ACCESSED) {
1275 if (acc == NULL)
1276 acc = &now;
1277 node->tn_atime = acc->tv_sec;
1278 node->tn_atimensec = acc->tv_nsec;
1279 }
1280 if (node->tn_status & TMPFS_NODE_MODIFIED) {
1281 if (mod == NULL)
1282 mod = &now;
1283 node->tn_mtime = mod->tv_sec;
1284 node->tn_mtimensec = mod->tv_nsec;
1285 }
1286 if (node->tn_status & TMPFS_NODE_CHANGED) {
1287 node->tn_ctime = now.tv_sec;
1288 node->tn_ctimensec = now.tv_nsec;
1289 }
1290 node->tn_status &=
1291 ~(TMPFS_NODE_ACCESSED | TMPFS_NODE_MODIFIED | TMPFS_NODE_CHANGED);
1292 TMPFS_NODE_UNLOCK(node);
1293}
1294
1295/* --------------------------------------------------------------------- */
1296
/* Sync any pending timestamp updates on vp using the current time. */
void
tmpfs_update(struct vnode *vp)
{

	tmpfs_itimes(vp, NULL, NULL);
}
1303
1304/* --------------------------------------------------------------------- */
1305
1306int
1307tmpfs_truncate(struct vnode *vp, off_t length)
1308{
1309 int error;
1310 struct tmpfs_node *node;
1311
1312 node = VP_TO_TMPFS_NODE(vp);
1313
1314 if (length < 0) {
1315 error = EINVAL;
1316 goto out;
1317 }
1318
1319 if (node->tn_size == length) {
1320 error = 0;
1321 goto out;
1322 }
1323
1324 if (length > VFS_TO_TMPFS(vp->v_mount)->tm_maxfilesize)
1325 return (EFBIG);
1326
1327
1328 error = tmpfs_reg_resize(vp, length, 1);
1329
1330 if (error == 0) {
1331 TMPFS_NODE_LOCK(node);
1332 node->tn_status |= TMPFS_NODE_CHANGED | TMPFS_NODE_MODIFIED;
1333 TMPFS_NODE_UNLOCK(node);
1334 }
1335
1336out:
1337 tmpfs_update(vp);
1338
1339 return error;
1340}
1341
1342/* --------------------------------------------------------------------- */
1343
1344static ino_t
f7db522f 1345tmpfs_fetch_ino(struct tmpfs_mount *tmp)
7a2de9a4 1346{
f7db522f 1347 ino_t ret;
7a2de9a4 1348
f7db522f 1349 ret = tmp->tm_ino++;
7a2de9a4 1350
f7db522f 1351 return (ret);
7a2de9a4 1352}
29ca4fd6
JH
1353
1354static int
1355tmpfs_dirtree_compare(struct tmpfs_dirent *a, struct tmpfs_dirent *b)
1356{
1357 if (a->td_namelen > b->td_namelen)
1358 return 1;
1359 else if (a->td_namelen < b->td_namelen)
1360 return -1;
1361 else
1362 return strncmp(a->td_name, b->td_name, a->td_namelen);
1363}