tmpfs - fix chgrp() or chown() when one of uid/gid is to be unchanged
[dragonfly.git] sys/vfs/tmpfs/tmpfs_subr.c

/*	$NetBSD: tmpfs_subr.c,v 1.35 2007/07/09 21:10:50 ad Exp $	*/

/*-
 * Copyright (c) 2005 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Julio M. Merino Vidal, developed as part of Google's Summer of Code
 * 2005 program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Efficient memory file system supporting functions.
 */

#include <sys/kernel.h>
#include <sys/param.h>
#include <sys/namei.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/spinlock2.h>
#include <sys/stat.h>
#include <sys/systm.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>

#include <sys/mplock2.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>

#include <vfs/tmpfs/tmpfs.h>
#include <vfs/tmpfs/tmpfs_fifoops.h>
#include <vfs/tmpfs/tmpfs_vnops.h>

static ino_t t_ino = 2;
static struct spinlock ino_lock;
static ino_t tmpfs_fetch_ino(void);

/* --------------------------------------------------------------------- */

/*
 * Allocates a new node of type 'type' inside the 'tmp' mount point, with
 * its owner set to 'uid', its group to 'gid' and its mode set to 'mode'.
 *
 * If the node type is set to 'VDIR', then the parent parameter must point
 * to the parent directory of the node being created.  It may only be NULL
 * while allocating the root node.
 *
 * If the node type is set to 'VBLK' or 'VCHR', then the rdev parameter
 * specifies the device the node represents.
 *
 * If the node type is set to 'VLNK', then the parameter target specifies
 * the file name of the target file for the symbolic link that is being
 * created.
 *
 * Note that new nodes are retrieved from the available list if it has
 * items or, if it is empty, from the node pool as long as there is enough
 * space to create them.
 *
 * Returns zero on success or an appropriate error code on failure.
 */
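/*
 * Illustrative sketch only: a typical call, modeled on the one issued
 * from tmpfs_alloc_file() further below (argument names follow that
 * caller):
 *
 *	error = tmpfs_alloc_node(tmp, vap->va_type, cred->cr_uid,
 *				 dnode->tn_gid, vap->va_mode, parent,
 *				 target, vap->va_rmajor, vap->va_rminor,
 *				 &node);
 */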
int
tmpfs_alloc_node(struct tmpfs_mount *tmp, enum vtype type,
        uid_t uid, gid_t gid, mode_t mode, struct tmpfs_node *parent,
        char *target, int rmajor, int rminor, struct tmpfs_node **node)
{
        struct tmpfs_node *nnode;
        struct timespec ts;
        udev_t rdev;

        /* If the root directory of the 'tmp' file system is not yet
         * allocated, this must be the request to do it. */
        KKASSERT(IMPLIES(tmp->tm_root == NULL, parent == NULL && type == VDIR));

        KKASSERT(IFF(type == VLNK, target != NULL));
        KKASSERT(IFF(type == VBLK || type == VCHR, rmajor != VNOVAL));

        if (tmp->tm_nodes_inuse >= tmp->tm_nodes_max)
                return (ENOSPC);

        nnode = objcache_get(tmp->tm_node_pool, M_WAITOK | M_NULLOK);
        if (nnode == NULL)
                return (ENOSPC);

        /* Generic initialization. */
        nnode->tn_type = type;
        vfs_timestamp(&ts);
        nnode->tn_ctime = nnode->tn_mtime = nnode->tn_atime
                = ts.tv_sec;
        nnode->tn_ctimensec = nnode->tn_mtimensec = nnode->tn_atimensec
                = ts.tv_nsec;
        nnode->tn_uid = uid;
        nnode->tn_gid = gid;
        nnode->tn_mode = mode;
        nnode->tn_id = tmpfs_fetch_ino();
        nnode->tn_advlock.init_done = 0;

        /* Type-specific initialization. */
        switch (nnode->tn_type) {
        case VBLK:
        case VCHR:
                rdev = makeudev(rmajor, rminor);
                if (rdev == NOUDEV) {
                        objcache_put(tmp->tm_node_pool, nnode);
                        return (EINVAL);
                }
                nnode->tn_rdev = rdev;
                break;

        case VDIR:
                TAILQ_INIT(&nnode->tn_dir.tn_dirhead);
                KKASSERT(parent != nnode);
                KKASSERT(IMPLIES(parent == NULL, tmp->tm_root == NULL));
                nnode->tn_dir.tn_parent = parent;
                nnode->tn_dir.tn_readdir_lastn = 0;
                nnode->tn_dir.tn_readdir_lastp = NULL;
                nnode->tn_links++;
                nnode->tn_size = 0;
                if (parent) {
                        TMPFS_NODE_LOCK(parent);
                        parent->tn_links++;
                        TMPFS_NODE_UNLOCK(parent);
                }
                break;

        case VFIFO:
                /* FALLTHROUGH */
        case VSOCK:
                break;

        case VLNK:
                nnode->tn_size = strlen(target);
                nnode->tn_link = kmalloc(nnode->tn_size + 1, tmp->tm_name_zone,
                                         M_WAITOK | M_NULLOK);
                if (nnode->tn_link == NULL) {
                        objcache_put(tmp->tm_node_pool, nnode);
                        return (ENOSPC);
                }
                bcopy(target, nnode->tn_link, nnode->tn_size);
                nnode->tn_link[nnode->tn_size] = '\0';
                break;

        case VREG:
                nnode->tn_reg.tn_aobj =
                        swap_pager_alloc(NULL, 0, VM_PROT_DEFAULT, 0);
                nnode->tn_reg.tn_aobj_pages = 0;
                nnode->tn_size = 0;
                break;

        default:
                panic("tmpfs_alloc_node: type %p %d", nnode, (int)nnode->tn_type);
        }

        TMPFS_NODE_LOCK(nnode);
        TMPFS_LOCK(tmp);
        LIST_INSERT_HEAD(&tmp->tm_nodes_used, nnode, tn_entries);
        tmp->tm_nodes_inuse++;
        TMPFS_UNLOCK(tmp);
        TMPFS_NODE_UNLOCK(nnode);

        *node = nnode;
        return 0;
}

/* --------------------------------------------------------------------- */

/*
 * Destroys the node pointed to by node from the file system 'tmp'.
 * If the node does not belong to the given mount point, the results are
 * unpredictable.
 *
 * If the node references a directory, it must contain no entries because
 * their removal could require a recursive algorithm, something forbidden
 * in kernel space.  Furthermore, there is no need to provide such
 * functionality (recursive removal) because the only primitives offered
 * to the user are the removal of empty directories and the deletion of
 * individual files.
 *
 * Note that nodes are not really deleted; in fact, when a node has been
 * allocated, it cannot be deleted during the whole life of the file
 * system.  Instead, they are moved to the available list and remain there
 * until reused.
 */
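/*
 * Locking note: callers enter with the node lock held (see the
 * TMPFS_ASSERT_ELOCKED() check below) and the lock is consumed here via
 * TMPFS_NODE_UNLOCK(); this is what the "eats node lock" comments at the
 * call sites in tmpfs_alloc_file() refer to.
 */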
void
tmpfs_free_node(struct tmpfs_mount *tmp, struct tmpfs_node *node)
{
        vm_pindex_t pages = 0;

#ifdef INVARIANTS
        TMPFS_ASSERT_ELOCKED(node);
        KKASSERT(node->tn_vnode == NULL);
        KKASSERT((node->tn_vpstate & TMPFS_VNODE_ALLOCATING) == 0);
#endif

        TMPFS_LOCK(tmp);
        LIST_REMOVE(node, tn_entries);
        tmp->tm_nodes_inuse--;
        TMPFS_UNLOCK(tmp);
        TMPFS_NODE_UNLOCK(node);

        switch (node->tn_type) {
        case VNON:
                /* Do not do anything.  VNON is provided to let the
                 * allocation routine clean itself easily by avoiding
                 * duplicating code in it. */
                /* FALLTHROUGH */
        case VBLK:
                /* FALLTHROUGH */
        case VCHR:
                /* FALLTHROUGH */
                break;
        case VDIR:
                /*
                 * The parent link can be NULL if this is the root
                 * node.
                 */
                node->tn_links--;
                node->tn_size = 0;
                KKASSERT(node->tn_dir.tn_parent || node == tmp->tm_root);
                if (node->tn_dir.tn_parent) {
                        TMPFS_NODE_LOCK(node->tn_dir.tn_parent);
                        node->tn_dir.tn_parent->tn_links--;

                        /*
                         * If the parent directory has no more links and
                         * no vnode ref nothing is going to come along
                         * and clean it up unless we do it here.
                         */
                        if (node->tn_dir.tn_parent->tn_links == 0 &&
                            node->tn_dir.tn_parent->tn_vnode == NULL) {
                                tmpfs_free_node(tmp, node->tn_dir.tn_parent);
                                /* eats parent lock */
                        } else {
                                TMPFS_NODE_UNLOCK(node->tn_dir.tn_parent);
                        }
                        node->tn_dir.tn_parent = NULL;
                }

                /*
                 * If the root node is being destroyed don't leave a
                 * dangling pointer in tmpfs_mount.
                 */
                if (node == tmp->tm_root)
                        tmp->tm_root = NULL;
                break;
        case VFIFO:
                /* FALLTHROUGH */
        case VSOCK:
                break;

        case VLNK:
                kfree(node->tn_link, tmp->tm_name_zone);
                node->tn_link = NULL;
                node->tn_size = 0;
                break;

        case VREG:
                if (node->tn_reg.tn_aobj != NULL)
                        vm_object_deallocate(node->tn_reg.tn_aobj);
                node->tn_reg.tn_aobj = NULL;
                pages = node->tn_reg.tn_aobj_pages;
                break;

        default:
                panic("tmpfs_free_node: type %p %d", node, (int)node->tn_type);
        }

        /*
         * Clean up fields for the next allocation.  The objcache only ctors
         * new allocations.
         */
        tmpfs_node_ctor(node, NULL, 0);
        objcache_put(tmp->tm_node_pool, node);
        /* node is now invalid */

        TMPFS_LOCK(tmp);
        tmp->tm_pages_used -= pages;
        TMPFS_UNLOCK(tmp);
}

/* --------------------------------------------------------------------- */

/*
 * Allocates a new directory entry for the node 'node' with the name 'name'.
 * The new directory entry is returned in *de.
 *
 * The link count of node is increased by one to reflect the new object
 * referencing it.
 *
 * Returns zero on success or an appropriate error code on failure.
 */
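/*
 * Sketch of typical usage, as seen in tmpfs_alloc_file() further below:
 * the entry is allocated first and attached to the parent directory once
 * everything else has succeeded:
 *
 *	error = tmpfs_alloc_dirent(tmp, node, ncp->nc_name, ncp->nc_nlen, &de);
 *	...
 *	tmpfs_dir_attach(dnode, de);
 */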
int
tmpfs_alloc_dirent(struct tmpfs_mount *tmp, struct tmpfs_node *node,
        const char *name, uint16_t len, struct tmpfs_dirent **de)
{
        struct tmpfs_dirent *nde;

        nde = objcache_get(tmp->tm_dirent_pool, M_WAITOK);
        nde->td_name = kmalloc(len + 1, tmp->tm_name_zone, M_WAITOK | M_NULLOK);
        if (nde->td_name == NULL) {
                objcache_put(tmp->tm_dirent_pool, nde);
                *de = NULL;
                return (ENOSPC);
        }
        nde->td_namelen = len;
        bcopy(name, nde->td_name, len);
        nde->td_name[len] = '\0';

        nde->td_node = node;

        TMPFS_NODE_LOCK(node);
        node->tn_links++;
        TMPFS_NODE_UNLOCK(node);

        *de = nde;

        return 0;
}

/* --------------------------------------------------------------------- */

/*
 * Frees a directory entry.  It is the caller's responsibility to destroy
 * the node referenced by it if needed.
 *
 * The link count of the node referenced by the directory entry is
 * decreased by one to reflect the removal of an object that referenced it.
 */
void
tmpfs_free_dirent(struct tmpfs_mount *tmp, struct tmpfs_dirent *de)
{
        struct tmpfs_node *node;

        node = de->td_node;

        TMPFS_NODE_LOCK(node);
        TMPFS_ASSERT_ELOCKED(node);
        KKASSERT(node->tn_links > 0);
        node->tn_links--;
        TMPFS_NODE_UNLOCK(node);

        kfree(de->td_name, tmp->tm_name_zone);
        de->td_namelen = 0;
        de->td_name = NULL;
        de->td_node = NULL;
        objcache_put(tmp->tm_dirent_pool, de);
}

/* --------------------------------------------------------------------- */

/*
 * Allocates a new vnode for the node 'node' or returns a new reference to
 * an existing one if the node already had a vnode referencing it.  The
 * resulting locked vnode is returned in *vpp.
 *
 * Returns zero on success or an appropriate error code on failure.
 */
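/*
 * Representative call, as issued from tmpfs_alloc_file() below (sketch;
 * LK_EXCLUSIVE is the lock flag that caller passes):
 *
 *	error = tmpfs_alloc_vp(dvp->v_mount, node, LK_EXCLUSIVE, vpp);
 */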
int
tmpfs_alloc_vp(struct mount *mp, struct tmpfs_node *node, int lkflag,
               struct vnode **vpp)
{
        int error = 0;
        struct vnode *vp;

loop:
        /*
         * Interlocked extraction from node.  This can race many things.
         * We have to get a soft reference on the vnode while we hold
         * the node locked, then acquire it properly and check for races.
         */
        TMPFS_NODE_LOCK(node);
        if ((vp = node->tn_vnode) != NULL) {
                KKASSERT((node->tn_vpstate & TMPFS_VNODE_DOOMED) == 0);
                vhold_interlocked(vp);
                TMPFS_NODE_UNLOCK(node);

                if (vget(vp, lkflag | LK_EXCLUSIVE) != 0) {
                        vdrop(vp);
                        goto loop;
                }
                if (node->tn_vnode != vp) {
                        vput(vp);
                        vdrop(vp);
                        goto loop;
                }
                vdrop(vp);
                goto out;
        }
        /* vp is NULL */

        /*
         * This should never happen.
         */
        if (node->tn_vpstate & TMPFS_VNODE_DOOMED) {
                TMPFS_NODE_UNLOCK(node);
                error = ENOENT;
                goto out;
        }

        /*
         * Interlock against other calls to tmpfs_alloc_vp() trying to
         * allocate and assign a vp to node.
         */
        if (node->tn_vpstate & TMPFS_VNODE_ALLOCATING) {
                node->tn_vpstate |= TMPFS_VNODE_WANT;
                error = tsleep(&node->tn_vpstate, PINTERLOCKED | PCATCH,
                               "tmpfs_alloc_vp", 0);
                TMPFS_NODE_UNLOCK(node);
                if (error)
                        return error;
                goto loop;
        }
        node->tn_vpstate |= TMPFS_VNODE_ALLOCATING;
        TMPFS_NODE_UNLOCK(node);

        /*
         * Allocate a new vnode (may block).  The ALLOCATING flag should
         * prevent a race against someone else assigning node->tn_vnode.
         */
        error = getnewvnode(VT_TMPFS, mp, &vp, VLKTIMEOUT, LK_CANRECURSE);
        if (error != 0)
                goto unlock;

        KKASSERT(node->tn_vnode == NULL);
        KKASSERT(vp != NULL);
        vp->v_data = node;
        vp->v_type = node->tn_type;

        /* Type-specific initialization. */
        switch (node->tn_type) {
        case VBLK:
                /* FALLTHROUGH */
        case VCHR:
                /* FALLTHROUGH */
        case VSOCK:
                break;
        case VREG:
                vinitvmio(vp, node->tn_size, BMASK, -1);
                break;
        case VLNK:
                break;
        case VFIFO:
                vp->v_ops = &mp->mnt_vn_fifo_ops;
                break;
        case VDIR:
                break;

        default:
                panic("tmpfs_alloc_vp: type %p %d", node, (int)node->tn_type);
        }

        insmntque(vp, mp);

unlock:
        TMPFS_NODE_LOCK(node);

        KKASSERT(node->tn_vpstate & TMPFS_VNODE_ALLOCATING);
        node->tn_vpstate &= ~TMPFS_VNODE_ALLOCATING;
        node->tn_vnode = vp;

        if (node->tn_vpstate & TMPFS_VNODE_WANT) {
                node->tn_vpstate &= ~TMPFS_VNODE_WANT;
                TMPFS_NODE_UNLOCK(node);
                wakeup(&node->tn_vpstate);
        } else {
                TMPFS_NODE_UNLOCK(node);
        }

out:
        *vpp = vp;

        KKASSERT(IFF(error == 0, *vpp != NULL && vn_islocked(*vpp)));
#ifdef INVARIANTS
        TMPFS_NODE_LOCK(node);
        KKASSERT(*vpp == node->tn_vnode);
        TMPFS_NODE_UNLOCK(node);
#endif

        return error;
}

/* --------------------------------------------------------------------- */

/*
 * Destroys the association between the vnode vp and the node it
 * references.
 */
void
tmpfs_free_vp(struct vnode *vp)
{
        struct tmpfs_node *node;

        node = VP_TO_TMPFS_NODE(vp);

        TMPFS_NODE_LOCK(node);
        KKASSERT(lockcount(TMPFS_NODE_MTX(node)) > 0);
        node->tn_vnode = NULL;
        TMPFS_NODE_UNLOCK(node);
        vp->v_data = NULL;
}

/* --------------------------------------------------------------------- */

/*
 * Allocates a new file of the type specified in 'vap' and adds it to the
 * parent directory 'dvp'; this addition is done using the component name
 * given in 'ncp'.  The ownership of the new file is automatically assigned
 * based on the credentials of the caller (through 'cred'), the group is
 * set based on the parent directory and the mode is determined from the
 * 'vap' argument.  If successful, *vpp holds a vnode to the newly created
 * file and zero is returned.  Otherwise *vpp is NULL and the function
 * returns an appropriate error code.
 */
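/*
 * The body below proceeds in order: tmpfs_alloc_node() creates the node,
 * tmpfs_alloc_dirent() creates the directory entry pointing at it,
 * tmpfs_alloc_vp() obtains a locked vnode, and tmpfs_dir_attach() finally
 * inserts the entry into the parent directory.  On an intermediate
 * failure the pieces already allocated are released in reverse order.
 */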
int
tmpfs_alloc_file(struct vnode *dvp, struct vnode **vpp, struct vattr *vap,
                 struct namecache *ncp, struct ucred *cred, char *target)
{
        int error;
        struct tmpfs_dirent *de;
        struct tmpfs_mount *tmp;
        struct tmpfs_node *dnode;
        struct tmpfs_node *node;
        struct tmpfs_node *parent;

        tmp = VFS_TO_TMPFS(dvp->v_mount);
        dnode = VP_TO_TMPFS_DIR(dvp);
        *vpp = NULL;

        /* If the entry we are creating is a directory, we cannot overflow
         * the number of links of its parent, because it will get a new
         * link. */
        if (vap->va_type == VDIR) {
                /* Ensure that we do not overflow the maximum number of links
                 * imposed by the system. */
                KKASSERT(dnode->tn_links <= LINK_MAX);
                if (dnode->tn_links == LINK_MAX) {
                        return EMLINK;
                }

                parent = dnode;
                KKASSERT(parent != NULL);
        } else
                parent = NULL;

        /* Allocate a node that represents the new file. */
        error = tmpfs_alloc_node(tmp, vap->va_type, cred->cr_uid,
                                 dnode->tn_gid, vap->va_mode, parent, target,
                                 vap->va_rmajor, vap->va_rminor, &node);
        if (error != 0)
                return error;
        TMPFS_NODE_LOCK(node);

        /* Allocate a directory entry that points to the new file. */
        error = tmpfs_alloc_dirent(tmp, node, ncp->nc_name, ncp->nc_nlen, &de);
        if (error != 0) {
                tmpfs_free_node(tmp, node);
                /* eats node lock */
                return error;
        }

        /* Allocate a vnode for the new file. */
        error = tmpfs_alloc_vp(dvp->v_mount, node, LK_EXCLUSIVE, vpp);
        if (error != 0) {
                tmpfs_free_dirent(tmp, de);
                tmpfs_free_node(tmp, node);
                /* eats node lock */
                return error;
        }

        /* Now that all required items are allocated, we can proceed to
         * insert the new node into the directory, an operation that
         * cannot fail. */
        tmpfs_dir_attach(dnode, de);
        TMPFS_NODE_UNLOCK(node);

        return error;
}

/* --------------------------------------------------------------------- */

/*
 * Attaches the directory entry de to the directory represented by dnode.
 * Note that this does not change the link count of the node pointed to by
 * the directory entry, as this is done by tmpfs_alloc_dirent.
 */
void
tmpfs_dir_attach(struct tmpfs_node *dnode, struct tmpfs_dirent *de)
{
        TMPFS_NODE_LOCK(dnode);
        TAILQ_INSERT_TAIL(&dnode->tn_dir.tn_dirhead, de, td_entries);

        TMPFS_ASSERT_ELOCKED(dnode);
        dnode->tn_size += sizeof(struct tmpfs_dirent);
        dnode->tn_status |= TMPFS_NODE_ACCESSED | TMPFS_NODE_CHANGED |
                            TMPFS_NODE_MODIFIED;
        TMPFS_NODE_UNLOCK(dnode);
}

/* --------------------------------------------------------------------- */

/*
 * Detaches the directory entry de from the directory represented by dnode.
 * Note that this does not change the link count of the node pointed to by
 * the directory entry, as this is done by tmpfs_free_dirent.
 */
void
tmpfs_dir_detach(struct tmpfs_node *dnode, struct tmpfs_dirent *de)
{
        TMPFS_NODE_LOCK(dnode);
        if (dnode->tn_dir.tn_readdir_lastp == de) {
                dnode->tn_dir.tn_readdir_lastn = 0;
                dnode->tn_dir.tn_readdir_lastp = NULL;
        }
        TAILQ_REMOVE(&dnode->tn_dir.tn_dirhead, de, td_entries);

        TMPFS_ASSERT_ELOCKED(dnode);
        dnode->tn_size -= sizeof(struct tmpfs_dirent);
        dnode->tn_status |= TMPFS_NODE_ACCESSED | TMPFS_NODE_CHANGED |
                            TMPFS_NODE_MODIFIED;
        TMPFS_NODE_UNLOCK(dnode);
}

/* --------------------------------------------------------------------- */

/*
 * Looks for a directory entry in the directory represented by node.
 * 'ncp' describes the name of the entry to look for.  Note that the .
 * and .. components are not allowed as they do not physically exist
 * within directories.
 *
 * Returns a pointer to the entry when found, otherwise NULL.
 */
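/*
 * The lookup below is a plain linear scan of the directory's TAILQ; the
 * optional 'f' argument, when non-NULL, restricts the match to entries
 * whose td_node is that specific node.
 */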
struct tmpfs_dirent *
tmpfs_dir_lookup(struct tmpfs_node *node, struct tmpfs_node *f,
                 struct namecache *ncp)
{
        struct tmpfs_dirent *de;
        int len = ncp->nc_nlen;

        TMPFS_VALIDATE_DIR(node);

        TAILQ_FOREACH(de, &node->tn_dir.tn_dirhead, td_entries) {
                if (f != NULL && de->td_node != f)
                        continue;
                if (len == de->td_namelen) {
                        if (!memcmp(ncp->nc_name, de->td_name, len))
                                break;
                }
        }

        TMPFS_NODE_LOCK(node);
        node->tn_status |= TMPFS_NODE_ACCESSED;
        TMPFS_NODE_UNLOCK(node);

        return de;
}

/* --------------------------------------------------------------------- */

/*
 * Helper function for tmpfs_readdir.  Creates a '.' entry for the given
 * directory and returns it in the uio space.  The function returns 0
 * on success, -1 if there was not enough space in the uio structure to
 * hold the directory entry or an appropriate error code if another
 * error happens.
 */
int
tmpfs_dir_getdotdent(struct tmpfs_node *node, struct uio *uio)
{
        int error;
        struct dirent dent;
        int dirsize;

        TMPFS_VALIDATE_DIR(node);
        KKASSERT(uio->uio_offset == TMPFS_DIRCOOKIE_DOT);

        dent.d_ino = node->tn_id;
        dent.d_type = DT_DIR;
        dent.d_namlen = 1;
        dent.d_name[0] = '.';
        dent.d_name[1] = '\0';
        dirsize = _DIRENT_DIRSIZ(&dent);

        if (dirsize > uio->uio_resid)
                error = -1;
        else {
                error = uiomove((caddr_t)&dent, dirsize, uio);
                if (error == 0)
                        uio->uio_offset = TMPFS_DIRCOOKIE_DOTDOT;
        }

        TMPFS_NODE_LOCK(node);
        node->tn_status |= TMPFS_NODE_ACCESSED;
        TMPFS_NODE_UNLOCK(node);

        return error;
}

/* --------------------------------------------------------------------- */

/*
 * Helper function for tmpfs_readdir.  Creates a '..' entry for the given
 * directory and returns it in the uio space.  The function returns 0
 * on success, -1 if there was not enough space in the uio structure to
 * hold the directory entry or an appropriate error code if another
 * error happens.
 */
int
tmpfs_dir_getdotdotdent(struct tmpfs_mount *tmp, struct tmpfs_node *node,
                        struct uio *uio)
{
        int error;
        struct dirent dent;
        int dirsize;

        TMPFS_VALIDATE_DIR(node);
        KKASSERT(uio->uio_offset == TMPFS_DIRCOOKIE_DOTDOT);

        if (node->tn_dir.tn_parent) {
                TMPFS_NODE_LOCK(node->tn_dir.tn_parent);
                dent.d_ino = node->tn_dir.tn_parent->tn_id;
                TMPFS_NODE_UNLOCK(node->tn_dir.tn_parent);
        } else {
                dent.d_ino = tmp->tm_root->tn_id;
        }

        dent.d_type = DT_DIR;
        dent.d_namlen = 2;
        dent.d_name[0] = '.';
        dent.d_name[1] = '.';
        dent.d_name[2] = '\0';
        dirsize = _DIRENT_DIRSIZ(&dent);

        if (dirsize > uio->uio_resid)
                error = -1;
        else {
                error = uiomove((caddr_t)&dent, dirsize, uio);
                if (error == 0) {
                        struct tmpfs_dirent *de;

                        de = TAILQ_FIRST(&node->tn_dir.tn_dirhead);
                        if (de == NULL)
                                uio->uio_offset = TMPFS_DIRCOOKIE_EOF;
                        else
                                uio->uio_offset = tmpfs_dircookie(de);
                }
        }

        TMPFS_NODE_LOCK(node);
        node->tn_status |= TMPFS_NODE_ACCESSED;
        TMPFS_NODE_UNLOCK(node);

        return error;
}

/* --------------------------------------------------------------------- */

/*
 * Lookup a directory entry by its associated cookie.
 */
struct tmpfs_dirent *
tmpfs_dir_lookupbycookie(struct tmpfs_node *node, off_t cookie)
{
        struct tmpfs_dirent *de;

        if (cookie == node->tn_dir.tn_readdir_lastn &&
            node->tn_dir.tn_readdir_lastp != NULL) {
                return node->tn_dir.tn_readdir_lastp;
        }

        TAILQ_FOREACH(de, &node->tn_dir.tn_dirhead, td_entries) {
                if (tmpfs_dircookie(de) == cookie) {
                        break;
                }
        }

        return de;
}

/* --------------------------------------------------------------------- */

/*
 * Helper function for tmpfs_readdir.  Returns as many directory entries
 * as can fit in the uio space.  The read starts at uio->uio_offset.
 * The function returns 0 on success, -1 if there was not enough space
 * in the uio structure to hold the directory entry or an appropriate
 * error code if another error happens.
 */
int
tmpfs_dir_getdents(struct tmpfs_node *node, struct uio *uio, off_t *cntp)
{
        int error;
        off_t startcookie;
        struct tmpfs_dirent *de;

        TMPFS_VALIDATE_DIR(node);

        /* Locate the first directory entry we have to return.  We have cached
         * the last readdir in the node, so use those values if appropriate.
         * Otherwise do a linear scan to find the requested entry. */
        startcookie = uio->uio_offset;
        KKASSERT(startcookie != TMPFS_DIRCOOKIE_DOT);
        KKASSERT(startcookie != TMPFS_DIRCOOKIE_DOTDOT);
        if (startcookie == TMPFS_DIRCOOKIE_EOF) {
                return 0;
        } else {
                de = tmpfs_dir_lookupbycookie(node, startcookie);
        }
        if (de == NULL) {
                return EINVAL;
        }

        /* Read as many entries as possible; i.e., until we reach the end of
         * the directory or we exhaust uio space. */
        do {
                struct dirent d;
                int reclen;

                /* Create a dirent structure representing the current
                 * tmpfs_node and fill it. */
                d.d_ino = de->td_node->tn_id;
                switch (de->td_node->tn_type) {
                case VBLK:
                        d.d_type = DT_BLK;
                        break;

                case VCHR:
                        d.d_type = DT_CHR;
                        break;

                case VDIR:
                        d.d_type = DT_DIR;
                        break;

                case VFIFO:
                        d.d_type = DT_FIFO;
                        break;

                case VLNK:
                        d.d_type = DT_LNK;
                        break;

                case VREG:
                        d.d_type = DT_REG;
                        break;

                case VSOCK:
                        d.d_type = DT_SOCK;
                        break;

                default:
                        panic("tmpfs_dir_getdents: type %p %d",
                              de->td_node, (int)de->td_node->tn_type);
                }
                d.d_namlen = de->td_namelen;
                KKASSERT(de->td_namelen < sizeof(d.d_name));
                bcopy(de->td_name, d.d_name, d.d_namlen);
                d.d_name[d.d_namlen] = '\0';
                reclen = _DIRENT_RECLEN(d.d_namlen);

                /* Stop reading if the directory entry we are treating is
                 * bigger than the amount of data that can be returned. */
                if (reclen > uio->uio_resid) {
                        error = -1;
                        break;
                }

                /* Copy the new dirent structure into the output buffer and
                 * advance pointers. */
                error = uiomove((caddr_t)&d, reclen, uio);

                (*cntp)++;
                de = TAILQ_NEXT(de, td_entries);
        } while (error == 0 && uio->uio_resid > 0 && de != NULL);

        /* Update the offset and cache. */
        if (de == NULL) {
                uio->uio_offset = TMPFS_DIRCOOKIE_EOF;
                node->tn_dir.tn_readdir_lastn = 0;
                node->tn_dir.tn_readdir_lastp = NULL;
        } else {
                node->tn_dir.tn_readdir_lastn = uio->uio_offset = tmpfs_dircookie(de);
                node->tn_dir.tn_readdir_lastp = de;
        }
        node->tn_status |= TMPFS_NODE_ACCESSED;

        return error;
}

/* --------------------------------------------------------------------- */

/*
 * Resizes the aobj associated with the regular file pointed to by vp to
 * the size newsize.  'vp' must point to a vnode that represents a regular
 * file.  'newsize' must be positive.
 *
 * Pass 'trivial' as 1 when the buffer content will be overwritten anyway,
 * otherwise pass 0 so the newly exposed space is zero-filled.
 *
 * Returns zero on success or an appropriate error code on failure.
 */
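/*
 * Worked example of the page accounting below (assuming a 4096-byte
 * PAGE_SIZE, purely for illustration): growing a file from 5000 to 10000
 * bytes goes from round_page64(5000) / PAGE_SIZE = 2 pages to
 * round_page64(10000) / PAGE_SIZE = 3 pages, so tm_pages_used rises by
 * one page and the request fails with ENOSPC if that would exceed
 * tm_pages_max.
 */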
int
tmpfs_reg_resize(struct vnode *vp, off_t newsize, int trivial)
{
        int error;
        vm_pindex_t newpages, oldpages;
        struct tmpfs_mount *tmp;
        struct tmpfs_node *node;
        off_t oldsize;

#ifdef INVARIANTS
        KKASSERT(vp->v_type == VREG);
        KKASSERT(newsize >= 0);
#endif

        node = VP_TO_TMPFS_NODE(vp);
        tmp = VFS_TO_TMPFS(vp->v_mount);

        /* Convert the old and new sizes to the number of pages needed to
         * store them.  It may happen that we do not need to do anything
         * because the last allocated page can accommodate the change on
         * its own. */
        oldsize = node->tn_size;
        oldpages = round_page64(oldsize) / PAGE_SIZE;
        KKASSERT(oldpages == node->tn_reg.tn_aobj_pages);
        newpages = round_page64(newsize) / PAGE_SIZE;

        if (newpages > oldpages &&
            tmp->tm_pages_used + newpages - oldpages > tmp->tm_pages_max) {
                error = ENOSPC;
                goto out;
        }

        TMPFS_LOCK(tmp);
        tmp->tm_pages_used += (newpages - oldpages);
        TMPFS_UNLOCK(tmp);

        TMPFS_NODE_LOCK(node);
        node->tn_reg.tn_aobj_pages = newpages;
        node->tn_size = newsize;
        TMPFS_NODE_UNLOCK(node);

        /*
         * When adjusting the vnode filesize and its VM object we must
         * also adjust our backing VM object (aobj).  The blocksize
         * used must match the block size we use for the buffer cache.
         *
         * The backing VM object contains no VM pages, only swap
         * assignments.
         */
        if (newsize < oldsize) {
                vm_pindex_t osize;
                vm_pindex_t nsize;
                vm_object_t aobj;

                error = nvtruncbuf(vp, newsize, BSIZE, -1);
                aobj = node->tn_reg.tn_aobj;
                if (aobj) {
                        osize = aobj->size;
                        nsize = vp->v_object->size;
                        if (nsize < osize) {
                                aobj->size = osize;
                                swap_pager_freespace(aobj, nsize,
                                                     osize - nsize);
                        }
                }
        } else {
                vm_object_t aobj;

                error = nvextendbuf(vp, oldsize, newsize, BSIZE, BSIZE,
                                    -1, -1, trivial);
                aobj = node->tn_reg.tn_aobj;
                if (aobj)
                        aobj->size = vp->v_object->size;
        }

out:
        return error;
}

/* --------------------------------------------------------------------- */

/*
 * Change flags of the given vnode.
 * Caller should execute tmpfs_update on vp after a successful execution.
 * The vnode must be locked on entry and remain locked on exit.
 */
int
tmpfs_chflags(struct vnode *vp, int vaflags, struct ucred *cred)
{
        int error;
        struct tmpfs_node *node;
        int flags;

        KKASSERT(vn_islocked(vp));

        node = VP_TO_TMPFS_NODE(vp);
        flags = node->tn_flags;

        /* Disallow this operation if the file system is mounted read-only. */
        if (vp->v_mount->mnt_flag & MNT_RDONLY)
                return EROFS;
        error = vop_helper_setattr_flags(&flags, vaflags, node->tn_uid, cred);

        /*
         * Unprivileged processes are not permitted to unset system
         * flags, or modify flags if any system flags are set.
         *
         * Silently enforce SF_NOCACHE on the root tmpfs vnode so
         * tmpfs data is not double-cached by swapcache.
         */
        if (error == 0) {
                TMPFS_NODE_LOCK(node);
                if (!priv_check_cred(cred, PRIV_VFS_SYSFLAGS, 0)) {
                        if (vp->v_flag & VROOT)
                                flags |= SF_NOCACHE;
                        node->tn_flags = flags;
                } else {
                        if (node->tn_flags & (SF_NOUNLINK | SF_IMMUTABLE |
                                              SF_APPEND) ||
                            (flags & UF_SETTABLE) != flags) {
                                error = EPERM;
                        } else {
                                node->tn_flags &= SF_SETTABLE;
                                node->tn_flags |= (flags & UF_SETTABLE);
                        }
                }
                node->tn_status |= TMPFS_NODE_CHANGED;
                TMPFS_NODE_UNLOCK(node);
        }

        KKASSERT(vn_islocked(vp));

        return error;
}

/* --------------------------------------------------------------------- */

/*
 * Change access mode on the given vnode.
 * Caller should execute tmpfs_update on vp after a successful execution.
 * The vnode must be locked on entry and remain locked on exit.
 */
int
tmpfs_chmod(struct vnode *vp, mode_t vamode, struct ucred *cred)
{
        struct tmpfs_node *node;
        mode_t cur_mode;
        int error;

        KKASSERT(vn_islocked(vp));

        node = VP_TO_TMPFS_NODE(vp);

        /* Disallow this operation if the file system is mounted read-only. */
        if (vp->v_mount->mnt_flag & MNT_RDONLY)
                return EROFS;

        /* Immutable or append-only files cannot be modified, either. */
        if (node->tn_flags & (IMMUTABLE | APPEND))
                return EPERM;

        cur_mode = node->tn_mode;
        error = vop_helper_chmod(vp, vamode, cred, node->tn_uid, node->tn_gid,
                                 &cur_mode);

        if (error == 0 &&
            (node->tn_mode & ALLPERMS) != (cur_mode & ALLPERMS)) {
                TMPFS_NODE_LOCK(node);
                node->tn_mode &= ~ALLPERMS;
                node->tn_mode |= cur_mode & ALLPERMS;

                node->tn_status |= TMPFS_NODE_CHANGED;
                TMPFS_NODE_UNLOCK(node);
        }

        KKASSERT(vn_islocked(vp));

        return 0;
}

/* --------------------------------------------------------------------- */

/*
 * Change ownership of the given vnode.  At least one of uid or gid must
 * be different from VNOVAL.  If one is set to that value, the attribute
 * is unchanged.
 * Caller should execute tmpfs_update on vp after a successful execution.
 * The vnode must be locked on entry and remain locked on exit.
 */
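/*
 * Sketch of the intended semantics, per the comment above: a chgrp-style
 * update that leaves the owner alone passes (uid_t)VNOVAL for uid and a
 * real gid, while a chown that keeps the group passes (gid_t)VNOVAL for
 * gid.  Only values actually supplied end up copied back into the node
 * below.
 */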
int
tmpfs_chown(struct vnode *vp, uid_t uid, gid_t gid, struct ucred *cred)
{
        mode_t cur_mode;
        uid_t cur_uid;
        gid_t cur_gid;
        struct tmpfs_node *node;
        int error;

        KKASSERT(vn_islocked(vp));
        node = VP_TO_TMPFS_NODE(vp);

        /* Disallow this operation if the file system is mounted read-only. */
        if (vp->v_mount->mnt_flag & MNT_RDONLY)
                return EROFS;

        /* Immutable or append-only files cannot be modified, either. */
        if (node->tn_flags & (IMMUTABLE | APPEND))
                return EPERM;

        cur_uid = node->tn_uid;
        cur_gid = node->tn_gid;
        cur_mode = node->tn_mode;
        error = vop_helper_chown(vp, uid, gid, cred,
                                 &cur_uid, &cur_gid, &cur_mode);

        if (error == 0) {
                TMPFS_NODE_LOCK(node);
                if (cur_uid != node->tn_uid ||
                    cur_gid != node->tn_gid ||
                    cur_mode != node->tn_mode) {
                        node->tn_uid = cur_uid;
                        node->tn_gid = cur_gid;
                        node->tn_mode = cur_mode;
                        node->tn_status |= TMPFS_NODE_CHANGED;
                }
                TMPFS_NODE_UNLOCK(node);
        }

        return error;
}

/* --------------------------------------------------------------------- */

/*
 * Change size of the given vnode.
 * Caller should execute tmpfs_update on vp after a successful execution.
 * The vnode must be locked on entry and remain locked on exit.
 */
int
tmpfs_chsize(struct vnode *vp, u_quad_t size, struct ucred *cred)
{
        int error;
        struct tmpfs_node *node;

        KKASSERT(vn_islocked(vp));

        node = VP_TO_TMPFS_NODE(vp);

        /* Decide whether this is a valid operation based on the file type. */
        error = 0;
        switch (vp->v_type) {
        case VDIR:
                return EISDIR;

        case VREG:
                if (vp->v_mount->mnt_flag & MNT_RDONLY)
                        return EROFS;
                break;

        case VBLK:
                /* FALLTHROUGH */
        case VCHR:
                /* FALLTHROUGH */
        case VFIFO:
                /* Allow modifications of special files even if the file
                 * system is mounted read-only (we are not modifying the
                 * files themselves, but the objects they represent). */
                return 0;

        default:
                /* Anything else is unsupported. */
                return EOPNOTSUPP;
        }

        /* Immutable or append-only files cannot be modified, either. */
        if (node->tn_flags & (IMMUTABLE | APPEND))
                return EPERM;

        error = tmpfs_truncate(vp, size);
        /* tmpfs_truncate will raise the NOTE_EXTEND and NOTE_ATTRIB kevents
         * for us and will update tn_status; no need to do that here. */

        KKASSERT(vn_islocked(vp));

        return error;
}

/* --------------------------------------------------------------------- */

/*
 * Change access and modification times of the given vnode.
 * Caller should execute tmpfs_update on vp after a successful execution.
 * The vnode must be locked on entry and remain locked on exit.
 */
int
tmpfs_chtimes(struct vnode *vp, struct timespec *atime, struct timespec *mtime,
              int vaflags, struct ucred *cred)
{
        struct tmpfs_node *node;

        KKASSERT(vn_islocked(vp));

        node = VP_TO_TMPFS_NODE(vp);

        /* Disallow this operation if the file system is mounted read-only. */
        if (vp->v_mount->mnt_flag & MNT_RDONLY)
                return EROFS;

        /* Immutable or append-only files cannot be modified, either. */
        if (node->tn_flags & (IMMUTABLE | APPEND))
                return EPERM;

        TMPFS_NODE_LOCK(node);
        if (atime->tv_sec != VNOVAL && atime->tv_nsec != VNOVAL)
                node->tn_status |= TMPFS_NODE_ACCESSED;

        if (mtime->tv_sec != VNOVAL && mtime->tv_nsec != VNOVAL)
                node->tn_status |= TMPFS_NODE_MODIFIED;

        TMPFS_NODE_UNLOCK(node);

        tmpfs_itimes(vp, atime, mtime);

        KKASSERT(vn_islocked(vp));

        return 0;
}

/* --------------------------------------------------------------------- */
/* Sync timestamps */
void
tmpfs_itimes(struct vnode *vp, const struct timespec *acc,
             const struct timespec *mod)
{
        struct tmpfs_node *node;
        struct timespec now;

        node = VP_TO_TMPFS_NODE(vp);

        if ((node->tn_status & (TMPFS_NODE_ACCESSED | TMPFS_NODE_MODIFIED |
            TMPFS_NODE_CHANGED)) == 0)
                return;

        vfs_timestamp(&now);

        TMPFS_NODE_LOCK(node);
        if (node->tn_status & TMPFS_NODE_ACCESSED) {
                if (acc == NULL)
                        acc = &now;
                node->tn_atime = acc->tv_sec;
                node->tn_atimensec = acc->tv_nsec;
        }
        if (node->tn_status & TMPFS_NODE_MODIFIED) {
                if (mod == NULL)
                        mod = &now;
                node->tn_mtime = mod->tv_sec;
                node->tn_mtimensec = mod->tv_nsec;
        }
        if (node->tn_status & TMPFS_NODE_CHANGED) {
                node->tn_ctime = now.tv_sec;
                node->tn_ctimensec = now.tv_nsec;
        }
        node->tn_status &=
            ~(TMPFS_NODE_ACCESSED | TMPFS_NODE_MODIFIED | TMPFS_NODE_CHANGED);
        TMPFS_NODE_UNLOCK(node);
}

/* --------------------------------------------------------------------- */

void
tmpfs_update(struct vnode *vp)
{

        tmpfs_itimes(vp, NULL, NULL);
}

/* --------------------------------------------------------------------- */

int
tmpfs_truncate(struct vnode *vp, off_t length)
{
        int error;
        struct tmpfs_node *node;

        node = VP_TO_TMPFS_NODE(vp);

        if (length < 0) {
                error = EINVAL;
                goto out;
        }

        if (node->tn_size == length) {
                error = 0;
                goto out;
        }

        if (length > VFS_TO_TMPFS(vp->v_mount)->tm_maxfilesize)
                return (EFBIG);

        error = tmpfs_reg_resize(vp, length, 1);

        if (error == 0) {
                TMPFS_NODE_LOCK(node);
                node->tn_status |= TMPFS_NODE_CHANGED | TMPFS_NODE_MODIFIED;
                TMPFS_NODE_UNLOCK(node);
        }

out:
        tmpfs_update(vp);

        return error;
}

/* --------------------------------------------------------------------- */

static ino_t
tmpfs_fetch_ino(void)
{
        ino_t ret;

        spin_lock(&ino_lock);
        ret = t_ino++;
        spin_unlock(&ino_lock);

        return ret;
}