2 * Copyright (c) 2011, 2012 The DragonFly Project. All rights reserved.
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@backplane.com>
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * 3. Neither the name of The DragonFly Project nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific, prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 * Copyright (c) 2005 The NetBSD Foundation, Inc.
36 * All rights reserved.
38 * This code is derived from software contributed to The NetBSD Foundation
39 * by Julio M. Merino Vidal, developed as part of Google's Summer of Code
42 * Redistribution and use in source and binary forms, with or without
43 * modification, are permitted provided that the following conditions
45 * 1. Redistributions of source code must retain the above copyright
46 * notice, this list of conditions and the following disclaimer.
47 * 2. Redistributions in binary form must reproduce the above copyright
48 * notice, this list of conditions and the following disclaimer in the
49 * documentation and/or other materials provided with the distribution.
51 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
52 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
53 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
54 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
55 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
56 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
57 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
58 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
59 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
60 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
61 * POSSIBILITY OF SUCH DAMAGE.
64 #include <sys/param.h>
65 #include <sys/systm.h>
66 #include <sys/kernel.h>
67 #include <sys/nlookup.h>
68 #include <sys/vnode.h>
69 #include <sys/mount.h>
70 #include <sys/fcntl.h>
75 #include "hammer2_disk.h"
76 #include "hammer2_mount.h"
/*
 * Forward declarations for the HAMMER2 VFS entry points, plus the two
 * tmpfs-derived helpers (tmpfs_unmount/tmpfs_root) this file leans on while
 * HAMMER2 is bootstrapped on top of the tmpfs node code.
 *
 * NOTE(review): the embedded original line numbers are non-contiguous, so
 * several prototype continuation lines (trailing parameters of
 * hammer2_mount/_remount/_statfs/_statvfs) appear to have been dropped by
 * the extraction — confirm against the original file before compiling.
 */
78 static int hammer2_init(struct vfsconf *conf);
79 static int hammer2_mount(struct mount *mp, char *path, caddr_t data,
81 static int hammer2_remount(struct mount *, char *, struct vnode *,
83 static int hammer2_unmount(struct mount *mp, int mntflags);
84 static int hammer2_root(struct mount *mp, struct vnode **vpp);
85 static int hammer2_statfs(struct mount *mp, struct statfs *sbp,
87 static int hammer2_statvfs(struct mount *mp, struct statvfs *sbp,
89 static int hammer2_sync(struct mount *mp, int waitfor);
90 static int hammer2_vget(struct mount *mp, struct vnode *dvp,
91 ino_t ino, struct vnode **vpp);
92 static int hammer2_fhtovp(struct mount *mp, struct vnode *rootvp,
93 struct fid *fhp, struct vnode **vpp);
94 static int hammer2_vptofh(struct vnode *vp, struct fid *fhp);
95 static int hammer2_checkexp(struct mount *mp, struct sockaddr *nam,
96 int *exflagsp, struct ucred **credanonp);
/* tmpfs routines HAMMER2 currently delegates to (see hammer2_unmount/_root) */
98 static int tmpfs_unmount(struct mount *, int);
99 static int tmpfs_root(struct mount *, struct vnode **);
102 * HAMMER2 vfs operations.
/*
 * HAMMER2 vfsops table wired into the kernel via VFS_SET below.
 *
 * NOTE(review): .vfs_root is designated-initialized TWICE (tmpfs_root, then
 * hammer2_root). Per C semantics the LAST initializer wins, so hammer2_root
 * is the effective root handler and the tmpfs_root entry is dead (compilers
 * warn under -Woverride-init). Presumably a leftover from the tmpfs
 * bootstrap — confirm intent and remove one of them.
 */
104 static struct vfsops hammer2_vfsops = {
106 .vfs_root = tmpfs_root,
109 .vfs_init = hammer2_init,
110 .vfs_sync = hammer2_sync,
111 .vfs_mount = hammer2_mount,
112 .vfs_unmount = hammer2_unmount,
114 .vfs_root = hammer2_root,
116 .vfs_statfs = hammer2_statfs,
117 /* If we enable statvfs, we disappear in df, till we implement it. */
118 /* That makes debugging difficult :) */
119 // .vfs_statvfs = hammer2_statvfs,
120 .vfs_vget = hammer2_vget,
121 .vfs_vptofh = hammer2_vptofh,
122 .vfs_fhtovp = hammer2_fhtovp,
123 .vfs_checkexp = hammer2_checkexp
/* Malloc type used for the hammer2_mount structure and module registration. */
127 MALLOC_DEFINE(M_HAMMER2, "HAMMER2-mount", "");
129 VFS_SET(hammer2_vfsops, hammer2, 0);
130 MODULE_VERSION(hammer2, 1);
/*
 * hammer2_init() - VFS init hook.
 *
 * Sanity-checks that the compiled structure layouts match the on-disk
 * format constants (blockref, inode, allocref, volume header). On mismatch
 * the filesystem refuses to continue.
 *
 * NOTE(review): the bodies of the four if-statements (presumably setting an
 * error flag) and the function's braces/return were dropped by the
 * extraction — the embedded line numbers skip 134-138, 140, 142, 144, 146-148.
 */
133 hammer2_init(struct vfsconf *conf)
139 if (HAMMER2_BLOCKREF_BYTES != sizeof(struct hammer2_blockref))
141 if (HAMMER2_INODE_BYTES != sizeof(struct hammer2_inode_data))
143 if (HAMMER2_ALLOCREF_BYTES != sizeof(struct hammer2_allocref))
145 if (HAMMER2_VOLUME_BYTES != sizeof(struct hammer2_volume_data))
149 kprintf("HAMMER2 structure size mismatch; cannot continue.\n");
155 * Mount or remount HAMMER2 filesystem from physical media
158 * mp mount point structure
164 * mp mount point structure
165 * path path to mount point
166 * data pointer to argument structure in user space
167 * volume volume path (device@LABEL form)
168 * hflags user mount flags
169 * cred user credentials
/*
 * hammer2_mount() - VFS mount entry point.
 *
 * Flow (as far as the surviving lines show):
 *  1. copyin() the user mount-argument structure, copyinstr() the volume
 *     string and split it at '@' into device path and label.
 *  2. MNT_UPDATE requests are forwarded to hammer2_remount().
 *  3. nlookup the device path, vref the vnode and verify it is a disk
 *     (vn_isdisk), not already mounted (vfs_mountedon) and not otherwise
 *     open (vcount).
 *  4. Open the device (read-only iff MNT_RDONLY), allocate and initialize
 *     the hammer2_mount structure and its kmalloc zones.
 *  5. Read the HAMMER2_NUM_VOLHDRS volume headers, pick the one with the
 *     highest last_tid, and copy it into hmp->hm_sb.
 *  6. Set up the root inode, fill in f_mntfromname/f_mntonname, statfs,
 *     then hand off to tmpfs_mount() for the tmpfs-backed bootstrap.
 *
 * NOTE(review): many lines are missing from this extraction (error-return
 * paths, local declarations such as dev/label/devvp/error/done, loop
 * closing braces). Do not treat this body as compilable.
 */
175 hammer2_mount(struct mount *mp, char *path, caddr_t data,
178 struct hammer2_mount_info info;
179 struct hammer2_mount *hmp;
181 struct nlookupdata nd;
182 char devstr[MNAMELEN];
194 kprintf("hammer2_mount\n");
204 * Non-root mount or updating a mount
207 error = copyin(data, &info, sizeof(info));
211 error = copyinstr(info.volume, devstr, MNAMELEN - 1, &done);
215 /* Extract device and label */
217 label = strchr(devstr, '@');
219 ((label + 1) - dev) > done)
226 if (mp->mnt_flag & MNT_UPDATE) {
228 /* HAMMER2 implements NFS export via mountctl */
230 devvp = hmp->hm_devvp;
231 return hammer2_remount(mp, path, devvp, cred);
238 /* Lookup name and verify it refers to a block device */
239 error = nlookup_init(&nd, dev, UIO_SYSSPACE, NLC_FOLLOW);
242 error = nlookup(&nd);
245 error = cache_vref(&nd.nl_nch, nd.nl_cred, &devvp);
250 if (!vn_isdisk(devvp, &error)) {
256 * Common path for new root/non-root mounts;
257 * devvp is a ref-ed but not locked vnode referring to the fs device
260 error = vfs_mountedon(devvp);
266 if (vcount(devvp) > 0) {
274 ronly = (mp->mnt_flag & MNT_RDONLY) != 0;
275 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
276 error = vinvalbuf(devvp, V_SAVE, 0, 0);
282 /* This is correct; however due to an NFS quirk of my setup, FREAD
285 error = VOP_OPEN(devvp, ronly ? FREAD : FREAD | FWRITE, FSCRED, NULL);
287 error = VOP_OPEN(devvp, FREAD, FSCRED, NULL);
295 /* VOP_IOCTL(EXTENDED_DISK_INFO, devvp); */
296 /* if vn device, never use bdwrite(); */
297 /* check if device supports BUF_CMD_READALL; */
298 /* check if device supports BUF_CMD_WRITEALL; */
301 hmp = kmalloc(sizeof(*hmp), M_HAMMER2, M_WAITOK | M_ZERO);
302 /*mp->mnt_data = (qaddr_t) hmp;*/
304 /*hmp->hm_ronly = ronly;*/
305 /*hmp->hm_devvp = devvp;*/
306 lockinit(&hmp->hm_lk, "h2mp", 0, 0);
307 kmalloc_create(&hmp->hm_inodes, "HAMMER2-inodes");
308 kmalloc_create(&hmp->hm_ipstacks, "HAMMER2-ipstacks");
310 /* Readout volume headers, make sure we have a live filesystem */
311 /* Kinda hacky atm */
313 struct buf *bps[HAMMER2_NUM_VOLHDRS];
319 struct hammer2_volume_data *vd;
320 for (i = 0; i < HAMMER2_NUM_VOLHDRS; i++) {
321 //rc = bread(devvp, i * HAMMER2_RESERVE_ALIGN64,
322 // HAMMER2_PBUFSIZE, &bps[i]);
330 if (vd->magic == HAMMER2_VOLUME_ID_HBO) {
/* CRC the first 512-byte sector with its crc field zeroed out. */
332 unsigned char tmp[512];
333 bcopy(bps[i]->b_data, &tmp, 512);
334 bzero(&tmp[512 - 4], 4);
335 /* Calculate CRC32 w/ crc field zero */
336 /* XXX: Can we modify b_data? */
337 //ccrc = hammer2_icrc32(tmp, 512);
338 //crc = vd->icrc_sect0;
/* Track the header with the highest transaction id seen so far. */
347 if (vd->last_tid > hi_tid) {
348 hi_tid = vd->last_tid;
354 /* We have found the hammer volume header w/
355 * the highest transaction id. Use it. */
357 bcopy(bps[hi_num]->b_data, &hmp->hm_sb,
360 for (i = 0 ; i < HAMMER2_NUM_VOLHDRS; i++)
363 kprintf("HAMMER2 volume %d by\n", hmp->hm_sb.volu_size);
365 /* XXX More to do! Release structures and stuff */
371 * Filesystem subroutines are self-synchronized
373 /*mp->mnt_kern_flag |= MNTK_ALL_MPSAFE;*/
376 /* Setup root inode */
377 hmp->hm_iroot = alloci(hmp);
378 hmp->hm_iroot->type = HAMMER2_INODE_TYPE_DIR | HAMMER2_INODE_TYPE_ROOT;
379 hmp->hm_iroot->inum = 1;
381 /* currently rely on tmpfs routines */
382 /*vfs_getnewfsid(mp);*/
383 /*vfs_add_vnodeops(mp, &hammer2_vnode_vops, &mp->mnt_vn_norm_ops);*/
384 /*vfs_add_vnodeops(mp, &hammer2_spec_vops, &mp->mnt_vn_spec_ops);*/
385 /*vfs_add_vnodeops(mp, &hammer2_fifo_vops, &mp->mnt_vn_fifo_ops);*/
/* Fill in mount point names: f_mntfromname is fixed, f_mntonname from user. */
387 copystr("hammer2", mp->mnt_stat.f_mntfromname, MNAMELEN - 1, &size);
388 bzero(mp->mnt_stat.f_mntfromname + size, MNAMELEN - size);
389 bzero(mp->mnt_stat.f_mntonname, sizeof(mp->mnt_stat.f_mntonname));
390 copyinstr(path, mp->mnt_stat.f_mntonname,
391 sizeof(mp->mnt_stat.f_mntonname) - 1,
394 hammer2_statfs(mp, &mp->mnt_stat, cred);
396 hammer2_inode_unlock_ex(hmp->hm_iroot);
/* Delegate the rest of the mount to the tmpfs bootstrap code below. */
398 return (tmpfs_mount(hmp, mp, path, data, cred));
/*
 * hammer2_remount() - handle MNT_UPDATE remounts (called from
 * hammer2_mount above). NOTE(review): only the first signature line
 * survived the extraction; the parameter tail and body are missing.
 */
402 hammer2_remount(struct mount *mp, char *path, struct vnode *devvp,
/*
 * hammer2_unmount() - VFS unmount entry point.
 *
 * Flushes vnodes, destroys the per-mount kmalloc zones and delegates the
 * remainder of teardown to tmpfs_unmount(). MNT_FORCE presumably adds
 * FORCECLOSE to the vflush flags — the line that sets 'flags' is missing.
 * NOTE(review): the kfree of hmp is commented out here; tmpfs_unmount()
 * performs the kfree(tmp, M_HAMMER2) instead (see below).
 */
409 hammer2_unmount(struct mount *mp, int mntflags)
411 struct hammer2_mount *hmp;
415 kprintf("hammer2_unmount\n");
420 if (mntflags & MNT_FORCE)
423 hammer2_mount_exlock(hmp);
425 error = vflush(mp, 0, flags);
429 * 1) Wait on the flusher having no work; heat up if needed
430 * 2) Scan inode RB tree till all the inodes are free
431 * 3) Destroy the kmalloc inode zone
432 * 4) Free the mount point
435 kmalloc_destroy(&hmp->hm_inodes);
436 kmalloc_destroy(&hmp->hm_ipstacks);
438 hammer2_mount_unlock(hmp);
441 //kfree(hmp, M_HAMMER2);
443 return (tmpfs_unmount(mp, mntflags));
/*
 * hammer2_vget() - inode-number -> vnode lookup. Currently a stub that
 * only logs; the return path is missing from this extraction.
 */
449 hammer2_vget(struct mount *mp, struct vnode *dvp,
450 ino_t ino, struct vnode **vpp)
452 kprintf("hammer2_vget\n");
/*
 * hammer2_root() - return a referenced vnode for the filesystem root.
 *
 * Takes the mount lock exclusively, vnode-ifies hm_iroot via igetv() and
 * releases the lock. NOTE(review): the NULL-root error path and the final
 * *vpp assignment/return are missing lines.
 */
457 hammer2_root(struct mount *mp, struct vnode **vpp)
459 struct hammer2_mount *hmp;
463 kprintf("hammer2_root\n");
466 hammer2_mount_lock_ex(hmp);
467 if (hmp->hm_iroot == NULL) {
471 vp = igetv(hmp->hm_iroot, &error);
474 kprintf("vnodefail\n");
476 hammer2_mount_unlock(hmp);
/*
 * hammer2_statfs() - fill in struct statfs for df(1) and friends.
 * Only the iosize/bsize assignments (both PAGE_SIZE) survive in this
 * extraction; block/inode counts are presumably filled in by missing lines.
 */
482 hammer2_statfs(struct mount *mp, struct statfs *sbp, struct ucred *cred)
484 struct hammer2_mount *hmp;
486 kprintf("hammer2_statfs\n");
490 sbp->f_iosize = PAGE_SIZE;
491 sbp->f_bsize = PAGE_SIZE;
/*
 * hammer2_statvfs() - unimplemented stub; deliberately not wired into the
 * vfsops table above (would make the fs invisible in df until implemented).
 */
505 hammer2_statvfs(struct mount *mp, struct statvfs *sbp, struct ucred *cred)
507 kprintf("hammer2_statvfs\n");
512 * Sync the entire filesystem; this is called from the filesystem syncer
513 * process periodically and whenever a user calls sync(1) on the hammer
516 * Currently is actually called from the syncer! \o/
518 * This task will have to snapshot the state of the dirty inode chain.
519 * From that, it will have to make sure all of the inodes on the dirty
520 * chain have IO initiated. We make sure that io is initiated for the root
523 * If waitfor is set, we wait for media to acknowledge the new rootblock.
525 * THINKS: side A vs side B, to have sync not stall all I/O?
/*
 * hammer2_sync() - periodic/explicit filesystem sync (see the design
 * comment above). Currently only logs; the actual flush logic and return
 * are missing from this extraction.
 */
528 hammer2_sync(struct mount *mp, int waitfor)
530 struct hammer2_mount *hmp;
531 struct hammer2_inode *ip;
533 kprintf("hammer2_sync \n");
/*
 * NFS export stubs: vnode<->file-handle conversion and export permission
 * check. Bodies are missing from this extraction; only signatures remain.
 */
541 hammer2_vptofh(struct vnode *vp, struct fid *fhp)
547 hammer2_fhtovp(struct mount *mp, struct vnode *rootvp,
548 struct fid *fhp, struct vnode **vpp)
554 hammer2_checkexp(struct mount *mp, struct sockaddr *nam,
555 int *exflagsp, struct ucred **credanonp)
561 * Efficient memory file system.
563 * tmpfs is a file system that uses NetBSD's virtual memory sub-system
564 * (the well-known UVM) to store file data and metadata in an efficient
565 * way. This means that it does not follow the structure of an on-disk
566 * file system because it simply does not need to. Instead, it uses
567 * memory-specific data structures and algorithms to automatically
568 * allocate and release resources.
571 #include <sys/conf.h>
572 #include <sys/param.h>
573 #include <sys/limits.h>
574 #include <sys/lock.h>
575 #include <sys/mutex.h>
576 #include <sys/kernel.h>
577 #include <sys/stat.h>
578 #include <sys/systm.h>
579 #include <sys/sysctl.h>
580 #include <sys/objcache.h>
583 #include <vm/vm_object.h>
584 #include <vm/vm_param.h>
589 * Default permission for root node
/* rwxr-xr-x (0755) per the POSIX S_I* bit values */
591 #define TMPFS_DEFAULT_ROOT_MODE (S_IRWXU|S_IRGRP|S_IXGRP|S_IROTH|S_IXOTH)
593 /* --------------------------------------------------------------------- */
/*
 * tmpfs_node_ctor() - objcache constructor: reset the per-node fields that
 * vary between uses (vnode pointer, vpstate, type-specific union).
 * NOTE(review): the closing brace and 'return' line(s) are missing here.
 */
595 tmpfs_node_ctor(void *obj, void *privdata, int flags)
597 struct hammer2_node *node = (struct hammer2_node *)obj;
604 node->tn_vnode = NULL;
605 node->tn_vpstate = TMPFS_VNODE_WANT;
606 bzero(&node->tn_spec, sizeof(node->tn_spec));
/*
 * tmpfs_node_dtor() - objcache destructor: mark the node as dead
 * (VNON type, DOOMED vpstate) before it returns to the cache.
 */
612 tmpfs_node_dtor(void *obj, void *privdata)
614 struct hammer2_node *node = (struct hammer2_node *)obj;
615 node->tn_type = VNON;
616 node->tn_vpstate = TMPFS_VNODE_DOOMED;
/*
 * tmpfs_node_init() - objcache backing-store allocator: kmalloc the node,
 * init its recursive interlock and seed a random generation number (used
 * by the NFS file-handle code in tmpfs_fhtovp/tmpfs_vptofh).
 */
620 tmpfs_node_init(void *args, int flags)
622 struct hammer2_node *node = (struct hammer2_node *)objcache_malloc_alloc(args, flags);
627 lockinit(&node->tn_interlock, "tmpfs node interlock", 0, LK_CANRECURSE);
628 node->tn_gen = karc4random();
/*
 * tmpfs_node_fini() - objcache backing-store release: tear down the
 * interlock and free the node memory.
 */
634 tmpfs_node_fini(void *obj, void *args)
636 struct hammer2_node *node = (struct hammer2_node *)obj;
637 lockuninit(&node->tn_interlock);
638 objcache_malloc_free(obj, args);
/*
 * tmpfs_mount() - tmpfs-style mount bootstrap, called from hammer2_mount().
 *
 * Computes memory limits (pages from swap+RAM, node count, max file size),
 * initializes the hammer2_mount's tmpfs-derived fields and objcaches,
 * allocates the root node, and sets the MPSAFE/LOCAL mount flags.
 *
 * NOTE(review): this extraction dropped many lines — the tmpfs_args copyin
 * path is commented out yet args.ta_* is still read (the guarding branch
 * is missing), and several else-branches of the limit calculations are
 * absent. Treat as non-compilable as shown.
 */
642 tmpfs_mount(struct hammer2_mount *hmp,
643 struct mount *mp, char *path, caddr_t data, struct ucred *cred)
645 // struct tmpfs_mount *tmp;
646 struct hammer2_node *root;
647 // struct tmpfs_args args;
649 vm_pindex_t pages_limit;
659 /* Root node attributes. */
660 uid_t root_uid = cred->cr_uid;
661 gid_t root_gid = cred->cr_gid;
662 mode_t root_mode = (VREAD | VWRITE);
664 if (mp->mnt_flag & MNT_UPDATE) {
665 /* XXX: There is no support yet to update file system
666 * settings. Should be added. */
671 kprintf("tmpfs_mount\n");
676 // bzero(&args, sizeof(args));
683 // error = copyin(data, &args, sizeof(args));
688 size_max = args.ta_size_max;
689 nodes_max = args.ta_nodes_max;
690 maxfsize_max = args.ta_maxfsize_max;
691 root_uid = args.ta_root_uid;
692 root_gid = args.ta_root_gid;
693 root_mode = args.ta_root_mode;
698 * If mount by non-root, then verify that user has necessary
699 * permissions on the device.
701 if (cred->cr_uid != 0) {
703 if ((mp->mnt_flag & MNT_RDONLY) == 0)
/* Page budget: all of swap plus half of physical memory. */
707 pages_limit = vm_swap_max + vmstats.v_page_count / 2;
710 pages = pages_limit / 2;
711 else if (size_max < PAGE_SIZE)
713 else if (OFF_TO_IDX(size_max) > pages_limit)
716 pages = OFF_TO_IDX(size_max);
719 nodes = 3 + pages * PAGE_SIZE / 1024;
720 else if (nodes_max < 3)
722 else if (nodes_max > pages)
727 maxfsize = IDX_TO_OFF(pages_limit);
728 if (maxfsize_max != 0 && maxfsize > maxfsize_max)
729 maxfsize = maxfsize_max;
731 /* Allocate the tmpfs mount structure and fill it. */
732 // tmp = kmalloc(sizeof(*tmp), M_HAMMER2, M_WAITOK | M_ZERO);
/* Reuse the hammer2_mount allocated by the caller instead of a fresh tmp. */
734 struct hammer2_mount *tmp = hmp;
735 lockinit(&(tmp->allnode_lock), "tmpfs allnode lock", 0, LK_CANRECURSE);
736 tmp->tm_nodes_max = nodes;
737 tmp->tm_nodes_inuse = 0;
738 tmp->tm_maxfilesize = maxfsize;
739 LIST_INIT(&tmp->tm_nodes_used);
741 tmp->tm_pages_max = pages;
742 tmp->tm_pages_used = 0;
744 kmalloc_create(&tmp->tm_node_zone, "tmpfs node");
745 kmalloc_create(&tmp->tm_dirent_zone, "tmpfs dirent");
746 kmalloc_create(&tmp->tm_name_zone, "tmpfs name zone");
748 kmalloc_raise_limit(tmp->tm_node_zone, sizeof(struct hammer2_node) *
751 tmp->tm_node_zone_malloc_args.objsize = sizeof(struct hammer2_node);
752 tmp->tm_node_zone_malloc_args.mtype = tmp->tm_node_zone;
754 tmp->tm_dirent_zone_malloc_args.objsize = sizeof(struct hammer2_dirent);
755 tmp->tm_dirent_zone_malloc_args.mtype = tmp->tm_dirent_zone;
757 tmp->tm_dirent_pool = objcache_create( "tmpfs dirent cache",
760 objcache_malloc_alloc, objcache_malloc_free,
761 &tmp->tm_dirent_zone_malloc_args);
762 tmp->tm_node_pool = objcache_create( "tmpfs node cache",
764 tmpfs_node_ctor, tmpfs_node_dtor, NULL,
765 tmpfs_node_init, tmpfs_node_fini,
766 &tmp->tm_node_zone_malloc_args);
768 /* Allocate the root node. */
769 error = tmpfs_alloc_node(tmp, VDIR, root_uid, root_gid,
770 root_mode & ALLPERMS, NULL, NULL,
771 VNOVAL, VNOVAL, &root);
774 * We are backed by swap, set snocache chflags flag so we
775 * don't trip over swapcache.
777 root->tn_flags = SF_NOCACHE;
/* NOTE(review): root is dereferenced above before this NULL check. */
779 if (error != 0 || root == NULL) {
780 objcache_destroy(tmp->tm_node_pool);
781 objcache_destroy(tmp->tm_dirent_pool);
782 kfree(tmp, M_HAMMER2);
785 KASSERT(root->tn_id >= 0, ("tmpfs root with invalid ino: %d", (int)root->tn_id));
788 mp->mnt_flag |= MNT_LOCAL;
790 mp->mnt_kern_flag |= MNTK_RD_MPSAFE | MNTK_WR_MPSAFE | MNTK_GA_MPSAFE |
791 MNTK_IN_MPSAFE | MNTK_SG_MPSAFE;
793 mp->mnt_kern_flag |= MNTK_RD_MPSAFE | MNTK_GA_MPSAFE | MNTK_SG_MPSAFE;
794 mp->mnt_kern_flag |= MNTK_WR_MPSAFE;
795 mp->mnt_kern_flag |= MNTK_NOMSYNC;
796 mp->mnt_kern_flag |= MNTK_THR_SYNC;
797 mp->mnt_data = (qaddr_t)tmp;
800 vfs_add_vnodeops(mp, &tmpfs_vnode_vops, &mp->mnt_vn_norm_ops);
801 vfs_add_vnodeops(mp, &tmpfs_fifo_vops, &mp->mnt_vn_fifo_ops);
803 hammer2_statfs(mp, &mp->mnt_stat, cred);
808 /* --------------------------------------------------------------------- */
/*
 * tmpfs_unmount() - tear down the tmpfs-backed mount, called from
 * hammer2_unmount().
 *
 * Phases:
 *  1. Truncate every regular file's vnode to discard cached data, then
 *     vflush() the mount.
 *  2. First pass over tm_nodes_used: detach directory entries and vnode
 *     associations.
 *  3. Second pass: repeatedly free nodes with link count 0 (or 1 for
 *     directories) until the list is empty — recursion inside
 *     tmpfs_free_node() can mutate the list, hence no saved next pointer.
 *  4. Destroy objcaches and kmalloc zones, free the hammer2_mount itself.
 *
 * NOTE(review): lines setting 'flags' for MNT_FORCE and the final return
 * are among those dropped by the extraction.
 */
812 tmpfs_unmount(struct mount *mp, int mntflags)
817 struct hammer2_mount *tmp;
818 struct hammer2_node *node;
820 kprintf("tmpfs_umount\n");
822 /* Handle forced unmounts. */
823 if (mntflags & MNT_FORCE)
826 tmp = VFS_TO_TMPFS(mp);
829 * Finalize all pending I/O. In the case of tmpfs we want
830 * to throw all the data away so clean out the buffer cache
831 * and vm objects before calling vflush().
833 LIST_FOREACH(node, &tmp->tm_nodes_used, tn_entries) {
834 if (node->tn_type == VREG && node->tn_vnode) {
836 TMPFS_NODE_LOCK(node);
837 vx_get(node->tn_vnode);
838 tmpfs_truncate(node->tn_vnode, 0);
839 vx_put(node->tn_vnode);
840 TMPFS_NODE_UNLOCK(node);
844 error = vflush(mp, 0, flags);
849 * First pass get rid of all the directory entries and
850 * vnode associations. The directory structure will
851 * remain via the extra link count representing tn_dir.tn_parent.
853 * No vnodes should remain after the vflush above.
855 LIST_FOREACH(node, &tmp->tm_nodes_used, tn_entries) {
857 TMPFS_NODE_LOCK(node);
858 if (node->tn_type == VDIR) {
859 struct tmpfs_dirent *de;
861 while (!TAILQ_EMPTY(&node->tn_dir.tn_dirhead)) {
862 de = TAILQ_FIRST(&node->tn_dir.tn_dirhead);
863 tmpfs_dir_detach(node, de);
864 tmpfs_free_dirent(tmp, de);
865 node->tn_size -= sizeof(struct hammer2_dirent);
868 KKASSERT(node->tn_vnode == NULL);
874 node->tn_vnode = NULL;
877 TMPFS_NODE_UNLOCK(node);
882 * Now get rid of all nodes. We can remove any node with a
883 * link count of 0 or any directory node with a link count of
884 * 1. The parents will not be destroyed until all their children
885 * have been destroyed.
887 * Recursion in tmpfs_free_node() can further modify the list so
888 * we cannot use a next pointer here.
890 * The root node will be destroyed by this loop (it will be last).
892 while (!LIST_EMPTY(&tmp->tm_nodes_used)) {
894 LIST_FOREACH(node, &tmp->tm_nodes_used, tn_entries) {
895 if (node->tn_links == 0 ||
896 (node->tn_links == 1 && node->tn_type == VDIR)) {
897 TMPFS_NODE_LOCK(node);
898 tmpfs_free_node(tmp, node);
905 kprintf("tmpfs: Cannot free entire node tree!");
910 KKASSERT(tmp->tm_root == NULL);
912 objcache_destroy(tmp->tm_dirent_pool);
913 objcache_destroy(tmp->tm_node_pool);
915 kmalloc_destroy(&tmp->tm_name_zone);
916 kmalloc_destroy(&tmp->tm_dirent_zone);
917 kmalloc_destroy(&tmp->tm_node_zone);
919 tmp->tm_node_zone = tmp->tm_dirent_zone = NULL;
921 lockuninit(&tmp->allnode_lock);
922 KKASSERT(tmp->tm_pages_used == 0);
923 KKASSERT(tmp->tm_nodes_inuse == 0);
925 /* Throw away the hammer2_mount structure. */
926 kfree(tmp, M_HAMMER2);
929 mp->mnt_flag &= ~MNT_LOCAL;
933 /* --------------------------------------------------------------------- */
/*
 * tmpfs_root() - return a locked/ref'd vnode for tm_root via
 * tmpfs_alloc_vp() and mark it VROOT/VDIR. Logs and (presumably) errors
 * out if called before the root node exists — the error return line is
 * missing from this extraction.
 */
936 tmpfs_root(struct mount *mp, struct vnode **vpp)
938 struct hammer2_mount *tmp;
941 kprintf("tmpfs_root\n");
943 tmp = VFS_TO_TMPFS(mp);
944 if (tmp->tm_root == NULL) {
945 kprintf("tmpfs_root: called without root node %p\n", mp);
950 error = tmpfs_alloc_vp(mp, tmp->tm_root, LK_EXCLUSIVE, vpp);
951 (*vpp)->v_flag |= VROOT;
952 (*vpp)->v_type = VDIR;
957 /* --------------------------------------------------------------------- */
/*
 * tmpfs_fhtovp() - NFS file handle -> vnode. Validates the handle length
 * and id range, then linearly scans tm_nodes_used for a node matching both
 * tn_id and the random tn_gen (guards against stale handles after node
 * reuse). Found node is vnode-ified via tmpfs_alloc_vp().
 * NOTE(review): the not-found return path is among the missing lines.
 */
960 tmpfs_fhtovp(struct mount *mp, struct vnode *rootvp, struct fid *fhp, struct vnode **vpp)
963 struct tmpfs_fid *tfhp;
964 struct hammer2_mount *tmp;
965 struct hammer2_node *node;
967 tmp = VFS_TO_TMPFS(mp);
969 tfhp = (struct tmpfs_fid *)fhp;
970 if (tfhp->tf_len != sizeof(struct tmpfs_fid))
973 if (tfhp->tf_id >= tmp->tm_nodes_max)
979 LIST_FOREACH(node, &tmp->tm_nodes_used, tn_entries) {
980 if (node->tn_id == tfhp->tf_id &&
981 node->tn_gen == tfhp->tf_gen) {
989 return (tmpfs_alloc_vp(mp, node, LK_EXCLUSIVE, vpp));
994 /* --------------------------------------------------------------------- */
/*
 * tmpfs_vptofh() - vnode -> NFS file handle: zero a tmpfs_fid, record the
 * node's generation and id, and copy it into the caller's fid buffer.
 * Counterpart of tmpfs_fhtovp() above.
 */
997 tmpfs_vptofh(struct vnode *vp, struct fid *fhp)
999 struct hammer2_node *node;
1000 struct tmpfs_fid tfh;
1001 node = VP_TO_TMPFS_NODE(vp);
1002 memset(&tfh, 0, sizeof(tfh));
1003 tfh.tf_len = sizeof(struct tmpfs_fid);
1004 tfh.tf_gen = node->tn_gen;
1005 tfh.tf_id = node->tn_id;
1006 memcpy(fhp, &tfh, sizeof(tfh));