hammer2 - create new branch, sync working trees from dillon & vsrinivas
[dragonfly.git] / sys / vfs / hammer2 / hammer2_vfsops.c
CommitLineData
703720e4
MD
1/*
2 * Copyright (c) 2011, 2012 The DragonFly Project. All rights reserved.
3 *
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@backplane.com>
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
16 * distribution.
17 * 3. Neither the name of The DragonFly Project nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific, prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 */
34/*-
35 * Copyright (c) 2005 The NetBSD Foundation, Inc.
36 * All rights reserved.
37 *
38 * This code is derived from software contributed to The NetBSD Foundation
39 * by Julio M. Merino Vidal, developed as part of Google's Summer of Code
40 * 2005 program.
41 *
42 * Redistribution and use in source and binary forms, with or without
43 * modification, are permitted provided that the following conditions
44 * are met:
45 * 1. Redistributions of source code must retain the above copyright
46 * notice, this list of conditions and the following disclaimer.
47 * 2. Redistributions in binary form must reproduce the above copyright
48 * notice, this list of conditions and the following disclaimer in the
49 * documentation and/or other materials provided with the distribution.
50 *
51 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
52 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
53 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
54 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
55 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
56 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
57 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
58 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
59 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
60 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
61 * POSSIBILITY OF SUCH DAMAGE.
62 */
63
64#include <sys/param.h>
65#include <sys/systm.h>
66#include <sys/kernel.h>
67#include <sys/nlookup.h>
68#include <sys/vnode.h>
69#include <sys/mount.h>
70#include <sys/fcntl.h>
71#include <sys/buf.h>
72#include <sys/uuid.h>
73
74#include "hammer2.h"
75#include "hammer2_disk.h"
76#include "hammer2_mount.h"
77
78static int hammer2_init(struct vfsconf *conf);
79static int hammer2_mount(struct mount *mp, char *path, caddr_t data,
80 struct ucred *cred);
81static int hammer2_remount(struct mount *, char *, struct vnode *,
82 struct ucred *);
83static int hammer2_unmount(struct mount *mp, int mntflags);
84static int hammer2_root(struct mount *mp, struct vnode **vpp);
85static int hammer2_statfs(struct mount *mp, struct statfs *sbp,
86 struct ucred *cred);
87static int hammer2_statvfs(struct mount *mp, struct statvfs *sbp,
88 struct ucred *cred);
89static int hammer2_sync(struct mount *mp, int waitfor);
90static int hammer2_vget(struct mount *mp, struct vnode *dvp,
91 ino_t ino, struct vnode **vpp);
92static int hammer2_fhtovp(struct mount *mp, struct vnode *rootvp,
93 struct fid *fhp, struct vnode **vpp);
94static int hammer2_vptofh(struct vnode *vp, struct fid *fhp);
95static int hammer2_checkexp(struct mount *mp, struct sockaddr *nam,
96 int *exflagsp, struct ucred **credanonp);
97
98static int tmpfs_unmount(struct mount *, int);
99static int tmpfs_root(struct mount *, struct vnode **);
100
/*
 * HAMMER2 vfs operations.
 *
 * NOTE(review): vfs_root currently points at the tmpfs implementation;
 * the native hammer2_root() is parked under "notyet".  If "notyet" is
 * ever defined, the second .vfs_root designated initializer below will
 * silently override this one (in C99, the last initializer for a member
 * wins) -- remove the tmpfs entry when switching over.
 */
static struct vfsops hammer2_vfsops = {
	/* From tmpfs */
	.vfs_root =	tmpfs_root,

	/* From HAMMER2 */
	.vfs_init	= hammer2_init,
	.vfs_sync	= hammer2_sync,
	.vfs_mount	= hammer2_mount,
	.vfs_unmount	= hammer2_unmount,
#ifdef notyet
	.vfs_root 	= hammer2_root,
#endif
	.vfs_statfs	= hammer2_statfs,
	/* If we enable statvfs, we disappear in df, till we implement it. */
	/* That makes debugging difficult :) */
//	.vfs_statvfs	= hammer2_statvfs,
	.vfs_vget	= hammer2_vget,
	.vfs_vptofh	= hammer2_vptofh,
	.vfs_fhtovp	= hammer2_fhtovp,
	.vfs_checkexp	= hammer2_checkexp
};
125
126
127MALLOC_DEFINE(M_HAMMER2, "HAMMER2-mount", "");
128
129VFS_SET(hammer2_vfsops, hammer2, 0);
130MODULE_VERSION(hammer2, 1);
131
132static int
133hammer2_init(struct vfsconf *conf)
134{
135 int error;
136
137 error = 0;
138
139 if (HAMMER2_BLOCKREF_SIZE != sizeof(struct hammer2_blockref))
140 error = EINVAL;
141 if (HAMMER2_INODE_SIZE != sizeof(struct hammer2_inode_data))
142 error = EINVAL;
143 if (HAMMER2_ALLOCREF_SIZE != sizeof(struct hammer2_allocref))
144 error = EINVAL;
145 if (HAMMER2_VOLUME_SIZE != sizeof(struct hammer2_volume_data))
146 error = EINVAL;
147
148 if (error)
149 kprintf("HAMMER2 structure size mismatch; cannot continue.\n");
150
151 return (error);
152}
153
/*
 * Mount or remount HAMMER2 filesystem from physical media
 *
 *	mountroot
 *		mp		mount point structure
 *		path		NULL
 *		data		<unused>
 *		cred		<unused>
 *
 *	mount
 *		mp		mount point structure
 *		path		path to mount point
 *		data		pointer to argument structure in user space
 *			volume	volume path (device@LABEL form)
 *			hflags	user mount flags
 *		cred		user credentials
 *
 * RETURNS:	0	Success
 *		!0	error number
 */
static int
hammer2_mount(struct mount *mp, char *path, caddr_t data,
	      struct ucred *cred)
{
	struct hammer2_mount_info info;
	struct hammer2_mount *hmp;
	struct vnode *devvp;
	struct nlookupdata nd;
	char devstr[MNAMELEN];
	size_t size;
	size_t done;
	char *dev, *label;
	int ronly;
	int error;
	int rc;

	hmp = NULL;
	dev = label = NULL;
	devvp = NULL;

	kprintf("hammer2_mount\n");

	if (path == NULL) {
		/*
		 * Root mount
		 */

		/* Mounting a HAMMER2 volume as the root fs is not
		 * implemented yet. */
		return (EOPNOTSUPP);
	} else {
		/*
		 * Non-root mount or updating a mount
		 */

		/* Copy in the userland mount arguments. */
		error = copyin(data, &info, sizeof(info));
		if (error)
			return (error);

		error = copyinstr(info.volume, devstr, MNAMELEN - 1, &done);
		if (error)
			return (error);

		/* Extract device and label */
		dev = devstr;
		label = strchr(devstr, '@');
		/*
		 * Reject a missing '@' or one located at/after the end of
		 * the copied-in string.
		 * NOTE(review): the pointer difference is signed while
		 * 'done' is size_t; the comparison is promoted to unsigned
		 * -- harmless here since the difference is non-negative,
		 * but worth confirming.
		 */
		if (label == NULL ||
		    ((label + 1) - dev) > done)
			return (EINVAL);
		/* Split "device@LABEL" in place; label must be non-empty. */
		*label = '\0';
		label++;
		if (*label == '\0')
			return (EINVAL);

		if (mp->mnt_flag & MNT_UPDATE) {
			/* Update mount */
			/* HAMMER2 implements NFS export via mountctl */
			hmp = MPTOH2(mp);
			devvp = hmp->hm_devvp;
			return hammer2_remount(mp, path, devvp, cred);
		}
	}

	/*
	 * New non-root mount
	 */
	/* Lookup name and verify it refers to a block device */
	error = nlookup_init(&nd, dev, UIO_SYSSPACE, NLC_FOLLOW);
	if (error)
		return (error);
	/*
	 * XXX(review): the two error returns below leave 'nd' initialized;
	 * nlookup_done(&nd) should be called on these paths too or the
	 * lookup state is leaked.
	 */
	error = nlookup(&nd);
	if (error)
		return (error);
	error = cache_vref(&nd.nl_nch, nd.nl_cred, &devvp);
	if (error)
		return (error);
	nlookup_done(&nd);

	if (!vn_isdisk(devvp, &error)) {
		vrele(devvp);
		return (error);
	}

	/*
	 * Common path for new root/non-root mounts;
	 * devvp is a ref-ed by not locked vnode referring to the fs device
	 */

	/* Refuse a device that is already mounted ... */
	error = vfs_mountedon(devvp);
	if (error) {
		vrele(devvp);
		return (error);
	}

	/* ... or otherwise opened. */
	if (vcount(devvp) > 0) {
		vrele(devvp);
		return (EBUSY);
	}

	/*
	 * Open the fs device
	 */
	ronly = (mp->mnt_flag & MNT_RDONLY) != 0;
	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
	/* Flush (and save) any buffers still attached to the device. */
	error = vinvalbuf(devvp, V_SAVE, 0, 0);
	if (error) {
		vn_unlock(devvp);
		vrele(devvp);
		return (error);
	}
	/* This is correct; however due to an NFS quirk of my setup, FREAD
	 * is required... */
	/*
	error = VOP_OPEN(devvp, ronly ? FREAD : FREAD | FWRITE, FSCRED, NULL);
	*/
	/* NOTE(review): the device is opened read-only even for RW mounts;
	 * writes will need the FWRITE variant above. */
	error = VOP_OPEN(devvp, FREAD, FSCRED, NULL);
	vn_unlock(devvp);
	if (error) {
		vrele(devvp);
		return (error);
	}

#ifdef notyet
	/* VOP_IOCTL(EXTENDED_DISK_INFO, devvp); */
	/* if vn device, never use bdwrite(); */
	/* check if device supports BUF_CMD_READALL; */
	/* check if device supports BUF_CMD_WRITEALL; */
#endif

	/* Allocate the per-mount structure (zeroed). */
	hmp = kmalloc(sizeof(*hmp), M_HAMMER2, M_WAITOK | M_ZERO);
	/*mp->mnt_data = (qaddr_t) hmp;*/
	hmp->hm_mp = mp;
	/*hmp->hm_ronly = ronly;*/
	/*hmp->hm_devvp = devvp;*/
	lockinit(&hmp->hm_lk, "h2mp", 0, 0);
	kmalloc_create(&hmp->hm_inodes, "HAMMER2-inodes");
	kmalloc_create(&hmp->hm_ipstacks, "HAMMER2-ipstacks");

	/* Readout volume headers, make sure we have a live filesystem */
	/* Kinda hacky atm */
	{
		struct buf *bps[HAMMER2_NUM_VOLHDRS];
		int valid = 0;
		/*
		 * NOTE(review): hi_tid is an int but transaction ids are
		 * presumably 64-bit on media; the comparison below would
		 * truncate -- confirm against hammer2_disk.h.
		 */
		int hi_tid = 0;
		int hi_num = 0;
		int i;
		uint32_t crc;
		struct hammer2_volume_data *vd;
		for (i = 0; i < HAMMER2_NUM_VOLHDRS; i++) {
			rc = bread(devvp, i * HAMMER2_RESERVE_ALIGN64,
				   HAMMER2_BUFSIZE, &bps[i]);
			if (rc != 0) {
				brelse(bps[i]);
				bps[i] = NULL;
				continue;
			}

			vd = bps[i]->b_data;
			if (vd->magic == HAMMER2_VOLUME_ID_HBO) {
				uint32_t ccrc;
				unsigned char tmp[512];
				/* Copy sector 0 so the CRC field can be
				 * zeroed without touching b_data. */
				bcopy(bps[i]->b_data, &tmp, 512);
				bzero(&tmp[512 - 4], 4);
				/* Calculate CRC32 w/ crc field zero */
				/* XXX: Can we modify b_data? */
				ccrc = hammer2_icrc32(tmp, 512);
				crc = vd->icrc_sect0;

				if (ccrc != crc) {
					brelse(bps[i]);
					bps[i] = NULL;
					continue;
				}

				valid++;
				/* Track the header with the highest
				 * transaction id; it is the most recent. */
				if (vd->last_tid > hi_tid) {
					hi_tid = vd->last_tid;
					hi_num = i;
				}
			}
		}
		if (valid) {
			/* We have found the hammer volume header w/
			 * the highest transaction id. Use it. */

			bcopy(bps[hi_num]->b_data, &hmp->hm_sb,
			      HAMMER2_BUFSIZE);

			/*
			 * XXX(review): headers that failed bread/CRC left
			 * bps[i] == NULL; brelse(NULL) here is unsafe --
			 * this loop should skip NULL entries.
			 */
			for (i = 0 ; i < HAMMER2_NUM_VOLHDRS; i++)
				brelse(bps[i]);

			/* XXX(review): volu_size is presumably 64-bit;
			 * %d truncates it -- verify format specifier. */
			kprintf("HAMMER2 volume %d by\n", hmp->hm_sb.volu_size);
		} else {
			/* XXX More to do! Release structures and stuff */
			/* (device stays open, hmp and its zones leak) */
			return (EINVAL);
		}
	}

	/*
	 * Filesystem subroutines are self-synchronized
	 */
	/*mp->mnt_kern_flag |= MNTK_ALL_MPSAFE;*/


	/* Setup root inode */
	/* NOTE(review): alloci() presumably returns the inode locked;
	 * it is unlocked via hammer2_inode_unlock_ex() below -- confirm. */
	hmp->hm_iroot = alloci(hmp);
	hmp->hm_iroot->type = HAMMER2_INODE_DIR | HAMMER2_INODE_ROOT;
	hmp->hm_iroot->hi_inum = 1;

	/* currently rely on tmpfs routines */
	/*vfs_getnewfsid(mp);*/
	/*vfs_add_vnodeops(mp, &hammer2_vnode_vops, &mp->mnt_vn_norm_ops);*/
	/*vfs_add_vnodeops(mp, &hammer2_spec_vops, &mp->mnt_vn_spec_ops);*/
	/*vfs_add_vnodeops(mp, &hammer2_fifo_vops, &mp->mnt_vn_fifo_ops);*/

	/* Fill in the mount names reported by statfs/df. */
	copystr("hammer2", mp->mnt_stat.f_mntfromname, MNAMELEN - 1, &size);
	bzero(mp->mnt_stat.f_mntfromname + size, MNAMELEN - size);
	bzero(mp->mnt_stat.f_mntonname, sizeof(mp->mnt_stat.f_mntonname));
	copyinstr(path, mp->mnt_stat.f_mntonname,
		  sizeof(mp->mnt_stat.f_mntonname) - 1,
		  &size);

	hammer2_statfs(mp, &mp->mnt_stat, cred);

	hammer2_inode_unlock_ex(hmp->hm_iroot);

	/* Hand off to the borrowed tmpfs mount path for now. */
	return (tmpfs_mount(hmp, mp, path, data, cred));
}
400
/*
 * Handle an MNT_UPDATE remount request.
 *
 * Stub: currently accepts every remount without changing anything
 * (e.g. no ro<->rw transition handling yet).
 */
static int
hammer2_remount(struct mount *mp, char *path, struct vnode *devvp,
                struct ucred *cred)
{
	return (0);
}
407
408static int
409hammer2_unmount(struct mount *mp, int mntflags)
410{
411 struct hammer2_mount *hmp;
412 int flags;
413 int error;
414
415 kprintf("hammer2_unmount\n");
416
417 hmp = MPTOH2(mp);
418 flags = 0;
419
420 if (mntflags & MNT_FORCE)
421 flags |= FORCECLOSE;
422
423 hammer2_mount_exlock(hmp);
424
425 error = vflush(mp, 0, flags);
426
427 /*
428 * Work to do:
429 * 1) Wait on the flusher having no work; heat up if needed
430 * 2) Scan inode RB tree till all the inodes are free
431 * 3) Destroy the kmalloc inode zone
432 * 4) Free the mount point
433 */
434
435 kmalloc_destroy(&hmp->hm_inodes);
436 kmalloc_destroy(&hmp->hm_ipstacks);
437
438 hammer2_mount_unlock(hmp);
439
440 // Tmpfs does this
441 //kfree(hmp, M_HAMMER2);
442
443 return (tmpfs_unmount(mp, mntflags));
444
445 return (error);
446}
447
/*
 * Look up a vnode by inode number.
 *
 * Stub: not implemented yet; always fails with EOPNOTSUPP.
 */
static int
hammer2_vget(struct mount *mp, struct vnode *dvp,
	     ino_t ino, struct vnode **vpp)
{
	kprintf("hammer2_vget\n");
	return (EOPNOTSUPP);
}
455
456static int
457hammer2_root(struct mount *mp, struct vnode **vpp)
458{
459 struct hammer2_mount *hmp;
460 int error;
461 struct vnode *vp;
462
463 kprintf("hammer2_root\n");
464
465 hmp = MPTOH2(mp);
466 hammer2_mount_lock_ex(hmp);
467 if (hmp->hm_iroot == NULL) {
468 *vpp = NULL;
469 error = EINVAL;
470 } else {
471 vp = igetv(hmp->hm_iroot, &error);
472 *vpp = vp;
473 if (vp == NULL)
474 kprintf("vnodefail\n");
475 }
476 hammer2_mount_unlock(hmp);
477
478 return (error);
479}
480
481static int
482hammer2_statfs(struct mount *mp, struct statfs *sbp, struct ucred *cred)
483{
484 struct hammer2_mount *hmp;
485
486 kprintf("hammer2_statfs\n");
487
488 hmp = MPTOH2(mp);
489
490 sbp->f_iosize = PAGE_SIZE;
491 sbp->f_bsize = PAGE_SIZE;
492
493 sbp->f_blocks = 10;
494 sbp->f_bavail = 10;
495 sbp->f_bfree = 10;
496
497 sbp->f_files = 10;
498 sbp->f_ffree = 10;
499 sbp->f_owner = 0;
500
501 return (0);
502}
503
/*
 * statvfs interface.
 *
 * Stub: deliberately disabled (see the vfsops table) because returning
 * EOPNOTSUPP here makes the mount disappear from df(1).
 */
static int
hammer2_statvfs(struct mount *mp, struct statvfs *sbp, struct ucred *cred)
{
	kprintf("hammer2_statvfs\n");
	return (EOPNOTSUPP);
}
510
/*
 * Sync the entire filesystem; this is called from the filesystem syncer
 * process periodically and whenever a user calls sync(1) on the hammer
 * mountpoint.
 *
 * Currently is actually called from the syncer! \o/
 *
 * This task will have to snapshot the state of the dirty inode chain.
 * From that, it will have to make sure all of the inodes on the dirty
 * chain have IO initiated. We make sure that io is initiated for the root
 * block.
 *
 * If waitfor is set, we wait for media to acknowledge the new rootblock.
 *
 * THINKS: side A vs side B, to have sync not stall all I/O?
 */
static int
hammer2_sync(struct mount *mp, int waitfor)
{
	/*
	 * No-op placeholder: flushing is not implemented yet.  The
	 * unused locals of the earlier stub (hmp, ip) were removed to
	 * silence -Wunused-variable warnings.
	 */
	kprintf("hammer2_sync \n");

	return (0);
}
539
/*
 * Convert a vnode to an NFS file handle.
 *
 * Stub: reports success but never fills in *fhp, so exported handles
 * would contain stack garbage -- NOTE(review): must populate fhp (see
 * tmpfs_vptofh below) before enabling NFS export.
 */
static int
hammer2_vptofh(struct vnode *vp, struct fid *fhp)
{
	return (0);
}
545
/*
 * Convert an NFS file handle back to a vnode.
 *
 * Stub: reports success without ever setting *vpp -- NOTE(review): a
 * caller would dereference an uninitialized pointer; must either set
 * *vpp or return an error before enabling NFS export.
 */
static int
hammer2_fhtovp(struct mount *mp, struct vnode *rootvp,
	       struct fid *fhp, struct vnode **vpp)
{
	return (0);
}
552
/*
 * Check whether a client address may access this export.
 *
 * Stub: allows every client and leaves *exflagsp/*credanonp untouched
 * -- NOTE(review): both outputs must be filled in for real NFS export.
 */
static int
hammer2_checkexp(struct mount *mp, struct sockaddr *nam,
		 int *exflagsp, struct ucred **credanonp)
{
	return (0);
}
559
560/*
561 * Efficient memory file system.
562 *
563 * tmpfs is a file system that uses NetBSD's virtual memory sub-system
564 * (the well-known UVM) to store file data and metadata in an efficient
565 * way. This means that it does not follow the structure of an on-disk
566 * file system because it simply does not need to. Instead, it uses
567 * memory-specific data structures and algorithms to automatically
568 * allocate and release resources.
569 */
570
571#include <sys/conf.h>
572#include <sys/param.h>
573#include <sys/limits.h>
574#include <sys/lock.h>
575#include <sys/mutex.h>
576#include <sys/kernel.h>
577#include <sys/stat.h>
578#include <sys/systm.h>
579#include <sys/sysctl.h>
580#include <sys/objcache.h>
581
582#include <vm/vm.h>
583#include <vm/vm_object.h>
584#include <vm/vm_param.h>
585
586#include "hammer2.h"
587
588/*
589 * Default permission for root node
590 */
591#define TMPFS_DEFAULT_ROOT_MODE (S_IRWXU|S_IRGRP|S_IXGRP|S_IROTH|S_IXOTH)
592
593/* --------------------------------------------------------------------- */
594int
595tmpfs_node_ctor(void *obj, void *privdata, int flags)
596{
597 struct tmpfs_node *node = (struct tmpfs_node *)obj;
598
599 node->tn_gen++;
600 node->tn_size = 0;
601 node->tn_status = 0;
602 node->tn_flags = 0;
603 node->tn_links = 0;
604 node->tn_vnode = NULL;
605 node->tn_vpstate = TMPFS_VNODE_WANT;
606 bzero(&node->tn_spec, sizeof(node->tn_spec));
607
608 return (1);
609}
610
611static void
612tmpfs_node_dtor(void *obj, void *privdata)
613{
614 struct tmpfs_node *node = (struct tmpfs_node *)obj;
615 node->tn_type = VNON;
616 node->tn_vpstate = TMPFS_VNODE_DOOMED;
617}
618
619static void*
620tmpfs_node_init(void *args, int flags)
621{
622 struct tmpfs_node *node = (struct tmpfs_node *)objcache_malloc_alloc(args, flags);
623 if (node == NULL)
624 return (NULL);
625 node->tn_id = 0;
626
627 lockinit(&node->tn_interlock, "tmpfs node interlock", 0, LK_CANRECURSE);
628 node->tn_gen = karc4random();
629
630 return node;
631}
632
633static void
634tmpfs_node_fini(void *obj, void *args)
635{
636 struct tmpfs_node *node = (struct tmpfs_node *)obj;
637 lockuninit(&node->tn_interlock);
638 objcache_malloc_free(obj, args);
639}
640
641int
642tmpfs_mount(struct hammer2_mount *hmp,
643 struct mount *mp, char *path, caddr_t data, struct ucred *cred)
644{
645// struct tmpfs_mount *tmp;
646 struct tmpfs_node *root;
647// struct tmpfs_args args;
648 vm_pindex_t pages;
649 vm_pindex_t pages_limit;
650 ino_t nodes;
651 u_int64_t maxfsize;
652 int error;
653 /* Size counters. */
654 ino_t nodes_max;
655 off_t size_max;
656 size_t maxfsize_max;
657 size_t size;
658
659 /* Root node attributes. */
660 uid_t root_uid = cred->cr_uid;
661 gid_t root_gid = cred->cr_gid;
662 mode_t root_mode = (VREAD | VWRITE);
663
664 if (mp->mnt_flag & MNT_UPDATE) {
665 /* XXX: There is no support yet to update file system
666 * settings. Should be added. */
667
668 return EOPNOTSUPP;
669 }
670
671 kprintf("tmpfs_mount\n");
672
673 /*
674 * mount info
675 */
676// bzero(&args, sizeof(args));
677 size_max = 0;
678 nodes_max = 0;
679 maxfsize_max = 0;
680
681 if (path) {
682 if (data) {
683// error = copyin(data, &args, sizeof(args));
684// if (error)
685// return (error);
686 }
687 /*
688 size_max = args.ta_size_max;
689 nodes_max = args.ta_nodes_max;
690 maxfsize_max = args.ta_maxfsize_max;
691 root_uid = args.ta_root_uid;
692 root_gid = args.ta_root_gid;
693 root_mode = args.ta_root_mode;
694 */
695 }
696
697 /*
698 * If mount by non-root, then verify that user has necessary
699 * permissions on the device.
700 */
701 if (cred->cr_uid != 0) {
702 root_mode = VREAD;
703 if ((mp->mnt_flag & MNT_RDONLY) == 0)
704 root_mode |= VWRITE;
705 }
706
707 pages_limit = vm_swap_max + vmstats.v_page_count / 2;
708
709 if (size_max == 0)
710 pages = pages_limit / 2;
711 else if (size_max < PAGE_SIZE)
712 pages = 1;
713 else if (OFF_TO_IDX(size_max) > pages_limit)
714 pages = pages_limit;
715 else
716 pages = OFF_TO_IDX(size_max);
717
718 if (nodes_max == 0)
719 nodes = 3 + pages * PAGE_SIZE / 1024;
720 else if (nodes_max < 3)
721 nodes = 3;
722 else if (nodes_max > pages)
723 nodes = pages;
724 else
725 nodes = nodes_max;
726
727 maxfsize = IDX_TO_OFF(pages_limit);
728 if (maxfsize_max != 0 && maxfsize > maxfsize_max)
729 maxfsize = maxfsize_max;
730
731 /* Allocate the tmpfs mount structure and fill it. */
732// tmp = kmalloc(sizeof(*tmp), M_HAMMER2, M_WAITOK | M_ZERO);
733
734 struct hammer2_mount *tmp = hmp;
735 lockinit(&(tmp->allnode_lock), "tmpfs allnode lock", 0, LK_CANRECURSE);
736 tmp->tm_nodes_max = nodes;
737 tmp->tm_nodes_inuse = 0;
738 tmp->tm_maxfilesize = maxfsize;
739 LIST_INIT(&tmp->tm_nodes_used);
740
741 tmp->tm_pages_max = pages;
742 tmp->tm_pages_used = 0;
743
744 kmalloc_create(&tmp->tm_node_zone, "tmpfs node");
745 kmalloc_create(&tmp->tm_dirent_zone, "tmpfs dirent");
746 kmalloc_create(&tmp->tm_name_zone, "tmpfs name zone");
747
748 kmalloc_raise_limit(tmp->tm_node_zone, sizeof(struct tmpfs_node) *
749 tmp->tm_nodes_max);
750
751 tmp->tm_node_zone_malloc_args.objsize = sizeof(struct tmpfs_node);
752 tmp->tm_node_zone_malloc_args.mtype = tmp->tm_node_zone;
753
754 tmp->tm_dirent_zone_malloc_args.objsize = sizeof(struct tmpfs_dirent);
755 tmp->tm_dirent_zone_malloc_args.mtype = tmp->tm_dirent_zone;
756
757 tmp->tm_dirent_pool = objcache_create( "tmpfs dirent cache",
758 0, 0,
759 NULL, NULL, NULL,
760 objcache_malloc_alloc, objcache_malloc_free,
761 &tmp->tm_dirent_zone_malloc_args);
762 tmp->tm_node_pool = objcache_create( "tmpfs node cache",
763 0, 0,
764 tmpfs_node_ctor, tmpfs_node_dtor, NULL,
765 tmpfs_node_init, tmpfs_node_fini,
766 &tmp->tm_node_zone_malloc_args);
767
768 /* Allocate the root node. */
769 error = tmpfs_alloc_node(tmp, VDIR, root_uid, root_gid,
770 root_mode & ALLPERMS, NULL, NULL,
771 VNOVAL, VNOVAL, &root);
772
773 /*
774 * We are backed by swap, set snocache chflags flag so we
775 * don't trip over swapcache.
776 */
777 root->tn_flags = SF_NOCACHE;
778
779 if (error != 0 || root == NULL) {
780 objcache_destroy(tmp->tm_node_pool);
781 objcache_destroy(tmp->tm_dirent_pool);
782 kfree(tmp, M_HAMMER2);
783 return error;
784 }
785 KASSERT(root->tn_id >= 0, ("tmpfs root with invalid ino: %d", (int)root->tn_id));
786 tmp->tm_root = root;
787
788 mp->mnt_flag |= MNT_LOCAL;
789#if 0
790 mp->mnt_kern_flag |= MNTK_RD_MPSAFE | MNTK_WR_MPSAFE | MNTK_GA_MPSAFE |
791 MNTK_IN_MPSAFE | MNTK_SG_MPSAFE;
792#endif
793 mp->mnt_kern_flag |= MNTK_RD_MPSAFE | MNTK_GA_MPSAFE | MNTK_SG_MPSAFE;
794 mp->mnt_kern_flag |= MNTK_WR_MPSAFE;
795 mp->mnt_kern_flag |= MNTK_NOMSYNC;
796 mp->mnt_kern_flag |= MNTK_THR_SYNC;
797 mp->mnt_data = (qaddr_t)tmp;
798 vfs_getnewfsid(mp);
799
800 vfs_add_vnodeops(mp, &tmpfs_vnode_vops, &mp->mnt_vn_norm_ops);
801 vfs_add_vnodeops(mp, &tmpfs_fifo_vops, &mp->mnt_vn_fifo_ops);
802
803 hammer2_statfs(mp, &mp->mnt_stat, cred);
804
805 return 0;
806}
807
808/* --------------------------------------------------------------------- */
809
810/* ARGSUSED2 */
/* ARGSUSED2 */
/*
 * Tear down the tmpfs side of the mount: truncate and flush all vnodes,
 * detach every directory entry, free every node (root last), destroy
 * the object caches and kmalloc zones, and finally free the shared
 * hammer2_mount structure itself.
 */
static int
tmpfs_unmount(struct mount *mp, int mntflags)
{
	int error;
	int flags = 0;
	int found;
	struct hammer2_mount *tmp;
	struct tmpfs_node *node;

	kprintf("tmpfs_umount\n");

	/* Handle forced unmounts. */
	if (mntflags & MNT_FORCE)
		flags |= FORCECLOSE;

	tmp = VFS_TO_TMPFS(mp);

	/*
	 * Finalize all pending I/O. In the case of tmpfs we want
	 * to throw all the data away so clean out the buffer cache
	 * and vm objects before calling vflush().
	 *
	 * NOTE(review): tn_links is bumped/dropped here without holding
	 * TMPFS_LOCK(tmp) -- presumably safe at unmount time when no
	 * other activity is possible, but worth confirming.
	 */
	LIST_FOREACH(node, &tmp->tm_nodes_used, tn_entries) {
		if (node->tn_type == VREG && node->tn_vnode) {
			/* Temporary extra link keeps the node alive
			 * across the truncate. */
			++node->tn_links;
			TMPFS_NODE_LOCK(node);
			vx_get(node->tn_vnode);
			tmpfs_truncate(node->tn_vnode, 0);
			vx_put(node->tn_vnode);
			TMPFS_NODE_UNLOCK(node);
			--node->tn_links;
		}
	}
	error = vflush(mp, 0, flags);
	if (error != 0)
		return error;

	/*
	 * First pass get rid of all the directory entries and
	 * vnode associations. The directory structure will
	 * remain via the extra link count representing tn_dir.tn_parent.
	 *
	 * No vnodes should remain after the vflush above.
	 */
	LIST_FOREACH(node, &tmp->tm_nodes_used, tn_entries) {
		++node->tn_links;
		TMPFS_NODE_LOCK(node);
		if (node->tn_type == VDIR) {
			struct tmpfs_dirent *de;

			/* Detach and free every entry of this directory. */
			while (!TAILQ_EMPTY(&node->tn_dir.tn_dirhead)) {
				de = TAILQ_FIRST(&node->tn_dir.tn_dirhead);
				tmpfs_dir_detach(node, de);
				tmpfs_free_dirent(tmp, de);
				node->tn_size -= sizeof(struct tmpfs_dirent);
			}
		}
		KKASSERT(node->tn_vnode == NULL);
#if 0
		vp = node->tn_vnode;
		if (vp != NULL) {
			tmpfs_free_vp(vp);
			vrecycle(vp);
			node->tn_vnode = NULL;
		}
#endif
		TMPFS_NODE_UNLOCK(node);
		--node->tn_links;
	}

	/*
	 * Now get rid of all nodes. We can remove any node with a
	 * link count of 0 or any directory node with a link count of
	 * 1. The parents will not be destroyed until all their children
	 * have been destroyed.
	 *
	 * Recursion in tmpfs_free_node() can further modify the list so
	 * we cannot use a next pointer here.
	 *
	 * The root node will be destroyed by this loop (it will be last).
	 */
	while (!LIST_EMPTY(&tmp->tm_nodes_used)) {
		found = 0;
		LIST_FOREACH(node, &tmp->tm_nodes_used, tn_entries) {
			if (node->tn_links == 0 ||
			    (node->tn_links == 1 && node->tn_type == VDIR)) {
				TMPFS_NODE_LOCK(node);
				tmpfs_free_node(tmp, node);
				/* eats lock */
				found = 1;
				break;
			}
		}
		if (found == 0) {
			/* XXX(review): message is missing a '\n'. */
			kprintf("tmpfs: Cannot free entire node tree!");
			break;
		}
	}

	KKASSERT(tmp->tm_root == NULL);

	objcache_destroy(tmp->tm_dirent_pool);
	objcache_destroy(tmp->tm_node_pool);

	kmalloc_destroy(&tmp->tm_name_zone);
	kmalloc_destroy(&tmp->tm_dirent_zone);
	kmalloc_destroy(&tmp->tm_node_zone);

	tmp->tm_node_zone = tmp->tm_dirent_zone = NULL;

	lockuninit(&tmp->allnode_lock);
	KKASSERT(tmp->tm_pages_used == 0);
	KKASSERT(tmp->tm_nodes_inuse == 0);

	/* Throw away the hammer2_mount structure. */
	kfree(tmp, M_HAMMER2);
	mp->mnt_data = NULL;

	mp->mnt_flag &= ~MNT_LOCAL;
	return 0;
}
932
933/* --------------------------------------------------------------------- */
934
935static int
936tmpfs_root(struct mount *mp, struct vnode **vpp)
937{
938 struct hammer2_mount *tmp;
939 int error;
940
941 kprintf("tmpfs_root\n");
942
943 tmp = VFS_TO_TMPFS(mp);
944 if (tmp->tm_root == NULL) {
945 kprintf("tmpfs_root: called without root node %p\n", mp);
946 print_backtrace(-1);
947 *vpp = NULL;
948 error = EINVAL;
949 } else {
950 error = tmpfs_alloc_vp(mp, tmp->tm_root, LK_EXCLUSIVE, vpp);
951 (*vpp)->v_flag |= VROOT;
952 (*vpp)->v_type = VDIR;
953 }
954 return error;
955}
956
957/* --------------------------------------------------------------------- */
958
959static int
960tmpfs_fhtovp(struct mount *mp, struct vnode *rootvp, struct fid *fhp, struct vnode **vpp)
961{
962 boolean_t found;
963 struct tmpfs_fid *tfhp;
964 struct hammer2_mount *tmp;
965 struct tmpfs_node *node;
966
967 tmp = VFS_TO_TMPFS(mp);
968
969 tfhp = (struct tmpfs_fid *)fhp;
970 if (tfhp->tf_len != sizeof(struct tmpfs_fid))
971 return EINVAL;
972
973 if (tfhp->tf_id >= tmp->tm_nodes_max)
974 return EINVAL;
975
976 found = FALSE;
977
978 TMPFS_LOCK(tmp);
979 LIST_FOREACH(node, &tmp->tm_nodes_used, tn_entries) {
980 if (node->tn_id == tfhp->tf_id &&
981 node->tn_gen == tfhp->tf_gen) {
982 found = TRUE;
983 break;
984 }
985 }
986 TMPFS_UNLOCK(tmp);
987
988 if (found)
989 return (tmpfs_alloc_vp(mp, node, LK_EXCLUSIVE, vpp));
990
991 return (EINVAL);
992}
993
994/* --------------------------------------------------------------------- */
995
996static int
997tmpfs_vptofh(struct vnode *vp, struct fid *fhp)
998{
999 struct tmpfs_node *node;
1000 struct tmpfs_fid tfh;
1001 node = VP_TO_TMPFS_NODE(vp);
1002 memset(&tfh, 0, sizeof(tfh));
1003 tfh.tf_len = sizeof(struct tmpfs_fid);
1004 tfh.tf_gen = node->tn_gen;
1005 tfh.tf_id = node->tn_id;
1006 memcpy(fhp, &tfh, sizeof(tfh));
1007 return (0);
1008}