4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright (c) 2012, 2018 by Delphix. All rights reserved.
26 /* Portions Copyright 2010 Robert Milkowski */
28 #include <sys/types.h>
29 #include <sys/param.h>
30 #include <sys/sysmacros.h>
32 #include <sys/pathname.h>
33 #include <sys/vnode.h>
35 #include <sys/mntent.h>
36 #include <sys/cmn_err.h>
37 #include <sys/zfs_znode.h>
38 #include <sys/zfs_vnops.h>
39 #include <sys/zfs_dir.h>
41 #include <sys/fs/zfs.h>
43 #include <sys/dsl_prop.h>
44 #include <sys/dsl_dataset.h>
45 #include <sys/dsl_deleg.h>
49 #include <sys/sa_impl.h>
50 #include <sys/policy.h>
51 #include <sys/atomic.h>
52 #include <sys/zfs_ioctl.h>
53 #include <sys/zfs_ctldir.h>
54 #include <sys/zfs_fuid.h>
55 #include <sys/zfs_quota.h>
56 #include <sys/sunddi.h>
57 #include <sys/dmu_objset.h>
58 #include <sys/dsl_dir.h>
59 #include <sys/spa_boot.h>
60 #include <sys/objlist.h>
62 #include <linux/vfs_compat.h>
63 #include "zfs_comutil.h"
88 static const match_table_t zpl_tokens = {
89 { TOKEN_RO, MNTOPT_RO },
90 { TOKEN_RW, MNTOPT_RW },
91 { TOKEN_SETUID, MNTOPT_SETUID },
92 { TOKEN_NOSETUID, MNTOPT_NOSETUID },
93 { TOKEN_EXEC, MNTOPT_EXEC },
94 { TOKEN_NOEXEC, MNTOPT_NOEXEC },
95 { TOKEN_DEVICES, MNTOPT_DEVICES },
96 { TOKEN_NODEVICES, MNTOPT_NODEVICES },
97 { TOKEN_DIRXATTR, MNTOPT_DIRXATTR },
98 { TOKEN_SAXATTR, MNTOPT_SAXATTR },
99 { TOKEN_XATTR, MNTOPT_XATTR },
100 { TOKEN_NOXATTR, MNTOPT_NOXATTR },
101 { TOKEN_ATIME, MNTOPT_ATIME },
102 { TOKEN_NOATIME, MNTOPT_NOATIME },
103 { TOKEN_RELATIME, MNTOPT_RELATIME },
104 { TOKEN_NORELATIME, MNTOPT_NORELATIME },
105 { TOKEN_NBMAND, MNTOPT_NBMAND },
106 { TOKEN_NONBMAND, MNTOPT_NONBMAND },
107 { TOKEN_MNTPOINT, MNTOPT_MNTPOINT "=%s" },
108 { TOKEN_LAST, NULL },
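/*
 * For example, a mount string such as "rw,xattr=sa,noatime" matches
 * TOKEN_RW, TOKEN_SAXATTR and TOKEN_NOATIME in turn, while
 * "mntpoint=/tank/fs" matches TOKEN_MNTPOINT and its "%s" pattern
 * captures "/tank/fs" as a substring argument for match_strdup().
 */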
112 zfsvfs_vfs_free(vfs_t *vfsp)
115 if (vfsp->vfs_mntpoint != NULL)
116 kmem_strfree(vfsp->vfs_mntpoint);
118 kmem_free(vfsp, sizeof (vfs_t));
123 zfsvfs_parse_option(char *option, int token, substring_t *args, vfs_t *vfsp)
127 vfsp->vfs_readonly = B_TRUE;
128 vfsp->vfs_do_readonly = B_TRUE;
131 vfsp->vfs_readonly = B_FALSE;
132 vfsp->vfs_do_readonly = B_TRUE;
135 vfsp->vfs_setuid = B_TRUE;
136 vfsp->vfs_do_setuid = B_TRUE;
139 vfsp->vfs_setuid = B_FALSE;
140 vfsp->vfs_do_setuid = B_TRUE;
143 vfsp->vfs_exec = B_TRUE;
144 vfsp->vfs_do_exec = B_TRUE;
147 vfsp->vfs_exec = B_FALSE;
148 vfsp->vfs_do_exec = B_TRUE;
151 vfsp->vfs_devices = B_TRUE;
152 vfsp->vfs_do_devices = B_TRUE;
154 case TOKEN_NODEVICES:
155 vfsp->vfs_devices = B_FALSE;
156 vfsp->vfs_do_devices = B_TRUE;
159 vfsp->vfs_xattr = ZFS_XATTR_DIR;
160 vfsp->vfs_do_xattr = B_TRUE;
163 vfsp->vfs_xattr = ZFS_XATTR_SA;
164 vfsp->vfs_do_xattr = B_TRUE;
167 vfsp->vfs_xattr = ZFS_XATTR_DIR;
168 vfsp->vfs_do_xattr = B_TRUE;
171 vfsp->vfs_xattr = ZFS_XATTR_OFF;
172 vfsp->vfs_do_xattr = B_TRUE;
175 vfsp->vfs_atime = B_TRUE;
176 vfsp->vfs_do_atime = B_TRUE;
179 vfsp->vfs_atime = B_FALSE;
180 vfsp->vfs_do_atime = B_TRUE;
183 vfsp->vfs_relatime = B_TRUE;
184 vfsp->vfs_do_relatime = B_TRUE;
186 case TOKEN_NORELATIME:
187 vfsp->vfs_relatime = B_FALSE;
188 vfsp->vfs_do_relatime = B_TRUE;
191 vfsp->vfs_nbmand = B_TRUE;
192 vfsp->vfs_do_nbmand = B_TRUE;
195 vfsp->vfs_nbmand = B_FALSE;
196 vfsp->vfs_do_nbmand = B_TRUE;
199 vfsp->vfs_mntpoint = match_strdup(&args[0]);
200 if (vfsp->vfs_mntpoint == NULL)
201 return (SET_ERROR(ENOMEM));
212 * Parse the raw mntopts and return a vfs_t describing the options.
215 zfsvfs_parse_options(char *mntopts, vfs_t **vfsp)
220 tmp_vfsp = kmem_zalloc(sizeof (vfs_t), KM_SLEEP);
222 if (mntopts != NULL) {
223 substring_t args[MAX_OPT_ARGS];
224 char *tmp_mntopts, *p, *t;
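/*
 * strsep() consumes the string it parses, so parse a private copy
 * of the caller's mntopts and free it once parsing is complete.
 */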
227 tmp_mntopts = t = kmem_strdup(mntopts);
228 if (tmp_mntopts == NULL)
229 return (SET_ERROR(ENOMEM));
231 while ((p = strsep(&t, ",")) != NULL) {
235 args[0].to = args[0].from = NULL;
236 token = match_token(p, zpl_tokens, args);
237 error = zfsvfs_parse_option(p, token, args, tmp_vfsp);
239 kmem_strfree(tmp_mntopts);
240 zfsvfs_vfs_free(tmp_vfsp);
245 kmem_strfree(tmp_mntopts);
254 zfs_is_readonly(zfsvfs_t *zfsvfs)
256 return (!!(zfsvfs->z_sb->s_flags & SB_RDONLY));
261 zfs_sync(struct super_block *sb, int wait, cred_t *cr)
263 zfsvfs_t *zfsvfs = sb->s_fs_info;
266 * Semantically, the only requirement is that the sync be initiated.
267 * The DMU syncs out txgs frequently, so there's nothing to do.
272 if (zfsvfs != NULL) {
274 * Sync a specific filesystem.
279 dp = dmu_objset_pool(zfsvfs->z_os);
282 * If the system is shutting down, then skip any
283 * filesystems which may exist on a suspended pool.
285 if (spa_suspended(dp->dp_spa)) {
290 if (zfsvfs->z_log != NULL)
291 zil_commit(zfsvfs->z_log, 0);
296 * Sync all ZFS filesystems. This is what happens when you
297 * run sync(1). Unlike other filesystems, ZFS honors the
298 * request by waiting for all pools to commit all dirty data.
307 atime_changed_cb(void *arg, uint64_t newval)
309 zfsvfs_t *zfsvfs = arg;
310 struct super_block *sb = zfsvfs->z_sb;
315 * Update SB_NOATIME bit in VFS super block. Since atime update is
316 * determined by atime_needs_update(), atime_needs_update() needs to
317 * return false if atime is turned off, and not unconditionally return
318 * false if atime is turned on.
321 sb->s_flags &= ~SB_NOATIME;
323 sb->s_flags |= SB_NOATIME;
327 relatime_changed_cb(void *arg, uint64_t newval)
329 ((zfsvfs_t *)arg)->z_relatime = newval;
333 xattr_changed_cb(void *arg, uint64_t newval)
335 zfsvfs_t *zfsvfs = arg;
337 if (newval == ZFS_XATTR_OFF) {
338 zfsvfs->z_flags &= ~ZSB_XATTR;
340 zfsvfs->z_flags |= ZSB_XATTR;
342 if (newval == ZFS_XATTR_SA)
343 zfsvfs->z_xattr_sa = B_TRUE;
345 zfsvfs->z_xattr_sa = B_FALSE;
350 acltype_changed_cb(void *arg, uint64_t newval)
352 zfsvfs_t *zfsvfs = arg;
355 case ZFS_ACLTYPE_NFSV4:
356 case ZFS_ACLTYPE_OFF:
357 zfsvfs->z_acl_type = ZFS_ACLTYPE_OFF;
358 zfsvfs->z_sb->s_flags &= ~SB_POSIXACL;
360 case ZFS_ACLTYPE_POSIX:
361 #ifdef CONFIG_FS_POSIX_ACL
362 zfsvfs->z_acl_type = ZFS_ACLTYPE_POSIX;
363 zfsvfs->z_sb->s_flags |= SB_POSIXACL;
365 zfsvfs->z_acl_type = ZFS_ACLTYPE_OFF;
366 zfsvfs->z_sb->s_flags &= ~SB_POSIXACL;
367 #endif /* CONFIG_FS_POSIX_ACL */
375 blksz_changed_cb(void *arg, uint64_t newval)
377 zfsvfs_t *zfsvfs = arg;
378 ASSERT3U(newval, <=, spa_maxblocksize(dmu_objset_spa(zfsvfs->z_os)));
379 ASSERT3U(newval, >=, SPA_MINBLOCKSIZE);
380 ASSERT(ISP2(newval));
382 zfsvfs->z_max_blksz = newval;
386 readonly_changed_cb(void *arg, uint64_t newval)
388 zfsvfs_t *zfsvfs = arg;
389 struct super_block *sb = zfsvfs->z_sb;
395 sb->s_flags |= SB_RDONLY;
397 sb->s_flags &= ~SB_RDONLY;
401 devices_changed_cb(void *arg, uint64_t newval)
406 setuid_changed_cb(void *arg, uint64_t newval)
411 exec_changed_cb(void *arg, uint64_t newval)
416 nbmand_changed_cb(void *arg, uint64_t newval)
418 zfsvfs_t *zfsvfs = arg;
419 struct super_block *sb = zfsvfs->z_sb;
425 sb->s_flags |= SB_MANDLOCK;
427 sb->s_flags &= ~SB_MANDLOCK;
431 snapdir_changed_cb(void *arg, uint64_t newval)
433 ((zfsvfs_t *)arg)->z_show_ctldir = newval;
437 vscan_changed_cb(void *arg, uint64_t newval)
439 ((zfsvfs_t *)arg)->z_vscan = newval;
443 acl_mode_changed_cb(void *arg, uint64_t newval)
445 zfsvfs_t *zfsvfs = arg;
447 zfsvfs->z_acl_mode = newval;
451 acl_inherit_changed_cb(void *arg, uint64_t newval)
453 ((zfsvfs_t *)arg)->z_acl_inherit = newval;
457 zfs_register_callbacks(vfs_t *vfsp)
459 struct dsl_dataset *ds = NULL;
461 zfsvfs_t *zfsvfs = NULL;
465 zfsvfs = vfsp->vfs_data;
470 * The act of registering our callbacks will destroy any mount
471 * options we may have. In order to enable temporary overrides
472 * of mount options, we stash away the current values and
473 * restore them after we register the callbacks.
475 if (zfs_is_readonly(zfsvfs) || !spa_writeable(dmu_objset_spa(os))) {
476 vfsp->vfs_do_readonly = B_TRUE;
477 vfsp->vfs_readonly = B_TRUE;
481 * Register property callbacks.
483 * It would probably be fine to just check for i/o error from
484 * the first prop_register(), but I guess I like to go
487 ds = dmu_objset_ds(os);
488 dsl_pool_config_enter(dmu_objset_pool(os), FTAG);
489 error = dsl_prop_register(ds,
490 zfs_prop_to_name(ZFS_PROP_ATIME), atime_changed_cb, zfsvfs);
491 error = error ? error : dsl_prop_register(ds,
492 zfs_prop_to_name(ZFS_PROP_RELATIME), relatime_changed_cb, zfsvfs);
493 error = error ? error : dsl_prop_register(ds,
494 zfs_prop_to_name(ZFS_PROP_XATTR), xattr_changed_cb, zfsvfs);
495 error = error ? error : dsl_prop_register(ds,
496 zfs_prop_to_name(ZFS_PROP_RECORDSIZE), blksz_changed_cb, zfsvfs);
497 error = error ? error : dsl_prop_register(ds,
498 zfs_prop_to_name(ZFS_PROP_READONLY), readonly_changed_cb, zfsvfs);
499 error = error ? error : dsl_prop_register(ds,
500 zfs_prop_to_name(ZFS_PROP_DEVICES), devices_changed_cb, zfsvfs);
501 error = error ? error : dsl_prop_register(ds,
502 zfs_prop_to_name(ZFS_PROP_SETUID), setuid_changed_cb, zfsvfs);
503 error = error ? error : dsl_prop_register(ds,
504 zfs_prop_to_name(ZFS_PROP_EXEC), exec_changed_cb, zfsvfs);
505 error = error ? error : dsl_prop_register(ds,
506 zfs_prop_to_name(ZFS_PROP_SNAPDIR), snapdir_changed_cb, zfsvfs);
507 error = error ? error : dsl_prop_register(ds,
508 zfs_prop_to_name(ZFS_PROP_ACLTYPE), acltype_changed_cb, zfsvfs);
509 error = error ? error : dsl_prop_register(ds,
510 zfs_prop_to_name(ZFS_PROP_ACLMODE), acl_mode_changed_cb, zfsvfs);
511 error = error ? error : dsl_prop_register(ds,
512 zfs_prop_to_name(ZFS_PROP_ACLINHERIT), acl_inherit_changed_cb,
514 error = error ? error : dsl_prop_register(ds,
515 zfs_prop_to_name(ZFS_PROP_VSCAN), vscan_changed_cb, zfsvfs);
516 error = error ? error : dsl_prop_register(ds,
517 zfs_prop_to_name(ZFS_PROP_NBMAND), nbmand_changed_cb, zfsvfs);
518 dsl_pool_config_exit(dmu_objset_pool(os), FTAG);
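/*
 * The "error ? error : dsl_prop_register(...)" chain above
 * short-circuits on the first failure: later registrations are
 * skipped and only the first error is preserved.
 */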
523 * Invoke our callbacks to restore temporary mount options.
525 if (vfsp->vfs_do_readonly)
526 readonly_changed_cb(zfsvfs, vfsp->vfs_readonly);
527 if (vfsp->vfs_do_setuid)
528 setuid_changed_cb(zfsvfs, vfsp->vfs_setuid);
529 if (vfsp->vfs_do_exec)
530 exec_changed_cb(zfsvfs, vfsp->vfs_exec);
531 if (vfsp->vfs_do_devices)
532 devices_changed_cb(zfsvfs, vfsp->vfs_devices);
533 if (vfsp->vfs_do_xattr)
534 xattr_changed_cb(zfsvfs, vfsp->vfs_xattr);
535 if (vfsp->vfs_do_atime)
536 atime_changed_cb(zfsvfs, vfsp->vfs_atime);
537 if (vfsp->vfs_do_relatime)
538 relatime_changed_cb(zfsvfs, vfsp->vfs_relatime);
539 if (vfsp->vfs_do_nbmand)
540 nbmand_changed_cb(zfsvfs, vfsp->vfs_nbmand);
545 dsl_prop_unregister_all(ds, zfsvfs);
550 * Takes a dataset, a property, a value and that value's setpoint as
551 * found in the ZAP. Checks if the property has been changed in the vfs.
552 * If so, val and setpoint will be overwritten with updated content.
553 * Otherwise, they are left unchanged.
556 zfs_get_temporary_prop(dsl_dataset_t *ds, zfs_prop_t zfs_prop, uint64_t *val,
565 error = dmu_objset_from_ds(ds, &os);
569 if (dmu_objset_type(os) != DMU_OST_ZFS)
572 mutex_enter(&os->os_user_ptr_lock);
573 zfvp = dmu_objset_get_user(os);
574 mutex_exit(&os->os_user_ptr_lock);
582 if (vfsp->vfs_do_atime)
583 tmp = vfsp->vfs_atime;
585 case ZFS_PROP_RELATIME:
586 if (vfsp->vfs_do_relatime)
587 tmp = vfsp->vfs_relatime;
589 case ZFS_PROP_DEVICES:
590 if (vfsp->vfs_do_devices)
591 tmp = vfsp->vfs_devices;
594 if (vfsp->vfs_do_exec)
595 tmp = vfsp->vfs_exec;
597 case ZFS_PROP_SETUID:
598 if (vfsp->vfs_do_setuid)
599 tmp = vfsp->vfs_setuid;
601 case ZFS_PROP_READONLY:
602 if (vfsp->vfs_do_readonly)
603 tmp = vfsp->vfs_readonly;
606 if (vfsp->vfs_do_xattr)
607 tmp = vfsp->vfs_xattr;
609 case ZFS_PROP_NBMAND:
610 if (vfsp->vfs_do_nbmand)
611 tmp = vfsp->vfs_nbmand;
618 (void) strcpy(setpoint, "temporary");
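/*
 * This is the setpoint userspace sees as SOURCE "temporary" in
 * "zfs get" output for properties overridden by -o mount options.
 */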
625 * Associate this zfsvfs with the given objset, which must be owned.
626 * This will cache a bunch of on-disk state from the objset in the
630 zfsvfs_init(zfsvfs_t *zfsvfs, objset_t *os)
635 zfsvfs->z_max_blksz = SPA_OLD_MAXBLOCKSIZE;
636 zfsvfs->z_show_ctldir = ZFS_SNAPDIR_VISIBLE;
639 error = zfs_get_zplprop(os, ZFS_PROP_VERSION, &zfsvfs->z_version);
642 if (zfsvfs->z_version >
643 zfs_zpl_version_map(spa_version(dmu_objset_spa(os)))) {
644 (void) printk("Can't mount a version %lld file system "
645 "on a version %lld pool\n. Pool must be upgraded to mount "
646 "this file system.\n", (u_longlong_t)zfsvfs->z_version,
647 (u_longlong_t)spa_version(dmu_objset_spa(os)));
648 return (SET_ERROR(ENOTSUP));
650 error = zfs_get_zplprop(os, ZFS_PROP_NORMALIZE, &val);
653 zfsvfs->z_norm = (int)val;
655 error = zfs_get_zplprop(os, ZFS_PROP_UTF8ONLY, &val);
658 zfsvfs->z_utf8 = (val != 0);
660 error = zfs_get_zplprop(os, ZFS_PROP_CASE, &val);
663 zfsvfs->z_case = (uint_t)val;
665 if ((error = zfs_get_zplprop(os, ZFS_PROP_ACLTYPE, &val)) != 0)
667 zfsvfs->z_acl_type = (uint_t)val;
670 * Fold case on file systems that are always or sometimes case
673 if (zfsvfs->z_case == ZFS_CASE_INSENSITIVE ||
674 zfsvfs->z_case == ZFS_CASE_MIXED)
675 zfsvfs->z_norm |= U8_TEXTPREP_TOUPPER;
677 zfsvfs->z_use_fuids = USE_FUIDS(zfsvfs->z_version, zfsvfs->z_os);
678 zfsvfs->z_use_sa = USE_SA(zfsvfs->z_version, zfsvfs->z_os);
681 if (zfsvfs->z_use_sa) {
682 /* should either have both of these objects or none */
683 error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_SA_ATTRS, 8, 1,
688 error = zfs_get_zplprop(os, ZFS_PROP_XATTR, &val);
689 if ((error == 0) && (val == ZFS_XATTR_SA))
690 zfsvfs->z_xattr_sa = B_TRUE;
693 error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_ROOT_OBJ, 8, 1,
697 ASSERT(zfsvfs->z_root != 0);
699 error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_UNLINKED_SET, 8, 1,
700 &zfsvfs->z_unlinkedobj);
704 error = zap_lookup(os, MASTER_NODE_OBJ,
705 zfs_userquota_prop_prefixes[ZFS_PROP_USERQUOTA],
706 8, 1, &zfsvfs->z_userquota_obj);
708 zfsvfs->z_userquota_obj = 0;
712 error = zap_lookup(os, MASTER_NODE_OBJ,
713 zfs_userquota_prop_prefixes[ZFS_PROP_GROUPQUOTA],
714 8, 1, &zfsvfs->z_groupquota_obj);
716 zfsvfs->z_groupquota_obj = 0;
720 error = zap_lookup(os, MASTER_NODE_OBJ,
721 zfs_userquota_prop_prefixes[ZFS_PROP_PROJECTQUOTA],
722 8, 1, &zfsvfs->z_projectquota_obj);
724 zfsvfs->z_projectquota_obj = 0;
728 error = zap_lookup(os, MASTER_NODE_OBJ,
729 zfs_userquota_prop_prefixes[ZFS_PROP_USEROBJQUOTA],
730 8, 1, &zfsvfs->z_userobjquota_obj);
732 zfsvfs->z_userobjquota_obj = 0;
736 error = zap_lookup(os, MASTER_NODE_OBJ,
737 zfs_userquota_prop_prefixes[ZFS_PROP_GROUPOBJQUOTA],
738 8, 1, &zfsvfs->z_groupobjquota_obj);
740 zfsvfs->z_groupobjquota_obj = 0;
744 error = zap_lookup(os, MASTER_NODE_OBJ,
745 zfs_userquota_prop_prefixes[ZFS_PROP_PROJECTOBJQUOTA],
746 8, 1, &zfsvfs->z_projectobjquota_obj);
748 zfsvfs->z_projectobjquota_obj = 0;
752 error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_FUID_TABLES, 8, 1,
753 &zfsvfs->z_fuid_obj);
755 zfsvfs->z_fuid_obj = 0;
759 error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_SHARES_DIR, 8, 1,
760 &zfsvfs->z_shares_dir);
762 zfsvfs->z_shares_dir = 0;
766 error = sa_setup(os, sa_obj, zfs_attr_table, ZPL_END,
767 &zfsvfs->z_attr_table);
771 if (zfsvfs->z_version >= ZPL_VERSION_SA)
772 sa_register_update_callback(os, zfs_sa_upgrade);
778 zfsvfs_create(const char *osname, boolean_t readonly, zfsvfs_t **zfvp)
783 boolean_t ro = (readonly || (strchr(osname, '@') != NULL));
785 zfsvfs = kmem_zalloc(sizeof (zfsvfs_t), KM_SLEEP);
787 error = dmu_objset_own(osname, DMU_OST_ZFS, ro, B_TRUE, zfsvfs, &os);
789 kmem_free(zfsvfs, sizeof (zfsvfs_t));
793 error = zfsvfs_create_impl(zfvp, zfsvfs, os);
795 dmu_objset_disown(os, B_TRUE, zfsvfs);
802 * Note: zfsvfs is assumed to be malloc'd, and will be freed by this function
803 * on a failure. Do not pass in a statically allocated zfsvfs.
806 zfsvfs_create_impl(zfsvfs_t **zfvp, zfsvfs_t *zfsvfs, objset_t *os)
810 zfsvfs->z_vfs = NULL;
812 zfsvfs->z_parent = zfsvfs;
814 mutex_init(&zfsvfs->z_znodes_lock, NULL, MUTEX_DEFAULT, NULL);
815 mutex_init(&zfsvfs->z_lock, NULL, MUTEX_DEFAULT, NULL);
816 list_create(&zfsvfs->z_all_znodes, sizeof (znode_t),
817 offsetof(znode_t, z_link_node));
818 rrm_init(&zfsvfs->z_teardown_lock, B_FALSE);
819 rw_init(&zfsvfs->z_teardown_inactive_lock, NULL, RW_DEFAULT, NULL);
820 rw_init(&zfsvfs->z_fuid_lock, NULL, RW_DEFAULT, NULL);
822 int size = MIN(1 << (highbit64(zfs_object_mutex_size) - 1),
824 zfsvfs->z_hold_size = size;
825 zfsvfs->z_hold_trees = vmem_zalloc(sizeof (avl_tree_t) * size,
827 zfsvfs->z_hold_locks = vmem_zalloc(sizeof (kmutex_t) * size, KM_SLEEP);
828 for (int i = 0; i != size; i++) {
829 avl_create(&zfsvfs->z_hold_trees[i], zfs_znode_hold_compare,
830 sizeof (znode_hold_t), offsetof(znode_hold_t, zh_node));
831 mutex_init(&zfsvfs->z_hold_locks[i], NULL, MUTEX_DEFAULT, NULL);
834 error = zfsvfs_init(zfsvfs, os);
841 zfsvfs->z_drain_task = TASKQID_INVALID;
842 zfsvfs->z_draining = B_FALSE;
843 zfsvfs->z_drain_cancel = B_TRUE;
850 zfsvfs_setup(zfsvfs_t *zfsvfs, boolean_t mounting)
853 boolean_t readonly = zfs_is_readonly(zfsvfs);
855 error = zfs_register_callbacks(zfsvfs->z_vfs);
859 zfsvfs->z_log = zil_open(zfsvfs->z_os, zfs_get_data);
862 * If we are not mounting (i.e., online recv), then we don't
863 * have to worry about replaying the log as we blocked all
864 * operations out since we closed the ZIL.
867 ASSERT3P(zfsvfs->z_kstat.dk_kstats, ==, NULL);
868 dataset_kstats_create(&zfsvfs->z_kstat, zfsvfs->z_os);
871 * During replay we remove the read only flag to
872 * allow replays to succeed.
875 readonly_changed_cb(zfsvfs, B_FALSE);
878 if (zap_get_stats(zfsvfs->z_os, zfsvfs->z_unlinkedobj,
880 dataset_kstats_update_nunlinks_kstat(
881 &zfsvfs->z_kstat, zs.zs_num_entries);
882 dprintf_ds(zfsvfs->z_os->os_dsl_dataset,
883 "num_entries in unlinked set: %llu",
886 zfs_unlinked_drain(zfsvfs);
887 dsl_dir_t *dd = zfsvfs->z_os->os_dsl_dataset->ds_dir;
888 dd->dd_activity_cancelled = B_FALSE;
892 * Parse and replay the intent log.
894 * Because of ziltest, this must be done after
895 * zfs_unlinked_drain(). (Further note: ziltest
896 * doesn't use readonly mounts, where
897 * zfs_unlinked_drain() isn't called.) This is because
898 * ziltest causes spa_sync() to think it's committed,
899 * but actually it is not, so the intent log contains
900 * many txg's worth of changes.
902 * In particular, if object N is in the unlinked set in
903 * the last txg to actually sync, then it could be
904 * actually freed in a later txg and then reallocated
905 * in a yet later txg. This would write a "create
906 * object N" record to the intent log. Normally, this
907 * would be fine because the spa_sync() would have
908 * written out the fact that object N is free, before
909 * we could write the "create object N" intent log
912 * But when we are in ziltest mode, we advance the "open
913 * txg" without actually spa_sync()-ing the changes to
914 * disk. So we would see that object N is still
915 * allocated and in the unlinked set, and there is an
916 * intent log record saying to allocate it.
918 if (spa_writeable(dmu_objset_spa(zfsvfs->z_os))) {
919 if (zil_replay_disable) {
920 zil_destroy(zfsvfs->z_log, B_FALSE);
922 zfsvfs->z_replay = B_TRUE;
923 zil_replay(zfsvfs->z_os, zfsvfs,
925 zfsvfs->z_replay = B_FALSE;
929 /* restore readonly bit */
931 readonly_changed_cb(zfsvfs, B_TRUE);
935 * Set the objset user_ptr to track its zfsvfs.
937 mutex_enter(&zfsvfs->z_os->os_user_ptr_lock);
938 dmu_objset_set_user(zfsvfs->z_os, zfsvfs);
939 mutex_exit(&zfsvfs->z_os->os_user_ptr_lock);
945 zfsvfs_free(zfsvfs_t *zfsvfs)
947 int i, size = zfsvfs->z_hold_size;
949 zfs_fuid_destroy(zfsvfs);
951 mutex_destroy(&zfsvfs->z_znodes_lock);
952 mutex_destroy(&zfsvfs->z_lock);
953 list_destroy(&zfsvfs->z_all_znodes);
954 rrm_destroy(&zfsvfs->z_teardown_lock);
955 rw_destroy(&zfsvfs->z_teardown_inactive_lock);
956 rw_destroy(&zfsvfs->z_fuid_lock);
957 for (i = 0; i != size; i++) {
958 avl_destroy(&zfsvfs->z_hold_trees[i]);
959 mutex_destroy(&zfsvfs->z_hold_locks[i]);
961 vmem_free(zfsvfs->z_hold_trees, sizeof (avl_tree_t) * size);
962 vmem_free(zfsvfs->z_hold_locks, sizeof (kmutex_t) * size);
963 zfsvfs_vfs_free(zfsvfs->z_vfs);
964 dataset_kstats_destroy(&zfsvfs->z_kstat);
965 kmem_free(zfsvfs, sizeof (zfsvfs_t));
969 zfs_set_fuid_feature(zfsvfs_t *zfsvfs)
971 zfsvfs->z_use_fuids = USE_FUIDS(zfsvfs->z_version, zfsvfs->z_os);
972 zfsvfs->z_use_sa = USE_SA(zfsvfs->z_version, zfsvfs->z_os);
976 zfs_unregister_callbacks(zfsvfs_t *zfsvfs)
978 objset_t *os = zfsvfs->z_os;
980 if (!dmu_objset_is_snapshot(os))
981 dsl_prop_unregister_all(dmu_objset_ds(os), zfsvfs);
986 * Check that the hex label string is appropriate for the dataset being
987 * mounted into the global_zone proper.
989 * Return an error if the hex label string is not default or
990 * admin_low/admin_high. For admin_low labels, the corresponding
991 * dataset must be readonly.
994 zfs_check_global_label(const char *dsname, const char *hexsl)
996 if (strcasecmp(hexsl, ZFS_MLSLABEL_DEFAULT) == 0)
998 if (strcasecmp(hexsl, ADMIN_HIGH) == 0)
1000 if (strcasecmp(hexsl, ADMIN_LOW) == 0) {
1001 /* must be readonly */
1004 if (dsl_prop_get_integer(dsname,
1005 zfs_prop_to_name(ZFS_PROP_READONLY), &rdonly, NULL))
1006 return (SET_ERROR(EACCES));
1007 return (rdonly ? 0 : SET_ERROR(EACCES));
1009 return (SET_ERROR(EACCES));
1011 #endif /* HAVE_MLSLABEL */
1014 zfs_statfs_project(zfsvfs_t *zfsvfs, znode_t *zp, struct kstatfs *statp,
1017 char buf[20 + DMU_OBJACCT_PREFIX_LEN];
1018 uint64_t offset = DMU_OBJACCT_PREFIX_LEN;
1023 strlcpy(buf, DMU_OBJACCT_PREFIX, DMU_OBJACCT_PREFIX_LEN + 1);
1024 err = zfs_id_to_fuidstr(zfsvfs, NULL, zp->z_projid, buf + offset,
1025 sizeof (buf) - offset, B_FALSE);
1029 if (zfsvfs->z_projectquota_obj == 0)
1032 err = zap_lookup(zfsvfs->z_os, zfsvfs->z_projectquota_obj,
1033 buf + offset, 8, 1, &quota);
1039 err = zap_lookup(zfsvfs->z_os, DMU_PROJECTUSED_OBJECT,
1040 buf + offset, 8, 1, &used);
1041 if (unlikely(err == ENOENT)) {
1043 u_longlong_t nblocks;
1046 * Quota accounting is async, so a race is possible. There is
1047 * at least one object with the given project ID.
1049 sa_object_size(zp->z_sa_hdl, &blksize, &nblocks);
1050 if (unlikely(zp->z_blksz == 0))
1051 blksize = zfsvfs->z_max_blksz;
1053 used = blksize * nblocks;
1058 statp->f_blocks = quota >> bshift;
1059 statp->f_bfree = (quota > used) ? ((quota - used) >> bshift) : 0;
1060 statp->f_bavail = statp->f_bfree;
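/*
 * For example, with a 10 GiB project quota, 4 GiB used and
 * bshift == 17 (128 KiB blocks): f_blocks = 81920 and
 * f_bfree = f_bavail = 49152.
 */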
1063 if (zfsvfs->z_projectobjquota_obj == 0)
1066 err = zap_lookup(zfsvfs->z_os, zfsvfs->z_projectobjquota_obj,
1067 buf + offset, 8, 1, &quota);
1073 err = zap_lookup(zfsvfs->z_os, DMU_PROJECTUSED_OBJECT,
1075 if (unlikely(err == ENOENT)) {
1077 * Quota accounting is async, so a race is possible. There is
1078 * at least one object with the given project ID.
1085 statp->f_files = quota;
1086 statp->f_ffree = (quota > used) ? (quota - used) : 0;
1092 zfs_statvfs(struct inode *ip, struct kstatfs *statp)
1094 zfsvfs_t *zfsvfs = ITOZSB(ip);
1095 uint64_t refdbytes, availbytes, usedobjs, availobjs;
1100 dmu_objset_space(zfsvfs->z_os,
1101 &refdbytes, &availbytes, &usedobjs, &availobjs);
1103 uint64_t fsid = dmu_objset_fsid_guid(zfsvfs->z_os);
1105 * The underlying storage pool actually uses multiple block
1106 * sizes. Under Solaris frsize (fragment size) is reported as
1107 * the smallest block size we support, and bsize (block size)
1108 * as the filesystem's maximum block size. Unfortunately,
1109 * under Linux the fragment size and block size are often used
1110 * interchangeably. Thus we are forced to report both of them
1111 * as the filesystem's maximum block size.
1113 statp->f_frsize = zfsvfs->z_max_blksz;
1114 statp->f_bsize = zfsvfs->z_max_blksz;
1115 uint32_t bshift = fls(statp->f_bsize) - 1;
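/*
 * fls() returns the 1-based index of the highest set bit, so for a
 * power-of-two f_bsize such as 131072 (128 KiB) bshift is 17 and
 * byte counts below are converted to block counts by >> 17.
 */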
1118 * The following report "total" blocks of various kinds in
1119 * the file system, but reported in terms of f_bsize - the
1123 /* Round up so we never have a filesystem using 0 blocks. */
1124 refdbytes = P2ROUNDUP(refdbytes, statp->f_bsize);
1125 statp->f_blocks = (refdbytes + availbytes) >> bshift;
1126 statp->f_bfree = availbytes >> bshift;
1127 statp->f_bavail = statp->f_bfree; /* no root reservation */
1130 * statvfs() should really be called statufs(), because it assumes
1131 * static metadata. ZFS doesn't preallocate files, so the best
1132 * we can do is report the max that could possibly fit in f_files,
1133 * and that minus the number actually used in f_ffree.
1134 * For f_ffree, report the smaller of the number of objects available
1135 * and the number of blocks (each object will take at least a block).
1137 statp->f_ffree = MIN(availobjs, availbytes >> DNODE_SHIFT);
1138 statp->f_files = statp->f_ffree + usedobjs;
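/*
 * DNODE_SHIFT is 9, so "availbytes >> DNODE_SHIFT" above assumes
 * each new object consumes at least one 512-byte dnode.
 */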
1139 statp->f_fsid.val[0] = (uint32_t)fsid;
1140 statp->f_fsid.val[1] = (uint32_t)(fsid >> 32);
1141 statp->f_type = ZFS_SUPER_MAGIC;
1142 statp->f_namelen = MAXNAMELEN - 1;
1145 * We have all of 40 characters to stuff a string here.
1146 * Is there anything useful we could/should provide?
1148 bzero(statp->f_spare, sizeof (statp->f_spare));
1150 if (dmu_objset_projectquota_enabled(zfsvfs->z_os) &&
1151 dmu_objset_projectquota_present(zfsvfs->z_os)) {
1152 znode_t *zp = ITOZ(ip);
1154 if (zp->z_pflags & ZFS_PROJINHERIT && zp->z_projid &&
1155 zpl_is_valid_projid(zp->z_projid))
1156 err = zfs_statfs_project(zfsvfs, zp, statp, bshift);
1164 zfs_root(zfsvfs_t *zfsvfs, struct inode **ipp)
1171 error = zfs_zget(zfsvfs, zfsvfs->z_root, &rootzp);
1173 *ipp = ZTOI(rootzp);
1180 * Linux kernels older than 3.1 do not support a per-filesystem shrinker.
1181 * To accommodate this we must improvise and manually walk the list of znodes
1182 * attempting to prune dentries in order to be able to drop the inodes.
1184 * To avoid scanning the same znodes multiple times they are always rotated
1185 * to the end of the z_all_znodes list. New znodes are inserted at the
1186 * end of the list so we're always scanning the oldest znodes first.
1189 zfs_prune_aliases(zfsvfs_t *zfsvfs, unsigned long nr_to_scan)
1191 znode_t **zp_array, *zp;
1192 int max_array = MIN(nr_to_scan, PAGE_SIZE * 8 / sizeof (znode_t *));
1196 zp_array = kmem_zalloc(max_array * sizeof (znode_t *), KM_SLEEP);
1198 mutex_enter(&zfsvfs->z_znodes_lock);
1199 while ((zp = list_head(&zfsvfs->z_all_znodes)) != NULL) {
1201 if ((i++ > nr_to_scan) || (j >= max_array))
1204 ASSERT(list_link_active(&zp->z_link_node));
1205 list_remove(&zfsvfs->z_all_znodes, zp);
1206 list_insert_tail(&zfsvfs->z_all_znodes, zp);
1208 /* Skip active znodes and .zfs entries */
1209 if (MUTEX_HELD(&zp->z_lock) || zp->z_is_ctldir)
1212 if (igrab(ZTOI(zp)) == NULL)
1218 mutex_exit(&zfsvfs->z_znodes_lock);
1220 for (i = 0; i < j; i++) {
1223 ASSERT3P(zp, !=, NULL);
1224 d_prune_aliases(ZTOI(zp));
1226 if (atomic_read(&ZTOI(zp)->i_count) == 1)
1232 kmem_free(zp_array, max_array * sizeof (znode_t *));
1238 * The ARC has requested that the filesystem drop entries from the dentry
1239 * and inode caches. This can occur when the ARC needs to free meta data
1240 * blocks but can't because they are all pinned by entries in these caches.
1243 zfs_prune(struct super_block *sb, unsigned long nr_to_scan, int *objects)
1245 zfsvfs_t *zfsvfs = sb->s_fs_info;
1247 struct shrinker *shrinker = &sb->s_shrink;
1248 struct shrink_control sc = {
1249 .nr_to_scan = nr_to_scan,
1250 .gfp_mask = GFP_KERNEL,
1255 #if defined(HAVE_SPLIT_SHRINKER_CALLBACK) && \
1256 defined(SHRINK_CONTROL_HAS_NID) && \
1257 defined(SHRINKER_NUMA_AWARE)
1258 if (sb->s_shrink.flags & SHRINKER_NUMA_AWARE) {
1260 for_each_online_node(sc.nid) {
1261 *objects += (*shrinker->scan_objects)(shrinker, &sc);
1264 *objects = (*shrinker->scan_objects)(shrinker, &sc);
1267 #elif defined(HAVE_SPLIT_SHRINKER_CALLBACK)
1268 *objects = (*shrinker->scan_objects)(shrinker, &sc);
1269 #elif defined(HAVE_SINGLE_SHRINKER_CALLBACK)
1270 *objects = (*shrinker->shrink)(shrinker, &sc);
1271 #elif defined(HAVE_D_PRUNE_ALIASES)
1272 #define D_PRUNE_ALIASES_IS_DEFAULT
1273 *objects = zfs_prune_aliases(zfsvfs, nr_to_scan);
1275 #error "No available dentry and inode cache pruning mechanism."
1278 #if defined(HAVE_D_PRUNE_ALIASES) && !defined(D_PRUNE_ALIASES_IS_DEFAULT)
1279 #undef D_PRUNE_ALIASES_IS_DEFAULT
1281 * Fall back to zfs_prune_aliases if the kernel's per-superblock
1282 * shrinker couldn't free anything, possibly due to the inodes being
1283 * allocated in a different memcg.
1286 *objects = zfs_prune_aliases(zfsvfs, nr_to_scan);
1291 dprintf_ds(zfsvfs->z_os->os_dsl_dataset,
1292 "pruning, nr_to_scan=%lu objects=%d error=%d\n",
1293 nr_to_scan, *objects, error);
1299 * Tear down the zfsvfs_t.
1301 * Note, if 'unmounting' is FALSE, we return with the 'z_teardown_lock'
1302 * and 'z_teardown_inactive_lock' held.
1305 zfsvfs_teardown(zfsvfs_t *zfsvfs, boolean_t unmounting)
1309 zfs_unlinked_drain_stop_wait(zfsvfs);
1312 * If someone has not already unmounted this file system,
1313 * drain the zrele_taskq to ensure all active references to the
1314 * zfsvfs_t have been handled; only then can it be safely destroyed.
1318 * If we're unmounting we have to wait for the list to
1321 * If we're not unmounting there's no guarantee the list
1322 * will drain completely, but iputs run from the taskq
1323 * may add the parents of dir-based xattrs to the taskq
1324 * so we want to wait for these.
1326 * We can safely read z_nr_znodes without locking because the
1327 * VFS has already blocked operations which add to the
1328 * z_all_znodes list and thus increment z_nr_znodes.
1331 while (zfsvfs->z_nr_znodes > 0) {
1332 taskq_wait_outstanding(dsl_pool_zrele_taskq(
1333 dmu_objset_pool(zfsvfs->z_os)), 0);
1334 if (++round > 1 && !unmounting)
1339 rrm_enter(&zfsvfs->z_teardown_lock, RW_WRITER, FTAG);
1343 * We purge the parent filesystem's super block as the
1344 * parent filesystem and all of its snapshots have their
1345 * inode's super block set to the parent filesystem's
1346 * super block. Note, 'z_parent' is self-referential
1347 * for non-snapshots.
1349 shrink_dcache_sb(zfsvfs->z_parent->z_sb);
1353 * Close the zil. NB: Can't close the zil while zfs_inactive
1354 * threads are blocked as zil_close can call zfs_inactive.
1356 if (zfsvfs->z_log) {
1357 zil_close(zfsvfs->z_log);
1358 zfsvfs->z_log = NULL;
1361 rw_enter(&zfsvfs->z_teardown_inactive_lock, RW_WRITER);
1364 * If we are not unmounting (i.e., online recv) and someone already
1365 * unmounted this file system while we were doing the switcheroo,
1366 * or a reopen of z_os failed then just bail out now.
1368 if (!unmounting && (zfsvfs->z_unmounted || zfsvfs->z_os == NULL)) {
1369 rw_exit(&zfsvfs->z_teardown_inactive_lock);
1370 rrm_exit(&zfsvfs->z_teardown_lock, FTAG);
1371 return (SET_ERROR(EIO));
1375 * At this point there are no VFS ops active, and any new VFS ops
1376 * will fail with EIO since we have z_teardown_lock for writer (only
1377 * relevant for forced unmount).
1379 * Release all holds on dbufs. We also grab an extra reference to all
1380 * the remaining inodes so that the kernel does not attempt to free
1381 * any inodes of a suspended fs. This can cause deadlocks since the
1382 * zfs_resume_fs() process may involve starting threads, which might
1383 * attempt to free unreferenced inodes to free up memory for the new
1387 mutex_enter(&zfsvfs->z_znodes_lock);
1388 for (zp = list_head(&zfsvfs->z_all_znodes); zp != NULL;
1389 zp = list_next(&zfsvfs->z_all_znodes, zp)) {
1391 zfs_znode_dmu_fini(zp);
1392 if (igrab(ZTOI(zp)) != NULL)
1393 zp->z_suspended = B_TRUE;
1396 mutex_exit(&zfsvfs->z_znodes_lock);
1400 * If we are unmounting, set the unmounted flag and let new VFS ops
1401 * unblock. zfs_inactive will have the unmounted behavior, and all
1402 * other VFS ops will fail with EIO.
1405 zfsvfs->z_unmounted = B_TRUE;
1406 rw_exit(&zfsvfs->z_teardown_inactive_lock);
1407 rrm_exit(&zfsvfs->z_teardown_lock, FTAG);
1411 * z_os will be NULL if there was an error in attempting to reopen
1412 * zfsvfs, so just return as the properties had already been
1414 * unregistered and cached data had been evicted before.
1416 if (zfsvfs->z_os == NULL)
1420 * Unregister properties.
1422 zfs_unregister_callbacks(zfsvfs);
1425 * Evict cached data. We must write out any dirty data before
1426 * disowning the dataset.
1428 objset_t *os = zfsvfs->z_os;
1429 boolean_t os_dirty = B_FALSE;
1430 for (int t = 0; t < TXG_SIZE; t++) {
1431 if (dmu_objset_is_dirty(os, t)) {
1436 if (!zfs_is_readonly(zfsvfs) && os_dirty) {
1437 txg_wait_synced(dmu_objset_pool(zfsvfs->z_os), 0);
1439 dmu_objset_evict_dbufs(zfsvfs->z_os);
1440 dsl_dir_t *dd = os->os_dsl_dataset->ds_dir;
1441 dsl_dir_cancel_waiters(dd);
1446 #if defined(HAVE_SUPER_SETUP_BDI_NAME)
1447 atomic_long_t zfs_bdi_seq = ATOMIC_LONG_INIT(0);
1451 zfs_domount(struct super_block *sb, zfs_mnt_t *zm, int silent)
1453 const char *osname = zm->mnt_osname;
1454 struct inode *root_inode;
1455 uint64_t recordsize;
1457 zfsvfs_t *zfsvfs = NULL;
1463 error = zfsvfs_parse_options(zm->mnt_data, &vfs);
1467 error = zfsvfs_create(osname, vfs->vfs_readonly, &zfsvfs);
1469 zfsvfs_vfs_free(vfs);
1473 if ((error = dsl_prop_get_integer(osname, "recordsize",
1474 &recordsize, NULL))) {
1475 zfsvfs_vfs_free(vfs);
1479 vfs->vfs_data = zfsvfs;
1480 zfsvfs->z_vfs = vfs;
1482 sb->s_fs_info = zfsvfs;
1483 sb->s_magic = ZFS_SUPER_MAGIC;
1484 sb->s_maxbytes = MAX_LFS_FILESIZE;
1485 sb->s_time_gran = 1;
1486 sb->s_blocksize = recordsize;
1487 sb->s_blocksize_bits = ilog2(recordsize);
1489 error = -zpl_bdi_setup(sb, "zfs");
1493 sb->s_bdi->ra_pages = 0;
1495 /* Set callback operations for the file system. */
1496 sb->s_op = &zpl_super_operations;
1497 sb->s_xattr = zpl_xattr_handlers;
1498 sb->s_export_op = &zpl_export_operations;
1499 sb->s_d_op = &zpl_dentry_operations;
1501 /* Set features for file system. */
1502 zfs_set_fuid_feature(zfsvfs);
1504 if (dmu_objset_is_snapshot(zfsvfs->z_os)) {
1507 atime_changed_cb(zfsvfs, B_FALSE);
1508 readonly_changed_cb(zfsvfs, B_TRUE);
1509 if ((error = dsl_prop_get_integer(osname,
1510 "xattr", &pval, NULL)))
1512 xattr_changed_cb(zfsvfs, pval);
1513 if ((error = dsl_prop_get_integer(osname,
1514 "acltype", &pval, NULL)))
1516 acltype_changed_cb(zfsvfs, pval);
1517 zfsvfs->z_issnap = B_TRUE;
1518 zfsvfs->z_os->os_sync = ZFS_SYNC_DISABLED;
1519 zfsvfs->z_snap_defer_time = jiffies;
1521 mutex_enter(&zfsvfs->z_os->os_user_ptr_lock);
1522 dmu_objset_set_user(zfsvfs->z_os, zfsvfs);
1523 mutex_exit(&zfsvfs->z_os->os_user_ptr_lock);
1525 if ((error = zfsvfs_setup(zfsvfs, B_TRUE)))
1529 /* Allocate a root inode for the filesystem. */
1530 error = zfs_root(zfsvfs, &root_inode);
1532 (void) zfs_umount(sb);
1536 /* Allocate a root dentry for the filesystem */
1537 sb->s_root = d_make_root(root_inode);
1538 if (sb->s_root == NULL) {
1539 (void) zfs_umount(sb);
1540 error = SET_ERROR(ENOMEM);
1544 if (!zfsvfs->z_issnap)
1545 zfsctl_create(zfsvfs);
1547 zfsvfs->z_arc_prune = arc_add_prune_callback(zpl_prune_sb, sb);
1550 if (zfsvfs != NULL) {
1551 dmu_objset_disown(zfsvfs->z_os, B_TRUE, zfsvfs);
1552 zfsvfs_free(zfsvfs);
1555 * make sure we don't have dangling sb->s_fs_info which
1556 * zfs_preumount will use.
1558 sb->s_fs_info = NULL;
1565 * Called when an unmount is requested and certain sanity checks have
1566 * already passed. At this point no dentries or inodes have been reclaimed
1567 * from their respective caches. We drop the extra reference on the .zfs
1568 * control directory to allow everything to be reclaimed. All snapshots
1569 * must already have been unmounted to reach this point.
1572 zfs_preumount(struct super_block *sb)
1574 zfsvfs_t *zfsvfs = sb->s_fs_info;
1576 /* zfsvfs is NULL when zfs_domount fails during mount */
1578 zfs_unlinked_drain_stop_wait(zfsvfs);
1579 zfsctl_destroy(sb->s_fs_info);
1581 * Wait for zrele_async before entering evict_inodes in
1582 * generic_shutdown_super. The reason we must finish before
1583 * evict_inodes is that, when lazytime is on or when zfs_purgedir
1584 * calls zfs_zget, zrele would bump i_count from 0 to 1. This
1585 * would race with the i_count check in evict_inodes. This means
1586 * it could destroy the inode while we are still using it.
1588 * We wait for two passes. xattr directories in the first pass
1589 * may add xattr entries in zfs_purgedir, so in the second pass
1590 * we wait for them. We don't use taskq_wait here because it is
1591 * a pool wide taskq. Other mounted filesystems can constantly
1592 * do zrele_async and there's no guarantee when taskq will be
1595 taskq_wait_outstanding(dsl_pool_zrele_taskq(
1596 dmu_objset_pool(zfsvfs->z_os)), 0);
1597 taskq_wait_outstanding(dsl_pool_zrele_taskq(
1598 dmu_objset_pool(zfsvfs->z_os)), 0);
1603 * Called once all other unmount-related teardown has occurred.
1604 * It is our responsibility to release any remaining infrastructure.
1608 zfs_umount(struct super_block *sb)
1610 zfsvfs_t *zfsvfs = sb->s_fs_info;
1613 if (zfsvfs->z_arc_prune != NULL)
1614 arc_remove_prune_callback(zfsvfs->z_arc_prune);
1615 VERIFY(zfsvfs_teardown(zfsvfs, B_TRUE) == 0);
1617 zpl_bdi_destroy(sb);
1620 * z_os will be NULL if there was an error in
1621 * attempting to reopen zfsvfs.
1625 * Unset the objset user_ptr.
1627 mutex_enter(&os->os_user_ptr_lock);
1628 dmu_objset_set_user(os, NULL);
1629 mutex_exit(&os->os_user_ptr_lock);
1632 * Finally release the objset
1634 dmu_objset_disown(os, B_TRUE, zfsvfs);
1637 zfsvfs_free(zfsvfs);
1642 zfs_remount(struct super_block *sb, int *flags, zfs_mnt_t *zm)
1644 zfsvfs_t *zfsvfs = sb->s_fs_info;
1646 boolean_t issnap = dmu_objset_is_snapshot(zfsvfs->z_os);
1649 if ((issnap || !spa_writeable(dmu_objset_spa(zfsvfs->z_os))) &&
1650 !(*flags & SB_RDONLY)) {
1651 *flags |= SB_RDONLY;
1655 error = zfsvfs_parse_options(zm->mnt_data, &vfsp);
1659 if (!zfs_is_readonly(zfsvfs) && (*flags & SB_RDONLY))
1660 txg_wait_synced(dmu_objset_pool(zfsvfs->z_os), 0);
1662 zfs_unregister_callbacks(zfsvfs);
1663 zfsvfs_vfs_free(zfsvfs->z_vfs);
1665 vfsp->vfs_data = zfsvfs;
1666 zfsvfs->z_vfs = vfsp;
1668 (void) zfs_register_callbacks(vfsp);
1674 zfs_vget(struct super_block *sb, struct inode **ipp, fid_t *fidp)
1676 zfsvfs_t *zfsvfs = sb->s_fs_info;
1678 uint64_t object = 0;
1679 uint64_t fid_gen = 0;
1686 if (fidp->fid_len == SHORT_FID_LEN || fidp->fid_len == LONG_FID_LEN) {
1687 zfid_short_t *zfid = (zfid_short_t *)fidp;
1689 for (i = 0; i < sizeof (zfid->zf_object); i++)
1690 object |= ((uint64_t)zfid->zf_object[i]) << (8 * i);
1692 for (i = 0; i < sizeof (zfid->zf_gen); i++)
1693 fid_gen |= ((uint64_t)zfid->zf_gen[i]) << (8 * i);
1695 return (SET_ERROR(EINVAL));
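/*
 * Both fid sizes begin with a zfid_short_t: a 6-byte object number
 * and a 4-byte generation, stored least-significant byte first and
 * reassembled by the shift loops above.
 */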
1698 /* LONG_FID_LEN means snapdirs */
1699 if (fidp->fid_len == LONG_FID_LEN) {
1700 zfid_long_t *zlfid = (zfid_long_t *)fidp;
1701 uint64_t objsetid = 0;
1702 uint64_t setgen = 0;
1704 for (i = 0; i < sizeof (zlfid->zf_setid); i++)
1705 objsetid |= ((uint64_t)zlfid->zf_setid[i]) << (8 * i);
1707 for (i = 0; i < sizeof (zlfid->zf_setgen); i++)
1708 setgen |= ((uint64_t)zlfid->zf_setgen[i]) << (8 * i);
1710 if (objsetid != ZFSCTL_INO_SNAPDIRS - object) {
1711 dprintf("snapdir fid: objsetid (%llu) != "
1712 "ZFSCTL_INO_SNAPDIRS (%llu) - object (%llu)\n",
1713 objsetid, ZFSCTL_INO_SNAPDIRS, object);
1715 return (SET_ERROR(EINVAL));
1718 if (fid_gen > 1 || setgen != 0) {
1719 dprintf("snapdir fid: fid_gen (%llu) and setgen "
1720 "(%llu)\n", fid_gen, setgen);
1721 return (SET_ERROR(EINVAL));
1724 return (zfsctl_snapdir_vget(sb, objsetid, fid_gen, ipp));
1728 /* A zero fid_gen means we are in the .zfs control directories */
1730 (object == ZFSCTL_INO_ROOT || object == ZFSCTL_INO_SNAPDIR)) {
1731 *ipp = zfsvfs->z_ctldir;
1732 ASSERT(*ipp != NULL);
1733 if (object == ZFSCTL_INO_SNAPDIR) {
1734 VERIFY(zfsctl_root_lookup(*ipp, "snapshot", ipp,
1735 0, kcred, NULL, NULL) == 0);
1743 gen_mask = -1ULL >> (64 - 8 * i);
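/*
 * Here i is sizeof (zfid->zf_gen), so gen_mask keeps only the low
 * 8 * i bits; the znode's 64-bit generation is truncated the same
 * way below before being compared against fid_gen.
 */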
1745 dprintf("getting %llu [%llu mask %llx]\n", object, fid_gen, gen_mask);
1746 if ((err = zfs_zget(zfsvfs, object, &zp))) {
1751 /* Don't export xattr stuff */
1752 if (zp->z_pflags & ZFS_XATTR) {
1755 return (SET_ERROR(ENOENT));
1758 (void) sa_lookup(zp->z_sa_hdl, SA_ZPL_GEN(zfsvfs), &zp_gen,
1760 zp_gen = zp_gen & gen_mask;
1763 if ((fid_gen == 0) && (zfsvfs->z_root == object))
1765 if (zp->z_unlinked || zp_gen != fid_gen) {
1766 dprintf("znode gen (%llu) != fid gen (%llu)\n", zp_gen,
1770 return (SET_ERROR(ENOENT));
1775 zfs_inode_update(ITOZ(*ipp));
1782 * Block out VFS ops and close zfsvfs_t
1784 * Note, if successful, then we return with the 'z_teardown_lock' and
1785 * 'z_teardown_inactive_lock' write held. We leave ownership of the underlying
1786 * dataset and objset intact so that they can be atomically handed off during
1787 * a subsequent rollback or recv operation and the resume thereafter.
1790 zfs_suspend_fs(zfsvfs_t *zfsvfs)
1794 if ((error = zfsvfs_teardown(zfsvfs, B_FALSE)) != 0)
1801 * Rebuild SA and release VOPs. Note that ownership of the underlying dataset
1802 * is an invariant across any of the operations that can be performed while the
1803 * filesystem was suspended. Whether it succeeded or failed, the preconditions
1804 * are the same: the relevant objset and associated dataset are owned by
1805 * zfsvfs, held, and long held on entry.
1808 zfs_resume_fs(zfsvfs_t *zfsvfs, dsl_dataset_t *ds)
1813 ASSERT(RRM_WRITE_HELD(&zfsvfs->z_teardown_lock));
1814 ASSERT(RW_WRITE_HELD(&zfsvfs->z_teardown_inactive_lock));
1817 * We already own this, so just update the objset_t, as the one we
1818 * had before may have been evicted.
1821 VERIFY3P(ds->ds_owner, ==, zfsvfs);
1822 VERIFY(dsl_dataset_long_held(ds));
1823 dsl_pool_t *dp = spa_get_dsl(dsl_dataset_get_spa(ds));
1824 dsl_pool_config_enter(dp, FTAG);
1825 VERIFY0(dmu_objset_from_ds(ds, &os));
1826 dsl_pool_config_exit(dp, FTAG);
1828 err = zfsvfs_init(zfsvfs, os);
1832 ds->ds_dir->dd_activity_cancelled = B_FALSE;
1833 VERIFY(zfsvfs_setup(zfsvfs, B_FALSE) == 0);
1835 zfs_set_fuid_feature(zfsvfs);
1836 zfsvfs->z_rollback_time = jiffies;
1839 * Attempt to re-establish all the active inodes with their
1840 * dbufs. If a zfs_rezget() fails, then we unhash the inode
1841 * and mark it stale. This prevents a collision if a new
1842 * inode/object is created which must use the same inode
1843 * number. The stale inode will be released when the
1844 * VFS prunes the dentry holding the remaining references
1845 * on the stale inode.
1847 mutex_enter(&zfsvfs->z_znodes_lock);
1848 for (zp = list_head(&zfsvfs->z_all_znodes); zp;
1849 zp = list_next(&zfsvfs->z_all_znodes, zp)) {
1850 err2 = zfs_rezget(zp);
1852 remove_inode_hash(ZTOI(zp));
1853 zp->z_is_stale = B_TRUE;
1856 /* see comment in zfs_suspend_fs() */
1857 if (zp->z_suspended) {
1858 zfs_zrele_async(zp);
1859 zp->z_suspended = B_FALSE;
1862 mutex_exit(&zfsvfs->z_znodes_lock);
1864 if (!zfs_is_readonly(zfsvfs) && !zfsvfs->z_unmounted) {
1866 * zfs_suspend_fs() could have interrupted freeing
1867 * of dnodes. We need to restart this freeing so
1868 * that we don't "leak" the space.
1870 zfs_unlinked_drain(zfsvfs);
1874 * Most of the time zfs_suspend_fs is used for changing the contents
1875 * of the underlying dataset. ZFS rollback and receive operations
1876 * might create files for which negative dentries are present in
1877 * the cache. Since walking the dcache would require a lot of GPL-only
1878 * code duplication, it's much easier on these rather rare occasions
1879 * just to flush the whole dcache for the given dataset/filesystem.
1881 shrink_dcache_sb(zfsvfs->z_sb);
1885 zfsvfs->z_unmounted = B_TRUE;
1887 /* release the VFS ops */
1888 rw_exit(&zfsvfs->z_teardown_inactive_lock);
1889 rrm_exit(&zfsvfs->z_teardown_lock, FTAG);
1893 * Since we couldn't set up the sa framework, try to force
1894 * unmount this file system.
1897 (void) zfs_umount(zfsvfs->z_sb);
1903 * Release VOPs and unmount a suspended filesystem.
1906 zfs_end_fs(zfsvfs_t *zfsvfs, dsl_dataset_t *ds)
1908 ASSERT(RRM_WRITE_HELD(&zfsvfs->z_teardown_lock));
1909 ASSERT(RW_WRITE_HELD(&zfsvfs->z_teardown_inactive_lock));
1912 * We already own this, so just hold and rele it to update the
1913 * objset_t, as the one we had before may have been evicted.
1916 VERIFY3P(ds->ds_owner, ==, zfsvfs);
1917 VERIFY(dsl_dataset_long_held(ds));
1918 dsl_pool_t *dp = spa_get_dsl(dsl_dataset_get_spa(ds));
1919 dsl_pool_config_enter(dp, FTAG);
1920 VERIFY0(dmu_objset_from_ds(ds, &os));
1921 dsl_pool_config_exit(dp, FTAG);
1924 /* release the VOPs */
1925 rw_exit(&zfsvfs->z_teardown_inactive_lock);
1926 rrm_exit(&zfsvfs->z_teardown_lock, FTAG);
1929 * Try to force unmount this file system.
1931 (void) zfs_umount(zfsvfs->z_sb);
1932 zfsvfs->z_unmounted = B_TRUE;
1937 * Automounted snapshots rely on periodic revalidation
1938 * to keep snapshots from being automatically unmounted.
1942 zfs_exit_fs(zfsvfs_t *zfsvfs)
1944 if (!zfsvfs->z_issnap)
1947 if (time_after(jiffies, zfsvfs->z_snap_defer_time +
1948 MAX(zfs_expire_snapshot * HZ / 2, HZ))) {
1949 zfsvfs->z_snap_defer_time = jiffies;
1950 zfsctl_snapshot_unmount_delay(zfsvfs->z_os->os_spa,
1951 dmu_objset_id(zfsvfs->z_os),
1952 zfs_expire_snapshot);
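/*
 * Each revalidation therefore pushes the automatic unmount out by
 * zfs_expire_snapshot seconds, re-arming the timer at most once
 * per half expiry interval.
 */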
1957 zfs_set_version(zfsvfs_t *zfsvfs, uint64_t newvers)
1960 objset_t *os = zfsvfs->z_os;
1963 if (newvers < ZPL_VERSION_INITIAL || newvers > ZPL_VERSION)
1964 return (SET_ERROR(EINVAL));
1966 if (newvers < zfsvfs->z_version)
1967 return (SET_ERROR(EINVAL));
1969 if (zfs_spa_version_map(newvers) >
1970 spa_version(dmu_objset_spa(zfsvfs->z_os)))
1971 return (SET_ERROR(ENOTSUP));
1973 tx = dmu_tx_create(os);
1974 dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, B_FALSE, ZPL_VERSION_STR);
1975 if (newvers >= ZPL_VERSION_SA && !zfsvfs->z_use_sa) {
1976 dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, B_TRUE,
1978 dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_FALSE, NULL);
1980 error = dmu_tx_assign(tx, TXG_WAIT);
1986 error = zap_update(os, MASTER_NODE_OBJ, ZPL_VERSION_STR,
1987 8, 1, &newvers, tx);
1994 if (newvers >= ZPL_VERSION_SA && !zfsvfs->z_use_sa) {
1997 ASSERT3U(spa_version(dmu_objset_spa(zfsvfs->z_os)), >=,
1999 sa_obj = zap_create(os, DMU_OT_SA_MASTER_NODE,
2000 DMU_OT_NONE, 0, tx);
2002 error = zap_add(os, MASTER_NODE_OBJ,
2003 ZFS_SA_ATTRS, 8, 1, &sa_obj, tx);
2006 VERIFY(0 == sa_set_sa_object(os, sa_obj));
2007 sa_register_update_callback(os, zfs_sa_upgrade);
2010 spa_history_log_internal_ds(dmu_objset_ds(os), "upgrade", tx,
2011 "from %llu to %llu", zfsvfs->z_version, newvers);
2015 zfsvfs->z_version = newvers;
2016 os->os_version = newvers;
2018 zfs_set_fuid_feature(zfsvfs);
2024 * Read a property stored within the master node.
2027 zfs_get_zplprop(objset_t *os, zfs_prop_t prop, uint64_t *value)
2029 uint64_t *cached_copy = NULL;
2032 * Figure out where in the objset_t the cached copy would live, if it
2033 * is available for the requested property.
2037 case ZFS_PROP_VERSION:
2038 cached_copy = &os->os_version;
2040 case ZFS_PROP_NORMALIZE:
2041 cached_copy = &os->os_normalization;
2043 case ZFS_PROP_UTF8ONLY:
2044 cached_copy = &os->os_utf8only;
2047 cached_copy = &os->os_casesensitivity;
2053 if (cached_copy != NULL && *cached_copy != OBJSET_PROP_UNINITIALIZED) {
2054 *value = *cached_copy;
2059 * If the property wasn't cached, look up the file system's value for
2060 * the property. For the version property, we look up a slightly
2065 if (prop == ZFS_PROP_VERSION)
2066 pname = ZPL_VERSION_STR;
2068 pname = zfs_prop_to_name(prop);
2071 ASSERT3U(os->os_phys->os_type, ==, DMU_OST_ZFS);
2072 error = zap_lookup(os, MASTER_NODE_OBJ, pname, 8, 1, value);
2075 if (error == ENOENT) {
2076 /* No value set, use the default value */
2078 case ZFS_PROP_VERSION:
2079 *value = ZPL_VERSION;
2081 case ZFS_PROP_NORMALIZE:
2082 case ZFS_PROP_UTF8ONLY:
2086 *value = ZFS_CASE_SENSITIVE;
2088 case ZFS_PROP_ACLTYPE:
2089 *value = ZFS_ACLTYPE_OFF;
2098 * If one of the methods for getting the property value above worked,
2099 * copy it into the objset_t's cache.
2101 if (error == 0 && cached_copy != NULL) {
2102 *cached_copy = *value;
2109 * Return true if the corresponding vfs's unmounted flag is set.
2110 * Otherwise return false.
2111 * If this function returns true we know VFS unmount has been initiated.
2114 zfs_get_vfs_flag_unmounted(objset_t *os)
2117 boolean_t unmounted = B_FALSE;
2119 ASSERT(dmu_objset_type(os) == DMU_OST_ZFS);
2121 mutex_enter(&os->os_user_ptr_lock);
2122 zfvp = dmu_objset_get_user(os);
2123 if (zfvp != NULL && zfvp->z_unmounted)
2125 mutex_exit(&os->os_user_ptr_lock);
2132 zfsvfs_update_fromname(const char *oldname, const char *newname)
2135 * We don't need to do anything here; the devname is always current by
2136 * virtue of zfsvfs->z_sb->s_op->show_devname.
2145 dmu_objset_register_type(DMU_OST_ZFS, zpl_get_file_info);
2146 register_filesystem(&zpl_fs_type);
2153 * We don't use taskq_wait_outstanding() because zpl_posix_acl_free() might add more tasks.
2155 taskq_wait(system_delay_taskq);
2156 taskq_wait(system_taskq);
2157 unregister_filesystem(&zpl_fs_type);
2162 #if defined(_KERNEL)
2163 EXPORT_SYMBOL(zfs_suspend_fs);
2164 EXPORT_SYMBOL(zfs_resume_fs);
2165 EXPORT_SYMBOL(zfs_set_version);
2166 EXPORT_SYMBOL(zfsvfs_create);
2167 EXPORT_SYMBOL(zfsvfs_free);
2168 EXPORT_SYMBOL(zfs_is_readonly);
2169 EXPORT_SYMBOL(zfs_domount);
2170 EXPORT_SYMBOL(zfs_preumount);
2171 EXPORT_SYMBOL(zfs_umount);
2172 EXPORT_SYMBOL(zfs_remount);
2173 EXPORT_SYMBOL(zfs_statvfs);
2174 EXPORT_SYMBOL(zfs_vget);
2175 EXPORT_SYMBOL(zfs_prune);