2 * Copyright (c) 2011-2018 The DragonFly Project. All rights reserved.
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@dragonflybsd.org>
6 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in
16 * the documentation and/or other materials provided with the
18 * 3. Neither the name of The DragonFly Project nor the names of its
19 * contributors may be used to endorse or promote products derived
20 * from this software without specific, prior written permission.
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
25 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
26 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
27 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
28 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
30 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
31 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
32 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38 * WARNING! The ioctl functions which manipulate the connection state need
39 * to be able to run without deadlock on the volume's chain lock.
40 * Most of these functions use a separate lock.
/*
 * Forward declarations for the per-ioctl handler functions implemented
 * below.  Each takes the target inode and a pointer to the ioctl's data
 * buffer and returns 0 or an errno.  The commented-out comp_set variants
 * are retained but currently disabled (their case labels below are also
 * commented out).
 *
 * NOTE(review): this file is an excerpt -- intermediate source lines are
 * elided throughout (the leading numbers are the original line numbers).
 */
45 static int hammer2_ioctl_version_get(hammer2_inode_t *ip, void *data);
46 static int hammer2_ioctl_recluster(hammer2_inode_t *ip, void *data);
47 static int hammer2_ioctl_remote_scan(hammer2_inode_t *ip, void *data);
48 static int hammer2_ioctl_remote_add(hammer2_inode_t *ip, void *data);
49 static int hammer2_ioctl_remote_del(hammer2_inode_t *ip, void *data);
50 static int hammer2_ioctl_remote_rep(hammer2_inode_t *ip, void *data);
51 static int hammer2_ioctl_socket_get(hammer2_inode_t *ip, void *data);
52 static int hammer2_ioctl_socket_set(hammer2_inode_t *ip, void *data);
53 static int hammer2_ioctl_pfs_get(hammer2_inode_t *ip, void *data);
54 static int hammer2_ioctl_pfs_lookup(hammer2_inode_t *ip, void *data);
55 static int hammer2_ioctl_pfs_create(hammer2_inode_t *ip, void *data);
56 static int hammer2_ioctl_pfs_snapshot(hammer2_inode_t *ip, void *data);
57 static int hammer2_ioctl_pfs_delete(hammer2_inode_t *ip, void *data);
58 static int hammer2_ioctl_inode_get(hammer2_inode_t *ip, void *data);
59 static int hammer2_ioctl_inode_set(hammer2_inode_t *ip, void *data);
/* debug_dump takes raw flags rather than a data pointer */
60 static int hammer2_ioctl_debug_dump(hammer2_inode_t *ip, u_int flags);
61 //static int hammer2_ioctl_inode_comp_set(hammer2_inode_t *ip, void *data);
62 //static int hammer2_ioctl_inode_comp_rec_set(hammer2_inode_t *ip, void *data);
63 //static int hammer2_ioctl_inode_comp_rec_set2(hammer2_inode_t *ip, void *data);
64 static int hammer2_ioctl_bulkfree_scan(hammer2_inode_t *ip, void *data);
65 static int hammer2_ioctl_destroy(hammer2_inode_t *ip, void *data);
/*
 * Top-level ioctl dispatcher for the HAMMER2 filesystem.
 *
 * Performs a single root-privilege check up front (PRIV_HAMMER_IOCTL),
 * then switches on the command code and delegates to the matching
 * handler.  The comment below indicates the priv result is selectively
 * ignored for commands that do not require root; the per-case checks
 * sit on elided lines.  Returns the handler's errno result.
 */
68 hammer2_ioctl(hammer2_inode_t *ip, u_long com, void *data, int fflag,
74 * Standard root cred checks, will be selectively ignored below
75 * for ioctls that do not require root creds.
77 error = priv_check_cred(cred, PRIV_HAMMER_IOCTL, 0);
80 case HAMMER2IOC_VERSION_GET:
81 error = hammer2_ioctl_version_get(ip, data);
83 case HAMMER2IOC_RECLUSTER:
85 error = hammer2_ioctl_recluster(ip, data);
87 case HAMMER2IOC_REMOTE_SCAN:
89 error = hammer2_ioctl_remote_scan(ip, data);
91 case HAMMER2IOC_REMOTE_ADD:
93 error = hammer2_ioctl_remote_add(ip, data);
95 case HAMMER2IOC_REMOTE_DEL:
97 error = hammer2_ioctl_remote_del(ip, data);
99 case HAMMER2IOC_REMOTE_REP:
101 error = hammer2_ioctl_remote_rep(ip, data);
103 case HAMMER2IOC_SOCKET_GET:
105 error = hammer2_ioctl_socket_get(ip, data);
107 case HAMMER2IOC_SOCKET_SET:
109 error = hammer2_ioctl_socket_set(ip, data);
111 case HAMMER2IOC_PFS_GET:
113 error = hammer2_ioctl_pfs_get(ip, data);
115 case HAMMER2IOC_PFS_LOOKUP:
117 error = hammer2_ioctl_pfs_lookup(ip, data);
119 case HAMMER2IOC_PFS_CREATE:
121 error = hammer2_ioctl_pfs_create(ip, data);
123 case HAMMER2IOC_PFS_DELETE:
125 error = hammer2_ioctl_pfs_delete(ip, data);
127 case HAMMER2IOC_PFS_SNAPSHOT:
129 error = hammer2_ioctl_pfs_snapshot(ip, data);
131 case HAMMER2IOC_INODE_GET:
132 error = hammer2_ioctl_inode_get(ip, data);
134 case HAMMER2IOC_INODE_SET:
136 error = hammer2_ioctl_inode_set(ip, data);
138 case HAMMER2IOC_BULKFREE_SCAN:
139 error = hammer2_ioctl_bulkfree_scan(ip, data);
/*
 * ASYNC variant passes NULL instead of the bfi structure --
 * presumably the scan routine uses NULL to select asynchronous
 * operation; confirm against hammer2_ioctl_bulkfree_scan.
 */
141 case HAMMER2IOC_BULKFREE_ASYNC:
142 error = hammer2_ioctl_bulkfree_scan(ip, NULL);
/* Compression-tuning ioctls are currently disabled (commented out). */
144 /*case HAMMER2IOC_INODE_COMP_SET:
145 error = hammer2_ioctl_inode_comp_set(ip, data);
147 case HAMMER2IOC_INODE_COMP_REC_SET:
148 error = hammer2_ioctl_inode_comp_rec_set(ip, data);
150 case HAMMER2IOC_INODE_COMP_REC_SET2:
151 error = hammer2_ioctl_inode_comp_rec_set2(ip, data);
153 case HAMMER2IOC_DESTROY:
155 error = hammer2_ioctl_destroy(ip, data);
/* DEBUG_DUMP passes the dereferenced u_int flag word, not the pointer */
157 case HAMMER2IOC_DEBUG_DUMP:
158 error = hammer2_ioctl_debug_dump(ip, *(u_int *)data);
168 * Retrieve version and basic info
171 hammer2_ioctl_version_get(hammer2_inode_t *ip, void *data)
173 hammer2_ioc_version_t *version = data;
/* Use the first device backing this PFS; fields for others are elided */
176 hmp = ip->pmp->pfs_hmps[0];
/* Report the on-media volume header version... */
178 version->version = hmp->voldata.version;
/* ...or -1 when no backing device is present (branch lines elided) */
180 version->version = -1;
/*
 * Reconnect the mount to a cluster via a file descriptor supplied by
 * userland (recl->fd).  Only valid for single-chain local device
 * mounts (nchains == 1 with a valid focus); otherwise it complains
 * and bails (error path lines elided).
 */
185 hammer2_ioctl_recluster(hammer2_inode_t *ip, void *data)
187 hammer2_ioc_recluster_t *recl = data;
188 struct vnode *vproot;
190 hammer2_cluster_t *cluster;
/* Pin the caller-supplied descriptor for the duration of the operation */
193 fp = holdfp(curthread, recl->fd, -1);
195 error = VFS_ROOT(ip->pmp->mp, &vproot);
197 cluster = &ip->pmp->iroot->cluster;
198 kprintf("reconnect to cluster: nc=%d focus=%p\n",
199 cluster->nchains, cluster->focus);
200 if (cluster->nchains != 1 || cluster->focus == NULL) {
201 kprintf("not a local device mount\n");
204 hammer2_cluster_reconnect(cluster->focus->hmp,
218 * Retrieve information about a remote
/*
 * Copies the volume-header copyinfo[] entry selected by remote->copyid
 * into remote->copy1 under the voldata lock, then computes
 * remote->nextid as the next populated slot (for iteration by the
 * caller).  copyid is range-checked first.
 */
221 hammer2_ioctl_remote_scan(hammer2_inode_t *ip, void *data)
224 hammer2_ioc_remote_t *remote = data;
225 int copyid = remote->copyid;
227 hmp = ip->pmp->pfs_hmps[0];
/* Reject out-of-range copyids (error return on an elided line) */
231 if (copyid < 0 || copyid >= HAMMER2_COPYID_COUNT)
234 hammer2_voldata_lock(hmp);
235 remote->copy1 = hmp->voldata.copyinfo[copyid];
236 hammer2_voldata_unlock(hmp);
239 * Adjust nextid (GET only)
/* Skip unused slots (copyid == 0 marks an empty entry) */
241 while (++copyid < HAMMER2_COPYID_COUNT &&
242 hmp->voldata.copyinfo[copyid].copyid == 0) {
245 if (copyid == HAMMER2_COPYID_COUNT)
248 remote->nextid = copyid;
254 * Add new remote entry
/*
 * Installs remote->copy1 into the first free copyinfo[] slot of the
 * volume header (slot 0 is reserved; the scan starts at 1).  The whole
 * update is done under the voldata lock and pushed to media via
 * hammer2_volconf_update().  Fails when the table is full (error path
 * lines elided).
 */
257 hammer2_ioctl_remote_add(hammer2_inode_t *ip, void *data)
259 hammer2_ioc_remote_t *remote = data;
260 hammer2_pfs_t *pmp = ip->pmp;
262 int copyid = remote->copyid;
265 hmp = pmp->pfs_hmps[0];
268 if (copyid >= HAMMER2_COPYID_COUNT)
271 hammer2_voldata_lock(hmp);
/* Find the first free slot; copyid == 0 marks an empty entry */
273 for (copyid = 1; copyid < HAMMER2_COPYID_COUNT; ++copyid) {
274 if (hmp->voldata.copyinfo[copyid].copyid == 0)
277 if (copyid == HAMMER2_COPYID_COUNT) {
282 hammer2_voldata_modify(hmp);
/* Stamp the assigned slot number into the caller's copy */
283 remote->copy1.copyid = copyid;
284 hmp->voldata.copyinfo[copyid] = remote->copy1;
285 hammer2_volconf_update(hmp, copyid);
287 hammer2_voldata_unlock(hmp);
292 * Delete existing remote entry
/*
 * Removes a copyinfo[] entry matched by its path string (remote->
 * copy1.path, NUL-termination forced first).  The matching slot's
 * copyid is zeroed (marking it free) under the voldata lock and the
 * change is propagated with hammer2_volconf_update().  Fails when no
 * entry matches (error path lines elided).
 */
295 hammer2_ioctl_remote_del(hammer2_inode_t *ip, void *data)
297 hammer2_ioc_remote_t *remote = data;
298 hammer2_pfs_t *pmp = ip->pmp;
300 int copyid = remote->copyid;
303 hmp = pmp->pfs_hmps[0];
306 if (copyid >= HAMMER2_COPYID_COUNT)
/* Defensive: guarantee the userland-supplied path is NUL-terminated */
308 remote->copy1.path[sizeof(remote->copy1.path) - 1] = 0;
309 hammer2_voldata_lock(hmp);
/* Linear scan for a populated slot whose path matches */
311 for (copyid = 1; copyid < HAMMER2_COPYID_COUNT; ++copyid) {
312 if (hmp->voldata.copyinfo[copyid].copyid == 0)
314 if (strcmp(remote->copy1.path,
315 hmp->voldata.copyinfo[copyid].path) == 0) {
319 if (copyid == HAMMER2_COPYID_COUNT) {
324 hammer2_voldata_modify(hmp);
/* copyid == 0 marks the slot free */
325 hmp->voldata.copyinfo[copyid].copyid = 0;
326 hammer2_volconf_update(hmp, copyid);
328 hammer2_voldata_unlock(hmp);
333 * Replace existing remote entry
/*
 * Replace an existing copyinfo[] entry in place.  As visible here the
 * body only range-checks copyid and marks the voldata modified under
 * the lock -- the volconf update call is commented out, so this
 * appears to be incomplete/stubbed; confirm against the full source.
 */
336 hammer2_ioctl_remote_rep(hammer2_inode_t *ip, void *data)
338 hammer2_ioc_remote_t *remote = data;
340 int copyid = remote->copyid;
342 hmp = ip->pmp->pfs_hmps[0];
345 if (copyid < 0 || copyid >= HAMMER2_COPYID_COUNT)
348 hammer2_voldata_lock(hmp);
349 hammer2_voldata_modify(hmp);
350 /*hammer2_volconf_update(hmp, copyid);*/
351 hammer2_voldata_unlock(hmp);
357 * Retrieve communications socket
/* Only the signature is visible in this excerpt; body lines elided. */
360 hammer2_ioctl_socket_get(hammer2_inode_t *ip, void *data)
366 * Set communications socket for connection
/*
 * Associate a communications socket with a cluster connection slot.
 * Visible here: copyid range check and a voldata lock/unlock pair; the
 * actual socket installation lines are elided from this excerpt.
 */
369 hammer2_ioctl_socket_set(hammer2_inode_t *ip, void *data)
371 hammer2_ioc_remote_t *remote = data;
373 int copyid = remote->copyid;
375 hmp = ip->pmp->pfs_hmps[0];
378 if (copyid < 0 || copyid >= HAMMER2_COPYID_COUNT)
381 hammer2_voldata_lock(hmp);
382 hammer2_voldata_unlock(hmp);
388 * Used to scan and retrieve PFS information. PFS's are directories under
391 * To scan PFSs pass name_key=0. The function will scan for the next
392 * PFS and set all fields, as well as set name_next to the next key.
393 * When no PFSs remain, name_next is set to (hammer2_key_t)-1.
395 * To retrieve a particular PFS by key, specify the key but note that
396 * the ioctl will return the lowest key >= specified_key, so the caller
397 * must verify the key.
399 * To retrieve the PFS associated with the file descriptor, pass
400 * name_key set to (hammer2_key_t)-1.
403 hammer2_ioctl_pfs_get(hammer2_inode_t *ip, void *data)
405 const hammer2_inode_data_t *ripdata;
407 hammer2_ioc_pfs_t *pfs;
408 hammer2_chain_t *parent;
409 hammer2_chain_t *chain;
410 hammer2_key_t key_next;
411 hammer2_key_t save_key;
414 hmp = ip->pmp->pfs_hmps[0];
/* Remember the request mode; -1 selects "this mount's own PFS" below */
419 save_key = pfs->name_key;
425 if (save_key == (hammer2_key_t)-1) {
/* -1: use the caller's own PFS root chain directly (shared lock) */
426 hammer2_inode_lock(ip->pmp->iroot, 0);
428 chain = hammer2_inode_chain(ip->pmp->iroot, 0,
429 HAMMER2_RESOLVE_ALWAYS |
430 HAMMER2_RESOLVE_SHARED);
/* otherwise: scan the super-root for the lowest key >= name_key */
432 hammer2_inode_lock(hmp->spmp->iroot, 0);
433 parent = hammer2_inode_chain(hmp->spmp->iroot, 0,
434 HAMMER2_RESOLVE_ALWAYS |
435 HAMMER2_RESOLVE_SHARED);
436 chain = hammer2_chain_lookup(&parent, &key_next,
437 pfs->name_key, HAMMER2_KEY_MAX,
439 HAMMER2_LOOKUP_SHARED);
/* Skip non-inode chains until a PFS inode is found (loop lines elided) */
446 if (chain->bref.type == HAMMER2_BREF_TYPE_INODE)
448 if (parent == NULL) {
449 hammer2_chain_unlock(chain);
450 hammer2_chain_drop(chain);
454 chain = hammer2_chain_next(&parent, chain, &key_next,
455 key_next, HAMMER2_KEY_MAX,
457 HAMMER2_LOOKUP_SHARED);
459 error = hammer2_error_to_errno(error);
462 * Load the data being returned by the ioctl.
464 if (chain && chain->error == 0) {
465 ripdata = &chain->data->ipdata;
466 pfs->name_key = ripdata->meta.name_key;
467 pfs->pfs_type = ripdata->meta.pfs_type;
468 pfs->pfs_subtype = ripdata->meta.pfs_subtype;
469 pfs->pfs_clid = ripdata->meta.pfs_clid;
470 pfs->pfs_fsid = ripdata->meta.pfs_fsid;
471 KKASSERT(ripdata->meta.name_len < sizeof(pfs->name));
472 bcopy(ripdata->filename, pfs->name, ripdata->meta.name_len);
473 pfs->name[ripdata->meta.name_len] = 0;
474 ripdata = NULL; /* safety */
477 * Calculate name_next, if any. We are only accessing
478 * chain->bref so we can ignore chain->error (if the key
479 * is used later it will error then).
481 if (parent == NULL) {
482 pfs->name_next = (hammer2_key_t)-1;
484 chain = hammer2_chain_next(&parent, chain, &key_next,
485 key_next, HAMMER2_KEY_MAX,
487 HAMMER2_LOOKUP_SHARED);
489 pfs->name_next = chain->bref.key;
491 pfs->name_next = (hammer2_key_t)-1;
494 pfs->name_next = (hammer2_key_t)-1;
/* Cleanup: release chain/parent and whichever iroot lock was taken */
502 hammer2_chain_unlock(chain);
503 hammer2_chain_drop(chain);
506 hammer2_chain_unlock(parent);
507 hammer2_chain_drop(parent);
509 if (save_key == (hammer2_key_t)-1) {
510 hammer2_inode_unlock(ip->pmp->iroot);
512 hammer2_inode_unlock(hmp->spmp->iroot);
519 * Find a specific PFS by name
/*
 * Looks a PFS up under the super-root by its (NUL-forced) name using
 * the directory hash, walking hash collisions with chain_next until a
 * dirent matching the exact name is found, then copies its identity
 * fields (name_key, type, subtype, clid, fsid) into the ioctl buffer.
 * Returns ENOENT-style failure when nothing matches (elided lines).
 */
522 hammer2_ioctl_pfs_lookup(hammer2_inode_t *ip, void *data)
524 const hammer2_inode_data_t *ripdata;
526 hammer2_ioc_pfs_t *pfs;
527 hammer2_chain_t *parent;
528 hammer2_chain_t *chain;
529 hammer2_key_t key_next;
534 hmp = ip->pmp->pfs_hmps[0];
/* Shared lock is sufficient -- this is a read-only lookup */
541 hammer2_inode_lock(hmp->spmp->iroot, HAMMER2_RESOLVE_SHARED);
542 parent = hammer2_inode_chain(hmp->spmp->iroot, 0,
543 HAMMER2_RESOLVE_ALWAYS |
544 HAMMER2_RESOLVE_SHARED);
/* Force NUL-termination of the userland-supplied name */
546 pfs->name[sizeof(pfs->name) - 1] = 0;
547 len = strlen(pfs->name);
548 lhc = hammer2_dirhash(pfs->name, len);
/* Scan the hash bucket range [lhc, lhc+LOMASK] for the exact name */
550 chain = hammer2_chain_lookup(&parent, &key_next,
551 lhc, lhc + HAMMER2_DIRHASH_LOMASK,
552 &error, HAMMER2_LOOKUP_SHARED);
554 if (hammer2_chain_dirent_test(chain, pfs->name, len))
556 chain = hammer2_chain_next(&parent, chain, &key_next,
558 lhc + HAMMER2_DIRHASH_LOMASK,
559 &error, HAMMER2_LOOKUP_SHARED);
561 error = hammer2_error_to_errno(error);
564 * Load the data being returned by the ioctl.
566 if (chain && chain->error == 0) {
567 KKASSERT(chain->bref.type == HAMMER2_BREF_TYPE_INODE);
568 ripdata = &chain->data->ipdata;
569 pfs->name_key = ripdata->meta.name_key;
570 pfs->pfs_type = ripdata->meta.pfs_type;
571 pfs->pfs_subtype = ripdata->meta.pfs_subtype;
572 pfs->pfs_clid = ripdata->meta.pfs_clid;
573 pfs->pfs_fsid = ripdata->meta.pfs_fsid;
576 hammer2_chain_unlock(chain);
577 hammer2_chain_drop(chain);
578 } else if (error == 0) {
582 hammer2_chain_unlock(parent);
583 hammer2_chain_drop(parent);
585 hammer2_inode_unlock(hmp->spmp->iroot);
591 * Create a new PFS under the super-root
/*
 * Creates a new PFS inode under the super-root within a flush
 * transaction: validates/terminates the name, rejects duplicates via
 * pfs_lookup, creates and initializes the inode (type/subtype/clid/
 * fsid from the ioctl, PFSROOT flag, default comp/check algorithms),
 * syncs and flushes it, then relocks the chain and registers the new
 * PFS with hammer2_pfsalloc().
 */
594 hammer2_ioctl_pfs_create(hammer2_inode_t *ip, void *data)
596 hammer2_inode_data_t *nipdata;
597 hammer2_chain_t *nchain;
599 hammer2_dev_t *force_local;
600 hammer2_ioc_pfs_t *pfs;
601 hammer2_inode_t *nip;
605 hmp = ip->pmp->pfs_hmps[0]; /* XXX */
/* Empty names are rejected; termination of the buffer is forced */
612 if (pfs->name[0] == 0)
614 pfs->name[sizeof(pfs->name) - 1] = 0; /* ensure 0-termination */
/* Refuse to create a PFS whose name already exists */
616 if (hammer2_ioctl_pfs_lookup(ip, pfs) == 0)
619 hammer2_trans_init(hmp->spmp, HAMMER2_TRANS_ISFLUSH);
620 mtid = hammer2_trans_sub(hmp->spmp);
621 nip = hammer2_inode_create_pfs(hmp->spmp, pfs->name, strlen(pfs->name),
624 /* nip->flags |= HAMMER2_INODE_NOSIDEQ; */
625 hammer2_inode_modify(nip);
626 nchain = hammer2_inode_chain(nip, 0, HAMMER2_RESOLVE_ALWAYS);
627 error = hammer2_chain_modify(nchain, mtid, 0, 0);
628 KKASSERT(error == 0);
629 nipdata = &nchain->data->ipdata;
/* Stamp caller-supplied PFS identity onto the new inode */
631 nip->meta.pfs_type = pfs->pfs_type;
632 nip->meta.pfs_subtype = pfs->pfs_subtype;
633 nip->meta.pfs_clid = pfs->pfs_clid;
634 nip->meta.pfs_fsid = pfs->pfs_fsid;
635 nip->meta.op_flags |= HAMMER2_OPFLAG_PFSROOT;
638 * Set default compression and check algorithm. This
639 * can be changed later.
641 * Do not allow compression on PFS's with the special name
642 * "boot", the boot loader can't decompress (yet).
644 nip->meta.comp_algo =
645 HAMMER2_ENC_ALGO(HAMMER2_COMP_NEWFS_DEFAULT);
646 nip->meta.check_algo =
647 HAMMER2_ENC_ALGO( HAMMER2_CHECK_XXHASH64);
649 if (strcasecmp(pfs->name, "boot") == 0) {
650 nip->meta.comp_algo =
651 HAMMER2_ENC_ALGO(HAMMER2_COMP_AUTOZERO);
655 * Super-root isn't mounted, fsync it
657 hammer2_chain_unlock(nchain);
658 hammer2_inode_ref(nip);
659 hammer2_inode_unlock(nip);
660 hammer2_inode_chain_sync(nip);
661 hammer2_inode_chain_flush(nip);
662 KKASSERT(nip->refs == 1);
663 hammer2_inode_drop(nip);
666 * We still have a ref on the chain, relock and associate
667 * with an appropriate PFS.
/* HMNT2_LOCAL mounts bind the new PFS to this device only */
669 force_local = (hmp->hflags & HMNT2_LOCAL) ? hmp : NULL;
671 hammer2_chain_lock(nchain, HAMMER2_RESOLVE_ALWAYS);
672 nipdata = &nchain->data->ipdata;
673 kprintf("ADD LOCAL PFS (IOCTL): %s\n", nipdata->filename);
674 hammer2_pfsalloc(nchain, nipdata,
675 nchain->bref.modify_tid, force_local);
677 hammer2_chain_unlock(nchain);
678 hammer2_chain_drop(nchain);
681 hammer2_trans_done(hmp->spmp, 1);
687 * Destroy an existing PFS under the super-root
/*
 * Deletes a PFS by name from this specific device: locates the pmp/
 * cluster-index pair under hammer2_mntlk, permanently deallocates the
 * PFS from the cluster, then runs an unlink XOP against the super-root
 * to destroy the on-media inode.
 */
690 hammer2_ioctl_pfs_delete(hammer2_inode_t *ip, void *data)
692 hammer2_ioc_pfs_t *pfs = data;
696 hammer2_xop_unlink_t *xop;
697 hammer2_inode_t *dip;
698 hammer2_inode_t *iroot;
703 * The PFS should be probed, so we should be able to
704 * locate it. We only delete the PFS from the
705 * specific H2 block device (hmp), not all of
706 * them. We must remove the PFS from the cluster
707 * before we can destroy it.
709 hmp = ip->pmp->pfs_hmps[0];
713 pfs->name[sizeof(pfs->name) - 1] = 0; /* ensure termination */
/* Global mount-list lock guards the pmp/cluster-index search */
715 lockmgr(&hammer2_mntlk, LK_EXCLUSIVE);
717 TAILQ_FOREACH(pmp, &hammer2_pfslist, mntentry) {
718 for (i = 0; i < HAMMER2_MAXCLUSTER; ++i) {
/* Only consider cluster slots backed by this device */
719 if (pmp->pfs_hmps[i] != hmp)
721 if (pmp->pfs_names[i] &&
722 strcmp(pmp->pfs_names[i], pfs->name) == 0) {
/* i != MAXCLUSTER means the inner loop matched */
726 if (i != HAMMER2_MAXCLUSTER)
731 lockmgr(&hammer2_mntlk, LK_RELEASE);
736 * Ok, we found the pmp and we have the index. Permanently remove
737 * the PFS from the cluster
740 kprintf("FOUND PFS %s CLINDEX %d\n", pfs->name, i);
741 hammer2_pfsdealloc(pmp, i, 1);
743 lockmgr(&hammer2_mntlk, LK_RELEASE);
746 * Now destroy the PFS under its device using the per-device
751 hammer2_trans_init(spmp, 0);
752 hammer2_inode_lock(dip, 0);
/* Permanent+forced unlink of the PFS directory entry by name */
754 xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
755 hammer2_xop_setname(&xop->head, pfs->name, strlen(pfs->name));
757 xop->dopermanent = H2DOPERM_PERMANENT | H2DOPERM_FORCE;
758 hammer2_xop_start(&xop->head, &hammer2_unlink_desc);
760 error = hammer2_xop_collect(&xop->head, 0);
762 hammer2_inode_unlock(dip);
/* On success, resolve the inode from the XOP and finish the unlink */
766 ip = hammer2_inode_get(dip->pmp, &xop->head, -1, -1);
767 hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
769 hammer2_inode_unlink_finisher(ip, 0);
770 hammer2_inode_unlock(ip);
773 hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
776 hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
778 hammer2_trans_done(spmp, 1);
780 return (hammer2_error_to_errno(error));
/*
 * Create a snapshot of the PFS backing (ip) under the super-root.
 * Serialized against bulkfree via hmp->bulklk.  The filesystem is
 * synced first, then within a flush transaction a new PFS inode is
 * created whose metadata and root blockset are brute-force copied
 * from the (flushed) source chain.  The snapshot gets a fresh fsid
 * and (normally) a fresh clid, is typed MASTER/SNAPSHOT, flushed to
 * media, and finally registered via hammer2_pfsalloc().
 */
784 hammer2_ioctl_pfs_snapshot(hammer2_inode_t *ip, void *data)
786 const hammer2_inode_data_t *ripdata;
787 hammer2_ioc_pfs_t *pfs = data;
790 hammer2_chain_t *chain;
791 hammer2_inode_t *nip;
/* Reject empty or unterminated snapshot names */
800 if (pfs->name[0] == 0)
802 if (pfs->name[sizeof(pfs->name)-1] != 0)
808 hmp = pmp->pfs_hmps[0];
/* Serialize against bulkfree for the duration of the snapshot */
812 lockmgr(&hmp->bulklk, LK_EXCLUSIVE);
/* Flush everything so the source blockset on media is consistent */
814 hammer2_vfs_sync(pmp->mp, MNT_WAIT);
816 hammer2_trans_init(pmp, HAMMER2_TRANS_ISFLUSH);
817 mtid = hammer2_trans_sub(pmp);
818 hammer2_inode_lock(ip, 0);
819 hammer2_inode_modify(ip);
/* Record the snapshot transaction id on the source PFS */
820 ip->meta.pfs_lsnap_tid = mtid;
822 /* XXX cluster it! */
823 chain = hammer2_inode_chain(ip, 0, HAMMER2_RESOLVE_ALWAYS);
825 name_len = strlen(pfs->name);
826 lhc = hammer2_dirhash(pfs->name, name_len);
831 ripdata = &chain->data->ipdata;
/* Remember the source cluster id for the PFS-boundary case below */
833 opfs_clid = ripdata->meta.pfs_clid;
838 * Create the snapshot directory under the super-root
840 * Set PFS type, generate a unique filesystem id, and generate
841 * a cluster id. Use the same clid when snapshotting a PFS root,
842 * which theoretically allows the snapshot to be used as part of
843 * the same cluster (perhaps as a cache).
845 * Copy the (flushed) blockref array. Theoretically we could use
846 * chain_duplicate() but it becomes difficult to disentangle
847 * the shared core so for now just brute-force it.
/* Drop the source chain lock while allocating to avoid deadlock */
849 hammer2_chain_unlock(chain);
850 nip = hammer2_inode_create_pfs(hmp->spmp, pfs->name, name_len, &error);
851 hammer2_chain_lock(chain, HAMMER2_RESOLVE_ALWAYS);
/* Re-resolve ripdata -- the pointer may have changed across relock */
852 ripdata = &chain->data->ipdata;
855 hammer2_dev_t *force_local;
856 hammer2_chain_t *nchain;
857 hammer2_inode_data_t *wipdata;
858 hammer2_key_t starting_inum;
860 /* nip->flags |= HAMMER2_INODE_NOSIDEQ; */
861 hammer2_inode_modify(nip);
862 nchain = hammer2_inode_chain(nip, 0, HAMMER2_RESOLVE_ALWAYS);
863 error = hammer2_chain_modify(nchain, mtid, 0, 0);
864 KKASSERT(error == 0);
865 wipdata = &nchain->data->ipdata;
/* Seed the snapshot's inode numbering past the source's current tid */
867 starting_inum = ip->pmp->inode_tid + 1;
868 nip->meta.pfs_inum = starting_inum;
869 nip->meta.pfs_type = HAMMER2_PFSTYPE_MASTER;
870 nip->meta.pfs_subtype = HAMMER2_PFSSUBTYPE_SNAPSHOT;
871 nip->meta.op_flags |= HAMMER2_OPFLAG_PFSROOT;
/* Carry over accumulated usage statistics from the source bref */
872 nchain->bref.embed.stats = chain->bref.embed.stats;
874 kern_uuidgen(&nip->meta.pfs_fsid, 1);
878 * Give the snapshot its own private cluster id. As a
879 * snapshot no further synchronization with the original
880 * cluster will be done.
/* PFS boundary: reuse source clid; otherwise generate a fresh one */
882 if (chain->flags & HAMMER2_CHAIN_PFSBOUNDARY)
883 nip->meta.pfs_clid = opfs_clid;
885 kern_uuidgen(&nip->meta.pfs_clid, 1);
887 kern_uuidgen(&nip->meta.pfs_clid, 1);
888 nchain->bref.flags |= HAMMER2_BREF_FLAG_PFSROOT;
890 /* XXX hack blockset copy */
891 /* XXX doesn't work with real cluster */
892 wipdata->meta = nip->meta;
893 wipdata->u.blockset = ripdata->u.blockset;
895 KKASSERT(wipdata == &nchain->data->ipdata);
/* Flush the new snapshot inode to media (super-root isn't mounted) */
897 hammer2_chain_unlock(nchain);
898 hammer2_inode_ref(nip);
899 hammer2_inode_unlock(nip);
900 hammer2_inode_chain_sync(nip);
901 hammer2_inode_chain_flush(nip);
902 KKASSERT(nip->refs == 1);
903 hammer2_inode_drop(nip);
905 force_local = (hmp->hflags & HMNT2_LOCAL) ? hmp : NULL;
907 hammer2_chain_lock(nchain, HAMMER2_RESOLVE_ALWAYS);
908 wipdata = &nchain->data->ipdata;
909 kprintf("SNAPSHOT LOCAL PFS (IOCTL): %s\n", wipdata->filename);
910 hammer2_pfsalloc(nchain, wipdata, nchain->bref.modify_tid,
912 nchain->pmp->inode_tid = starting_inum;
914 hammer2_chain_unlock(nchain);
915 hammer2_chain_drop(nchain);
918 hammer2_chain_unlock(chain);
919 hammer2_chain_drop(chain);
921 hammer2_inode_unlock(ip);
922 hammer2_trans_done(pmp, 1);
924 lockmgr(&hmp->bulklk, LK_RELEASE);
926 return (hammer2_error_to_errno(error));
930 * Retrieve the raw inode structure, non-inclusive of node-specific data.
/*
 * Copies the inode metadata and the maximum per-chain data/inode usage
 * counts across the cluster into the ioctl buffer, under a shared
 * inode lock (read-only operation).
 */
933 hammer2_ioctl_inode_get(hammer2_inode_t *ip, void *data)
935 hammer2_ioc_inode_t *ino;
936 hammer2_chain_t *chain;
943 hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);
945 ino->inode_count = 0;
/* Report the max counts observed across all cluster chains */
946 for (i = 0; i < ip->cluster.nchains; ++i) {
947 if ((chain = ip->cluster.array[i].chain) != NULL) {
948 if (ino->data_count <
949 chain->bref.embed.stats.data_count) {
951 chain->bref.embed.stats.data_count;
953 if (ino->inode_count <
954 chain->bref.embed.stats.inode_count) {
956 chain->bref.embed.stats.inode_count;
/* Zero the output then copy only the in-memory meta block */
960 bzero(&ino->ip_data, sizeof(ino->ip_data));
961 ino->ip_data.meta = ip->meta;
963 hammer2_inode_unlock(ip);
965 return hammer2_error_to_errno(error);
969 * Set various parameters in an inode which cannot be set through
970 * normal filesystem VNOPS.
/*
 * Each HAMMER2IOC_INODE_FLAG_* bit in ino->flags selects one meta
 * field to update; hammer2_inode_modify() is called only when the new
 * value actually differs, so a no-op set does not dirty the inode.
 * Runs inside a (non-flush) transaction with the inode locked.
 */
973 hammer2_ioctl_inode_set(hammer2_inode_t *ip, void *data)
975 hammer2_ioc_inode_t *ino = data;
978 hammer2_trans_init(ip->pmp, 0);
979 hammer2_inode_lock(ip, 0);
981 if ((ino->flags & HAMMER2IOC_INODE_FLAG_CHECK) &&
982 ip->meta.check_algo != ino->ip_data.meta.check_algo) {
983 hammer2_inode_modify(ip);
984 ip->meta.check_algo = ino->ip_data.meta.check_algo;
986 if ((ino->flags & HAMMER2IOC_INODE_FLAG_COMP) &&
987 ip->meta.comp_algo != ino->ip_data.meta.comp_algo) {
988 hammer2_inode_modify(ip);
989 ip->meta.comp_algo = ino->ip_data.meta.comp_algo;
993 /* Ignore these flags for now...*/
994 if ((ino->flags & HAMMER2IOC_INODE_FLAG_IQUOTA) &&
995 ip->meta.inode_quota != ino->ip_data.meta.inode_quota) {
996 hammer2_inode_modify(ip);
997 ip->meta.inode_quota = ino->ip_data.meta.inode_quota;
999 if ((ino->flags & HAMMER2IOC_INODE_FLAG_DQUOTA) &&
1000 ip->meta.data_quota != ino->ip_data.meta.data_quota) {
1001 hammer2_inode_modify(ip);
1002 ip->meta.data_quota = ino->ip_data.meta.data_quota;
1004 if ((ino->flags & HAMMER2IOC_INODE_FLAG_COPIES) &&
1005 ip->meta.ncopies != ino->ip_data.meta.ncopies) {
1006 hammer2_inode_modify(ip);
1007 ip->meta.ncopies = ino->ip_data.meta.ncopies;
1009 hammer2_inode_unlock(ip);
1010 hammer2_trans_done(ip->pmp, 1);
1012 return (hammer2_error_to_errno(error));
/*
 * Debug aid: dump every chain in the inode's cluster via
 * hammer2_dump_chain(), passing the caller-supplied flag word through.
 */
1017 hammer2_ioctl_debug_dump(hammer2_inode_t *ip, u_int flags)
1019 hammer2_chain_t *chain;
1023 for (i = 0; i < ip->cluster.nchains; ++i) {
1024 chain = ip->cluster.array[i].chain;
1027 hammer2_dump_chain(chain, 0, &count, 'i', flags);
1033 * Executes one flush/free pass per call. If trying to recover
1034 * data we just freed up a moment ago it can take up to six passes
1035 * to fully free the blocks. Note that passes occur automatically based
1036 * on free space as the storage fills up, but manual passes may be needed
1037 * if storage becomes almost completely full.
/*
 * Serialized on hmp->bflock.  Normally syncs the filesystem and runs
 * the bulkfree pass against a volume-header snapshot (no transaction
 * needed, runs concurrently with other ops); if the sync fails with
 * ENOSPC it falls back to the live topology inside a flush transaction
 * -- bulkfree being the only way to recover space in that state.
 */
1041 hammer2_ioctl_bulkfree_scan(hammer2_inode_t *ip, void *data)
1043 hammer2_ioc_bulkfree_t *bfi = data;
1046 hammer2_chain_t *vchain;
1053 hmp = pmp->pfs_hmps[0];
1060 * Bulkfree has to be serialized to guarantee at least one sync
1061 * inbetween bulkfrees.
/* LK_PCATCH: the lock wait is interruptible by signals */
1063 error = lockmgr(&hmp->bflock, LK_EXCLUSIVE | LK_PCATCH);
1068 * sync the filesystem and obtain a snapshot of the synchronized
1069 * hmp volume header. We treat the snapshot as an independent
1072 * If ENOSPC occurs we should continue, because bulkfree is the only
1073 * way to fix that. The flush will have flushed everything it could
1074 * and not left any modified chains. Otherwise an error is fatal.
1076 error = hammer2_vfs_sync(pmp->mp, MNT_WAIT);
1077 if (error && error != ENOSPC)
1081 * If we have an ENOSPC error we have to bulkfree on the live
1082 * topology. Otherwise we can bulkfree on a snapshot.
1085 kprintf("hammer2: WARNING! Bulkfree forced to use live "
1087 vchain = &hmp->vchain;
1088 hammer2_chain_ref(vchain);
1091 vchain = hammer2_chain_bulksnap(hmp);
1096 * Bulkfree on a snapshot does not need a transaction, which allows
1097 * it to run concurrently with any operation other than another
1100 * If we are running bulkfree on the live topology we have to be
1101 * in a FLUSH transaction.
1104 hammer2_trans_init(pmp, HAMMER2_TRANS_ISFLUSH);
/* Freeze the bulkfree thread around the manual pass */
1107 hammer2_thr_freeze(&hmp->bfthr);
1108 error = hammer2_bulkfree_pass(hmp, vchain, bfi);
1109 hammer2_thr_unfreeze(&hmp->bfthr);
/* Snapshot vchains need bulkdrop; the live vchain is a plain drop */
1112 hammer2_chain_bulkdrop(vchain);
1114 hammer2_chain_drop(vchain);
1115 hammer2_trans_done(pmp, 1);
1117 error = hammer2_error_to_errno(error);
1120 lockmgr(&hmp->bflock, LK_RELEASE);
1125 * Unconditionally delete meta-data in a hammer2 filesystem
/*
 * Repair tool: forcibly destroys bad metadata selected by iocd->cmd.
 * DELETE_FILE removes a directory entry by name (fd must be the
 * containing directory); DELETE_INUM removes an inode by number via a
 * delete XOP on the PFS root.  NOTE(review): this definition continues
 * past the end of the visible excerpt.
 */
1129 hammer2_ioctl_destroy(hammer2_inode_t *ip, void *data)
1131 hammer2_ioc_destroy_t *iocd = data;
1132 hammer2_pfs_t *pmp = ip->pmp;
1141 case HAMMER2_DELETE_FILE:
1143 * Destroy a bad directory entry by name. Caller must
1144 * pass the directory as fd.
1147 hammer2_xop_unlink_t *xop;
/* Reject unterminated path buffers from userland */
1149 if (iocd->path[sizeof(iocd->path)-1]) {
/* Name deletion only makes sense on a directory inode */
1153 if (ip->meta.type != HAMMER2_OBJTYPE_DIRECTORY) {
1157 hammer2_pfs_memory_wait(ip, 0);
1158 hammer2_trans_init(pmp, 0);
1159 hammer2_inode_lock(ip, 0);
/* Permanent+forced unlink -- bypasses normal safety checks */
1161 xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
1162 hammer2_xop_setname(&xop->head, iocd->path, strlen(iocd->path));
1164 xop->dopermanent = H2DOPERM_PERMANENT |
1167 hammer2_xop_start(&xop->head, &hammer2_unlink_desc);
1169 error = hammer2_xop_collect(&xop->head, 0);
1170 error = hammer2_error_to_errno(error);
1171 hammer2_inode_unlock(ip);
1172 hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1173 hammer2_trans_done(pmp, 1);
1176 case HAMMER2_DELETE_INUM:
1178 * Destroy a bad inode by inode number.
1181 hammer2_xop_lookup_t *xop;
/* Inode numbers start at 1; 0/negative are invalid */
1183 if (iocd->inum < 1) {
1187 hammer2_pfs_memory_wait(ip, 0);
1188 hammer2_trans_init(pmp, 0);
/* Delete by inum keyed off the PFS root (lhc = inode number) */
1190 xop = hammer2_xop_alloc(pmp->iroot, HAMMER2_XOP_MODIFYING);
1191 xop->lhc = iocd->inum;
1192 hammer2_xop_start(&xop->head, &hammer2_delete_desc);
1193 error = hammer2_xop_collect(&xop->head, 0);
1194 error = hammer2_error_to_errno(error);
1195 hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1196 hammer2_trans_done(pmp, 1);