 * Copyright (c) 2011-2018 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
/*
 * WARNING! The ioctl functions which manipulate the connection state need
 *	    to be able to run without deadlock on the volume's chain lock.
 *	    Most of these functions use a separate lock.
 */
45 static int hammer2_ioctl_version_get(hammer2_inode_t *ip, void *data);
46 static int hammer2_ioctl_recluster(hammer2_inode_t *ip, void *data);
47 static int hammer2_ioctl_remote_scan(hammer2_inode_t *ip, void *data);
48 static int hammer2_ioctl_remote_add(hammer2_inode_t *ip, void *data);
49 static int hammer2_ioctl_remote_del(hammer2_inode_t *ip, void *data);
50 static int hammer2_ioctl_remote_rep(hammer2_inode_t *ip, void *data);
51 static int hammer2_ioctl_socket_get(hammer2_inode_t *ip, void *data);
52 static int hammer2_ioctl_socket_set(hammer2_inode_t *ip, void *data);
53 static int hammer2_ioctl_pfs_get(hammer2_inode_t *ip, void *data);
54 static int hammer2_ioctl_pfs_lookup(hammer2_inode_t *ip, void *data);
55 static int hammer2_ioctl_pfs_create(hammer2_inode_t *ip, void *data);
56 static int hammer2_ioctl_pfs_snapshot(hammer2_inode_t *ip, void *data);
57 static int hammer2_ioctl_pfs_delete(hammer2_inode_t *ip, void *data);
58 static int hammer2_ioctl_inode_get(hammer2_inode_t *ip, void *data);
59 static int hammer2_ioctl_inode_set(hammer2_inode_t *ip, void *data);
60 static int hammer2_ioctl_debug_dump(hammer2_inode_t *ip, u_int flags);
61 //static int hammer2_ioctl_inode_comp_set(hammer2_inode_t *ip, void *data);
62 //static int hammer2_ioctl_inode_comp_rec_set(hammer2_inode_t *ip, void *data);
63 //static int hammer2_ioctl_inode_comp_rec_set2(hammer2_inode_t *ip, void *data);
64 static int hammer2_ioctl_bulkfree_scan(hammer2_inode_t *ip, void *data);
65 static int hammer2_ioctl_destroy(hammer2_inode_t *ip, void *data);
68 hammer2_ioctl(hammer2_inode_t *ip, u_long com, void *data, int fflag,
74 * Standard root cred checks, will be selectively ignored below
75 * for ioctls that do not require root creds.
77 error = priv_check_cred(cred, PRIV_HAMMER_IOCTL, 0);
80 case HAMMER2IOC_VERSION_GET:
81 error = hammer2_ioctl_version_get(ip, data);
83 case HAMMER2IOC_RECLUSTER:
85 error = hammer2_ioctl_recluster(ip, data);
87 case HAMMER2IOC_REMOTE_SCAN:
89 error = hammer2_ioctl_remote_scan(ip, data);
91 case HAMMER2IOC_REMOTE_ADD:
93 error = hammer2_ioctl_remote_add(ip, data);
95 case HAMMER2IOC_REMOTE_DEL:
97 error = hammer2_ioctl_remote_del(ip, data);
99 case HAMMER2IOC_REMOTE_REP:
101 error = hammer2_ioctl_remote_rep(ip, data);
103 case HAMMER2IOC_SOCKET_GET:
105 error = hammer2_ioctl_socket_get(ip, data);
107 case HAMMER2IOC_SOCKET_SET:
109 error = hammer2_ioctl_socket_set(ip, data);
111 case HAMMER2IOC_PFS_GET:
113 error = hammer2_ioctl_pfs_get(ip, data);
115 case HAMMER2IOC_PFS_LOOKUP:
117 error = hammer2_ioctl_pfs_lookup(ip, data);
119 case HAMMER2IOC_PFS_CREATE:
121 error = hammer2_ioctl_pfs_create(ip, data);
123 case HAMMER2IOC_PFS_DELETE:
125 error = hammer2_ioctl_pfs_delete(ip, data);
127 case HAMMER2IOC_PFS_SNAPSHOT:
129 error = hammer2_ioctl_pfs_snapshot(ip, data);
131 case HAMMER2IOC_INODE_GET:
132 error = hammer2_ioctl_inode_get(ip, data);
134 case HAMMER2IOC_INODE_SET:
136 error = hammer2_ioctl_inode_set(ip, data);
138 case HAMMER2IOC_BULKFREE_SCAN:
139 error = hammer2_ioctl_bulkfree_scan(ip, data);
141 case HAMMER2IOC_BULKFREE_ASYNC:
142 error = hammer2_ioctl_bulkfree_scan(ip, NULL);
144 /*case HAMMER2IOC_INODE_COMP_SET:
145 error = hammer2_ioctl_inode_comp_set(ip, data);
147 case HAMMER2IOC_INODE_COMP_REC_SET:
148 error = hammer2_ioctl_inode_comp_rec_set(ip, data);
150 case HAMMER2IOC_INODE_COMP_REC_SET2:
151 error = hammer2_ioctl_inode_comp_rec_set2(ip, data);
153 case HAMMER2IOC_DESTROY:
155 error = hammer2_ioctl_destroy(ip, data);
157 case HAMMER2IOC_DEBUG_DUMP:
158 error = hammer2_ioctl_debug_dump(ip, *(u_int *)data);
168 * Retrieve version and basic info
171 hammer2_ioctl_version_get(hammer2_inode_t *ip, void *data)
173 hammer2_ioc_version_t *version = data;
176 hmp = ip->pmp->pfs_hmps[0];
178 version->version = hmp->voldata.version;
180 version->version = -1;
185 hammer2_ioctl_recluster(hammer2_inode_t *ip, void *data)
187 hammer2_ioc_recluster_t *recl = data;
188 struct vnode *vproot;
190 hammer2_cluster_t *cluster;
193 fp = holdfp(curproc->p_fd, recl->fd, -1);
195 error = VFS_ROOT(ip->pmp->mp, &vproot);
197 cluster = &ip->pmp->iroot->cluster;
198 kprintf("reconnect to cluster: nc=%d focus=%p\n",
199 cluster->nchains, cluster->focus);
200 if (cluster->nchains != 1 || cluster->focus == NULL) {
201 kprintf("not a local device mount\n");
204 hammer2_cluster_reconnect(cluster->focus->hmp,
218 * Retrieve information about a remote
221 hammer2_ioctl_remote_scan(hammer2_inode_t *ip, void *data)
224 hammer2_ioc_remote_t *remote = data;
225 int copyid = remote->copyid;
227 hmp = ip->pmp->pfs_hmps[0];
231 if (copyid < 0 || copyid >= HAMMER2_COPYID_COUNT)
234 hammer2_voldata_lock(hmp);
235 remote->copy1 = hmp->voldata.copyinfo[copyid];
236 hammer2_voldata_unlock(hmp);
239 * Adjust nextid (GET only)
241 while (++copyid < HAMMER2_COPYID_COUNT &&
242 hmp->voldata.copyinfo[copyid].copyid == 0) {
245 if (copyid == HAMMER2_COPYID_COUNT)
248 remote->nextid = copyid;
254 * Add new remote entry
257 hammer2_ioctl_remote_add(hammer2_inode_t *ip, void *data)
259 hammer2_ioc_remote_t *remote = data;
260 hammer2_pfs_t *pmp = ip->pmp;
262 int copyid = remote->copyid;
265 hmp = pmp->pfs_hmps[0];
268 if (copyid >= HAMMER2_COPYID_COUNT)
271 hammer2_voldata_lock(hmp);
273 for (copyid = 1; copyid < HAMMER2_COPYID_COUNT; ++copyid) {
274 if (hmp->voldata.copyinfo[copyid].copyid == 0)
277 if (copyid == HAMMER2_COPYID_COUNT) {
282 hammer2_voldata_modify(hmp);
283 remote->copy1.copyid = copyid;
284 hmp->voldata.copyinfo[copyid] = remote->copy1;
285 hammer2_volconf_update(hmp, copyid);
287 hammer2_voldata_unlock(hmp);
292 * Delete existing remote entry
295 hammer2_ioctl_remote_del(hammer2_inode_t *ip, void *data)
297 hammer2_ioc_remote_t *remote = data;
298 hammer2_pfs_t *pmp = ip->pmp;
300 int copyid = remote->copyid;
303 hmp = pmp->pfs_hmps[0];
306 if (copyid >= HAMMER2_COPYID_COUNT)
308 remote->copy1.path[sizeof(remote->copy1.path) - 1] = 0;
309 hammer2_voldata_lock(hmp);
311 for (copyid = 1; copyid < HAMMER2_COPYID_COUNT; ++copyid) {
312 if (hmp->voldata.copyinfo[copyid].copyid == 0)
314 if (strcmp(remote->copy1.path,
315 hmp->voldata.copyinfo[copyid].path) == 0) {
319 if (copyid == HAMMER2_COPYID_COUNT) {
324 hammer2_voldata_modify(hmp);
325 hmp->voldata.copyinfo[copyid].copyid = 0;
326 hammer2_volconf_update(hmp, copyid);
328 hammer2_voldata_unlock(hmp);
333 * Replace existing remote entry
336 hammer2_ioctl_remote_rep(hammer2_inode_t *ip, void *data)
338 hammer2_ioc_remote_t *remote = data;
340 int copyid = remote->copyid;
342 hmp = ip->pmp->pfs_hmps[0];
345 if (copyid < 0 || copyid >= HAMMER2_COPYID_COUNT)
348 hammer2_voldata_lock(hmp);
349 hammer2_voldata_modify(hmp);
350 /*hammer2_volconf_update(hmp, copyid);*/
351 hammer2_voldata_unlock(hmp);
357 * Retrieve communications socket
360 hammer2_ioctl_socket_get(hammer2_inode_t *ip, void *data)
366 * Set communications socket for connection
369 hammer2_ioctl_socket_set(hammer2_inode_t *ip, void *data)
371 hammer2_ioc_remote_t *remote = data;
373 int copyid = remote->copyid;
375 hmp = ip->pmp->pfs_hmps[0];
378 if (copyid < 0 || copyid >= HAMMER2_COPYID_COUNT)
381 hammer2_voldata_lock(hmp);
382 hammer2_voldata_unlock(hmp);
388 * Used to scan and retrieve PFS information. PFS's are directories under
391 * To scan PFSs pass name_key=0. The function will scan for the next
392 * PFS and set all fields, as well as set name_next to the next key.
393 * When no PFSs remain, name_next is set to (hammer2_key_t)-1.
395 * To retrieve a particular PFS by key, specify the key but note that
396 * the ioctl will return the lowest key >= specified_key, so the caller
397 * must verify the key.
399 * To retrieve the PFS associated with the file descriptor, pass
400 * name_key set to (hammer2_key_t)-1.
403 hammer2_ioctl_pfs_get(hammer2_inode_t *ip, void *data)
405 const hammer2_inode_data_t *ripdata;
407 hammer2_ioc_pfs_t *pfs;
408 hammer2_chain_t *parent;
409 hammer2_chain_t *chain;
410 hammer2_key_t key_next;
411 hammer2_key_t save_key;
414 hmp = ip->pmp->pfs_hmps[0];
419 save_key = pfs->name_key;
425 if (save_key == (hammer2_key_t)-1) {
426 hammer2_inode_lock(ip->pmp->iroot, 0);
428 chain = hammer2_inode_chain(ip->pmp->iroot, 0,
429 HAMMER2_RESOLVE_ALWAYS |
430 HAMMER2_RESOLVE_SHARED);
432 hammer2_inode_lock(hmp->spmp->iroot, 0);
433 parent = hammer2_inode_chain(hmp->spmp->iroot, 0,
434 HAMMER2_RESOLVE_ALWAYS |
435 HAMMER2_RESOLVE_SHARED);
436 chain = hammer2_chain_lookup(&parent, &key_next,
437 pfs->name_key, HAMMER2_KEY_MAX,
439 HAMMER2_LOOKUP_SHARED);
446 if (chain->bref.type == HAMMER2_BREF_TYPE_INODE)
448 if (parent == NULL) {
449 hammer2_chain_unlock(chain);
450 hammer2_chain_drop(chain);
454 chain = hammer2_chain_next(&parent, chain, &key_next,
455 key_next, HAMMER2_KEY_MAX,
457 HAMMER2_LOOKUP_SHARED);
459 error = hammer2_error_to_errno(error);
462 * Load the data being returned by the ioctl.
464 if (chain && chain->error == 0) {
465 ripdata = &chain->data->ipdata;
466 pfs->name_key = ripdata->meta.name_key;
467 pfs->pfs_type = ripdata->meta.pfs_type;
468 pfs->pfs_subtype = ripdata->meta.pfs_subtype;
469 pfs->pfs_clid = ripdata->meta.pfs_clid;
470 pfs->pfs_fsid = ripdata->meta.pfs_fsid;
471 KKASSERT(ripdata->meta.name_len < sizeof(pfs->name));
472 bcopy(ripdata->filename, pfs->name, ripdata->meta.name_len);
473 pfs->name[ripdata->meta.name_len] = 0;
474 ripdata = NULL; /* safety */
477 * Calculate name_next, if any. We are only accessing
478 * chain->bref so we can ignore chain->error (if the key
479 * is used later it will error then).
481 if (parent == NULL) {
482 pfs->name_next = (hammer2_key_t)-1;
484 chain = hammer2_chain_next(&parent, chain, &key_next,
485 key_next, HAMMER2_KEY_MAX,
487 HAMMER2_LOOKUP_SHARED);
489 pfs->name_next = chain->bref.key;
491 pfs->name_next = (hammer2_key_t)-1;
494 pfs->name_next = (hammer2_key_t)-1;
502 hammer2_chain_unlock(chain);
503 hammer2_chain_drop(chain);
506 hammer2_chain_unlock(parent);
507 hammer2_chain_drop(parent);
509 if (save_key == (hammer2_key_t)-1) {
510 hammer2_inode_unlock(ip->pmp->iroot);
512 hammer2_inode_unlock(hmp->spmp->iroot);
519 * Find a specific PFS by name
522 hammer2_ioctl_pfs_lookup(hammer2_inode_t *ip, void *data)
524 const hammer2_inode_data_t *ripdata;
526 hammer2_ioc_pfs_t *pfs;
527 hammer2_chain_t *parent;
528 hammer2_chain_t *chain;
529 hammer2_key_t key_next;
534 hmp = ip->pmp->pfs_hmps[0];
541 hammer2_inode_lock(hmp->spmp->iroot, HAMMER2_RESOLVE_SHARED);
542 parent = hammer2_inode_chain(hmp->spmp->iroot, 0,
543 HAMMER2_RESOLVE_ALWAYS |
544 HAMMER2_RESOLVE_SHARED);
546 pfs->name[sizeof(pfs->name) - 1] = 0;
547 len = strlen(pfs->name);
548 lhc = hammer2_dirhash(pfs->name, len);
550 chain = hammer2_chain_lookup(&parent, &key_next,
551 lhc, lhc + HAMMER2_DIRHASH_LOMASK,
552 &error, HAMMER2_LOOKUP_SHARED);
554 if (hammer2_chain_dirent_test(chain, pfs->name, len))
556 chain = hammer2_chain_next(&parent, chain, &key_next,
558 lhc + HAMMER2_DIRHASH_LOMASK,
559 &error, HAMMER2_LOOKUP_SHARED);
561 error = hammer2_error_to_errno(error);
564 * Load the data being returned by the ioctl.
566 if (chain && chain->error == 0) {
567 KKASSERT(chain->bref.type == HAMMER2_BREF_TYPE_INODE);
568 ripdata = &chain->data->ipdata;
569 pfs->name_key = ripdata->meta.name_key;
570 pfs->pfs_type = ripdata->meta.pfs_type;
571 pfs->pfs_subtype = ripdata->meta.pfs_subtype;
572 pfs->pfs_clid = ripdata->meta.pfs_clid;
573 pfs->pfs_fsid = ripdata->meta.pfs_fsid;
576 hammer2_chain_unlock(chain);
577 hammer2_chain_drop(chain);
578 } else if (error == 0) {
582 hammer2_chain_unlock(parent);
583 hammer2_chain_drop(parent);
585 hammer2_inode_unlock(hmp->spmp->iroot);
591 * Create a new PFS under the super-root
/*
 * NOTE(review): this extraction is missing lines (original line numbers are
 * embedded and jump); the visible fragments below show the overall flow:
 * validate name -> reject duplicates -> create super-root inode ->
 * initialize PFS meta-data -> fsync (super-root is not mounted) ->
 * re-lock chain and associate with a PFS via hammer2_pfsalloc().
 */
594 hammer2_ioctl_pfs_create(hammer2_inode_t *ip, void *data)
596 hammer2_inode_data_t *nipdata;
597 hammer2_chain_t *nchain;
599 hammer2_dev_t *force_local;
600 hammer2_ioc_pfs_t *pfs;
601 hammer2_inode_t *nip;
/* Operates on the first backing device only (single-device assumption). */
605 hmp = ip->pmp->pfs_hmps[0]; /* XXX */
/* Empty names are rejected; name buffer is forcibly NUL-terminated. */
612 if (pfs->name[0] == 0)
614 pfs->name[sizeof(pfs->name) - 1] = 0; /* ensure 0-termination */
/* A successful lookup means the PFS already exists (duplicate). */
616 if (hammer2_ioctl_pfs_lookup(ip, pfs) == 0)
/* Create the new PFS root inode in a super-root transaction. */
619 hammer2_trans_init(hmp->spmp, 0);
620 mtid = hammer2_trans_sub(hmp->spmp);
621 nip = hammer2_inode_create(hmp->spmp->iroot, hmp->spmp->iroot,
623 pfs->name, strlen(pfs->name), 0,
624 1, HAMMER2_OBJTYPE_DIRECTORY, 0,
625 HAMMER2_INSERT_PFSROOT, &error);
627 nip->flags |= HAMMER2_INODE_NOSIDEQ;
628 hammer2_inode_modify(nip);
629 nchain = hammer2_inode_chain(nip, 0, HAMMER2_RESOLVE_ALWAYS);
630 error = hammer2_chain_modify(nchain, mtid, 0, 0);
631 KKASSERT(error == 0);
632 nipdata = &nchain->data->ipdata;
/* Copy userland-requested PFS identity into the new inode. */
634 nip->meta.pfs_type = pfs->pfs_type;
635 nip->meta.pfs_subtype = pfs->pfs_subtype;
636 nip->meta.pfs_clid = pfs->pfs_clid;
637 nip->meta.pfs_fsid = pfs->pfs_fsid;
638 nip->meta.op_flags |= HAMMER2_OPFLAG_PFSROOT;
641 * Set default compression and check algorithm. This
642 * can be changed later.
644 * Do not allow compression on PFS's with the special name
645 * "boot", the boot loader can't decompress (yet).
647 nip->meta.comp_algo =
648 HAMMER2_ENC_ALGO(HAMMER2_COMP_NEWFS_DEFAULT);
649 nip->meta.check_algo =
650 HAMMER2_ENC_ALGO( HAMMER2_CHECK_XXHASH64);
652 if (strcasecmp(pfs->name, "boot") == 0) {
653 nip->meta.comp_algo =
654 HAMMER2_ENC_ALGO(HAMMER2_COMP_AUTOZERO);
658 * Super-root isn't mounted, fsync it
660 hammer2_chain_unlock(nchain);
661 hammer2_inode_ref(nip);
662 hammer2_inode_unlock(nip);
663 hammer2_inode_chain_sync(nip);
664 hammer2_inode_chain_flush(nip);
665 KKASSERT(nip->refs == 1);
666 hammer2_inode_drop(nip);
669 * We still have a ref on the chain, relock and associate
670 * with an appropriate PFS.
672 force_local = (hmp->hflags & HMNT2_LOCAL) ? hmp : NULL;
674 hammer2_chain_lock(nchain, HAMMER2_RESOLVE_ALWAYS);
675 nipdata = &nchain->data->ipdata;
676 kprintf("ADD LOCAL PFS (IOCTL): %s\n", nipdata->filename);
677 hammer2_pfsalloc(nchain, nipdata,
678 nchain->bref.modify_tid, force_local);
680 hammer2_chain_unlock(nchain);
681 hammer2_chain_drop(nchain);
684 hammer2_trans_done(hmp->spmp);
690 * Destroy an existing PFS under the super-root
/*
 * NOTE(review): extraction is missing lines; the visible flow is: find the
 * (pmp, cluster-index) for the named PFS under hammer2_mntlk, dealloc it
 * from the cluster, then permanently unlink it under the device's
 * super-root via an unlink xop.  Branch structure around the three
 * hammer2_xop_retire() calls cannot be fully determined from this sample.
 */
693 hammer2_ioctl_pfs_delete(hammer2_inode_t *ip, void *data)
695 hammer2_ioc_pfs_t *pfs = data;
699 hammer2_xop_unlink_t *xop;
700 hammer2_inode_t *dip;
701 hammer2_inode_t *iroot;
706 * The PFS should be probed, so we should be able to
707 * locate it. We only delete the PFS from the
708 * specific H2 block device (hmp), not all of
709 * them. We must remove the PFS from the cluster
710 * before we can destroy it.
712 hmp = ip->pmp->pfs_hmps[0];
716 pfs->name[sizeof(pfs->name) - 1] = 0; /* ensure termination */
/* Scan all mounted PFSs for a cluster element on this hmp by name. */
718 lockmgr(&hammer2_mntlk, LK_EXCLUSIVE);
720 TAILQ_FOREACH(pmp, &hammer2_pfslist, mntentry) {
721 for (i = 0; i < HAMMER2_MAXCLUSTER; ++i) {
722 if (pmp->pfs_hmps[i] != hmp)
724 if (pmp->pfs_names[i] &&
725 strcmp(pmp->pfs_names[i], pfs->name) == 0) {
729 if (i != HAMMER2_MAXCLUSTER)
/* not found: release the mount lock and fail (sampled-out error path) */
734 lockmgr(&hammer2_mntlk, LK_RELEASE);
739 * Ok, we found the pmp and we have the index. Permanently remove
740 * the PFS from the cluster
743 kprintf("FOUND PFS %s CLINDEX %d\n", pfs->name, i);
744 hammer2_pfsdealloc(pmp, i, 1);
746 lockmgr(&hammer2_mntlk, LK_RELEASE);
749 * Now destroy the PFS under its device using the per-device
/* spmp/dip assignments for the super-root are sampled out here. */
754 hammer2_trans_init(spmp, 0);
755 hammer2_inode_lock(dip, 0);
/* Issue a forced, permanent unlink xop by name under the super-root. */
757 xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
758 hammer2_xop_setname(&xop->head, pfs->name, strlen(pfs->name));
760 xop->dopermanent = H2DOPERM_PERMANENT | H2DOPERM_FORCE;
761 hammer2_xop_start(&xop->head, hammer2_xop_unlink);
763 error = hammer2_xop_collect(&xop->head, 0);
765 hammer2_inode_unlock(dip);
/* On success, finish the unlink on the victim inode. */
769 ip = hammer2_inode_get(dip->pmp, dip, &xop->head.cluster, -1);
770 hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
772 hammer2_inode_unlink_finisher(ip, 0);
773 hammer2_inode_unlock(ip);
776 hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
779 hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
781 hammer2_trans_done(spmp);
783 return (hammer2_error_to_errno(error));
/*
 * Create a snapshot of a PFS under the super-root.
 *
 * NOTE(review): extraction is missing lines; the visible flow is:
 * validate name -> serialize via hmp->bulklk -> flush the filesystem ->
 * in a flush transaction, create a new super-root inode, copy the source
 * PFS's meta-data and (flushed) blockset into it, flush it, and associate
 * it with a PFS via hammer2_pfsalloc().
 */
787 hammer2_ioctl_pfs_snapshot(hammer2_inode_t *ip, void *data)
789 const hammer2_inode_data_t *ripdata;
790 hammer2_ioc_pfs_t *pfs = data;
793 hammer2_chain_t *chain;
794 hammer2_inode_t *nip;
/* Reject empty or unterminated snapshot names. */
804 if (pfs->name[0] == 0)
806 if (pfs->name[sizeof(pfs->name)-1] != 0)
812 hmp = pmp->pfs_hmps[0];
/* Serialize snapshots against bulkfree and other snapshots. */
816 lockmgr(&hmp->bulklk, LK_EXCLUSIVE);
/* Flush so the snapshot captures a consistent on-media state. */
818 hammer2_vfs_sync(pmp->mp, MNT_WAIT);
820 hammer2_trans_init(pmp, HAMMER2_TRANS_ISFLUSH);
821 mtid = hammer2_trans_sub(pmp);
822 hammer2_inode_lock(ip, 0);
823 hammer2_inode_modify(ip);
/* Record the last-snapshot transaction id on the source PFS. */
824 ip->meta.pfs_lsnap_tid = mtid;
826 /* XXX cluster it! */
827 chain = hammer2_inode_chain(ip, 0, HAMMER2_RESOLVE_ALWAYS);
829 name_len = strlen(pfs->name);
830 lhc = hammer2_dirhash(pfs->name, name_len);
835 ripdata = &chain->data->ipdata;
837 opfs_clid = ripdata->meta.pfs_clid;
842 * Create the snapshot directory under the super-root
844 * Set PFS type, generate a unique filesystem id, and generate
845 * a cluster id. Use the same clid when snapshotting a PFS root,
846 * which theoretically allows the snapshot to be used as part of
847 * the same cluster (perhaps as a cache).
849 * Copy the (flushed) blockref array. Theoretically we could use
850 * chain_duplicate() but it becomes difficult to disentangle
851 * the shared core so for now just brute-force it.
856 hammer2_chain_unlock(chain);
857 nip = hammer2_inode_create(hmp->spmp->iroot, hmp->spmp->iroot,
859 pfs->name, name_len, 0,
861 HAMMER2_INSERT_PFSROOT, &error);
862 hammer2_chain_lock(chain, HAMMER2_RESOLVE_ALWAYS);
863 ripdata = &chain->data->ipdata;
866 hammer2_dev_t *force_local;
867 hammer2_chain_t *nchain;
868 hammer2_inode_data_t *wipdata;
869 hammer2_key_t starting_inum;
871 nip->flags |= HAMMER2_INODE_NOSIDEQ;
872 hammer2_inode_modify(nip);
873 nchain = hammer2_inode_chain(nip, 0, HAMMER2_RESOLVE_ALWAYS);
874 error = hammer2_chain_modify(nchain, mtid, 0, 0);
875 KKASSERT(error == 0);
876 wipdata = &nchain->data->ipdata;
/* Snapshot becomes an independent MASTER PFS of subtype SNAPSHOT. */
878 starting_inum = ip->pmp->inode_tid + 1;
879 nip->meta.pfs_inum = starting_inum;
880 nip->meta.pfs_type = HAMMER2_PFSTYPE_MASTER;
881 nip->meta.pfs_subtype = HAMMER2_PFSSUBTYPE_SNAPSHOT;
882 nip->meta.op_flags |= HAMMER2_OPFLAG_PFSROOT;
883 nchain->bref.embed.stats = chain->bref.embed.stats;
885 kern_uuidgen(&nip->meta.pfs_fsid, 1);
889 * Give the snapshot its own private cluster id. As a
890 * snapshot no further synchronization with the original
891 * cluster will be done.
893 if (chain->flags & HAMMER2_CHAIN_PFSBOUNDARY)
894 nip->meta.pfs_clid = opfs_clid;
896 kern_uuidgen(&nip->meta.pfs_clid, 1);
898 kern_uuidgen(&nip->meta.pfs_clid, 1);
899 nchain->bref.flags |= HAMMER2_BREF_FLAG_PFSROOT;
901 /* XXX hack blockset copy */
902 /* XXX doesn't work with real cluster */
903 wipdata->meta = nip->meta;
904 wipdata->u.blockset = ripdata->u.blockset;
906 KKASSERT(wipdata == &nchain->data->ipdata);
/* Super-root is not mounted; sync and flush the new inode explicitly. */
908 hammer2_chain_unlock(nchain);
909 hammer2_inode_ref(nip);
910 hammer2_inode_unlock(nip);
911 hammer2_inode_chain_sync(nip);
912 hammer2_inode_chain_flush(nip);
913 KKASSERT(nip->refs == 1);
914 hammer2_inode_drop(nip);
916 force_local = (hmp->hflags & HMNT2_LOCAL) ? hmp : NULL;
918 hammer2_chain_lock(nchain, HAMMER2_RESOLVE_ALWAYS);
919 wipdata = &nchain->data->ipdata;
920 kprintf("SNAPSHOT LOCAL PFS (IOCTL): %s\n", wipdata->filename);
921 hammer2_pfsalloc(nchain, wipdata, nchain->bref.modify_tid,
923 nchain->pmp->inode_tid = starting_inum;
925 hammer2_chain_unlock(nchain);
926 hammer2_chain_drop(nchain);
929 hammer2_chain_unlock(chain);
930 hammer2_chain_drop(chain);
932 hammer2_inode_unlock(ip);
933 hammer2_trans_done(pmp);
935 lockmgr(&hmp->bulklk, LK_RELEASE);
937 return (hammer2_error_to_errno(error));
941 * Retrieve the raw inode structure, non-inclusive of node-specific data.
944 hammer2_ioctl_inode_get(hammer2_inode_t *ip, void *data)
946 hammer2_ioc_inode_t *ino;
947 hammer2_chain_t *chain;
954 hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);
956 ino->inode_count = 0;
957 for (i = 0; i < ip->cluster.nchains; ++i) {
958 if ((chain = ip->cluster.array[i].chain) != NULL) {
959 if (ino->data_count <
960 chain->bref.embed.stats.data_count) {
962 chain->bref.embed.stats.data_count;
964 if (ino->inode_count <
965 chain->bref.embed.stats.inode_count) {
967 chain->bref.embed.stats.inode_count;
971 bzero(&ino->ip_data, sizeof(ino->ip_data));
972 ino->ip_data.meta = ip->meta;
974 hammer2_inode_unlock(ip);
976 return hammer2_error_to_errno(error);
980 * Set various parameters in an inode which cannot be set through
981 * normal filesystem VNOPS.
984 hammer2_ioctl_inode_set(hammer2_inode_t *ip, void *data)
986 hammer2_ioc_inode_t *ino = data;
989 hammer2_trans_init(ip->pmp, 0);
990 hammer2_inode_lock(ip, 0);
992 if ((ino->flags & HAMMER2IOC_INODE_FLAG_CHECK) &&
993 ip->meta.check_algo != ino->ip_data.meta.check_algo) {
994 hammer2_inode_modify(ip);
995 ip->meta.check_algo = ino->ip_data.meta.check_algo;
997 if ((ino->flags & HAMMER2IOC_INODE_FLAG_COMP) &&
998 ip->meta.comp_algo != ino->ip_data.meta.comp_algo) {
999 hammer2_inode_modify(ip);
1000 ip->meta.comp_algo = ino->ip_data.meta.comp_algo;
1004 /* Ignore these flags for now...*/
1005 if ((ino->flags & HAMMER2IOC_INODE_FLAG_IQUOTA) &&
1006 ip->meta.inode_quota != ino->ip_data.meta.inode_quota) {
1007 hammer2_inode_modify(ip);
1008 ip->meta.inode_quota = ino->ip_data.meta.inode_quota;
1010 if ((ino->flags & HAMMER2IOC_INODE_FLAG_DQUOTA) &&
1011 ip->meta.data_quota != ino->ip_data.meta.data_quota) {
1012 hammer2_inode_modify(ip);
1013 ip->meta.data_quota = ino->ip_data.meta.data_quota;
1015 if ((ino->flags & HAMMER2IOC_INODE_FLAG_COPIES) &&
1016 ip->meta.ncopies != ino->ip_data.meta.ncopies) {
1017 hammer2_inode_modify(ip);
1018 ip->meta.ncopies = ino->ip_data.meta.ncopies;
1020 hammer2_inode_unlock(ip);
1021 hammer2_trans_done(ip->pmp);
1023 return (hammer2_error_to_errno(error));
1028 hammer2_ioctl_debug_dump(hammer2_inode_t *ip, u_int flags)
1030 hammer2_chain_t *chain;
1034 for (i = 0; i < ip->cluster.nchains; ++i) {
1035 chain = ip->cluster.array[i].chain;
1038 hammer2_dump_chain(chain, 0, &count, 'i', flags);
1044 * Executes one flush/free pass per call. If trying to recover
1045 * data we just freed up a moment ago it can take up to six passes
1046 * to fully free the blocks. Note that passes occur automatically based
1047 * on free space as the storage fills up, but manual passes may be needed
1048 * if storage becomes almost completely full.
/*
 * NOTE(review): extraction is missing lines; the visible flow is: serialize
 * via hmp->bflock -> sync the filesystem -> run the bulkfree pass on a
 * bulksnap of the volume header (or on the live topology when the sync
 * failed with ENOSPC, which requires a flush transaction).
 */
1052 hammer2_ioctl_bulkfree_scan(hammer2_inode_t *ip, void *data)
1054 hammer2_ioc_bulkfree_t *bfi = data;
1057 hammer2_chain_t *vchain;
1064 hmp = pmp->pfs_hmps[0];
1071 * Bulkfree has to be serialized to guarantee at least one sync
1072 * inbetween bulkfrees.
1074 error = lockmgr(&hmp->bflock, LK_EXCLUSIVE | LK_PCATCH);
1079 * sync the filesystem and obtain a snapshot of the synchronized
1080 * hmp volume header. We treat the snapshot as an independent
1083 * If ENOSPC occurs we should continue, because bulkfree is the only
1084 * way to fix that. The flush will have flushed everything it could
1085 * and not left any modified chains. Otherwise an error is fatal.
1087 error = hammer2_vfs_sync(pmp->mp, MNT_WAIT);
1088 if (error && error != ENOSPC)
1092 * If we have an ENOSPC error we have to bulkfree on the live
1093 * topology. Otherwise we can bulkfree on a snapshot.
1096 kprintf("hammer2: WARNING! Bulkfree forced to use live "
1098 vchain = &hmp->vchain;
1099 hammer2_chain_ref(vchain);
/* normal path: take a snapshot of the volume chain */
1102 vchain = hammer2_chain_bulksnap(hmp);
1107 * Bulkfree on a snapshot does not need a transaction, which allows
1108 * it to run concurrently with any operation other than another
1111 * If we are running bulkfree on the live topology we have to be
1112 * in a FLUSH transaction.
1115 hammer2_trans_init(pmp, HAMMER2_TRANS_ISFLUSH);
/* Freeze the bulkfree thread while the manual pass runs. */
1118 hammer2_thr_freeze(&hmp->bfthr);
1119 error = hammer2_bulkfree_pass(hmp, vchain, bfi);
1120 hammer2_thr_unfreeze(&hmp->bfthr);
/* snapshot case: bulkdrop; live case: drop ref and end the transaction */
1123 hammer2_chain_bulkdrop(vchain);
1125 hammer2_chain_drop(vchain);
1126 hammer2_trans_done(pmp);
1128 error = hammer2_error_to_errno(error);
1131 lockmgr(&hmp->bflock, LK_RELEASE);
1136 * Unconditionally delete meta-data in a hammer2 filesystem
1140 hammer2_ioctl_destroy(hammer2_inode_t *ip, void *data)
1142 hammer2_ioc_destroy_t *iocd = data;
1143 hammer2_pfs_t *pmp = ip->pmp;
1152 case HAMMER2_DELETE_FILE:
1154 * Destroy a bad directory entry by name. Caller must
1155 * pass the directory as fd.
1158 hammer2_xop_unlink_t *xop;
1160 if (iocd->path[sizeof(iocd->path)-1]) {
1164 if (ip->meta.type != HAMMER2_OBJTYPE_DIRECTORY) {
1168 hammer2_pfs_memory_wait(pmp);
1169 hammer2_trans_init(pmp, 0);
1170 hammer2_inode_lock(ip, 0);
1172 xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
1173 hammer2_xop_setname(&xop->head, iocd->path, strlen(iocd->path));
1175 xop->dopermanent = H2DOPERM_PERMANENT |
1178 hammer2_xop_start(&xop->head, hammer2_xop_unlink);
1180 error = hammer2_xop_collect(&xop->head, 0);
1181 error = hammer2_error_to_errno(error);
1182 hammer2_inode_unlock(ip);
1183 hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1184 hammer2_trans_done(pmp);
1187 case HAMMER2_DELETE_INUM:
1189 * Destroy a bad inode by inode number.
1192 hammer2_xop_lookup_t *xop;
1194 if (iocd->inum < 1) {
1198 hammer2_pfs_memory_wait(pmp);
1199 hammer2_trans_init(pmp, 0);
1201 xop = hammer2_xop_alloc(pmp->iroot, 0);
1202 xop->lhc = iocd->inum;
1203 hammer2_xop_start(&xop->head, hammer2_xop_lookup);
1204 error = hammer2_xop_collect(&xop->head, 0);
1206 ip = hammer2_inode_get(pmp, NULL,
1207 &xop->head.cluster, -1);
1208 hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1210 ip->meta.nlinks = 1;
1211 hammer2_inode_unlink_finisher(ip, 0);
1212 hammer2_inode_unlock(ip);
1215 hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);