2 * Copyright (c) 2011-2015 The DragonFly Project. All rights reserved.
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@dragonflybsd.org>
6 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
7 * by Daniel Flores (GSOC 2013 - mentored by Matthew Dillon, compression)
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in
17 * the documentation and/or other materials provided with the
19 * 3. Neither the name of The DragonFly Project nor the names of its
20 * contributors may be used to endorse or promote products derived
21 * from this software without specific, prior written permission.
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
24 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
25 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
26 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
27 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
28 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
29 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
30 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
31 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
32 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
33 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
37 * Per-node backend for kernel filesystem interface.
39 * This executes a VOP concurrently on multiple nodes, each node via its own
40 * thread, and competes to advance the original request. The original
41 * request is retired the moment all requirements are met, even if the
42 * operation is still in-progress on some nodes.
44 #include <sys/param.h>
45 #include <sys/systm.h>
46 #include <sys/kernel.h>
47 #include <sys/fcntl.h>
50 #include <sys/namei.h>
51 #include <sys/mount.h>
52 #include <sys/vnode.h>
53 #include <sys/mountctl.h>
54 #include <sys/dirent.h>
56 #include <sys/objcache.h>
57 #include <sys/event.h>
59 #include <vfs/fifofs/fifo.h>
/*
 * Per-node backend for the readdir XOP.
 *
 * Iterates directory entries of xop->head.ip on cluster node `clindex`,
 * feeding each share-locked chain back to the frontend via
 * hammer2_xop_feed().  The inode's chain is the iterator; if it cannot
 * be acquired this node simply contributes nothing.
 *
 * NOTE(review): this listing is fragmentary — several interior lines
 * (braces, declarations, error paths) are not visible here.
 */
64 hammer2_xop_readdir(hammer2_xop_t *arg, int clindex)
66 hammer2_xop_readdir_t *xop = &arg->xop_readdir;
67 hammer2_chain_t *parent;
68 hammer2_chain_t *chain;
69 hammer2_key_t key_next;
/* lkey is the resume key supplied by the frontend (readdir offset). */
74 lkey = xop->head.lkey;
75 if (hammer2_debug & 0x0020)
76 kprintf("xop_readdir %p lkey=%016jx\n", xop, lkey);
79 * The inode's chain is the iterator. If we cannot acquire it our
80 * contribution ends here.
/* Shared lock: readdir is a read-only scan. */
82 parent = hammer2_inode_chain(xop->head.ip, clindex,
83 HAMMER2_RESOLVE_ALWAYS |
84 HAMMER2_RESOLVE_SHARED);
86 kprintf("xop_readdir: NULL parent\n");
91 * Directory scan [re]start and loop.
93 * We feed the share-locked chain back to the frontend and must be
94 * sure not to unlock it in our iteration.
/*
 * First try an exact lookup at lkey (restart on a prior entry), then
 * fall back to a ranged lookup from lkey to the end of the key space.
 */
96 chain = hammer2_chain_lookup(&parent, &key_next, lkey, lkey,
97 &cache_index, HAMMER2_LOOKUP_SHARED);
99 chain = hammer2_chain_lookup(&parent, &key_next,
100 lkey, (hammer2_key_t)-1,
102 HAMMER2_LOOKUP_SHARED);
/* Stop early if the frontend has already retired the XOP. */
104 while (chain && hammer2_xop_active(&xop->head)) {
105 error = hammer2_xop_feed(&xop->head, chain, clindex, 0);
/* NOUNLOCK: the fed chain stays locked for the frontend's use. */
108 chain = hammer2_chain_next(&parent, chain, &key_next,
109 key_next, (hammer2_key_t)-1,
111 HAMMER2_LOOKUP_SHARED |
112 HAMMER2_LOOKUP_NOUNLOCK);
/* Cleanup: release any residual chain, then the parent iterator. */
115 hammer2_chain_unlock(chain);
116 hammer2_chain_drop(chain);
118 hammer2_chain_unlock(parent);
119 hammer2_chain_drop(parent);
/* Terminal feed (NULL chain) reports the final error to the frontend. */
121 hammer2_xop_feed(&xop->head, NULL, clindex, error);
127 * hammer2_xop_readlink { vp, uio, cred }
/*
 * Read the target of a symbolic link into ap->a_uio.
 * Rejects non-VLNK vnodes.  The actual data copy appears to have been
 * disabled (commented-out call below) — TODO confirm intended behavior.
 */
130 hammer2_xop_readlink(struct vop_readlink_args *ap)
137 if (vp->v_type != VLNK)
141 /*error = hammer2_xop_read_file(ip, ap->a_uio, 0);*/
/*
 * hammer2_xop_nresolve { nch, dvp, cred }
 *
 * Resolve a name in directory dvp to a vnode and record the result in
 * the namecache.  Scans the directory cluster by dirhash, matches the
 * exact filename, resolves hardlink pointer entries to their target
 * inode, and finally acquires/attaches the vnode.
 *
 * NOTE(review): fragmentary listing — lines are missing throughout,
 * and the deconsolidation section below references `ochain`/`chain`/
 * `trans` whose declarations are not visible here.
 */
146 hammer2_xop_nresolve(struct vop_nresolve_args *ap)
149 hammer2_inode_t *dip;
150 hammer2_cluster_t *cparent;
151 hammer2_cluster_t *cluster;
152 const hammer2_inode_data_t *ripdata;
153 hammer2_key_t key_next;
155 struct namecache *ncp;
162 dip = VTOI(ap->a_dvp);
163 ncp = ap->a_nch->ncp;
165 name_len = ncp->nc_nlen;
/* lhc is the directory-hash key used to locate candidate entries. */
166 lhc = hammer2_dirhash(name, name_len);
169 * Note: In DragonFly the kernel handles '.' and '..'.
/* Shared locks suffice: resolve is a read-only directory scan. */
171 hammer2_inode_lock(dip, HAMMER2_RESOLVE_ALWAYS |
172 HAMMER2_RESOLVE_SHARED);
173 cparent = hammer2_inode_cluster(dip, HAMMER2_RESOLVE_ALWAYS |
174 HAMMER2_RESOLVE_SHARED);
/*
 * Iterate all entries whose key falls in [lhc, lhc+LOMASK] — multiple
 * names can hash to the same bucket, so compare the stored filename.
 */
176 cluster = hammer2_cluster_lookup(cparent, &key_next,
177 lhc, lhc + HAMMER2_DIRHASH_LOMASK,
178 HAMMER2_LOOKUP_SHARED);
180 if (hammer2_cluster_type(cluster) == HAMMER2_BREF_TYPE_INODE) {
181 ripdata = &hammer2_cluster_rdata(cluster)->ipdata;
182 if (ripdata->meta.name_len == name_len &&
183 bcmp(ripdata->filename, name, name_len) == 0) {
187 cluster = hammer2_cluster_next(cparent, cluster, &key_next,
189 lhc + HAMMER2_DIRHASH_LOMASK,
190 HAMMER2_LOOKUP_SHARED);
192 hammer2_inode_unlock(dip, cparent);
195 * Resolve hardlink entries before acquiring the inode.
/*
 * A HARDLINK entry is only a pointer; chase it to the real target
 * inode by inode number.
 */
198 ripdata = &hammer2_cluster_rdata(cluster)->ipdata;
199 if (ripdata->meta.type == HAMMER2_OBJTYPE_HARDLINK) {
200 hammer2_tid_t inum = ripdata->meta.inum;
201 error = hammer2_hardlink_find(dip, NULL, &cluster);
203 kprintf("hammer2: unable to find hardlink "
213 * nresolve needs to resolve hardlinks, the original cluster is not
217 ip = hammer2_inode_get(dip->pmp, dip, cluster);
218 ripdata = &hammer2_cluster_rdata(cluster)->ipdata;
/*
 * Second-chance fixup: if the acquired inode still resolves to a
 * hardlink pointer, relock and reload its cluster exclusively.
 */
219 if (ripdata->meta.type == HAMMER2_OBJTYPE_HARDLINK) {
220 kprintf("nresolve: fixup hardlink\n");
221 hammer2_inode_ref(ip);
222 hammer2_inode_unlock(ip, NULL);
223 hammer2_cluster_unlock(cluster);
224 hammer2_cluster_drop(cluster);
225 hammer2_inode_lock(ip, HAMMER2_RESOLVE_ALWAYS);
226 cluster = hammer2_inode_cluster(ip,
227 HAMMER2_RESOLVE_ALWAYS);
228 ripdata = &hammer2_cluster_rdata(cluster)->ipdata;
229 hammer2_inode_drop(ip);
230 kprintf("nresolve: fixup to type %02x\n",
239 * Deconsolidate any hardlink whos nlinks == 1. Ignore errors.
240 * If an error occurs chain and ip are left alone.
242 * XXX upgrade shared lock?
/* NOTE(review): `ochain`/`chain` declarations are outside this view. */
244 if (ochain && chain &&
245 chain->data->ipdata.meta.nlinks == 1 && !dip->pmp->ronly) {
246 kprintf("hammer2: need to unconsolidate hardlink for %s\n",
247 chain->data->ipdata.filename);
248 /* XXX retain shared lock on dip? (currently not held) */
249 hammer2_trans_init(&trans, dip->pmp, 0);
250 hammer2_hardlink_deconsolidate(&trans, dip, &chain, &ochain);
251 hammer2_trans_done(&trans);
256 * Acquire the related vnode
258 * NOTE: For error processing, only ENOENT resolves the namecache
259 * entry to NULL, otherwise we just return the error and
260 * leave the namecache unresolved.
262 * NOTE: multiple hammer2_inode structures can be aliased to the
263 * same chain element, for example for hardlinks. This
264 * use case does not 'reattach' inode associations that
265 * might already exist, but always allocates a new one.
267 * WARNING: inode structure is locked exclusively via inode_get
268 * but chain was locked shared. inode_unlock()
269 * will handle it properly.
272 vp = hammer2_igetv(ip, cluster, &error);
275 cache_setvp(ap->a_nch, vp);
276 } else if (error == ENOENT) {
277 cache_setvp(ap->a_nch, NULL);
279 hammer2_inode_unlock(ip, cluster);
282 * The vp should not be released until after we've disposed
283 * of our locks, because it might cause vop_inactive() to
/* Name not found: negative-cache the entry. */
290 cache_setvp(ap->a_nch, NULL);
/* Sanity: on success the namecache entry must hold a vnode. */
292 KASSERT(error || ap->a_nch->ncp->nc_vp != NULL,
293 ("resolve error %d/%p ap %p\n",
294 error, ap->a_nch->ncp->nc_vp, ap));
/*
 * hammer2_xop_nlookupdotdot { dvp, vpp, cred }
 *
 * Resolve ".." — return the vnode of dvp's parent inode (dip->pip).
 * NOTE(review): the NULL-parent early-return body is not visible here.
 */
300 hammer2_xop_nlookupdotdot(struct vop_nlookupdotdot_args *ap)
302 hammer2_inode_t *dip;
304 hammer2_cluster_t *cparent;
308 dip = VTOI(ap->a_dvp);
/* Root (or disconnected) directory has no parent to ascend to. */
310 if ((ip = dip->pip) == NULL) {
315 hammer2_inode_lock(ip, HAMMER2_RESOLVE_ALWAYS);
316 cparent = hammer2_inode_cluster(ip, HAMMER2_RESOLVE_ALWAYS);
317 *ap->a_vpp = hammer2_igetv(ip, cparent, &error);
318 hammer2_inode_unlock(ip, cparent);
/*
 * hammer2_xop_nmkdir { nch, dvp, vpp, cred, vap }
 *
 * Create a directory: allocate a new inode in dip within a NEWINODE
 * transaction, acquire its vnode, and resolve the namecache entry.
 * Fails early on a read-only PFS.
 */
325 hammer2_xop_nmkdir(struct vop_nmkdir_args *ap)
327 hammer2_inode_t *dip;
328 hammer2_inode_t *nip;
329 hammer2_trans_t trans;
330 hammer2_cluster_t *cluster;
331 struct namecache *ncp;
337 dip = VTOI(ap->a_dvp);
338 if (dip->pmp->ronly) {
343 ncp = ap->a_nch->ncp;
345 name_len = ncp->nc_nlen;
/* Throttle if the PFS is low on memory before starting the trans. */
348 hammer2_pfs_memory_wait(dip->pmp);
349 hammer2_trans_init(&trans, dip->pmp, HAMMER2_TRANS_NEWINODE);
350 nip = hammer2_inode_create(&trans, dip, ap->a_vap, ap->a_cred,
352 &cluster, 0, &error);
/* On create failure no inode may have been returned. */
354 KKASSERT(nip == NULL);
357 *ap->a_vpp = hammer2_igetv(nip, cluster, &error);
358 hammer2_inode_unlock(nip, cluster);
360 hammer2_trans_done(&trans);
/* Resolve the namecache only after all fs locks are released. */
363 cache_setunresolved(ap->a_nch);
364 cache_setvp(ap->a_nch, *ap->a_vpp);
371 * hammer2_xop_nlink { nch, dvp, vp, cred }
373 * Create a hardlink from (vp) to {dvp, nch}.
/*
 * Consolidates the hardlink target into the common parent directory
 * of (ip) and (tdip), then connects a new directory entry to it.
 * Lock order: cdip -> fdip -> tdip -> ip (common parent first).
 * NOTE(review): fragmentary — fdip's initialization is not visible.
 */
376 hammer2_xop_nlink(struct vop_nlink_args *ap)
378 hammer2_inode_t *fdip; /* target directory to create link in */
379 hammer2_inode_t *tdip; /* target directory to create link in */
380 hammer2_inode_t *cdip; /* common parent directory */
381 hammer2_inode_t *ip; /* inode we are hardlinking to */
382 hammer2_cluster_t *cluster;
383 hammer2_cluster_t *fdcluster;
384 hammer2_cluster_t *tdcluster;
385 hammer2_cluster_t *cdcluster;
386 hammer2_trans_t trans;
387 struct namecache *ncp;
393 tdip = VTOI(ap->a_dvp);
394 if (tdip->pmp->ronly) {
399 ncp = ap->a_nch->ncp;
401 name_len = ncp->nc_nlen;
404 * ip represents the file being hardlinked. The file could be a
405 * normal file or a hardlink target if it has already been hardlinked.
406 * If ip is a hardlinked target then ip->pip represents the location
407 * of the hardlinked target, NOT the location of the hardlink pointer.
409 * Bump nlinks and potentially also create or move the hardlink
410 * target in the parent directory common to (ip) and (tdip). The
411 * consolidation code can modify ip->cluster and ip->pip. The
412 * returned cluster is locked.
415 hammer2_pfs_memory_wait(ip->pmp);
416 hammer2_trans_init(&trans, ip->pmp, HAMMER2_TRANS_NEWINODE);
419 * The common parent directory must be locked first to avoid deadlocks.
420 * Also note that fdip and/or tdip might match cdip.
423 cdip = hammer2_inode_common_parent(fdip, tdip);
424 hammer2_inode_lock(cdip, HAMMER2_RESOLVE_ALWAYS);
425 hammer2_inode_lock(fdip, HAMMER2_RESOLVE_ALWAYS);
426 hammer2_inode_lock(tdip, HAMMER2_RESOLVE_ALWAYS);
428 cdcluster = hammer2_inode_cluster(cdip, HAMMER2_RESOLVE_ALWAYS);
429 fdcluster = hammer2_inode_cluster(fdip, HAMMER2_RESOLVE_ALWAYS);
430 tdcluster = hammer2_inode_cluster(tdip, HAMMER2_RESOLVE_ALWAYS);
432 hammer2_inode_lock(ip, HAMMER2_RESOLVE_ALWAYS);
433 cluster = hammer2_inode_cluster(ip, HAMMER2_RESOLVE_ALWAYS);
435 error = hammer2_hardlink_consolidate(&trans, ip, &cluster,
441 * Create a directory entry connected to the specified cluster.
443 * WARNING! chain can get moved by the connect (indirectly due to
444 * potential indirect block creation).
446 error = hammer2_inode_connect(&trans,
/* Namecache resolved only after the connect succeeds. */
451 cache_setunresolved(ap->a_nch);
452 cache_setvp(ap->a_nch, ap->a_vp);
/* Unlock in reverse of acquisition order; drop the cdip ref last. */
455 hammer2_inode_unlock(ip, cluster);
456 hammer2_inode_unlock(tdip, tdcluster);
457 hammer2_inode_unlock(fdip, fdcluster);
458 hammer2_inode_unlock(cdip, cdcluster);
459 hammer2_inode_drop(cdip);
460 hammer2_trans_done(&trans);
467 * hammer2_xop_ncreate { nch, dvp, vpp, cred, vap }
469 * The operating system has already ensured that the directory entry
470 * does not exist and done all appropriate namespace locking.
/*
 * Create a regular file: same pattern as nmkdir — NEWINODE trans,
 * inode_create, igetv, then namecache resolution after unlock.
 */
473 hammer2_xop_ncreate(struct vop_ncreate_args *ap)
475 hammer2_inode_t *dip;
476 hammer2_inode_t *nip;
477 hammer2_trans_t trans;
478 hammer2_cluster_t *ncluster;
479 struct namecache *ncp;
485 dip = VTOI(ap->a_dvp);
486 if (dip->pmp->ronly) {
491 ncp = ap->a_nch->ncp;
493 name_len = ncp->nc_nlen;
494 hammer2_pfs_memory_wait(dip->pmp);
495 hammer2_trans_init(&trans, dip->pmp, HAMMER2_TRANS_NEWINODE);
498 nip = hammer2_inode_create(&trans, dip, ap->a_vap, ap->a_cred,
500 &ncluster, 0, &error);
/* On create failure no inode may have been returned. */
502 KKASSERT(nip == NULL);
505 *ap->a_vpp = hammer2_igetv(nip, ncluster, &error);
506 hammer2_inode_unlock(nip, ncluster);
508 hammer2_trans_done(&trans);
/* Resolve the namecache only after all fs locks are released. */
511 cache_setunresolved(ap->a_nch);
512 cache_setvp(ap->a_nch, *ap->a_vpp);
519 * Make a device node (typically a fifo)
/*
 * hammer2_xop_nmknod { nch, dvp, vpp, cred, vap } — identical control
 * flow to ncreate; the node type comes from ap->a_vap.
 */
522 hammer2_xop_nmknod(struct vop_nmknod_args *ap)
524 hammer2_inode_t *dip;
525 hammer2_inode_t *nip;
526 hammer2_trans_t trans;
527 hammer2_cluster_t *ncluster;
528 struct namecache *ncp;
534 dip = VTOI(ap->a_dvp);
535 if (dip->pmp->ronly) {
540 ncp = ap->a_nch->ncp;
542 name_len = ncp->nc_nlen;
543 hammer2_pfs_memory_wait(dip->pmp);
544 hammer2_trans_init(&trans, dip->pmp, HAMMER2_TRANS_NEWINODE);
547 nip = hammer2_inode_create(&trans, dip, ap->a_vap, ap->a_cred,
549 &ncluster, 0, &error);
/* On create failure no inode may have been returned. */
551 KKASSERT(nip == NULL);
554 *ap->a_vpp = hammer2_igetv(nip, ncluster, &error);
555 hammer2_inode_unlock(nip, ncluster);
557 hammer2_trans_done(&trans);
/* Resolve the namecache only after all fs locks are released. */
560 cache_setunresolved(ap->a_nch);
561 cache_setvp(ap->a_nch, *ap->a_vpp);
568 * hammer2_xop_nsymlink { nch, dvp, vpp, cred, vap, target }
/*
 * Create a symbolic link: allocate the inode (forced to VLNK), then
 * store the target path either inline in the inode's embedded data
 * area (<= HAMMER2_EMBEDDED_BYTES) or via a uio file write for longer
 * targets.  NOTE(review): the long-target write call is commented out
 * below — confirm whether long symlinks are actually persisted.
 */
571 hammer2_xop_nsymlink(struct vop_nsymlink_args *ap)
573 hammer2_inode_t *dip;
574 hammer2_inode_t *nip;
575 hammer2_cluster_t *ncparent;
576 hammer2_trans_t trans;
577 struct namecache *ncp;
582 dip = VTOI(ap->a_dvp);
586 ncp = ap->a_nch->ncp;
588 name_len = ncp->nc_nlen;
589 hammer2_pfs_memory_wait(dip->pmp);
590 hammer2_trans_init(&trans, dip->pmp, HAMMER2_TRANS_NEWINODE);
593 ap->a_vap->va_type = VLNK; /* enforce type */
595 nip = hammer2_inode_create(&trans, dip, ap->a_vap, ap->a_cred,
597 &ncparent, 0, &error);
/* On create failure no inode may have been returned. */
599 KKASSERT(nip == NULL);
601 hammer2_trans_done(&trans);
604 *ap->a_vpp = hammer2_igetv(nip, ncparent, &error);
607 * Build the softlink (~like file data) and finalize the namecache.
613 hammer2_inode_data_t *nipdata;
615 nipdata = &hammer2_cluster_wdata(ncparent)->ipdata;
616 /* nipdata = &nip->chain->data->ipdata;XXX */
617 bytes = strlen(ap->a_target);
/* Short target: copy directly into the inode's embedded data area. */
619 if (bytes <= HAMMER2_EMBEDDED_BYTES) {
620 KKASSERT(nipdata->meta.op_flags &
621 HAMMER2_OPFLAG_DIRECTDATA);
622 bcopy(ap->a_target, nipdata->u.data, bytes);
623 nipdata->meta.size = bytes;
624 nip->meta.size = bytes;
625 hammer2_cluster_modsync(ncparent);
626 hammer2_inode_unlock(nip, ncparent);
627 /* nipdata = NULL; not needed */
/* Long target: set up a SYSSPACE uio for a file-data write. */
629 hammer2_inode_unlock(nip, ncparent);
630 /* nipdata = NULL; not needed */
631 bzero(&auio, sizeof(auio));
632 bzero(&aiov, sizeof(aiov));
633 auio.uio_iov = &aiov;
634 auio.uio_segflg = UIO_SYSSPACE;
635 auio.uio_rw = UIO_WRITE;
636 auio.uio_resid = bytes;
638 auio.uio_td = curthread;
639 aiov.iov_base = ap->a_target;
640 aiov.iov_len = bytes;
641 /*error = hammer2_xop_write_file(nip, &auio, IO_APPEND, 0);*/
642 /* XXX handle error */
646 hammer2_inode_unlock(nip, ncparent);
648 hammer2_trans_done(&trans);
/* Resolve the namecache only after all fs locks are released. */
654 cache_setunresolved(ap->a_nch);
655 cache_setvp(ap->a_nch, *ap->a_vpp);
661 * hammer2_xop_nremove { nch, dvp, cred }
/*
 * Remove a non-directory entry.  The isdir argument to
 * hammer2_unlink_file() is 0 (must NOT be a directory); nlinks -1
 * drops one link.  Flushes the deferred-unlink queue afterwards.
 */
664 hammer2_xop_nremove(struct vop_nremove_args *ap)
666 hammer2_inode_t *dip;
667 hammer2_trans_t trans;
668 struct namecache *ncp;
674 dip = VTOI(ap->a_dvp);
675 if (dip->pmp->ronly) {
680 ncp = ap->a_nch->ncp;
682 name_len = ncp->nc_nlen;
684 hammer2_pfs_memory_wait(dip->pmp);
685 hammer2_trans_init(&trans, dip->pmp, 0);
686 error = hammer2_unlink_file(&trans, dip, NULL, name, name_len,
687 0, NULL, ap->a_nch, -1);
688 hammer2_run_unlinkq(&trans, dip->pmp);
689 hammer2_trans_done(&trans);
/* Unlink the namecache entry only after fs locks are released. */
691 cache_unlink(ap->a_nch);
697 * hammer2_xop_nrmdir { nch, dvp, cred }
/*
 * Remove a directory entry.  Like nremove but passes isdir=1 (must BE
 * a directory).  The deferred-unlink queue is run *before* the unlink
 * here — the opposite ordering from nremove; TODO confirm intent.
 */
700 hammer2_xop_nrmdir(struct vop_nrmdir_args *ap)
702 hammer2_inode_t *dip;
703 hammer2_trans_t trans;
704 struct namecache *ncp;
710 dip = VTOI(ap->a_dvp);
711 if (dip->pmp->ronly) {
716 ncp = ap->a_nch->ncp;
718 name_len = ncp->nc_nlen;
720 hammer2_pfs_memory_wait(dip->pmp);
721 hammer2_trans_init(&trans, dip->pmp, 0);
722 hammer2_run_unlinkq(&trans, dip->pmp);
723 error = hammer2_unlink_file(&trans, dip, NULL, name, name_len,
724 1, NULL, ap->a_nch, -1);
725 hammer2_trans_done(&trans);
/* Unlink the namecache entry only after fs locks are released. */
727 cache_unlink(ap->a_nch);
733 * hammer2_xop_nrename { fnch, tnch, fdvp, tdvp, cred }
/*
 * Rename (fdvp, fnch) to (tdvp, tnch):
 *   1. Reject cross-mount renames and read-only PFS.
 *   2. Lock common parent, source dir, target dir (in that order).
 *   3. Remove any existing target entry.
 *   4. Consolidate hardlink target location if needed.
 *   5. Unlink the source entry (retaining the link count), then
 *      reconnect the cluster under the target name.
 *   6. Update the namecache after all locks are released.
 *
 * NOTE(review): fragmentary listing — error-path lines and several
 * declarations (e.g. hlink, tnch_error) are not visible here.
 */
736 hammer2_xop_nrename(struct vop_nrename_args *ap)
738 struct namecache *fncp;
739 struct namecache *tncp;
740 hammer2_inode_t *cdip;
741 hammer2_inode_t *fdip;
742 hammer2_inode_t *tdip;
744 hammer2_cluster_t *cluster;
745 hammer2_cluster_t *fdcluster;
746 hammer2_cluster_t *tdcluster;
747 hammer2_cluster_t *cdcluster;
748 hammer2_trans_t trans;
749 const uint8_t *fname;
751 const uint8_t *tname;
/* Renames never cross mount boundaries. */
757 if (ap->a_fdvp->v_mount != ap->a_tdvp->v_mount)
759 if (ap->a_fdvp->v_mount != ap->a_fnch->ncp->nc_vp->v_mount)
762 fdip = VTOI(ap->a_fdvp); /* source directory */
763 tdip = VTOI(ap->a_tdvp); /* target directory */
765 if (fdip->pmp->ronly)
769 fncp = ap->a_fnch->ncp; /* entry name in source */
770 fname = fncp->nc_name;
771 fname_len = fncp->nc_nlen;
773 tncp = ap->a_tnch->ncp; /* entry name in target */
774 tname = tncp->nc_name;
775 tname_len = tncp->nc_nlen;
777 hammer2_pfs_memory_wait(tdip->pmp);
778 hammer2_trans_init(&trans, tdip->pmp, 0);
781 * ip is the inode being renamed. If this is a hardlink then
782 * ip represents the actual file and not the hardlink marker.
784 ip = VTOI(fncp->nc_vp);
789 * The common parent directory must be locked first to avoid deadlocks.
790 * Also note that fdip and/or tdip might match cdip.
792 * WARNING! fdip may not match ip->pip. That is, if the source file
793 * is already a hardlink then what we are renaming is the
794 * hardlink pointer, not the hardlink itself. The hardlink
795 * directory (ip->pip) will already be at a common parent
798 * Be sure to use ip->pip when finding the common parent
799 * against tdip or we might accidently move the hardlink
800 * target into a subdirectory that makes it inaccessible to
803 cdip = hammer2_inode_common_parent(ip->pip, tdip);
804 hammer2_inode_lock(cdip, HAMMER2_RESOLVE_ALWAYS);
805 hammer2_inode_lock(fdip, HAMMER2_RESOLVE_ALWAYS);
806 hammer2_inode_lock(tdip, HAMMER2_RESOLVE_ALWAYS);
808 cdcluster = hammer2_inode_cluster(cdip, HAMMER2_RESOLVE_ALWAYS);
809 fdcluster = hammer2_inode_cluster(fdip, HAMMER2_RESOLVE_ALWAYS);
810 tdcluster = hammer2_inode_cluster(tdip, HAMMER2_RESOLVE_ALWAYS);
813 * Keep a tight grip on the inode so the temporary unlinking from
814 * the source location prior to linking to the target location
815 * does not cause the cluster to be destroyed.
817 * NOTE: To avoid deadlocks we cannot lock (ip) while we are
818 * unlinking elements from their directories. Locking
819 * the nlinks field does not lock the whole inode.
821 hammer2_inode_ref(ip);
824 * Remove target if it exists.
826 error = hammer2_unlink_file(&trans, tdip, NULL, tname, tname_len,
827 -1, NULL, ap->a_tnch, -1);
/* ENOENT (no existing target) is not an error for rename. */
829 if (error && error != ENOENT)
833 * When renaming a hardlinked file we may have to re-consolidate
834 * the location of the hardlink target.
836 * If ip represents a regular file the consolidation code essentially
837 * does nothing other than return the same locked cluster that was
840 * The returned cluster will be locked.
842 * WARNING! We do not currently have a local copy of ipdata but
843 * we do use one later remember that it must be reloaded
844 * on any modification to the inode, including connects.
846 hammer2_inode_lock(ip, HAMMER2_RESOLVE_ALWAYS);
847 cluster = hammer2_inode_cluster(ip, HAMMER2_RESOLVE_ALWAYS);
849 error = hammer2_hardlink_consolidate(&trans, ip, &cluster,
855 * Disconnect (fdip, fname) from the source directory. This will
856 * disconnect (ip) if it represents a direct file. If (ip) represents
857 * a hardlink the HARDLINK pointer object will be removed but the
858 * hardlink will stay intact.
860 * Always pass nch as NULL because we intend to reconnect the inode,
861 * so we don't want hammer2_unlink_file() to rename it to the hidden
862 * open-but-unlinked directory.
864 * The target cluster may be marked DELETED but will not be destroyed
865 * since we retain our hold on ip and cluster.
867 * NOTE: We pass nlinks as 0 (not -1) in order to retain the file's
870 error = hammer2_unlink_file(&trans, fdip, ip, fname, fname_len,
871 -1, &hlink, NULL, 0);
872 KKASSERT(error != EAGAIN);
877 * Reconnect ip to target directory using cluster. Chains cannot
878 * actually be moved, so this will duplicate the cluster in the new
879 * spot and assign it to the ip, replacing the old cluster.
881 * WARNING: Because recursive locks are allowed and we unlinked the
882 * file that we have a cluster-in-hand for just above, the
883 * cluster might have been delete-duplicated. We must
884 * refactor the cluster.
886 * WARNING: Chain locks can lock buffer cache buffers, to avoid
887 * deadlocks we want to unlock before issuing a cache_*()
888 * op (that might have to lock a vnode).
890 * NOTE: Pass nlinks as 0 because we retained the link count from
891 * the unlink, so we do not have to modify it.
893 error = hammer2_inode_connect(&trans,
896 tname, tname_len, 0);
898 KKASSERT(cluster != NULL);
/* Repoint ip at its new parent (hardlinks keep their real parent). */
899 hammer2_inode_repoint(ip, (hlink ? ip->pip : tdip), cluster);
/* Unlock in reverse of acquisition order; drop refs, run unlinkq. */
902 hammer2_inode_unlock(ip, cluster);
904 hammer2_inode_unlock(tdip, tdcluster);
905 hammer2_inode_unlock(fdip, fdcluster);
906 hammer2_inode_unlock(cdip, cdcluster);
907 hammer2_inode_drop(ip);
908 hammer2_inode_drop(cdip);
909 hammer2_run_unlinkq(&trans, fdip->pmp);
910 hammer2_trans_done(&trans);
913 * Issue the namecache update after unlocking all the internal
914 * hammer structures, otherwise we might deadlock.
916 if (tnch_error == 0) {
917 cache_unlink(ap->a_tnch);
918 cache_setunresolved(ap->a_tnch);
921 cache_rename(ap->a_fnch, ap->a_tnch);