/*
 * Copyright (c) 2011-2015 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
 * by Daniel Flores (GSOC 2013 - mentored by Matthew Dillon, compression)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Kernel Filesystem interface
 *
 * NOTE! local ipdata pointers must be reloaded on any modifying operation
 *	 to the inode as its underlying chain may have changed.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/fcntl.h>
#include <sys/namei.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/mountctl.h>
#include <sys/dirent.h>
#include <sys/objcache.h>
#include <sys/event.h>
#include <vfs/fifofs/fifo.h>

#include "hammer2.h"

static int hammer2_read_file(hammer2_inode_t *ip, struct uio *uio,
			     int seqcount);
static int hammer2_write_file(hammer2_inode_t *ip, struct uio *uio,
			     int ioflag, int seqcount);
static void hammer2_extend_file(hammer2_inode_t *ip, hammer2_key_t nsize);
static void hammer2_truncate_file(hammer2_inode_t *ip, hammer2_key_t nsize);

struct objcache *cache_xops;

static __inline
void
hammer2_knote(struct vnode *vp, int flags)
{
	if (flags)
		KNOTE(&vp->v_pollinfo.vpi_kqinfo.ki_note, flags);
}
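
/*
 * Illustrative sketch (assumption, not part of the build): a modifying
 * VOP typically accumulates NOTE_* flags while it works and posts them
 * once at the end through hammer2_knote(), which hands them to any
 * EVFILT_VNODE knotes attached to the vnode.  The function below is
 * hypothetical and exists only to show the calling pattern.
 */
#if 0
static void
example_post_notes(struct vnode *vp, int extended)
{
	int kflags = NOTE_WRITE;		/* something was written */

	if (extended)
		kflags |= NOTE_EXTEND;		/* the file also grew */
	hammer2_knote(vp, kflags);
}
#endif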
/*
 * Last reference to a vnode is going away but it is still cached.
 */
static
int
hammer2_vop_inactive(struct vop_inactive_args *ap)
{
	/*
	 * Check for deleted inodes and recycle immediately on the last
	 * release.  Be sure to destroy any left-over buffer cache buffers
	 * so we do not waste time trying to flush them.
	 *
	 * Note that deleting the file block chains under the inode chain
	 * would just be a waste of energy, so don't do it.
	 *
	 * WARNING: nvtruncbuf() can only be safely called without the inode
	 *	    lock held due to the way our write thread works.
	 */
	if (ip->flags & HAMMER2_INODE_ISUNLINKED) {
		/*
		 * Detect updates to the embedded data which may be
		 * synchronized by the strategy code.  Simply mark the
		 * inode modified so it gets picked up by our normal flush.
		 */
		nblksize = hammer2_calc_logical(ip, 0, &lbase, NULL);
		nvtruncbuf(vp, 0, nblksize, 0, 0);

/*
 * Reclaim a vnode so that it can be reused; after the inode is
 * disassociated, the filesystem must manage it alone.
 */
static
int
hammer2_vop_reclaim(struct vop_reclaim_args *ap)
{
	/*
	 * The final close of a deleted file or directory marks it for
	 * destruction.  The DELETED flag allows the flusher to shortcut
	 * any modified blocks still unflushed (that is, just ignore them).
	 *
	 * HAMMER2 usually does not try to optimize the freemap by returning
	 * deleted blocks to it, as it generally cannot know how many
	 * snapshots might still reference portions of the file/dir.
	 */

	/*
	 * NOTE! We do not attempt to flush chains here, flushing is
	 *	 really fragile and could also deadlock.
	 */

	/*
	 * This occurs if the inode was unlinked while open.  Reclamation of
	 * these inodes requires processing we cannot safely do here so add
	 * the inode to the sideq in that situation.
	 *
	 * A modified inode may require chain synchronization which will no
	 * longer be driven by a sync or fsync without the vnode, also use
	 * the sideq for that.
	 *
	 * A reclaim can occur at any time so we cannot safely start a
	 * transaction to handle reclamation of unlinked files.  Instead,
	 * the ip is left with a reference and placed on a linked list (the
	 * sideq) to be handled later.
	 */
	if ((ip->flags & (HAMMER2_INODE_ISUNLINKED |
			  HAMMER2_INODE_MODIFIED |
			  HAMMER2_INODE_RESIZED)) &&
	    (ip->flags & HAMMER2_INODE_ISDELETED) == 0) {
		hammer2_inode_sideq_t *ipul;

		ipul = kmalloc(sizeof(*ipul), pmp->minode, M_WAITOK | M_ZERO);

		hammer2_spin_ex(&pmp->list_spin);
		if ((ip->flags & HAMMER2_INODE_ONSIDEQ) == 0) {
			atomic_set_int(&ip->flags, HAMMER2_INODE_ONSIDEQ);
			TAILQ_INSERT_TAIL(&pmp->sideq, ipul, entry);
			hammer2_spin_unex(&pmp->list_spin);
		} else {
			hammer2_spin_unex(&pmp->list_spin);
			kfree(ipul, pmp->minode);
			hammer2_inode_drop(ip);		/* vp ref */
		}
		/* retain ref from vp for ipul */
	} else {
		hammer2_inode_drop(ip);			/* vp ref */
	}

	/*
	 * XXX handle background sync when ip dirty, kernel will no longer
	 * notify us regarding this inode because there is no longer a
	 * vnode attached to it.
	 */

static
int
hammer2_vop_fsync(struct vop_fsync_args *ap)
{
#if 0
	/* XXX can't do this yet */
	hammer2_trans_init(ip->pmp, HAMMER2_TRANS_ISFLUSH);
	vfsync(vp, ap->a_waitfor, 1, NULL, NULL);
#endif
	hammer2_trans_init(ip->pmp, 0);
	vfsync(vp, ap->a_waitfor, 1, NULL, NULL);

	/*
	 * Calling chain_flush here creates a lot of duplicative
	 * COW operations due to non-optimal vnode ordering.
	 *
	 * Only do it for an actual fsync() syscall.  The other forms
	 * which call this function will eventually call chain_flush
	 * on the volume root as a catch-all, which is far more optimal.
	 */
	hammer2_inode_lock(ip, 0);
	if (ip->flags & HAMMER2_INODE_MODIFIED)
		hammer2_inode_chain_sync(ip);
	hammer2_inode_unlock(ip);
	hammer2_trans_done(ip->pmp);

static
int
hammer2_vop_access(struct vop_access_args *ap)
{
	hammer2_inode_t *ip = VTOI(ap->a_vp);

	hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);
	uid = hammer2_to_unix_xid(&ip->meta.uid);
	gid = hammer2_to_unix_xid(&ip->meta.gid);
	error = vop_helper_access(ap, uid, gid, ip->meta.mode, ip->meta.uflags);
	hammer2_inode_unlock(ip);

static
int
hammer2_vop_getattr(struct vop_getattr_args *ap)
{
	hammer2_chain_t *chain;

	hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);

	vap->va_fsid = pmp->mp->mnt_stat.f_fsid.val[0];
	vap->va_fileid = ip->meta.inum;
	vap->va_mode = ip->meta.mode;
	vap->va_nlink = ip->meta.nlinks;
	vap->va_uid = hammer2_to_unix_xid(&ip->meta.uid);
	vap->va_gid = hammer2_to_unix_xid(&ip->meta.gid);
	vap->va_size = ip->meta.size;	/* protected by shared lock */
	vap->va_blocksize = HAMMER2_PBUFSIZE;
	vap->va_flags = ip->meta.uflags;
	hammer2_time_to_timespec(ip->meta.ctime, &vap->va_ctime);
	hammer2_time_to_timespec(ip->meta.mtime, &vap->va_mtime);
	hammer2_time_to_timespec(ip->meta.mtime, &vap->va_atime);

	if (ip->meta.type == HAMMER2_OBJTYPE_DIRECTORY) {
		/*
		 * Can't really calculate directory use sans the files under
		 * it, just assume one block for now.
		 */
		vap->va_bytes += HAMMER2_INODE_BYTES;
	} else {
		for (i = 0; i < ip->cluster.nchains; ++i) {
			if ((chain = ip->cluster.array[i].chain) != NULL) {
				if (vap->va_bytes <
				    chain->bref.embed.stats.data_count) {
					vap->va_bytes =
					    chain->bref.embed.stats.data_count;
				}
			}
		}
	}
	vap->va_type = hammer2_get_vtype(ip->meta.type);
	vap->va_uid_uuid = ip->meta.uid;
	vap->va_gid_uuid = ip->meta.gid;
	vap->va_vaflags = VA_UID_UUID_VALID | VA_GID_UUID_VALID |
			  VA_FSID_UUID_VALID;

	hammer2_inode_unlock(ip);

static
int
hammer2_vop_setattr(struct vop_setattr_args *ap)
{
	hammer2_update_time(&ctime);

	hammer2_pfs_memory_wait(ip->pmp);
	hammer2_trans_init(ip->pmp, 0);
	hammer2_inode_lock(ip, 0);

	if (vap->va_flags != VNOVAL) {
		flags = ip->meta.uflags;
		error = vop_helper_setattr_flags(&flags, vap->va_flags,
				hammer2_to_unix_xid(&ip->meta.uid),
				ap->a_cred);
		if (error == 0) {
			if (ip->meta.uflags != flags) {
				hammer2_inode_modify(ip);
				ip->meta.uflags = flags;
				ip->meta.ctime = ctime;
				kflags |= NOTE_ATTRIB;
			}
			if (ip->meta.uflags & (IMMUTABLE | APPEND)) {
				error = 0;
				goto done;
			}
		}
		goto done;
	}
	if (ip->meta.uflags & (IMMUTABLE | APPEND)) {
		error = EPERM;
		goto done;
	}

	if (vap->va_uid != (uid_t)VNOVAL || vap->va_gid != (gid_t)VNOVAL) {
		mode_t cur_mode = ip->meta.mode;
		uid_t cur_uid = hammer2_to_unix_xid(&ip->meta.uid);
		gid_t cur_gid = hammer2_to_unix_xid(&ip->meta.gid);

		error = vop_helper_chown(ap->a_vp, vap->va_uid, vap->va_gid,
					 ap->a_cred,
					 &cur_uid, &cur_gid, &cur_mode);
		if (error == 0) {
			hammer2_guid_to_uuid(&uuid_uid, cur_uid);
			hammer2_guid_to_uuid(&uuid_gid, cur_gid);
			if (bcmp(&uuid_uid, &ip->meta.uid, sizeof(uuid_uid)) ||
			    bcmp(&uuid_gid, &ip->meta.gid, sizeof(uuid_gid)) ||
			    ip->meta.mode != cur_mode
			) {
				hammer2_inode_modify(ip);
				ip->meta.uid = uuid_uid;
				ip->meta.gid = uuid_gid;
				ip->meta.mode = cur_mode;
				ip->meta.ctime = ctime;
			}
			kflags |= NOTE_ATTRIB;
		}
	}

	/*
	 * Resize the file
	 */
	if (vap->va_size != VNOVAL && ip->meta.size != vap->va_size) {
		switch(vp->v_type) {
		case VREG:
			if (vap->va_size == ip->meta.size)
				break;
			if (vap->va_size < ip->meta.size) {
				hammer2_mtx_ex(&ip->truncate_lock);
				hammer2_truncate_file(ip, vap->va_size);
				hammer2_mtx_unlock(&ip->truncate_lock);
				kflags |= NOTE_WRITE;
			} else {
				hammer2_extend_file(ip, vap->va_size);
				kflags |= NOTE_WRITE | NOTE_EXTEND;
			}
			hammer2_inode_modify(ip);
			ip->meta.mtime = ctime;
			break;
		default:
			error = EINVAL;
			break;
		}
	}

#if 0
	/* atime not supported */
	if (vap->va_atime.tv_sec != VNOVAL) {
		hammer2_inode_modify(ip);
		ip->meta.atime = hammer2_timespec_to_time(&vap->va_atime);
		kflags |= NOTE_ATTRIB;
	}
#endif

	if (vap->va_mode != (mode_t)VNOVAL) {
		mode_t cur_mode = ip->meta.mode;
		uid_t cur_uid = hammer2_to_unix_xid(&ip->meta.uid);
		gid_t cur_gid = hammer2_to_unix_xid(&ip->meta.gid);

		error = vop_helper_chmod(ap->a_vp, vap->va_mode, ap->a_cred,
					 cur_uid, cur_gid, &cur_mode);
		if (error == 0 && ip->meta.mode != cur_mode) {
			hammer2_inode_modify(ip);
			ip->meta.mode = cur_mode;
			ip->meta.ctime = ctime;
			kflags |= NOTE_ATTRIB;
		}
	}

	if (vap->va_mtime.tv_sec != VNOVAL) {
		hammer2_inode_modify(ip);
		ip->meta.mtime = hammer2_timespec_to_time(&vap->va_mtime);
		kflags |= NOTE_ATTRIB;
	}

	/*
	 * If a truncation occurred we must call inode_fsync() now in order
	 * to trim the related data chains, otherwise a later expansion can
	 * cause havoc.
	 *
	 * If an extend occurred that changed the DIRECTDATA state, we must
	 * call inode_fsync now in order to prepare the inode's indirect
	 * block table.
	 */
	if (ip->flags & HAMMER2_INODE_RESIZED)
		hammer2_inode_chain_sync(ip);

done:
	hammer2_inode_unlock(ip);
	hammer2_trans_done(ip->pmp);
	hammer2_knote(ip->vp, kflags);

static
int
hammer2_vop_readdir(struct vop_readdir_args *ap)
{
	hammer2_xop_readdir_t *xop;
	hammer2_blockref_t bref;

	saveoff = uio->uio_offset;

	/*
	 * Set up directory entry cookies if requested
	 */
	if (ap->a_ncookies) {
		ncookies = uio->uio_resid / 16 + 1;
		cookies = kmalloc(ncookies * sizeof(off_t), M_TEMP, M_WAITOK);
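
	/*
	 * Illustrative sizing check (assumption, not part of the build):
	 * each dirent returned to userland occupies at least 16 bytes of
	 * the uio buffer, so resid/16 + 1 cookie slots (before any cap)
	 * cannot be exhausted before the buffer itself is.  E.g. a 4096
	 * byte buffer holds at most 256 entries, and we allocate 257 slots.
	 */
#if 0
	KKASSERT(ncookies * 16 >= uio->uio_resid);
#endif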
	hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);

	/*
	 * Handle artificial entries.  To ensure that only positive 64 bit
	 * quantities are returned to userland we always strip off bit 63.
	 * The hash code is designed such that codes 0x0000-0x7FFF are not
	 * used, allowing us to use these codes for artificial entries.
	 *
	 * Entry 0 is used for '.' and entry 1 is used for '..'.  Do not
	 * allow '..' to cross the mount point into (e.g.) the super-root.
	 */
	if (saveoff == 0) {
		inum = ip->meta.inum & HAMMER2_DIRHASH_USERMSK;
		r = vop_write_dirent(&error, uio, inum, DT_DIR, 1, ".");
		if (r)
			goto done;
		if (cookies)
			cookies[cookie_index] = saveoff;
		++saveoff;
		++cookie_index;
		if (cookie_index == ncookies)
			goto done;
	}

	if (saveoff == 1) {
		/*
		 * Be careful with lockorder when accessing ".."
		 *
		 * (ip is the current dir. xip is the parent dir).
		 */
		inum = ip->meta.inum & HAMMER2_DIRHASH_USERMSK;
		if (ip != ip->pmp->iroot)
			inum = ip->meta.iparent & HAMMER2_DIRHASH_USERMSK;
		r = vop_write_dirent(&error, uio, inum, DT_DIR, 2, "..");
		if (r)
			goto done;
		if (cookies)
			cookies[cookie_index] = saveoff;
		++saveoff;
		++cookie_index;
		if (cookie_index == ncookies)
			goto done;
	}

	lkey = saveoff | HAMMER2_DIRHASH_VISIBLE;
	if (hammer2_debug & 0x0020)
		kprintf("readdir: lkey %016jx\n", lkey);
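
	/*
	 * Illustrative sketch (assumption, not part of the build): the
	 * offset<->key mapping used for the directory scan.  Or'ing in
	 * HAMMER2_DIRHASH_VISIBLE (bit 63) forms the lookup key; masking
	 * it back off keeps every offset returned to userland positive.
	 */
#if 0
	{
		hammer2_key_t key = saveoff | HAMMER2_DIRHASH_VISIBLE;
		off_t uoff = (off_t)(key & ~HAMMER2_DIRHASH_VISIBLE);

		KKASSERT(uoff >= 0);	/* bit 63 always stripped */
	}
#endif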
	/*
	 * Use XOP for cluster scan.
	 *
	 * parent is the inode cluster, already locked for us.  Don't
	 * double lock shared locks as this will screw up upgrades.
	 */
	xop = hammer2_xop_alloc(ip, 0);
	xop->lkey = lkey;
	hammer2_xop_start(&xop->head, hammer2_xop_readdir);

	for (;;) {
		const hammer2_inode_data_t *ripdata;

		error = hammer2_xop_collect(&xop->head, 0);
		if (error)
			break;
		if (cookie_index == ncookies)
			goto done;
		if (hammer2_debug & 0x0020)
			kprintf("cluster chain %p %p\n",
				xop->head.cluster.focus,
				(xop->head.cluster.focus ?
				 xop->head.cluster.focus->data : (void *)-1));
		hammer2_cluster_bref(&xop->head.cluster, &bref);

		if (bref.type == HAMMER2_BREF_TYPE_INODE) {
			ripdata =
			    &hammer2_cluster_rdata(&xop->head.cluster)->ipdata;
			dtype = hammer2_get_dtype(ripdata->meta.type);
			saveoff = bref.key & HAMMER2_DIRHASH_USERMSK;
			r = vop_write_dirent(&error, uio,
					     ripdata->meta.inum &
					      HAMMER2_DIRHASH_USERMSK,
					     dtype,
					     ripdata->meta.name_len,
					     ripdata->filename);
			if (r)
				break;
			if (cookies)
				cookies[cookie_index] = saveoff;
		} else if (bref.type == HAMMER2_BREF_TYPE_DIRENT) {
			dtype = hammer2_get_dtype(bref.embed.dirent.type);
			saveoff = bref.key & HAMMER2_DIRHASH_USERMSK;
			if (bref.embed.dirent.namlen <=
			    sizeof(bref.check.buf)) {
				dname = bref.check.buf;
			} else {
				dname =
				 hammer2_cluster_rdata(&xop->head.cluster)->buf;
			}
			r = vop_write_dirent(&error, uio,
					     bref.embed.dirent.inum,
					     dtype,
					     bref.embed.dirent.namlen,
					     dname);
			if (r)
				break;
			if (cookies)
				cookies[cookie_index] = saveoff;
		} else {
			/* XXX chain error */
			kprintf("bad chain type readdir %d\n", bref.type);
		}
	}
	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
	if (error == ENOENT) {
		error = 0;
		eofflag = 1;
		saveoff = (hammer2_key_t)-1;
	} else {
		saveoff = bref.key & HAMMER2_DIRHASH_USERMSK;
	}
done:
	hammer2_inode_unlock(ip);
	if (ap->a_eofflag)
		*ap->a_eofflag = eofflag;
	if (hammer2_debug & 0x0020)
		kprintf("readdir: done at %016jx\n", saveoff);
	uio->uio_offset = saveoff & ~HAMMER2_DIRHASH_VISIBLE;
	if (error && cookie_index == 0) {
		if (cookies) {
			kfree(cookies, M_TEMP);
			*ap->a_ncookies = 0;
			*ap->a_cookies = NULL;
		}
	} else {
		if (cookies) {
			*ap->a_ncookies = cookie_index;
			*ap->a_cookies = cookies;
		}
	}

/*
 * hammer2_vop_readlink { vp, uio, cred }
 */
static
int
hammer2_vop_readlink(struct vop_readlink_args *ap)
{
	if (vp->v_type != VLNK)
		return (EINVAL);

	error = hammer2_read_file(ip, ap->a_uio, 0);

static
int
hammer2_vop_read(struct vop_read_args *ap)
{
	/*
	 * Read operations supported on this vnode?
	 */
	if (vp->v_type != VREG)
		return (EINVAL);

	seqcount = ap->a_ioflag >> 16;
	bigread = (uio->uio_resid > 100 * 1024 * 1024);

	error = hammer2_read_file(ip, uio, seqcount);

static
int
hammer2_vop_write(struct vop_write_args *ap)
{
	/*
	 * Write operations supported on this vnode?
	 */
	if (vp->v_type != VREG)
		return (EINVAL);

	if (ip->pmp->ronly) {
		return (EROFS);
	}

	seqcount = ap->a_ioflag >> 16;

	/*
	 * Check resource limit
	 */
	if (uio->uio_resid > 0 && (td = uio->uio_td) != NULL && td->td_proc &&
	    uio->uio_offset + uio->uio_resid >
	     td->td_proc->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
		lwpsignal(td->td_proc, td->td_lwp, SIGXFSZ);
		return (EFBIG);
	}

	/*
	 * The transaction interlocks against flush initiations
	 * (note: but will run concurrently with the actual flush).
	 *
	 * To avoid deadlocking against the VM system, we must flag any
	 * transaction related to the buffer cache or other direct
	 * VM page manipulation.
	 */
	if (uio->uio_segflg == UIO_NOCOPY)
		hammer2_trans_init(ip->pmp, HAMMER2_TRANS_BUFCACHE);
	else
		hammer2_trans_init(ip->pmp, 0);
	error = hammer2_write_file(ip, uio, ap->a_ioflag, seqcount);
	hammer2_trans_done(ip->pmp);

/*
 * Perform read operations on a file or symlink given an UNLOCKED
 * inode and uio.
 *
 * The passed ip is not locked.
 */
static
int
hammer2_read_file(hammer2_inode_t *ip, struct uio *uio, int seqcount)
{
	/*
	 * UIO read loop.
	 *
	 * WARNING! Assumes that the kernel interlocks size changes at the
	 *	    vnode level.
	 */
	hammer2_mtx_sh(&ip->lock);
	hammer2_mtx_sh(&ip->truncate_lock);
	size = ip->meta.size;
	hammer2_mtx_unlock(&ip->lock);

	while (uio->uio_resid > 0 && uio->uio_offset < size) {
		lblksize = hammer2_calc_logical(ip, uio->uio_offset,
						&lbase, &leof);

		error = cluster_read(ip->vp, leof, lbase, lblksize,
				     uio->uio_resid, seqcount * MAXBSIZE,
				     &bp);

		if (uio->uio_segflg == UIO_NOCOPY) {
			bp = getblk(ip->vp, lbase, lblksize, GETBLK_BHEAVY, 0);
			if (bp->b_flags & B_CACHE) {
				if (bp->b_xio.xio_npages != 16)
					kprintf("NPAGES BAD\n");
				for (i = 0; i < bp->b_xio.xio_npages; ++i) {
					vm_page_t m;

					m = bp->b_xio.xio_pages[i];
					if (m == NULL || m->valid == 0) {
						kprintf("bp %016jx %016jx pg %d inv",
						kprintf("m->object %p/%p", m->object, ip->vp->v_object);
					}
				}
				kprintf("b_flags %08x, b_error %d\n", bp->b_flags, bp->b_error);
			}
		} else {
			error = bread(ip->vp, lbase, lblksize, &bp);
		}
		if (error)
			break;
		loff = (int)(uio->uio_offset - lbase);
		n = lblksize - loff;
		if (n > uio->uio_resid)
			n = uio->uio_resid;
		if (n > size - uio->uio_offset)
			n = (int)(size - uio->uio_offset);
		bp->b_flags |= B_AGE;
		uiomovebp(bp, (char *)bp->b_data + loff, n, uio);
		bqrelse(bp);
	}
	hammer2_mtx_unlock(&ip->truncate_lock);
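
/*
 * Illustrative sketch (assumption, not part of the build): the clamp
 * arithmetic the read loop above applies per logical block.  A block of
 * lblksize bytes starts at lbase; the copy starts loff bytes into it
 * and is limited by both the remaining uio and the file EOF.  The
 * helper name is hypothetical.
 */
#if 0
static int
example_read_span(hammer2_key_t lbase, int lblksize,
		  struct uio *uio, hammer2_key_t size)
{
	int loff = (int)(uio->uio_offset - lbase);
	int n = lblksize - loff;		/* rest of this block */

	if (n > uio->uio_resid)
		n = uio->uio_resid;		/* clamp to request */
	if (n > size - uio->uio_offset)
		n = (int)(size - uio->uio_offset); /* clamp to EOF */
	return n;	/* bytes to uiomove out of this buffer */
}
#endif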
/*
 * Write to the file represented by the inode via the logical buffer cache.
 * The inode may represent a regular file or a symlink.
 *
 * The inode must not be locked.
 */
static
int
hammer2_write_file(hammer2_inode_t *ip, struct uio *uio,
		   int ioflag, int seqcount)
{
	hammer2_key_t old_eof;
	hammer2_key_t new_eof;

	/*
	 * Setup if append
	 *
	 * WARNING! Assumes that the kernel interlocks size changes at the
	 *	    vnode level.
	 */
	hammer2_mtx_ex(&ip->lock);
	hammer2_mtx_sh(&ip->truncate_lock);
	if (ioflag & IO_APPEND)
		uio->uio_offset = ip->meta.size;
	old_eof = ip->meta.size;

	/*
	 * Extend the file if necessary.  If the write fails at some point
	 * we will truncate it back down to cover as much as we were able
	 * to write.
	 *
	 * Doing this now makes it easier to calculate buffer sizes in
	 * the loop.
	 */
	if (uio->uio_offset + uio->uio_resid > old_eof) {
		new_eof = uio->uio_offset + uio->uio_resid;
		modified = 1;
		hammer2_extend_file(ip, new_eof);
		kflags |= NOTE_EXTEND;
	} else {
		new_eof = old_eof;
	}
	hammer2_mtx_unlock(&ip->lock);

	/*
	 * UIO write loop
	 */
	while (uio->uio_resid > 0) {
		/*
		 * Don't allow the buffer build to blow out the buffer
		 * cache.
		 */
		if ((ioflag & IO_RECURSE) == 0)
			bwillwrite(HAMMER2_PBUFSIZE);

		/*
		 * This nominally tells us how much we can cluster and
		 * what the logical buffer size needs to be.  Currently
		 * we don't try to cluster the write and just handle one
		 * block at a time.
		 */
		lblksize = hammer2_calc_logical(ip, uio->uio_offset,
						&lbase, NULL);
		loff = (int)(uio->uio_offset - lbase);

		KKASSERT(lblksize <= 65536);

		/*
		 * Calculate bytes to copy this transfer and whether the
		 * copy completely covers the buffer or not.
		 */
		trivial = 0;
		n = lblksize - loff;
		if (n > uio->uio_resid) {
			n = uio->uio_resid;
			if (loff == lbase && uio->uio_offset + n == new_eof)
				trivial = 1;
			endofblk = 0;
		} else {
			if (loff == 0)
				trivial = 1;
			endofblk = 1;
		}
		if (lbase >= new_eof)
			trivial = 1;

		/*
		 * Get the buffer
		 */
		if (uio->uio_segflg == UIO_NOCOPY) {
			/*
			 * Issuing a write with the same data backing the
			 * buffer.  Instantiate the buffer to collect the
			 * backing vm pages, then read-in any missing bits.
			 *
			 * This case is used by vop_stdputpages().
			 */
			bp = getblk(ip->vp, lbase, lblksize, GETBLK_BHEAVY, 0);
			if ((bp->b_flags & B_CACHE) == 0) {
				bqrelse(bp);
				error = bread(ip->vp, lbase, lblksize, &bp);
			}
		} else if (trivial) {
			/*
			 * Even though we are entirely overwriting the buffer
			 * we may still have to zero it out to avoid a
			 * mmap/write visibility issue.
			 */
			bp = getblk(ip->vp, lbase, lblksize, GETBLK_BHEAVY, 0);
			if ((bp->b_flags & B_CACHE) == 0)
				vfs_bio_clrbuf(bp);
		} else {
			/*
			 * Partial overwrite, read in any missing bits then
			 * replace the portion being written.
			 *
			 * (The strategy code will detect zero-fill physical
			 * blocks for this case).
			 */
			error = bread(ip->vp, lbase, lblksize, &bp);
			if (error == 0)
				bheavy(bp);
		}

		/*
		 * Ok, copy the data in
		 */
		error = uiomovebp(bp, bp->b_data + loff, n, uio);
		kflags |= NOTE_WRITE;

		if (error) {
			brelse(bp);
			break;
		}

		/*
		 * WARNING: Pageout daemon will issue UIO_NOCOPY writes
		 *	    with IO_SYNC or IO_ASYNC set.  These writes
		 *	    must be handled as the pageout daemon expects.
		 *
		 * NOTE!    H2 relies on cluster_write() here because it
		 *	    cannot preallocate disk blocks at the logical
		 *	    level due to not knowing what the compression
		 *	    size will be at this time.
		 *
		 *	    We must use cluster_write() here and we depend
		 *	    on the write-behind feature to flush buffers
		 *	    appropriately.  If we let the buffer daemons do
		 *	    it the block allocations will be all over the
		 *	    place.
		 */
		if (ioflag & IO_SYNC) {
			bwrite(bp);
		} else if ((ioflag & IO_DIRECT) && endofblk) {
			bawrite(bp);
		} else if (ioflag & IO_ASYNC) {
			bawrite(bp);
		} else if (ip->vp->v_mount->mnt_flag & MNT_NOCLUSTERW) {
			bdwrite(bp);
		} else {
#if 0
			bp->b_flags |= B_CLUSTEROK;
			cluster_write(bp, new_eof, lblksize, seqcount);
#else
			bp->b_flags |= B_CLUSTEROK;
			cluster_awrite(bp);
#endif
		}
	}

	/*
	 * Cleanup.  If we extended the file EOF but failed to write it
	 * through, the entire write is a failure and we have to back up.
	 */
	if (error && new_eof != old_eof) {
		hammer2_mtx_unlock(&ip->truncate_lock);
		hammer2_mtx_ex(&ip->lock);
		hammer2_mtx_ex(&ip->truncate_lock);
		hammer2_truncate_file(ip, old_eof);
		if (ip->flags & HAMMER2_INODE_MODIFIED)
			hammer2_inode_chain_sync(ip);
		hammer2_mtx_unlock(&ip->lock);
	} else if (modified) {
		hammer2_mtx_ex(&ip->lock);
		hammer2_inode_modify(ip);
		hammer2_update_time(&ip->meta.mtime);
		if (ip->flags & HAMMER2_INODE_MODIFIED)
			hammer2_inode_chain_sync(ip);
		hammer2_mtx_unlock(&ip->lock);
		hammer2_knote(ip->vp, kflags);
	}
	hammer2_trans_assert_strategy(ip->pmp);
	hammer2_mtx_unlock(&ip->truncate_lock);
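
/*
 * Illustrative sketch (assumption, not part of the build): how the
 * write loop above classifies each logical buffer.  "trivial" means
 * the copy fully covers the buffer (or covers it out to the new EOF),
 * so no read-in is needed before overwriting; "endofblk" means the
 * copy runs to the end of the block.  The simplified helper below is
 * hypothetical and omits the new-EOF special case handled inline.
 */
#if 0
static void
example_classify_write(int loff, int n, int lblksize,
		       int *trivialp, int *endofblkp)
{
	if (loff == 0 && n == lblksize) {
		*trivialp = 1;		/* full-buffer overwrite */
		*endofblkp = 1;
	} else if (loff + n == lblksize) {
		*trivialp = 0;
		*endofblkp = 1;		/* write ends on block boundary */
	} else {
		*trivialp = 0;		/* partial overwrite, read-in first */
		*endofblkp = 0;
	}
}
#endif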
/*
 * Truncate the size of a file.  The inode must not be locked.
 *
 * We must unconditionally set HAMMER2_INODE_RESIZED to properly
 * ensure that any on-media data beyond the new file EOF has been destroyed.
 *
 * WARNING: nvtruncbuf() can only be safely called without the inode lock
 *	    held due to the way our write thread works.  If the truncation
 *	    occurs in the middle of a buffer, nvtruncbuf() is responsible
 *	    for dirtying that buffer and zeroing out trailing bytes.
 *
 * WARNING! Assumes that the kernel interlocks size changes at the
 *	    vnode level.
 *
 * WARNING! Caller assumes responsibility for removing dead blocks
 *	    if INODE_RESIZED is set.
 */
static
void
hammer2_truncate_file(hammer2_inode_t *ip, hammer2_key_t nsize)
{
	hammer2_key_t lbase;
	int nblksize;

	hammer2_mtx_unlock(&ip->lock);
	if (ip->vp) {
		nblksize = hammer2_calc_logical(ip, nsize, &lbase, NULL);
		nvtruncbuf(ip->vp, nsize,
			   nblksize, (int)nsize & (nblksize - 1),
			   0);
	}
	hammer2_mtx_ex(&ip->lock);
	KKASSERT((ip->flags & HAMMER2_INODE_RESIZED) == 0);
	ip->osize = ip->meta.size;
	ip->meta.size = nsize;
	atomic_set_int(&ip->flags, HAMMER2_INODE_RESIZED);
	hammer2_inode_modify(ip);
}

/*
 * Extend the size of a file.  The inode must not be locked.
 *
 * Even though the file size is changing, we do not have to set the
 * INODE_RESIZED bit unless the file size crosses the EMBEDDED_BYTES
 * boundary.  When this occurs a hammer2_inode_chain_sync() is required
 * to prepare the inode cluster's indirect block table, otherwise
 * async execution of the strategy code will implode on us.
 *
 * WARNING! Assumes that the kernel interlocks size changes at the
 *	    vnode level.
 *
 * WARNING! Caller assumes responsibility for transitioning out
 *	    of the inode DIRECTDATA mode if INODE_RESIZED is set.
 */
static
void
hammer2_extend_file(hammer2_inode_t *ip, hammer2_key_t nsize)
{
	hammer2_key_t lbase;
	hammer2_key_t osize;
	int oblksize;
	int nblksize;

	KKASSERT((ip->flags & HAMMER2_INODE_RESIZED) == 0);
	hammer2_inode_modify(ip);
	osize = ip->meta.size;
	ip->meta.size = nsize;

	if (osize <= HAMMER2_EMBEDDED_BYTES && nsize > HAMMER2_EMBEDDED_BYTES) {
		atomic_set_int(&ip->flags, HAMMER2_INODE_RESIZED);
		hammer2_inode_chain_sync(ip);
	}

	hammer2_mtx_unlock(&ip->lock);
	if (ip->vp) {
		oblksize = hammer2_calc_logical(ip, osize, &lbase, NULL);
		nblksize = hammer2_calc_logical(ip, nsize, &lbase, NULL);
		nvextendbuf(ip->vp, osize, nsize, oblksize, nblksize,
			    -1, -1, 0);
	}
	hammer2_mtx_ex(&ip->lock);
}
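
/*
 * Illustrative sketch (assumption, not part of the build): the only
 * extension that must set INODE_RESIZED and synchronize the chain
 * immediately is one crossing HAMMER2_EMBEDDED_BYTES, the point where
 * file data leaves the inode's embedded area and the indirect block
 * table must exist before the strategy code can run.  The helper name
 * is hypothetical.
 */
#if 0
static __inline int
example_extend_needs_sync(hammer2_key_t osize, hammer2_key_t nsize)
{
	return (osize <= HAMMER2_EMBEDDED_BYTES &&
		nsize > HAMMER2_EMBEDDED_BYTES);
}
#endif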
static
int
hammer2_vop_nresolve(struct vop_nresolve_args *ap)
{
	hammer2_xop_nresolve_t *xop;
	hammer2_inode_t *ip;
	hammer2_inode_t *dip;
	struct namecache *ncp;

	dip = VTOI(ap->a_dvp);
	xop = hammer2_xop_alloc(dip, 0);

	ncp = ap->a_nch->ncp;
	hammer2_xop_setname(&xop->head, ncp->nc_name, ncp->nc_nlen);

	/*
	 * Note: In DragonFly the kernel handles '.' and '..'.
	 */
	hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
	hammer2_xop_start(&xop->head, hammer2_xop_nresolve);

	error = hammer2_xop_collect(&xop->head, 0);
	if (error) {
		ip = NULL;
	} else {
		ip = hammer2_inode_get(dip->pmp, dip, &xop->head.cluster, -1);
	}
	hammer2_inode_unlock(dip);

	/*
	 * Acquire the related vnode
	 *
	 * NOTE: For error processing, only ENOENT resolves the namecache
	 *	 entry to NULL, otherwise we just return the error and
	 *	 leave the namecache unresolved.
	 *
	 * NOTE: multiple hammer2_inode structures can be aliased to the
	 *	 same chain element, for example for hardlinks.  This
	 *	 use case does not 'reattach' inode associations that
	 *	 might already exist, but always allocates a new one.
	 *
	 * WARNING: inode structure is locked exclusively via inode_get
	 *	    but chain was locked shared.  inode_unlock()
	 *	    will handle it properly.
	 */
	if (ip) {
		vp = hammer2_igetv(ip, &error);
		if (error == 0) {
			vn_unlock(vp);
			cache_setvp(ap->a_nch, vp);
		} else if (error == ENOENT) {
			cache_setvp(ap->a_nch, NULL);
		}
		hammer2_inode_unlock(ip);

		/*
		 * The vp should not be released until after we've disposed
		 * of our locks, because it might cause vop_inactive() to
		 * be called.
		 */
		if (vp)
			vrele(vp);
	} else {
		error = ENOENT;
		cache_setvp(ap->a_nch, NULL);
	}
	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
	KASSERT(error || ap->a_nch->ncp->nc_vp != NULL,
		("resolve error %d/%p ap %p\n",
		 error, ap->a_nch->ncp->nc_vp, ap));

static
int
hammer2_vop_nlookupdotdot(struct vop_nlookupdotdot_args *ap)
{
	hammer2_inode_t *dip;

	dip = VTOI(ap->a_dvp);
	inum = dip->meta.iparent;
	*ap->a_vpp = NULL;

	if (inum) {
		error = hammer2_vfs_vget(ap->a_dvp->v_mount, NULL,
					 inum, ap->a_vpp);
	} else {
		error = ENOENT;
	}

static
int
hammer2_vop_nmkdir(struct vop_nmkdir_args *ap)
{
	hammer2_inode_t *dip;
	hammer2_inode_t *nip;
	struct namecache *ncp;
	const uint8_t *name;

	dip = VTOI(ap->a_dvp);
	if (dip->pmp->ronly)
		return (EROFS);

	ncp = ap->a_nch->ncp;
	name = ncp->nc_name;
	name_len = ncp->nc_nlen;

	hammer2_pfs_memory_wait(dip->pmp);
	hammer2_trans_init(dip->pmp, 0);

	inum = hammer2_trans_newinum(dip->pmp);

	/*
	 * Create the actual inode as a hidden file in the iroot, then
	 * create the directory entry.  The creation of the actual inode
	 * sets its nlinks to 1 which is the value we desire.
	 */
	nip = hammer2_inode_create(dip->pmp->iroot, dip, ap->a_vap, ap->a_cred,

	error = hammer2_dirent_create(dip, name, name_len,
				      nip->meta.inum, nip->meta.type);

	if (error) {
		KKASSERT(nip == NULL);
		*ap->a_vpp = NULL;
	} else {
		*ap->a_vpp = hammer2_igetv(nip, &error);
		hammer2_inode_unlock(nip);
	}

	/*
	 * Update dip's mtime
	 */
	if (error == 0) {
		uint64_t mtime;

		hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
		hammer2_update_time(&mtime);
		hammer2_inode_modify(dip);
		dip->meta.mtime = mtime;
		hammer2_inode_unlock(dip);
	}

	hammer2_trans_done(dip->pmp);

	if (error == 0) {
		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, *ap->a_vpp);
		hammer2_knote(ap->a_dvp, NOTE_WRITE | NOTE_LINK);
	}

static
int
hammer2_vop_open(struct vop_open_args *ap)
{
	return vop_stdopen(ap);
}

/*
 * hammer2_vop_advlock { vp, id, op, fl, flags }
 */
static
int
hammer2_vop_advlock(struct vop_advlock_args *ap)
{
	hammer2_inode_t *ip = VTOI(ap->a_vp);

	size = ip->meta.size;
	return (lf_advlock(ap, &ip->advlock, size));
}

static
int
hammer2_vop_close(struct vop_close_args *ap)
{
	return vop_stdclose(ap);
}

/*
 * hammer2_vop_nlink { nch, dvp, vp, cred }
 *
 * Create a hardlink from (vp) to {dvp, nch}.
 */
static
int
hammer2_vop_nlink(struct vop_nlink_args *ap)
{
	hammer2_inode_t *tdip;	/* target directory to create link in */
	hammer2_inode_t *ip;	/* inode we are hardlinking to */
	struct namecache *ncp;
	const uint8_t *name;

	if (ap->a_dvp->v_mount != ap->a_vp->v_mount)
		return (EXDEV);

	tdip = VTOI(ap->a_dvp);
	if (tdip->pmp->ronly)
		return (EROFS);

	ncp = ap->a_nch->ncp;
	name = ncp->nc_name;
	name_len = ncp->nc_nlen;

	/*
	 * ip represents the file being hardlinked.  The file could be a
	 * normal file or a hardlink target if it has already been hardlinked.
	 * (with the new semantics, it will almost always be a hardlink
	 * target).
	 *
	 * Bump nlinks and potentially also create or move the hardlink
	 * target in the parent directory common to (ip) and (tdip).  The
	 * consolidation code can modify ip->cluster.  The returned cluster
	 * is locked.
	 */
	ip = VTOI(ap->a_vp);
	KASSERT(ip->pmp, ("ip->pmp is NULL %p %p", ip, ip->pmp));
	hammer2_pfs_memory_wait(ip->pmp);
	hammer2_trans_init(ip->pmp, 0);

	/*
	 * Target should be an indexed inode or there's no way we will ever
	 * be able to find it!
	 */
	KKASSERT((ip->meta.name_key & HAMMER2_DIRHASH_VISIBLE) == 0);

	/*
	 * Can return NULL and error == EXDEV if the common parent
	 * crosses a directory with the xlink flag set.
	 */
	hammer2_inode_lock(tdip, 0);
	hammer2_inode_lock(ip, 0);

	/*
	 * Create the directory entry and bump nlinks.
	 */
	if (error == 0) {
		error = hammer2_dirent_create(tdip, name, name_len,
					      ip->meta.inum, ip->meta.type);
		hammer2_inode_modify(ip);
		++ip->meta.nlinks;
	}
	if (error == 0) {
		/*
		 * Update dip's mtime
		 */
		uint64_t mtime;

		hammer2_update_time(&mtime);
		hammer2_inode_modify(tdip);
		tdip->meta.mtime = mtime;

		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, ap->a_vp);
	}
	hammer2_inode_unlock(ip);
	hammer2_inode_unlock(tdip);

	hammer2_trans_done(ip->pmp);
	hammer2_knote(ap->a_vp, NOTE_LINK);
	hammer2_knote(ap->a_dvp, NOTE_WRITE);

/*
 * hammer2_vop_ncreate { nch, dvp, vpp, cred, vap }
 *
 * The operating system has already ensured that the directory entry
 * does not exist and done all appropriate namespace locking.
 */
static
int
hammer2_vop_ncreate(struct vop_ncreate_args *ap)
{
	hammer2_inode_t *dip;
	hammer2_inode_t *nip;
	struct namecache *ncp;
	const uint8_t *name;

	dip = VTOI(ap->a_dvp);
	if (dip->pmp->ronly)
		return (EROFS);

	ncp = ap->a_nch->ncp;
	name = ncp->nc_name;
	name_len = ncp->nc_nlen;
	hammer2_pfs_memory_wait(dip->pmp);
	hammer2_trans_init(dip->pmp, 0);

	inum = hammer2_trans_newinum(dip->pmp);

	/*
	 * Create the actual inode as a hidden file in the iroot, then
	 * create the directory entry.  The creation of the actual inode
	 * sets its nlinks to 1 which is the value we desire.
	 */
	nip = hammer2_inode_create(dip->pmp->iroot, dip, ap->a_vap, ap->a_cred,

	error = hammer2_dirent_create(dip, name, name_len,
				      nip->meta.inum, nip->meta.type);

	if (error) {
		KKASSERT(nip == NULL);
		*ap->a_vpp = NULL;
	} else {
		*ap->a_vpp = hammer2_igetv(nip, &error);
		hammer2_inode_unlock(nip);
	}

	/*
	 * Update dip's mtime
	 */
	if (error == 0) {
		uint64_t mtime;

		hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
		hammer2_update_time(&mtime);
		hammer2_inode_modify(dip);
		dip->meta.mtime = mtime;
		hammer2_inode_unlock(dip);
	}

	hammer2_trans_done(dip->pmp);

	if (error == 0) {
		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, *ap->a_vpp);
		hammer2_knote(ap->a_dvp, NOTE_WRITE);
	}

/*
 * Make a device node (typically a fifo)
 */
static
int
hammer2_vop_nmknod(struct vop_nmknod_args *ap)
{
	hammer2_inode_t *dip;
	hammer2_inode_t *nip;
	struct namecache *ncp;
	const uint8_t *name;

	dip = VTOI(ap->a_dvp);
	if (dip->pmp->ronly)
		return (EROFS);

	ncp = ap->a_nch->ncp;
	name = ncp->nc_name;
	name_len = ncp->nc_nlen;
	hammer2_pfs_memory_wait(dip->pmp);
	hammer2_trans_init(dip->pmp, 0);

	/*
	 * Create the device inode and then create the directory entry.
	 */
	inum = hammer2_trans_newinum(dip->pmp);
	nip = hammer2_inode_create(dip->pmp->iroot, dip, ap->a_vap, ap->a_cred,

	error = hammer2_dirent_create(dip, name, name_len,
				      nip->meta.inum, nip->meta.type);

	if (error) {
		KKASSERT(nip == NULL);
		*ap->a_vpp = NULL;
	} else {
		*ap->a_vpp = hammer2_igetv(nip, &error);
		hammer2_inode_unlock(nip);
	}

	/*
	 * Update dip's mtime
	 */
	if (error == 0) {
		uint64_t mtime;

		hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
		hammer2_update_time(&mtime);
		hammer2_inode_modify(dip);
		dip->meta.mtime = mtime;
		hammer2_inode_unlock(dip);
	}

	hammer2_trans_done(dip->pmp);

	if (error == 0) {
		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, *ap->a_vpp);
		hammer2_knote(ap->a_dvp, NOTE_WRITE);
	}

/*
 * hammer2_vop_nsymlink { nch, dvp, vpp, cred, vap, target }
 */
static
int
hammer2_vop_nsymlink(struct vop_nsymlink_args *ap)
{
	hammer2_inode_t *dip;
	hammer2_inode_t *nip;
	struct namecache *ncp;
	const uint8_t *name;

	dip = VTOI(ap->a_dvp);
	if (dip->pmp->ronly)
		return (EROFS);

	ncp = ap->a_nch->ncp;
	name = ncp->nc_name;
	name_len = ncp->nc_nlen;
	hammer2_pfs_memory_wait(dip->pmp);
	hammer2_trans_init(dip->pmp, 0);

	ap->a_vap->va_type = VLNK;	/* enforce type */

	/*
	 * Create the softlink as an inode and then create the directory
	 * entry.
	 */
	inum = hammer2_trans_newinum(dip->pmp);

	nip = hammer2_inode_create(dip->pmp->iroot, dip, ap->a_vap, ap->a_cred,

	error = hammer2_dirent_create(dip, name, name_len,
				      nip->meta.inum, nip->meta.type);

	if (error) {
		KKASSERT(nip == NULL);
		*ap->a_vpp = NULL;
		hammer2_trans_done(dip->pmp);
		return error;
	}
	*ap->a_vpp = hammer2_igetv(nip, &error);

	/*
	 * Build the softlink (~like file data) and finalize the namecache.
	 */
	if (ap->a_target) {
		bytes = strlen(ap->a_target);

		hammer2_inode_unlock(nip);
		bzero(&auio, sizeof(auio));
		bzero(&aiov, sizeof(aiov));
		auio.uio_iov = &aiov;
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_rw = UIO_WRITE;
		auio.uio_resid = bytes;
		auio.uio_iovcnt = 1;
		auio.uio_td = curthread;
		aiov.iov_base = ap->a_target;
		aiov.iov_len = bytes;
		error = hammer2_write_file(nip, &auio, IO_APPEND, 0);
		/* XXX handle error */
		error = 0;
	} else {
		hammer2_inode_unlock(nip);
	}

	/*
	 * Update dip's mtime
	 */
	if (error == 0) {
		uint64_t mtime;

		hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
		hammer2_update_time(&mtime);
		hammer2_inode_modify(dip);
		dip->meta.mtime = mtime;
		hammer2_inode_unlock(dip);
	}

	hammer2_trans_done(dip->pmp);

	/*
	 * Finalize namecache
	 */
	cache_setunresolved(ap->a_nch);
	cache_setvp(ap->a_nch, *ap->a_vpp);
	hammer2_knote(ap->a_dvp, NOTE_WRITE);

/*
 * hammer2_vop_nremove { nch, dvp, cred }
 */
static
int
hammer2_vop_nremove(struct vop_nremove_args *ap)
{
	hammer2_xop_unlink_t *xop;
	hammer2_inode_t *dip;
	hammer2_inode_t *ip;
	struct namecache *ncp;

	dip = VTOI(ap->a_dvp);
	if (dip->pmp->ronly)
		return (EROFS);

	ncp = ap->a_nch->ncp;

	hammer2_pfs_memory_wait(dip->pmp);
	hammer2_trans_init(dip->pmp, 0);
	hammer2_inode_lock(dip, 0);

	/*
	 * The unlink XOP unlinks the path from the directory and
	 * locates and returns the cluster associated with the real inode.
	 * We have to handle nlinks here on the frontend.
	 */
	xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
	hammer2_xop_setname(&xop->head, ncp->nc_name, ncp->nc_nlen);

	/*
	 * The namecache entry is locked so nobody can use this namespace.
	 * Calculate isopen to determine if this namespace has an open vp
	 * associated with it and resolve the vp only if it does.
	 *
	 * We try to avoid resolving the vnode if nobody has it open, but
	 * note that the test is via this namespace only.
	 */
	isopen = cache_isopen(ap->a_nch);
	xop->isdir = 0;
	xop->dopermanent = 0;
	hammer2_xop_start(&xop->head, hammer2_xop_unlink);

	/*
	 * Collect the real inode and adjust nlinks, destroy the real
	 * inode if nlinks transitions to 0 and it was the real inode
	 * (else it has already been removed).
	 */
	error = hammer2_xop_collect(&xop->head, 0);
	hammer2_inode_unlock(dip);

	if (error == 0) {
		ip = hammer2_inode_get(dip->pmp, dip, &xop->head.cluster, -1);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
		if (ip) {
			hammer2_inode_unlink_finisher(ip, isopen);
			hammer2_inode_unlock(ip);
		}
	} else {
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
	}

	/*
	 * Update dip's mtime
	 */
	if (error == 0) {
		uint64_t mtime;

		hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
		hammer2_update_time(&mtime);
		hammer2_inode_modify(dip);
		dip->meta.mtime = mtime;
		hammer2_inode_unlock(dip);
	}

	hammer2_inode_run_sideq(dip->pmp);
	hammer2_trans_done(dip->pmp);
	if (error == 0) {
		cache_unlink(ap->a_nch);
		hammer2_knote(ap->a_dvp, NOTE_WRITE);
	}

/*
 * hammer2_vop_nrmdir { nch, dvp, cred }
 */
static
int
hammer2_vop_nrmdir(struct vop_nrmdir_args *ap)
{
	hammer2_xop_unlink_t *xop;
	hammer2_inode_t *dip;
	hammer2_inode_t *ip;
	struct namecache *ncp;

	dip = VTOI(ap->a_dvp);
	if (dip->pmp->ronly)
		return (EROFS);

	hammer2_pfs_memory_wait(dip->pmp);
	hammer2_trans_init(dip->pmp, 0);
	hammer2_inode_lock(dip, 0);

	xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);

	ncp = ap->a_nch->ncp;
	hammer2_xop_setname(&xop->head, ncp->nc_name, ncp->nc_nlen);
	isopen = cache_isopen(ap->a_nch);
	xop->isdir = 1;
	xop->dopermanent = 0;
	hammer2_xop_start(&xop->head, hammer2_xop_unlink);

	/*
	 * Collect the real inode and adjust nlinks, destroy the real
	 * inode if nlinks transitions to 0 and it was the real inode
	 * (else it has already been removed).
	 */
	error = hammer2_xop_collect(&xop->head, 0);
	hammer2_inode_unlock(dip);

	if (error == 0) {
		ip = hammer2_inode_get(dip->pmp, dip, &xop->head.cluster, -1);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
		if (ip) {
			hammer2_inode_unlink_finisher(ip, isopen);
			hammer2_inode_unlock(ip);
		}
	} else {
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
	}

	/*
	 * Update dip's mtime
	 */
	if (error == 0) {
		uint64_t mtime;

		hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
		hammer2_update_time(&mtime);
		hammer2_inode_modify(dip);
		dip->meta.mtime = mtime;
		hammer2_inode_unlock(dip);
	}

	hammer2_inode_run_sideq(dip->pmp);
	hammer2_trans_done(dip->pmp);
	if (error == 0) {
		cache_unlink(ap->a_nch);
		hammer2_knote(ap->a_dvp, NOTE_WRITE | NOTE_LINK);
	}

/*
 * hammer2_vop_nrename { fnch, tnch, fdvp, tdvp, cred }
 */
static
int
hammer2_vop_nrename(struct vop_nrename_args *ap)
{
	struct namecache *fncp;
	struct namecache *tncp;
	hammer2_inode_t *fdip;	/* source directory */
	hammer2_inode_t *tdip;	/* target directory */
	hammer2_inode_t *ip;	/* file being renamed */
	hammer2_inode_t *tip;	/* replaced target during rename or NULL */
	const uint8_t *fname;
	const uint8_t *tname;

	if (ap->a_fdvp->v_mount != ap->a_tdvp->v_mount)
		return (EXDEV);
	if (ap->a_fdvp->v_mount != ap->a_fnch->ncp->nc_vp->v_mount)
		return (EXDEV);

	fdip = VTOI(ap->a_fdvp);	/* source directory */
	tdip = VTOI(ap->a_tdvp);	/* target directory */

	if (fdip->pmp->ronly)
		return (EROFS);

	fncp = ap->a_fnch->ncp;		/* entry name in source */
	fname = fncp->nc_name;
	fname_len = fncp->nc_nlen;

	tncp = ap->a_tnch->ncp;		/* entry name in target */
	tname = tncp->nc_name;
	tname_len = tncp->nc_nlen;

	hammer2_pfs_memory_wait(tdip->pmp);
	hammer2_trans_init(tdip->pmp, 0);

	ip = VTOI(fncp->nc_vp);
	hammer2_inode_ref(ip);			/* extra ref */

	/*
	 * Lookup the target name to determine if a directory entry
	 * is being overwritten.  We only hold related inode locks
	 * temporarily, the operating system is expected to protect
	 * against rename races.
	 */
	tip = tncp->nc_vp ? VTOI(tncp->nc_vp) : NULL;
	if (tip)
		hammer2_inode_ref(tip);		/* extra ref */

	/*
	 * Can return NULL and error == EXDEV if the common parent
	 * crosses a directory with the xlink flag set.
	 *
	 * For now try to avoid deadlocks with a simple pointer address
	 * test.  (tip) can be NULL.
	 */
	if (fdip <= tdip) {
		hammer2_inode_lock(fdip, 0);
		hammer2_inode_lock(tdip, 0);
	} else {
		hammer2_inode_lock(tdip, 0);
		hammer2_inode_lock(fdip, 0);
	}
	if (tip) {
		if (ip <= tip) {
			hammer2_inode_lock(ip, 0);
			hammer2_inode_lock(tip, 0);
		} else {
			hammer2_inode_lock(tip, 0);
			hammer2_inode_lock(ip, 0);
		}
	} else {
		hammer2_inode_lock(ip, 0);
	}

#if 0
	/*
	 * Delete the target namespace.
	 *
	 * REMOVED - NOW FOLDED INTO XOP_NRENAME OPERATION
	 */
	{
		hammer2_xop_unlink_t *xop2;
		hammer2_inode_t *tip;
		int isopen;

		/*
		 * The unlink XOP unlinks the path from the directory and
		 * locates and returns the cluster associated with the real
		 * inode.  We have to handle nlinks here on the frontend.
		 */
		xop2 = hammer2_xop_alloc(tdip, HAMMER2_XOP_MODIFYING);
		hammer2_xop_setname(&xop2->head, tname, tname_len);
		isopen = cache_isopen(ap->a_tnch);
		xop2->isdir = -1;
		xop2->dopermanent = 0;
		hammer2_xop_start(&xop2->head, hammer2_xop_unlink);

		/*
		 * Collect the real inode and adjust nlinks, destroy the real
		 * inode if nlinks transitions to 0 and it was the real inode
		 * (else it has already been removed).
		 */
		tnch_error = hammer2_xop_collect(&xop2->head, 0);
		/* hammer2_inode_unlock(tdip); */

		if (tnch_error == 0) {
			tip = hammer2_inode_get(tdip->pmp, NULL,
						&xop2->head.cluster, -1);
			hammer2_xop_retire(&xop2->head, HAMMER2_XOPMASK_VOP);
			if (tip) {
				hammer2_inode_unlink_finisher(tip, isopen);
				hammer2_inode_unlock(tip);
			}
		} else {
			hammer2_xop_retire(&xop2->head, HAMMER2_XOPMASK_VOP);
		}
		/* hammer2_inode_lock(tdip, 0); */

		if (tnch_error && tnch_error != ENOENT) {
			error = tnch_error;
			goto done2;
		}
		tnch_error = 0;
	}
#endif

	/*
	 * Resolve the collision space for (tdip, tname, tname_len)
	 *
	 * tdip must be held exclusively locked to prevent races since
	 * multiple filenames can end up in the same collision space.
	 */
	{
		hammer2_xop_scanlhc_t *sxop;
		hammer2_tid_t lhcbase;

		tlhc = hammer2_dirhash(tname, tname_len);
		lhcbase = tlhc;
		sxop = hammer2_xop_alloc(tdip, HAMMER2_XOP_MODIFYING);
		sxop->lhc = tlhc;
		hammer2_xop_start(&sxop->head, hammer2_xop_scanlhc);
		while ((error = hammer2_xop_collect(&sxop->head, 0)) == 0) {
			if (tlhc != sxop->head.cluster.focus->bref.key)
				break;
			++tlhc;
		}
		hammer2_xop_retire(&sxop->head, HAMMER2_XOPMASK_VOP);

		if (error) {
			if (error != ENOENT)
				goto done2;
			++tlhc;
			error = 0;
		}
		if ((lhcbase ^ tlhc) & ~HAMMER2_DIRHASH_LOMASK) {
			error = ENOSPC;
			goto done2;
		}
	}
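
	/*
	 * Illustrative sketch (assumption, not part of the build):
	 * directory keys share the filename hash in their upper bits and
	 * resolve collisions by incrementing through the low bits, so a
	 * probed key stays inside the collision space exactly while the
	 * xor against the base key has no bits outside
	 * HAMMER2_DIRHASH_LOMASK; the ENOSPC test above is the negation.
	 */
#if 0
	{
		hammer2_tid_t base = hammer2_dirhash(tname, tname_len);
		hammer2_tid_t last = base | HAMMER2_DIRHASH_LOMASK;

		/* every key from base through last is in-space */
		KKASSERT(((base ^ last) & ~HAMMER2_DIRHASH_LOMASK) == 0);
	}
#endif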
	/*
	 * Ready to go, issue the rename to the backend.  Note that meta-data
	 * updates to the related inodes occur separately from the rename
	 * operation.
	 *
	 * NOTE: While it is not necessary to update ip->meta.name*, doing
	 *	 so aids catastrophic recovery and debugging.
	 */
	if (error == 0) {
		hammer2_xop_nrename_t *xop4;

		xop4 = hammer2_xop_alloc(fdip, HAMMER2_XOP_MODIFYING);
		xop4->lhc = tlhc;
		xop4->ip_key = ip->meta.name_key;
		hammer2_xop_setip2(&xop4->head, ip);
		hammer2_xop_setip3(&xop4->head, tdip);
		hammer2_xop_setname(&xop4->head, fname, fname_len);
		hammer2_xop_setname2(&xop4->head, tname, tname_len);
		hammer2_xop_start(&xop4->head, hammer2_xop_nrename);

		error = hammer2_xop_collect(&xop4->head, 0);
		hammer2_xop_retire(&xop4->head, HAMMER2_XOPMASK_VOP);

		if (error == ENOENT)
			error = 0;

		/*
		 * Update inode meta-data.
		 *
		 * WARNING! The in-memory inode (ip) structure does not
		 *	    maintain a copy of the inode's filename buffer.
		 */
		if (error == 0 &&
		    (ip->meta.name_key & HAMMER2_DIRHASH_VISIBLE)) {
			hammer2_inode_modify(ip);
			ip->meta.name_len = tname_len;
			ip->meta.name_key = tlhc;
		}
		if (error == 0) {
			hammer2_inode_modify(ip);
			ip->meta.iparent = tdip->meta.inum;
		}
		update_fdip = 1;
		update_tdip = 1;
	}

done2:
	/*
	 * If no error, the backend has replaced the target directory entry.
	 * We must adjust nlinks on the original replace target if it exists.
	 */
	if (error == 0 && tip) {
		isopen = cache_isopen(ap->a_tnch);
		hammer2_inode_unlink_finisher(tip, isopen);
	}

	/*
	 * Update directory mtimes to reflect that something changed.
	 */
	if (update_fdip || update_tdip) {
		uint64_t mtime;

		hammer2_update_time(&mtime);
		if (update_fdip) {
			hammer2_inode_modify(fdip);
			fdip->meta.mtime = mtime;
		}
		if (update_tdip) {
			hammer2_inode_modify(tdip);
			tdip->meta.mtime = mtime;
		}
	}
	if (tip) {
		hammer2_inode_unlock(tip);
		hammer2_inode_drop(tip);
	}
	hammer2_inode_unlock(ip);
	hammer2_inode_unlock(tdip);
	hammer2_inode_unlock(fdip);
	hammer2_inode_drop(ip);
	hammer2_inode_run_sideq(fdip->pmp);

	hammer2_trans_done(tdip->pmp);

	/*
	 * Issue the namecache update after unlocking all the internal
	 * hammer structures, otherwise we might deadlock.
	 */
	if (error == 0 && tip) {
		cache_unlink(ap->a_tnch);
		cache_setunresolved(ap->a_tnch);
	}
	if (error == 0) {
		cache_rename(ap->a_fnch, ap->a_tnch);
		hammer2_knote(ap->a_fdvp, NOTE_WRITE);
		hammer2_knote(ap->a_tdvp, NOTE_WRITE);
		hammer2_knote(fncp->nc_vp, NOTE_RENAME);
	}

/*
 * hammer2_vop_ioctl { vp, command, data, fflag, cred }
 */
static
int
hammer2_vop_ioctl(struct vop_ioctl_args *ap)
{
	hammer2_inode_t *ip;
	int error;

	ip = VTOI(ap->a_vp);

	error = hammer2_ioctl(ip, ap->a_command, (void *)ap->a_data,
			      ap->a_fflag, ap->a_cred);
	return (error);
}

static
int
hammer2_vop_mountctl(struct vop_mountctl_args *ap)
{
	switch (ap->a_op) {
	case (MOUNTCTL_SET_EXPORT):
		mp = ap->a_head.a_ops->head.vv_mount;
		pmp = MPTOPMP(mp);

		if (ap->a_ctllen != sizeof(struct export_args))
			rc = (EINVAL);
		else
			rc = vfs_export(mp, &pmp->export,
					(const struct export_args *)ap->a_ctl);
		break;
	default:
		rc = vop_stdmountctl(ap);
		break;
	}
	return (rc);
}

/*
 * KQFILTER
 */
static void filt_hammer2detach(struct knote *kn);
static int filt_hammer2read(struct knote *kn, long hint);
static int filt_hammer2write(struct knote *kn, long hint);
static int filt_hammer2vnode(struct knote *kn, long hint);

static struct filterops hammer2read_filtops =
	{ FILTEROP_ISFD | FILTEROP_MPSAFE,
	  NULL, filt_hammer2detach, filt_hammer2read };
static struct filterops hammer2write_filtops =
	{ FILTEROP_ISFD | FILTEROP_MPSAFE,
	  NULL, filt_hammer2detach, filt_hammer2write };
static struct filterops hammer2vnode_filtops =
	{ FILTEROP_ISFD | FILTEROP_MPSAFE,
	  NULL, filt_hammer2detach, filt_hammer2vnode };

static
int
hammer2_vop_kqfilter(struct vop_kqfilter_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct knote *kn = ap->a_kn;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &hammer2read_filtops;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &hammer2write_filtops;
		break;
	case EVFILT_VNODE:
		kn->kn_fop = &hammer2vnode_filtops;
		break;
	default:
		return (EOPNOTSUPP);
	}

	kn->kn_hook = (caddr_t)vp;

	knote_insert(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn);

	return (0);
}

static void
filt_hammer2detach(struct knote *kn)
{
	struct vnode *vp = (void *)kn->kn_hook;

	knote_remove(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn);
}

static int
filt_hammer2read(struct knote *kn, long hint)
{
	struct vnode *vp = (void *)kn->kn_hook;
	hammer2_inode_t *ip = VTOI(vp);
	off_t off;

	if (hint == NOTE_REVOKE) {
		kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
		return (1);
	}
	off = ip->meta.size - kn->kn_fp->f_offset;
	kn->kn_data = (off < INTPTR_MAX) ? off : INTPTR_MAX;
	if (kn->kn_sfflags & NOTE_OLDAPI)
		return (1);
	return (kn->kn_data != 0);
}

static int
filt_hammer2write(struct knote *kn, long hint)
{
	if (hint == NOTE_REVOKE)
		kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
	kn->kn_data = 0;
	return (1);
}

static int
filt_hammer2vnode(struct knote *kn, long hint)
{
	if (kn->kn_sfflags & hint)
		kn->kn_fflags |= hint;
	if (hint == NOTE_REVOKE) {
		kn->kn_flags |= (EV_EOF | EV_NODATA);
		return (1);
	}
	return (kn->kn_fflags != 0);
}

/*
 * FIFO VOPS
 */
static
int
hammer2_vop_markatime(struct vop_markatime_args *ap)
{
	hammer2_inode_t *ip;

static
int
hammer2_vop_fifokqfilter(struct vop_kqfilter_args *ap)
{
	int error;

	error = VOCALL(&fifo_vnode_vops, &ap->a_head);
	if (error)
		error = hammer2_vop_kqfilter(ap);
	return (error);
}

/*
 * VOPS
 */
struct vop_ops hammer2_vnode_vops = {
	.vop_default	= vop_defaultop,
	.vop_fsync	= hammer2_vop_fsync,
	.vop_getpages	= vop_stdgetpages,
	.vop_putpages	= vop_stdputpages,
	.vop_access	= hammer2_vop_access,
	.vop_advlock	= hammer2_vop_advlock,
	.vop_close	= hammer2_vop_close,
	.vop_nlink	= hammer2_vop_nlink,
	.vop_ncreate	= hammer2_vop_ncreate,
	.vop_nsymlink	= hammer2_vop_nsymlink,
	.vop_nremove	= hammer2_vop_nremove,
	.vop_nrmdir	= hammer2_vop_nrmdir,
	.vop_nrename	= hammer2_vop_nrename,
	.vop_getattr	= hammer2_vop_getattr,
	.vop_setattr	= hammer2_vop_setattr,
	.vop_readdir	= hammer2_vop_readdir,
	.vop_readlink	= hammer2_vop_readlink,
	.vop_read	= hammer2_vop_read,
	.vop_write	= hammer2_vop_write,
	.vop_open	= hammer2_vop_open,
	.vop_inactive	= hammer2_vop_inactive,
	.vop_reclaim	= hammer2_vop_reclaim,
	.vop_nresolve	= hammer2_vop_nresolve,
	.vop_nlookupdotdot = hammer2_vop_nlookupdotdot,
	.vop_nmkdir	= hammer2_vop_nmkdir,
	.vop_nmknod	= hammer2_vop_nmknod,
	.vop_ioctl	= hammer2_vop_ioctl,
	.vop_mountctl	= hammer2_vop_mountctl,
	.vop_bmap	= hammer2_vop_bmap,
	.vop_strategy	= hammer2_vop_strategy,
	.vop_kqfilter	= hammer2_vop_kqfilter
};

struct vop_ops hammer2_spec_vops = {
	.vop_default	= vop_defaultop,
	.vop_fsync	= hammer2_vop_fsync,
	.vop_read	= vop_stdnoread,
	.vop_write	= vop_stdnowrite,
	.vop_access	= hammer2_vop_access,
	.vop_close	= hammer2_vop_close,
	.vop_markatime	= hammer2_vop_markatime,
	.vop_getattr	= hammer2_vop_getattr,
	.vop_inactive	= hammer2_vop_inactive,
	.vop_reclaim	= hammer2_vop_reclaim,
	.vop_setattr	= hammer2_vop_setattr
};

struct vop_ops hammer2_fifo_vops = {
	.vop_default	= fifo_vnoperate,
	.vop_fsync	= hammer2_vop_fsync,
	.vop_read	= hammer2_vop_fiforead,
	.vop_write	= hammer2_vop_fifowrite,
	.vop_access	= hammer2_vop_access,
	.vop_close	= hammer2_vop_fifoclose,
	.vop_markatime	= hammer2_vop_markatime,
	.vop_getattr	= hammer2_vop_getattr,
	.vop_inactive	= hammer2_vop_inactive,
	.vop_reclaim	= hammer2_vop_reclaim,
	.vop_setattr	= hammer2_vop_setattr,
	.vop_kqfilter	= hammer2_vop_fifokqfilter
};