/*
 * Copyright (c) 2011-2015 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
 * by Daniel Flores (GSOC 2013 - mentored by Matthew Dillon, compression)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Kernel Filesystem interface
 *
 * NOTE! local ipdata pointers must be reloaded on any modifying operation
 *	 to the inode as its underlying chain may have changed.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/fcntl.h>
#include <sys/namei.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/mountctl.h>
#include <sys/dirent.h>
#include <sys/objcache.h>
#include <sys/event.h>

#include <vfs/fifofs/fifo.h>
static int hammer2_read_file(hammer2_inode_t *ip, struct uio *uio,
			int seqcount);
static int hammer2_write_file(hammer2_inode_t *ip, struct uio *uio,
			int ioflag, int seqcount);
static void hammer2_extend_file(hammer2_inode_t *ip, hammer2_key_t nsize);
static void hammer2_truncate_file(hammer2_inode_t *ip, hammer2_key_t nsize);

struct objcache *cache_xops;
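/*
 * Deliver a kqueue event to any knotes registered on the vnode.
 */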
static
void
hammer2_knote(struct vnode *vp, int flags)
	KNOTE(&vp->v_pollinfo.vpi_kqinfo.ki_note, flags);
/*
 * Last reference to a vnode is going away but it is still cached.
 */
static
int
hammer2_vop_inactive(struct vop_inactive_args *ap)
	/*
	 * Check for deleted inodes and recycle immediately on the last
	 * release.  Be sure to destroy any left-over buffer cache buffers
	 * so we do not waste time trying to flush them.
	 *
	 * WARNING: nvtruncbuf() can only be safely called without the inode
	 *	    lock held due to the way our write thread works.
	 */
	if (ip->flags & HAMMER2_INODE_ISUNLINKED) {
		/*
		 * Detect updates to the embedded data which may be
		 * synchronized by the strategy code.  Simply mark the
		 * inode modified so it gets picked up by our normal flush.
		 */
		nblksize = hammer2_calc_logical(ip, 0, &lbase, NULL);
		nvtruncbuf(vp, 0, nblksize, 0, 0);
	}
/*
 * Reclaim a vnode so that it can be reused; after the inode is
 * disassociated, the filesystem must manage it alone.
 */
static
int
hammer2_vop_reclaim(struct vop_reclaim_args *ap)
	/*
	 * The final close of a deleted file or directory marks it for
	 * destruction.  The DELETED flag allows the flusher to shortcut
	 * any modified blocks still unflushed (that is, just ignore them).
	 *
	 * HAMMER2 usually does not try to optimize the freemap by returning
	 * deleted blocks to it as it does not usually know how many snapshots
	 * might be referencing portions of the file/dir.
	 *
	 * NOTE! We do not attempt to flush chains here, flushing is
	 *	 really fragile and could also deadlock.
	 */
	/*
	 * An unlinked inode may have been relinked to the ihidden directory.
	 * This occurs if the inode was unlinked while open.  Reclamation of
	 * these inodes requires processing we cannot safely do here so add
	 * the inode to the unlinkq in that situation.
	 *
	 * A reclaim can occur at any time so we cannot safely start a
	 * transaction to handle reclamation of unlinked files.  Instead,
	 * the ip is left with a reference and placed on a linked list and
	 * processed later via the unlinkq.
	 */
	if ((ip->flags & HAMMER2_INODE_ISUNLINKED) &&
	    (ip->flags & HAMMER2_INODE_ISDELETED) == 0) {
		hammer2_inode_unlink_t *ipul;

		ipul = kmalloc(sizeof(*ipul), pmp->minode, M_WAITOK | M_ZERO);

		hammer2_spin_ex(&pmp->list_spin);
		TAILQ_INSERT_TAIL(&pmp->unlinkq, ipul, entry);
		hammer2_spin_unex(&pmp->list_spin);
		/* retain ref from vp for ipul */
	} else {
		hammer2_inode_drop(ip);		/* vp ref */
	}
	/*
	 * XXX handle background sync when ip dirty, kernel will no longer
	 * notify us regarding this inode because there is no longer a
	 * vnode attached to it.
	 */
static
int
hammer2_vop_fsync(struct vop_fsync_args *ap)
#if 0
	/* XXX can't do this yet */
	hammer2_trans_init(ip->pmp, HAMMER2_TRANS_ISFLUSH);
	vfsync(vp, ap->a_waitfor, 1, NULL, NULL);
#endif
	hammer2_trans_init(ip->pmp, 0);
	vfsync(vp, ap->a_waitfor, 1, NULL, NULL);
	/*
	 * Calling chain_flush here creates a lot of duplicative
	 * COW operations due to non-optimal vnode ordering.
	 *
	 * Only do it for an actual fsync() syscall.  The other forms
	 * which call this function will eventually call chain_flush
	 * on the volume root as a catch-all, which is far more optimal.
	 */
	hammer2_inode_lock(ip, 0);
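	/*
	 * Sync dirty inode meta-data into its backing chains while the
	 * flush transaction is still open (see the similar inode_fsync
	 * requirement in vop_setattr after a truncation).
	 */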
	if (ip->flags & HAMMER2_INODE_MODIFIED)
		hammer2_inode_fsync(ip);
	hammer2_inode_unlock(ip);
	hammer2_trans_done(ip->pmp);
static
int
hammer2_vop_access(struct vop_access_args *ap)
	hammer2_inode_t *ip = VTOI(ap->a_vp);

	hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);
	uid = hammer2_to_unix_xid(&ip->meta.uid);
	gid = hammer2_to_unix_xid(&ip->meta.gid);
	error = vop_helper_access(ap, uid, gid, ip->meta.mode, ip->meta.uflags);
	hammer2_inode_unlock(ip);
static
int
hammer2_vop_getattr(struct vop_getattr_args *ap)
	hammer2_chain_t *chain;

	hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);

	vap->va_fsid = pmp->mp->mnt_stat.f_fsid.val[0];
	vap->va_fileid = ip->meta.inum;
	vap->va_mode = ip->meta.mode;
	vap->va_nlink = ip->meta.nlinks;
	vap->va_uid = hammer2_to_unix_xid(&ip->meta.uid);
	vap->va_gid = hammer2_to_unix_xid(&ip->meta.gid);
	vap->va_size = ip->meta.size;	/* protected by shared lock */
	vap->va_blocksize = HAMMER2_PBUFSIZE;
	vap->va_flags = ip->meta.uflags;
	hammer2_time_to_timespec(ip->meta.ctime, &vap->va_ctime);
	hammer2_time_to_timespec(ip->meta.mtime, &vap->va_mtime);
	/* atime is not supported; report mtime in its place */
	hammer2_time_to_timespec(ip->meta.mtime, &vap->va_atime);
	if (ip->meta.type == HAMMER2_OBJTYPE_DIRECTORY) {
		/*
		 * Can't really calculate directory use sans the files under
		 * it, just assume one block for now.
		 */
		vap->va_bytes += HAMMER2_INODE_BYTES;
	} else {
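		/*
		 * Report the largest data_count among the cluster's
		 * chains as the byte usage estimate.
		 */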
		for (i = 0; i < ip->cluster.nchains; ++i) {
			if ((chain = ip->cluster.array[i].chain) != NULL) {
				if (vap->va_bytes < chain->bref.data_count)
					vap->va_bytes = chain->bref.data_count;
			}
		}
	}
	vap->va_type = hammer2_get_vtype(ip->meta.type);
	vap->va_uid_uuid = ip->meta.uid;
	vap->va_gid_uuid = ip->meta.gid;
	vap->va_vaflags = VA_UID_UUID_VALID | VA_GID_UUID_VALID |
			  VA_FSID_UUID_VALID;

	hammer2_inode_unlock(ip);
static
int
hammer2_vop_setattr(struct vop_setattr_args *ap)
	hammer2_update_time(&ctime);

	if (ip->pmp->ronly) {

	hammer2_pfs_memory_wait(ip->pmp);
	hammer2_trans_init(ip->pmp, 0);
	hammer2_inode_lock(ip, 0);
	if (vap->va_flags != VNOVAL) {
		flags = ip->meta.uflags;
		error = vop_helper_setattr_flags(&flags, vap->va_flags,
				hammer2_to_unix_xid(&ip->meta.uid),
				ap->a_cred);
		if (ip->meta.uflags != flags) {
			hammer2_inode_modify(ip);
			ip->meta.uflags = flags;
			ip->meta.ctime = ctime;
			kflags |= NOTE_ATTRIB;
		}
		if (ip->meta.uflags & (IMMUTABLE | APPEND)) {

	}
	if (ip->meta.uflags & (IMMUTABLE | APPEND)) {
	if (vap->va_uid != (uid_t)VNOVAL || vap->va_gid != (gid_t)VNOVAL) {
		mode_t cur_mode = ip->meta.mode;
		uid_t cur_uid = hammer2_to_unix_xid(&ip->meta.uid);
		gid_t cur_gid = hammer2_to_unix_xid(&ip->meta.gid);

		error = vop_helper_chown(ap->a_vp, vap->va_uid, vap->va_gid,
					 ap->a_cred,
					 &cur_uid, &cur_gid, &cur_mode);
		hammer2_guid_to_uuid(&uuid_uid, cur_uid);
		hammer2_guid_to_uuid(&uuid_gid, cur_gid);
		if (bcmp(&uuid_uid, &ip->meta.uid, sizeof(uuid_uid)) ||
		    bcmp(&uuid_gid, &ip->meta.gid, sizeof(uuid_gid)) ||
		    ip->meta.mode != cur_mode) {
			hammer2_inode_modify(ip);
			ip->meta.uid = uuid_uid;
			ip->meta.gid = uuid_gid;
			ip->meta.mode = cur_mode;
			ip->meta.ctime = ctime;
		}
		kflags |= NOTE_ATTRIB;
	}
	if (vap->va_size != VNOVAL && ip->meta.size != vap->va_size) {
		if (vap->va_size == ip->meta.size)

		if (vap->va_size < ip->meta.size) {
			hammer2_truncate_file(ip, vap->va_size);
		} else {
			hammer2_extend_file(ip, vap->va_size);
		}
		hammer2_inode_modify(ip);
		ip->meta.mtime = ctime;
	}
	/* atime not supported */
	if (vap->va_atime.tv_sec != VNOVAL) {
		hammer2_inode_modify(ip);
		ip->meta.atime = hammer2_timespec_to_time(&vap->va_atime);
		kflags |= NOTE_ATTRIB;
	}
	if (vap->va_mode != (mode_t)VNOVAL) {
		mode_t cur_mode = ip->meta.mode;
		uid_t cur_uid = hammer2_to_unix_xid(&ip->meta.uid);
		gid_t cur_gid = hammer2_to_unix_xid(&ip->meta.gid);

		error = vop_helper_chmod(ap->a_vp, vap->va_mode, ap->a_cred,
					 cur_uid, cur_gid, &cur_mode);
		if (error == 0 && ip->meta.mode != cur_mode) {
			hammer2_inode_modify(ip);
			ip->meta.mode = cur_mode;
			ip->meta.ctime = ctime;
			kflags |= NOTE_ATTRIB;
		}
	}

	if (vap->va_mtime.tv_sec != VNOVAL) {
		hammer2_inode_modify(ip);
		ip->meta.mtime = hammer2_timespec_to_time(&vap->va_mtime);
		kflags |= NOTE_ATTRIB;
	}
	/*
	 * If a truncation occurred we must call inode_fsync() now in order
	 * to trim the related data chains, otherwise a later expansion can
	 * cause havoc.
	 *
	 * If an extend occurred that changed the DIRECTDATA state, we must
	 * call inode_fsync now in order to prepare the inode's indirect
	 * block table.
	 */
	if (ip->flags & HAMMER2_INODE_RESIZED)
		hammer2_inode_fsync(ip);

	hammer2_inode_unlock(ip);
	hammer2_trans_done(ip->pmp);
	hammer2_knote(ip->vp, kflags);
static
int
hammer2_vop_readdir(struct vop_readdir_args *ap)
	hammer2_xop_readdir_t *xop;
	hammer2_blockref_t bref;

	saveoff = uio->uio_offset;
	/*
	 * Set up directory entry cookies if requested.
	 */
	if (ap->a_ncookies) {
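		/*
		 * Rough upper bound on the number of entries: the estimate
		 * assumes each returned dirent consumes at least 16 bytes
		 * of uio space.
		 */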
		ncookies = uio->uio_resid / 16 + 1;
		cookies = kmalloc(ncookies * sizeof(off_t), M_TEMP, M_WAITOK);
	}

	hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);
	/*
	 * Handle artificial entries.  To ensure that only positive 64 bit
	 * quantities are returned to userland we always strip off bit 63.
	 * The hash code is designed such that codes 0x0000-0x7FFF are not
	 * used, allowing us to use these codes for artificial entries.
	 *
	 * Entry 0 is used for '.' and entry 1 is used for '..'.  Do not
	 * allow '..' to cross the mount point into (e.g.) the super-root.
	 */
	inum = ip->meta.inum & HAMMER2_DIRHASH_USERMSK;
	r = vop_write_dirent(&error, uio, inum, DT_DIR, 1, ".");
	cookies[cookie_index] = saveoff;
	if (cookie_index == ncookies)

	/*
	 * Be careful with lockorder when accessing ".."
	 *
	 * (ip is the current dir; ip->pip is the parent dir).
	 */
	inum = ip->meta.inum & HAMMER2_DIRHASH_USERMSK;
	if (ip->pip && ip != ip->pmp->iroot)
		inum = ip->pip->meta.inum & HAMMER2_DIRHASH_USERMSK;
	r = vop_write_dirent(&error, uio, inum, DT_DIR, 2, "..");
	cookies[cookie_index] = saveoff;
	if (cookie_index == ncookies)

	lkey = saveoff | HAMMER2_DIRHASH_VISIBLE;
	if (hammer2_debug & 0x0020)
		kprintf("readdir: lkey %016jx\n", lkey);
	/*
	 * Use XOP for cluster scan.
	 *
	 * parent is the inode cluster, already locked for us.  Don't
	 * double lock shared locks as this will screw up upgrades.
	 */
	xop = hammer2_xop_alloc(ip, 0);
	hammer2_xop_start(&xop->head, hammer2_xop_readdir);
	const hammer2_inode_data_t *ripdata;

	error = hammer2_xop_collect(&xop->head, 0);
	if (cookie_index == ncookies)

	if (hammer2_debug & 0x0020)
		kprintf("cluster chain %p %p\n",
			xop->head.cluster.focus,
			(xop->head.cluster.focus ?
			 xop->head.cluster.focus->data : (void *)-1));
	ripdata = &hammer2_cluster_rdata(&xop->head.cluster)->ipdata;
	hammer2_cluster_bref(&xop->head.cluster, &bref);
	if (bref.type == HAMMER2_BREF_TYPE_INODE) {
		dtype = hammer2_get_dtype(ripdata);
		saveoff = bref.key & HAMMER2_DIRHASH_USERMSK;
		r = vop_write_dirent(&error, uio,
				     ripdata->meta.inum &
				      HAMMER2_DIRHASH_USERMSK,
				     dtype,
				     ripdata->meta.name_len,
				     ripdata->filename);
		cookies[cookie_index] = saveoff;
	} else {
		/* XXX chain error */
		kprintf("bad chain type readdir %d\n", bref.type);
	}
	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
	if (error == ENOENT) {
		saveoff = (hammer2_key_t)-1;
	} else {
		saveoff = bref.key & HAMMER2_DIRHASH_USERMSK;
	}

	hammer2_inode_unlock(ip);
	*ap->a_eofflag = eofflag;
	if (hammer2_debug & 0x0020)
		kprintf("readdir: done at %016jx\n", saveoff);
	uio->uio_offset = saveoff & ~HAMMER2_DIRHASH_VISIBLE;
	if (error && cookie_index == 0) {
		kfree(cookies, M_TEMP);
		*ap->a_cookies = NULL;
	} else {
		*ap->a_ncookies = cookie_index;
		*ap->a_cookies = cookies;
	}
/*
 * hammer2_vop_readlink { vp, uio, cred }
 */
static
int
hammer2_vop_readlink(struct vop_readlink_args *ap)
	if (vp->v_type != VLNK)
		return (EINVAL);
	error = hammer2_read_file(ip, ap->a_uio, 0);
static
int
hammer2_vop_read(struct vop_read_args *ap)
	/*
	 * Read operations supported on this vnode?
	 */
	if (vp->v_type != VREG)

	seqcount = ap->a_ioflag >> 16;
	bigread = (uio->uio_resid > 100 * 1024 * 1024);
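	/*
	 * seqcount is the kernel's sequential-access heuristic taken from
	 * the upper bits of ioflag.  bigread flags transfers over 100MB;
	 * the assumption is that such reads warrant periodic signal checks
	 * in the read loop.
	 */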
	error = hammer2_read_file(ip, uio, seqcount);
static
int
hammer2_vop_write(struct vop_write_args *ap)
	/*
	 * Write operations supported on this vnode?
	 */
	if (vp->v_type != VREG)

	if (ip->pmp->ronly) {

	seqcount = ap->a_ioflag >> 16;
	/*
	 * Check resource limit.
	 */
	if (uio->uio_resid > 0 && (td = uio->uio_td) != NULL && td->td_proc &&
	    uio->uio_offset + uio->uio_resid >
	     td->td_proc->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
		lwpsignal(td->td_proc, td->td_lwp, SIGXFSZ);
	}
	/*
	 * The transaction interlocks against flush initiations
	 * (note: but will run concurrently with the actual flush).
	 */
	hammer2_trans_init(ip->pmp, 0);
	error = hammer2_write_file(ip, uio, ap->a_ioflag, seqcount);
	hammer2_trans_done(ip->pmp);
/*
 * Perform read operations on a file or symlink given an UNLOCKED
 * inode and uio.
 *
 * The passed ip is not locked.
 */
static
int
hammer2_read_file(hammer2_inode_t *ip, struct uio *uio, int seqcount)
	/*
	 * WARNING! Assumes that the kernel interlocks size changes at the
	 *	    vnode level.
	 */
	hammer2_mtx_sh(&ip->lock);
	size = ip->meta.size;
	hammer2_mtx_unlock(&ip->lock);
	while (uio->uio_resid > 0 && uio->uio_offset < size) {
		lblksize = hammer2_calc_logical(ip, uio->uio_offset,
						&lbase, &leof);
		error = cluster_read(ip->vp, leof, lbase, lblksize,
				     uio->uio_resid, seqcount * BKVASIZE,
				     &bp);
		loff = (int)(uio->uio_offset - lbase);
		n = lblksize - loff;
		if (n > uio->uio_resid)
			n = uio->uio_resid;
		if (n > size - uio->uio_offset)
			n = (int)(size - uio->uio_offset);
		bp->b_flags |= B_AGE;
		uiomove((char *)bp->b_data + loff, n, uio);
	}
/*
 * Write to the file represented by the inode via the logical buffer cache.
 * The inode may represent a regular file or a symlink.
 *
 * The inode must not be locked.
 */
static
int
hammer2_write_file(hammer2_inode_t *ip, struct uio *uio,
		   int ioflag, int seqcount)
	hammer2_key_t old_eof;
	hammer2_key_t new_eof;

	/*
	 * WARNING! Assumes that the kernel interlocks size changes at the
	 *	    vnode level.
	 */
	hammer2_mtx_ex(&ip->lock);
	if (ioflag & IO_APPEND)
		uio->uio_offset = ip->meta.size;
	old_eof = ip->meta.size;

	/*
	 * Extend the file if necessary.  If the write fails at some point
	 * we will truncate it back down to cover as much as we were able
	 * to write.
	 *
	 * Doing this now makes it easier to calculate buffer sizes in
	 * the loop.
	 */
	if (uio->uio_offset + uio->uio_resid > old_eof) {
		new_eof = uio->uio_offset + uio->uio_resid;
		hammer2_extend_file(ip, new_eof);
		kflags |= NOTE_EXTEND;
	}
	hammer2_mtx_unlock(&ip->lock);
	while (uio->uio_resid > 0) {
		/*
		 * Don't allow the buffer build to blow out the buffer
		 * cache.
		 */
		if ((ioflag & IO_RECURSE) == 0)
			bwillwrite(HAMMER2_PBUFSIZE);
		/*
		 * This nominally tells us how much we can cluster and
		 * what the logical buffer size needs to be.  Currently
		 * we don't try to cluster the write and just handle one
		 * block at a time.
		 */
		lblksize = hammer2_calc_logical(ip, uio->uio_offset,
						&lbase, NULL);
		loff = (int)(uio->uio_offset - lbase);

		KKASSERT(lblksize <= 65536);
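		/*
		 * (Logical buffers are currently limited to 64KiB,
		 * i.e. HAMMER2_PBUFSIZE, hence the assertion above.)
		 */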
		/*
		 * Calculate bytes to copy this transfer and whether the
		 * copy completely covers the buffer or not.
		 */
		if (n > uio->uio_resid) {
			if (loff == lbase && uio->uio_offset + n == new_eof)

		if (uio->uio_segflg == UIO_NOCOPY) {
			/*
			 * Issuing a write with the same data backing the
			 * buffer.  Instantiate the buffer to collect the
			 * backing vm pages, then read-in any missing bits.
			 *
			 * This case is used by vop_stdputpages().
			 */
			bp = getblk(ip->vp, lbase, lblksize, GETBLK_BHEAVY, 0);
			if ((bp->b_flags & B_CACHE) == 0) {
				error = bread(ip->vp, lbase, lblksize, &bp);
			}
		} else if (trivial) {
			/*
			 * Even though we are entirely overwriting the buffer
			 * we may still have to zero it out to avoid a
			 * mmap/write visibility issue.
			 */
			bp = getblk(ip->vp, lbase, lblksize, GETBLK_BHEAVY, 0);
			if ((bp->b_flags & B_CACHE) == 0)

		} else {
			/*
			 * Partial overwrite, read in any missing bits then
			 * replace the portion being written.
			 *
			 * (The strategy code will detect zero-fill physical
			 * blocks for this case).
			 */
			error = bread(ip->vp, lbase, lblksize, &bp);
		}
		/*
		 * Ok, copy the data in.
		 */
		error = uiomove(bp->b_data + loff, n, uio);
		kflags |= NOTE_WRITE;

		/*
		 * WARNING: Pageout daemon will issue UIO_NOCOPY writes
		 *	    with IO_SYNC or IO_ASYNC set.  These writes
		 *	    must be handled as the pageout daemon expects.
		 */
		if (ioflag & IO_SYNC) {
		} else if ((ioflag & IO_DIRECT) && endofblk) {
		} else if (ioflag & IO_ASYNC) {
	}
	/*
	 * Cleanup.  If we extended the file EOF but failed to write through,
	 * the entire write is a failure and we have to back up.
	 */
	if (error && new_eof != old_eof) {
		hammer2_mtx_ex(&ip->lock);
		hammer2_truncate_file(ip, old_eof);
		if (ip->flags & HAMMER2_INODE_MODIFIED)
			hammer2_inode_fsync(ip);
		hammer2_mtx_unlock(&ip->lock);
	} else if (modified) {
		hammer2_mtx_ex(&ip->lock);
		hammer2_inode_modify(ip);
		hammer2_update_time(&ip->meta.mtime);
		if (ip->flags & HAMMER2_INODE_MODIFIED)
			hammer2_inode_fsync(ip);
		hammer2_mtx_unlock(&ip->lock);
		hammer2_knote(ip->vp, kflags);
	}
	hammer2_trans_assert_strategy(ip->pmp);
/*
 * Truncate the size of a file.  The inode must not be locked.
 *
 * We must unconditionally set HAMMER2_INODE_RESIZED to properly
 * ensure that any on-media data beyond the new file EOF has been destroyed.
 *
 * WARNING: nvtruncbuf() can only be safely called without the inode lock
 *	    held due to the way our write thread works.  If the truncation
 *	    occurs in the middle of a buffer, nvtruncbuf() is responsible
 *	    for dirtying that buffer and zeroing out trailing bytes.
 *
 * WARNING! Assumes that the kernel interlocks size changes at the
 *	    vnode level.
 *
 * WARNING! Caller assumes responsibility for removing dead blocks
 *	    if INODE_RESIZED is set.
 */
static
void
hammer2_truncate_file(hammer2_inode_t *ip, hammer2_key_t nsize)
	hammer2_key_t lbase;

	hammer2_mtx_unlock(&ip->lock);
	nblksize = hammer2_calc_logical(ip, nsize, &lbase, NULL);
	nvtruncbuf(ip->vp, nsize,
		   nblksize, (int)nsize & (nblksize - 1),
	hammer2_mtx_ex(&ip->lock);
	KKASSERT((ip->flags & HAMMER2_INODE_RESIZED) == 0);
	ip->osize = ip->meta.size;
	ip->meta.size = nsize;
	atomic_set_int(&ip->flags, HAMMER2_INODE_MODIFIED |
				   HAMMER2_INODE_RESIZED);
/*
 * Extend the size of a file.  The inode must not be locked.
 *
 * Even though the file size is changing, we do not have to set the
 * INODE_RESIZED bit unless the file size crosses the EMBEDDED_BYTES
 * boundary.  When this occurs a hammer2_inode_fsync() is required
 * to prepare the inode cluster's indirect block table.
 *
 * WARNING! Assumes that the kernel interlocks size changes at the
 *	    vnode level.
 *
 * WARNING! Caller assumes responsibility for transitioning out
 *	    of the inode DIRECTDATA mode if INODE_RESIZED is set.
 */
static
void
hammer2_extend_file(hammer2_inode_t *ip, hammer2_key_t nsize)
	hammer2_key_t lbase;
	hammer2_key_t osize;

	KKASSERT((ip->flags & HAMMER2_INODE_RESIZED) == 0);
	osize = ip->meta.size;
	ip->meta.size = nsize;
	atomic_set_int(&ip->flags, HAMMER2_INODE_MODIFIED);

	if (osize <= HAMMER2_EMBEDDED_BYTES && nsize > HAMMER2_EMBEDDED_BYTES)
		atomic_set_int(&ip->flags, HAMMER2_INODE_RESIZED);

	hammer2_mtx_unlock(&ip->lock);
	oblksize = hammer2_calc_logical(ip, osize, &lbase, NULL);
	nblksize = hammer2_calc_logical(ip, nsize, &lbase, NULL);

	hammer2_mtx_ex(&ip->lock);
static
int
hammer2_vop_nresolve(struct vop_nresolve_args *ap)
	hammer2_xop_nresolve_t *xop;
	hammer2_inode_t *ip;
	hammer2_inode_t *dip;
	struct namecache *ncp;

	dip = VTOI(ap->a_dvp);
	xop = hammer2_xop_alloc(dip, 0);

	ncp = ap->a_nch->ncp;
	hammer2_xop_setname(&xop->head, ncp->nc_name, ncp->nc_nlen);

	/*
	 * Note: In DragonFly the kernel handles '.' and '..'.
	 */
	hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
	hammer2_xop_start(&xop->head, hammer2_xop_nresolve);

	error = hammer2_xop_collect(&xop->head, 0);
	ip = hammer2_inode_get(dip->pmp, dip, &xop->head.cluster, -1);
	hammer2_inode_unlock(dip);
	/*
	 * Acquire the related vnode.
	 *
	 * NOTE: For error processing, only ENOENT resolves the namecache
	 *	 entry to NULL, otherwise we just return the error and
	 *	 leave the namecache unresolved.
	 *
	 * NOTE: multiple hammer2_inode structures can be aliased to the
	 *	 same chain element, for example for hardlinks.  This
	 *	 use case does not 'reattach' inode associations that
	 *	 might already exist, but always allocates a new one.
	 *
	 * WARNING: inode structure is locked exclusively via inode_get
	 *	    but chain was locked shared.  inode_unlock()
	 *	    will handle it properly.
	 */
	vp = hammer2_igetv(ip, &error);
	if (error == 0) {
		cache_setvp(ap->a_nch, vp);
	} else if (error == ENOENT) {
		cache_setvp(ap->a_nch, NULL);
	}
	hammer2_inode_unlock(ip);
	/*
	 * The vp should not be released until after we've disposed
	 * of our locks, because it might cause vop_inactive() to
	 * deadlock.
	 */
	cache_setvp(ap->a_nch, NULL);
	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
	KASSERT(error || ap->a_nch->ncp->nc_vp != NULL,
		("resolve error %d/%p ap %p\n",
		 error, ap->a_nch->ncp->nc_vp, ap));
static
int
hammer2_vop_nlookupdotdot(struct vop_nlookupdotdot_args *ap)
	hammer2_inode_t *dip;
	hammer2_inode_t *ip;

	dip = VTOI(ap->a_dvp);

	if ((ip = dip->pip) == NULL) {

	hammer2_inode_lock(ip, 0);
	*ap->a_vpp = hammer2_igetv(ip, &error);
	hammer2_inode_unlock(ip);
static
int
hammer2_vop_nmkdir(struct vop_nmkdir_args *ap)
	hammer2_inode_t *dip;
	hammer2_inode_t *nip;
	struct namecache *ncp;
	const uint8_t *name;

	dip = VTOI(ap->a_dvp);
	if (dip->pmp->ronly) {

	ncp = ap->a_nch->ncp;
	name = ncp->nc_name;
	name_len = ncp->nc_nlen;

	hammer2_pfs_memory_wait(dip->pmp);
	hammer2_trans_init(dip->pmp, 0);
	nip = hammer2_inode_create(dip, ap->a_vap, ap->a_cred,
				   hammer2_trans_newinum(dip->pmp), 0, 0,
	KKASSERT(nip == NULL);
	*ap->a_vpp = hammer2_igetv(nip, &error);
	hammer2_inode_unlock(nip);
	hammer2_trans_done(dip->pmp);

	cache_setunresolved(ap->a_nch);
	cache_setvp(ap->a_nch, *ap->a_vpp);
static
int
hammer2_vop_open(struct vop_open_args *ap)
	return vop_stdopen(ap);

/*
 * hammer2_vop_advlock { vp, id, op, fl, flags }
 */
static
int
hammer2_vop_advlock(struct vop_advlock_args *ap)
	hammer2_inode_t *ip = VTOI(ap->a_vp);

	size = ip->meta.size;
	return (lf_advlock(ap, &ip->advlock, size));

static
int
hammer2_vop_close(struct vop_close_args *ap)
	return vop_stdclose(ap);
/*
 * hammer2_vop_nlink { nch, dvp, vp, cred }
 *
 * Create a hardlink from (vp) to {dvp, nch}.
 */
static
int
hammer2_vop_nlink(struct vop_nlink_args *ap)
	hammer2_xop_nlink_t *xop1;
	hammer2_inode_t *fdip;	/* directory the target inode resides in */
	hammer2_inode_t *tdip;	/* target directory to create link in */
	hammer2_inode_t *cdip;	/* common parent directory */
	hammer2_inode_t *ip;	/* inode we are hardlinking to */
	struct namecache *ncp;
	const uint8_t *name;

	tdip = VTOI(ap->a_dvp);
	if (tdip->pmp->ronly) {

	ncp = ap->a_nch->ncp;
	name = ncp->nc_name;
	name_len = ncp->nc_nlen;
	/*
	 * ip represents the file being hardlinked.  The file could be a
	 * normal file or a hardlink target if it has already been hardlinked.
	 * If ip is a hardlinked target then ip->pip represents the location
	 * of the hardlinked target, NOT the location of the hardlink pointer.
	 *
	 * Bump nlinks and potentially also create or move the hardlink
	 * target in the parent directory common to (ip) and (tdip).  The
	 * consolidation code can modify ip->cluster and ip->pip.  The
	 * returned cluster is locked.
	 */
	ip = VTOI(ap->a_vp);
	hammer2_pfs_memory_wait(ip->pmp);
	hammer2_trans_init(ip->pmp, 0);
	/*
	 * The common parent directory must be locked first to avoid deadlocks.
	 * Also note that fdip and/or tdip might match cdip.
	 */
	cdip = hammer2_inode_common_parent(fdip, tdip);
	hammer2_inode_lock(cdip, 0);
	hammer2_inode_lock(fdip, 0);
	hammer2_inode_lock(tdip, 0);
	hammer2_inode_lock(ip, 0);
	/*
	 * If ip is not a hardlink target we must convert it to a hardlink.
	 * If fdip != cdip we must shift the inode to cdip.
	 */
	if (fdip != cdip || (ip->meta.name_key & HAMMER2_DIRHASH_VISIBLE)) {
		xop1 = hammer2_xop_alloc(fdip, HAMMER2_XOP_MODIFYING);
		hammer2_xop_setip2(&xop1->head, ip);
		hammer2_xop_setip3(&xop1->head, cdip);

		hammer2_xop_start(&xop1->head, hammer2_xop_nlink);
		error = hammer2_xop_collect(&xop1->head, 0);
		hammer2_xop_retire(&xop1->head, HAMMER2_XOPMASK_VOP);
		if (error == ENOENT)

	}
	/*
	 * Must synchronize original inode whose chains are now a hardlink
	 * target.  We must match what the backend XOP did to the
	 * inode's chains.
	 */
	if (error == 0 && (ip->meta.name_key & HAMMER2_DIRHASH_VISIBLE)) {
		hammer2_inode_modify(ip);
		ip->meta.name_key = ip->meta.inum;
		ip->meta.name_len = 18;	/* "0x%016jx" */
	}
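	/*
	 * (The hidden hardlink target is renamed to its inode number;
	 * "0x%016jx" formats to exactly 18 characters, matching name_len.)
	 */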
	/*
	 * Create the hardlink target and bump nlinks.
	 */
	hammer2_inode_create(tdip, NULL, NULL,
			     HAMMER2_OBJTYPE_HARDLINK, ip->meta.type,
	hammer2_inode_modify(ip);

	cache_setunresolved(ap->a_nch);
	cache_setvp(ap->a_nch, ap->a_vp);

	hammer2_inode_unlock(ip);
	hammer2_inode_unlock(tdip);
	hammer2_inode_unlock(fdip);
	hammer2_inode_unlock(cdip);
	hammer2_inode_drop(cdip);
	hammer2_trans_done(ip->pmp);
/*
 * hammer2_vop_ncreate { nch, dvp, vpp, cred, vap }
 *
 * The operating system has already ensured that the directory entry
 * does not exist and done all appropriate namespace locking.
 */
static
int
hammer2_vop_ncreate(struct vop_ncreate_args *ap)
	hammer2_inode_t *dip;
	hammer2_inode_t *nip;
	struct namecache *ncp;
	const uint8_t *name;

	dip = VTOI(ap->a_dvp);
	if (dip->pmp->ronly) {

	ncp = ap->a_nch->ncp;
	name = ncp->nc_name;
	name_len = ncp->nc_nlen;
	hammer2_pfs_memory_wait(dip->pmp);
	hammer2_trans_init(dip->pmp, 0);

	nip = hammer2_inode_create(dip, ap->a_vap, ap->a_cred,
				   hammer2_trans_newinum(dip->pmp), 0, 0,
	KKASSERT(nip == NULL);
	*ap->a_vpp = hammer2_igetv(nip, &error);
	hammer2_inode_unlock(nip);
	hammer2_trans_done(dip->pmp);

	cache_setunresolved(ap->a_nch);
	cache_setvp(ap->a_nch, *ap->a_vpp);
/*
 * Make a device node (typically a fifo).
 */
static
int
hammer2_vop_nmknod(struct vop_nmknod_args *ap)
	hammer2_inode_t *dip;
	hammer2_inode_t *nip;
	struct namecache *ncp;
	const uint8_t *name;

	dip = VTOI(ap->a_dvp);
	if (dip->pmp->ronly) {

	ncp = ap->a_nch->ncp;
	name = ncp->nc_name;
	name_len = ncp->nc_nlen;
	hammer2_pfs_memory_wait(dip->pmp);
	hammer2_trans_init(dip->pmp, 0);

	nip = hammer2_inode_create(dip, ap->a_vap, ap->a_cred,
				   hammer2_trans_newinum(dip->pmp), 0, 0,
	KKASSERT(nip == NULL);
	*ap->a_vpp = hammer2_igetv(nip, &error);
	hammer2_inode_unlock(nip);
	hammer2_trans_done(dip->pmp);

	cache_setunresolved(ap->a_nch);
	cache_setvp(ap->a_nch, *ap->a_vpp);
/*
 * hammer2_vop_nsymlink { nch, dvp, vpp, cred, vap, target }
 */
static
int
hammer2_vop_nsymlink(struct vop_nsymlink_args *ap)
	hammer2_inode_t *dip;
	hammer2_inode_t *nip;
	struct namecache *ncp;
	const uint8_t *name;

	dip = VTOI(ap->a_dvp);
	if (dip->pmp->ronly)
		return (EROFS);

	ncp = ap->a_nch->ncp;
	name = ncp->nc_name;
	name_len = ncp->nc_nlen;
	hammer2_pfs_memory_wait(dip->pmp);
	hammer2_trans_init(dip->pmp, 0);

	ap->a_vap->va_type = VLNK;	/* enforce type */

	nip = hammer2_inode_create(dip, ap->a_vap, ap->a_cred,
				   hammer2_trans_newinum(dip->pmp), 0, 0,
	KKASSERT(nip == NULL);
	hammer2_trans_done(dip->pmp);

	*ap->a_vpp = hammer2_igetv(nip, &error);

	/*
	 * Build the softlink (~like file data) and finalize the namecache.
	 */
	if (error == 0) {
		bytes = strlen(ap->a_target);

		hammer2_inode_unlock(nip);
		bzero(&auio, sizeof(auio));
		bzero(&aiov, sizeof(aiov));
		auio.uio_iov = &aiov;
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_rw = UIO_WRITE;
		auio.uio_resid = bytes;
		auio.uio_iovcnt = 1;
		auio.uio_td = curthread;
		aiov.iov_base = ap->a_target;
		aiov.iov_len = bytes;
		error = hammer2_write_file(nip, &auio, IO_APPEND, 0);
		/* XXX handle error */
	} else {
		hammer2_inode_unlock(nip);
	}
	hammer2_trans_done(dip->pmp);

	/*
	 * Finalize namecache.
	 */
	cache_setunresolved(ap->a_nch);
	cache_setvp(ap->a_nch, *ap->a_vpp);
	/* hammer2_knote(ap->a_dvp, NOTE_WRITE); */
/*
 * hammer2_vop_nremove { nch, dvp, cred }
 */
static
int
hammer2_vop_nremove(struct vop_nremove_args *ap)
	hammer2_xop_unlink_t *xop;
	hammer2_inode_t *dip;
	hammer2_inode_t *ip;
	struct namecache *ncp;

	dip = VTOI(ap->a_dvp);
	if (dip->pmp->ronly) {

	ncp = ap->a_nch->ncp;

	hammer2_pfs_memory_wait(dip->pmp);
	hammer2_trans_init(dip->pmp, 0);
	hammer2_inode_lock(dip, 0);

	/*
	 * The unlink XOP unlinks the path from the directory and
	 * locates and returns the cluster associated with the real inode.
	 * We have to handle nlinks here on the frontend.
	 */
	xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
	hammer2_xop_setname(&xop->head, ncp->nc_name, ncp->nc_nlen);
	isopen = cache_isopen(ap->a_nch);
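	/*
	 * If the name is not open anywhere we can permanently free the
	 * blocks now; otherwise the deletion must be deferred (the inode
	 * ends up on the unlinkq processed below).
	 */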
	xop->dopermanent = isopen ? 0 : HAMMER2_DELETE_PERMANENT;
	hammer2_xop_start(&xop->head, hammer2_xop_unlink);

	/*
	 * Collect the real inode and adjust nlinks, destroy the real
	 * inode if nlinks transitions to 0 and it was the real inode
	 * (else it has already been removed).
	 */
	error = hammer2_xop_collect(&xop->head, 0);
	hammer2_inode_unlock(dip);

	if (error == 0) {
		ip = hammer2_inode_get(dip->pmp, dip, &xop->head.cluster, -1);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
		hammer2_inode_unlink_finisher(ip, isopen);
		hammer2_inode_unlock(ip);
	} else {
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
	}

	hammer2_inode_run_unlinkq(dip->pmp);
	hammer2_trans_done(dip->pmp);
	if (error == 0)
		cache_unlink(ap->a_nch);
/*
 * hammer2_vop_nrmdir { nch, dvp, cred }
 */
static
int
hammer2_vop_nrmdir(struct vop_nrmdir_args *ap)
	hammer2_xop_unlink_t *xop;
	hammer2_inode_t *dip;
	hammer2_inode_t *ip;
	struct namecache *ncp;

	dip = VTOI(ap->a_dvp);
	if (dip->pmp->ronly) {

	hammer2_pfs_memory_wait(dip->pmp);
	hammer2_trans_init(dip->pmp, 0);
	hammer2_inode_lock(dip, 0);

	xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);

	ncp = ap->a_nch->ncp;
	hammer2_xop_setname(&xop->head, ncp->nc_name, ncp->nc_nlen);
	isopen = cache_isopen(ap->a_nch);
	xop->dopermanent = isopen ? 0 : HAMMER2_DELETE_PERMANENT;
	hammer2_xop_start(&xop->head, hammer2_xop_unlink);

	/*
	 * Collect the real inode and adjust nlinks, destroy the real
	 * inode if nlinks transitions to 0 and it was the real inode
	 * (else it has already been removed).
	 */
	error = hammer2_xop_collect(&xop->head, 0);
	hammer2_inode_unlock(dip);

	if (error == 0) {
		ip = hammer2_inode_get(dip->pmp, dip, &xop->head.cluster, -1);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
		hammer2_inode_unlink_finisher(ip, isopen);
		hammer2_inode_unlock(ip);
	} else {
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
	}

	hammer2_inode_run_unlinkq(dip->pmp);
	hammer2_trans_done(dip->pmp);
	if (error == 0)
		cache_unlink(ap->a_nch);
/*
 * hammer2_vop_nrename { fnch, tnch, fdvp, tdvp, cred }
 */
static
int
hammer2_vop_nrename(struct vop_nrename_args *ap)
	struct namecache *fncp;
	struct namecache *tncp;
	hammer2_inode_t *cdip;
	hammer2_inode_t *fdip;
	hammer2_inode_t *tdip;
	hammer2_inode_t *ip;
	const uint8_t *fname;
	const uint8_t *tname;

	if (ap->a_fdvp->v_mount != ap->a_tdvp->v_mount)
		return (EXDEV);
	if (ap->a_fdvp->v_mount != ap->a_fnch->ncp->nc_vp->v_mount)
		return (EXDEV);

	fdip = VTOI(ap->a_fdvp);	/* source directory */
	tdip = VTOI(ap->a_tdvp);	/* target directory */

	if (fdip->pmp->ronly)
		return (EROFS);

	fncp = ap->a_fnch->ncp;		/* entry name in source */
	fname = fncp->nc_name;
	fname_len = fncp->nc_nlen;

	tncp = ap->a_tnch->ncp;		/* entry name in target */
	tname = tncp->nc_name;
	tname_len = tncp->nc_nlen;

	hammer2_pfs_memory_wait(tdip->pmp);
	hammer2_trans_init(tdip->pmp, 0);

	/*
	 * ip is the inode being renamed.  If this is a hardlink then
	 * ip represents the actual file and not the hardlink marker.
	 */
	ip = VTOI(fncp->nc_vp);

	/*
	 * The common parent directory must be locked first to avoid deadlocks.
	 * Also note that fdip and/or tdip might match cdip.
	 */
	cdip = hammer2_inode_common_parent(ip->pip, tdip);
	hammer2_inode_lock(cdip, 0);
	hammer2_inode_lock(fdip, 0);
	hammer2_inode_lock(tdip, 0);
	hammer2_inode_ref(ip);		/* extra ref */
	/*
	 * If ip is a hardlink target and fdip != cdip we must shift the
	 * inode to cdip.
	 */
	if (fdip != cdip &&
	    (ip->meta.name_key & HAMMER2_DIRHASH_VISIBLE) == 0) {
		hammer2_xop_nlink_t *xop1;

		xop1 = hammer2_xop_alloc(fdip, HAMMER2_XOP_MODIFYING);
		hammer2_xop_setip2(&xop1->head, ip);
		hammer2_xop_setip3(&xop1->head, cdip);

		hammer2_xop_start(&xop1->head, hammer2_xop_nlink);
		error = hammer2_xop_collect(&xop1->head, 0);
		hammer2_xop_retire(&xop1->head, HAMMER2_XOPMASK_VOP);
	}
	/*
	 * Delete the target namespace.
	 */
	hammer2_xop_unlink_t *xop2;
	hammer2_inode_t *tip;

	/*
	 * The unlink XOP unlinks the path from the directory and
	 * locates and returns the cluster associated with the real
	 * inode.  We have to handle nlinks here on the frontend.
	 */
	xop2 = hammer2_xop_alloc(tdip, HAMMER2_XOP_MODIFYING);
	hammer2_xop_setname(&xop2->head, tname, tname_len);
	isopen = cache_isopen(ap->a_tnch);
	xop2->dopermanent = isopen ? 0 : HAMMER2_DELETE_PERMANENT;
	hammer2_xop_start(&xop2->head, hammer2_xop_unlink);

	/*
	 * Collect the real inode and adjust nlinks, destroy the real
	 * inode if nlinks transitions to 0 and it was the real inode
	 * (else it has already been removed).
	 */
	tnch_error = hammer2_xop_collect(&xop2->head, 0);
	/* hammer2_inode_unlock(tdip); */

	if (tnch_error == 0) {
		tip = hammer2_inode_get(tdip->pmp, NULL,
					&xop2->head.cluster, -1);
		hammer2_xop_retire(&xop2->head, HAMMER2_XOPMASK_VOP);
		hammer2_inode_unlink_finisher(tip, isopen);
		hammer2_inode_unlock(tip);
	} else {
		hammer2_xop_retire(&xop2->head, HAMMER2_XOPMASK_VOP);
	}
	/* hammer2_inode_lock(tdip, 0); */

	if (tnch_error && tnch_error != ENOENT) {
	/*
	 * Resolve the collision space for (tdip, tname, tname_len).
	 *
	 * tdip must be held exclusively locked to prevent races.
	 */
	hammer2_xop_scanlhc_t *sxop;
	hammer2_tid_t lhcbase;

	tlhc = hammer2_dirhash(tname, tname_len);
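	/*
	 * Scan existing entries sharing this directory hash and probe
	 * forward from lhcbase until an unused key is found; the probe
	 * must stay within the hash's low-mask collision space (see the
	 * check below).
	 */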
	sxop = hammer2_xop_alloc(tdip, HAMMER2_XOP_MODIFYING);
	hammer2_xop_start(&sxop->head, hammer2_xop_scanlhc);
	while ((error = hammer2_xop_collect(&sxop->head, 0)) == 0) {
		if (tlhc != sxop->head.cluster.focus->bref.key)
			break;
		++tlhc;
	}
	hammer2_xop_retire(&sxop->head, HAMMER2_XOPMASK_VOP);

	if (error != ENOENT)

	if ((lhcbase ^ tlhc) & ~HAMMER2_DIRHASH_LOMASK) {
	/*
	 * Everything is set up, do the rename.
	 *
	 * We have to synchronize ip->meta to the underlying operation.
	 *
	 * NOTE: To avoid deadlocks we cannot lock (ip) while we are
	 *	 unlinking elements from their directories.  Locking
	 *	 the nlinks field does not lock the whole inode.
	 */
	hammer2_inode_lock(ip, 0);
	hammer2_xop_nrename_t *xop4;

	xop4 = hammer2_xop_alloc(fdip, HAMMER2_XOP_MODIFYING);
	xop4->ip_key = ip->meta.name_key;
	hammer2_xop_setip2(&xop4->head, ip);
	hammer2_xop_setip3(&xop4->head, tdip);
	hammer2_xop_setname(&xop4->head, fname, fname_len);
	hammer2_xop_setname2(&xop4->head, tname, tname_len);
	hammer2_xop_start(&xop4->head, hammer2_xop_nrename);

	error = hammer2_xop_collect(&xop4->head, 0);
	hammer2_xop_retire(&xop4->head, HAMMER2_XOPMASK_VOP);

	if (error == ENOENT)
		error = 0;
	if (error == 0 &&
	    (ip->meta.name_key & HAMMER2_DIRHASH_VISIBLE)) {
		hammer2_inode_modify(ip);
		ip->meta.name_len = tname_len;
		ip->meta.name_key = tlhc;
	}
	/*
	 * Fixup ip->pip if we were renaming the actual file and not a
	 * hardlink pointer.
	 */
	if (error == 0 && (ip->meta.name_key & HAMMER2_DIRHASH_VISIBLE)) {
		hammer2_inode_t *opip;

		if (ip->pip != tdip) {
			hammer2_inode_ref(tdip);
			hammer2_inode_drop(opip);
		}
	}
	hammer2_inode_unlock(ip);
	hammer2_inode_unlock(tdip);
	hammer2_inode_unlock(fdip);
	hammer2_inode_unlock(cdip);
	hammer2_inode_drop(ip);
	hammer2_inode_drop(cdip);
	hammer2_inode_run_unlinkq(fdip->pmp);
	hammer2_trans_done(tdip->pmp);

	/*
	 * Issue the namecache update after unlocking all the internal
	 * hammer2 structures, otherwise we might deadlock.
	 */
	if (tnch_error == 0) {
		cache_unlink(ap->a_tnch);
		cache_setunresolved(ap->a_tnch);
	}
	if (error == 0)
		cache_rename(ap->a_fnch, ap->a_tnch);
/*
 * hammer2_vop_ioctl { vp, command, data, fflag, cred }
 */
static
int
hammer2_vop_ioctl(struct vop_ioctl_args *ap)
	hammer2_inode_t *ip;

	ip = VTOI(ap->a_vp);
	error = hammer2_ioctl(ip, ap->a_command, (void *)ap->a_data,
			      ap->a_fflag, ap->a_cred);
static
int
hammer2_vop_mountctl(struct vop_mountctl_args *ap)
	switch (ap->a_op) {
	case (MOUNTCTL_SET_EXPORT):
		mp = ap->a_head.a_ops->head.vv_mount;

		if (ap->a_ctllen != sizeof(struct export_args))
			rc = EINVAL;
		else
			rc = vfs_export(mp, &pmp->export,
					(const struct export_args *)ap->a_ctl);
		break;
	default:
		rc = vop_stdmountctl(ap);
		break;
	}
static void filt_hammer2detach(struct knote *kn);
static int filt_hammer2read(struct knote *kn, long hint);
static int filt_hammer2write(struct knote *kn, long hint);
static int filt_hammer2vnode(struct knote *kn, long hint);

static struct filterops hammer2read_filtops =
	{ FILTEROP_ISFD | FILTEROP_MPSAFE,
	  NULL, filt_hammer2detach, filt_hammer2read };
static struct filterops hammer2write_filtops =
	{ FILTEROP_ISFD | FILTEROP_MPSAFE,
	  NULL, filt_hammer2detach, filt_hammer2write };
static struct filterops hammer2vnode_filtops =
	{ FILTEROP_ISFD | FILTEROP_MPSAFE,
	  NULL, filt_hammer2detach, filt_hammer2vnode };
static
int
hammer2_vop_kqfilter(struct vop_kqfilter_args *ap)
	struct vnode *vp = ap->a_vp;
	struct knote *kn = ap->a_kn;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &hammer2read_filtops;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &hammer2write_filtops;
		break;
	case EVFILT_VNODE:
		kn->kn_fop = &hammer2vnode_filtops;
		break;
	default:
		return (EOPNOTSUPP);
	}

	kn->kn_hook = (caddr_t)vp;

	knote_insert(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn);
static void
filt_hammer2detach(struct knote *kn)
	struct vnode *vp = (void *)kn->kn_hook;

	knote_remove(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn);

static int
filt_hammer2read(struct knote *kn, long hint)
	struct vnode *vp = (void *)kn->kn_hook;
	hammer2_inode_t *ip = VTOI(vp);

	if (hint == NOTE_REVOKE) {
		kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
	}
	off = ip->meta.size - kn->kn_fp->f_offset;
	kn->kn_data = (off < INTPTR_MAX) ? off : INTPTR_MAX;
	if (kn->kn_sfflags & NOTE_OLDAPI)
		return (1);
	return (kn->kn_data != 0);
static int
filt_hammer2write(struct knote *kn, long hint)
	if (hint == NOTE_REVOKE)
		kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
	return (1);

static int
filt_hammer2vnode(struct knote *kn, long hint)
	if (kn->kn_sfflags & hint)
		kn->kn_fflags |= hint;
	if (hint == NOTE_REVOKE) {
		kn->kn_flags |= (EV_EOF | EV_NODATA);
		return (1);
	}
	return (kn->kn_fflags != 0);
static
int
hammer2_vop_markatime(struct vop_markatime_args *ap)
	hammer2_inode_t *ip;

static
int
hammer2_vop_fifokqfilter(struct vop_kqfilter_args *ap)
	error = VOCALL(&fifo_vnode_vops, &ap->a_head);

	error = hammer2_vop_kqfilter(ap);
struct vop_ops hammer2_vnode_vops = {
	.vop_default	= vop_defaultop,
	.vop_fsync	= hammer2_vop_fsync,
	.vop_getpages	= vop_stdgetpages,
	.vop_putpages	= vop_stdputpages,
	.vop_access	= hammer2_vop_access,
	.vop_advlock	= hammer2_vop_advlock,
	.vop_close	= hammer2_vop_close,
	.vop_nlink	= hammer2_vop_nlink,
	.vop_ncreate	= hammer2_vop_ncreate,
	.vop_nsymlink	= hammer2_vop_nsymlink,
	.vop_nremove	= hammer2_vop_nremove,
	.vop_nrmdir	= hammer2_vop_nrmdir,
	.vop_nrename	= hammer2_vop_nrename,
	.vop_getattr	= hammer2_vop_getattr,
	.vop_setattr	= hammer2_vop_setattr,
	.vop_readdir	= hammer2_vop_readdir,
	.vop_readlink	= hammer2_vop_readlink,
	.vop_read	= hammer2_vop_read,
	.vop_write	= hammer2_vop_write,
	.vop_open	= hammer2_vop_open,
	.vop_inactive	= hammer2_vop_inactive,
	.vop_reclaim	= hammer2_vop_reclaim,
	.vop_nresolve	= hammer2_vop_nresolve,
	.vop_nlookupdotdot = hammer2_vop_nlookupdotdot,
	.vop_nmkdir	= hammer2_vop_nmkdir,
	.vop_nmknod	= hammer2_vop_nmknod,
	.vop_ioctl	= hammer2_vop_ioctl,
	.vop_mountctl	= hammer2_vop_mountctl,
	.vop_bmap	= hammer2_vop_bmap,
	.vop_strategy	= hammer2_vop_strategy,
	.vop_kqfilter	= hammer2_vop_kqfilter
};
struct vop_ops hammer2_spec_vops = {
	.vop_default	= vop_defaultop,
	.vop_fsync	= hammer2_vop_fsync,
	.vop_read	= vop_stdnoread,
	.vop_write	= vop_stdnowrite,
	.vop_access	= hammer2_vop_access,
	.vop_close	= hammer2_vop_close,
	.vop_markatime	= hammer2_vop_markatime,
	.vop_getattr	= hammer2_vop_getattr,
	.vop_inactive	= hammer2_vop_inactive,
	.vop_reclaim	= hammer2_vop_reclaim,
	.vop_setattr	= hammer2_vop_setattr
};
struct vop_ops hammer2_fifo_vops = {
	.vop_default	= fifo_vnoperate,
	.vop_fsync	= hammer2_vop_fsync,
	.vop_read	= hammer2_vop_fiforead,
	.vop_write	= hammer2_vop_fifowrite,
	.vop_access	= hammer2_vop_access,
	.vop_close	= hammer2_vop_fifoclose,
	.vop_markatime	= hammer2_vop_markatime,
	.vop_getattr	= hammer2_vop_getattr,
	.vop_inactive	= hammer2_vop_inactive,
	.vop_reclaim	= hammer2_vop_reclaim,
	.vop_setattr	= hammer2_vop_setattr,
	.vop_kqfilter	= hammer2_vop_fifokqfilter
};