2 * Copyright (c) 2011-2012 The DragonFly Project. All rights reserved.
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@dragonflybsd.org>
6 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in
16 * the documentation and/or other materials provided with the
18 * 3. Neither the name of The DragonFly Project nor the names of its
19 * contributors may be used to endorse or promote products derived
20 * from this software without specific, prior written permission.
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
25 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
26 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
27 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
28 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
30 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
31 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
32 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/kernel.h>
38 #include <sys/fcntl.h>
41 #include <sys/namei.h>
42 #include <sys/mount.h>
43 #include <sys/vnode.h>
44 #include <sys/mountctl.h>
45 #include <sys/dirent.h>
50 #define ZFOFFSET (-2LL)
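/*
 * ZFOFFSET is a sentinel translated offset.  Together with NOOFFSET it lets
 * the strategy code dispatch a bio without re-walking the chain; roughly
 * what hammer2_strategy_read/write below do with it:
 */
#if 0
	if (nbio->bio_offset == ZFOFFSET)
		;	/* no backing store allocated, zero-fill on read */
	else if (nbio->bio_offset == NOOFFSET)
		;	/* data embedded in the inode, bcopy to/from ipdata */
	else
		vn_strategy(hmp->devvp, nbio);	/* direct device I/O */
#endif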
52 static int hammer2_read_file(hammer2_inode_t *ip, struct uio *uio,
54 static int hammer2_write_file(hammer2_inode_t *ip, struct uio *uio, int ioflag,
56 static hammer2_off_t hammer2_assign_physical(hammer2_inode_t *ip,
57 hammer2_key_t lbase, int lblksize, int *errorp);
58 static void hammer2_extend_file(hammer2_inode_t *ip, hammer2_key_t nsize);
59 static void hammer2_truncate_file(hammer2_inode_t *ip, hammer2_key_t nsize);
63 hammer2_knote(struct vnode *vp, int flags)
66 KNOTE(&vp->v_pollinfo.vpi_kqinfo.ki_note, flags);
70 * Last reference to a vnode is going away but it is still cached.
74 hammer2_vop_inactive(struct vop_inactive_args *ap)
77 struct hammer2_inode *ip;
79 struct hammer2_mount *hmp;
94 * Detect updates to the embedded data which may be synchronized by
95 * the strategy code. Simply mark the inode modified so it gets
96 * picked up by our normal flush.
98 if (ip->chain.flags & HAMMER2_CHAIN_DIRTYEMBED) {
99 hammer2_inode_lock_ex(ip);
100 atomic_clear_int(&ip->chain.flags, HAMMER2_CHAIN_DIRTYEMBED);
101 hammer2_chain_modify(ip->hmp, &ip->chain, 0);
102 hammer2_inode_unlock_ex(ip);
106 * Check for deleted inodes and recycle immediately.
108 if (ip->chain.flags & HAMMER2_CHAIN_DELETED) {
115 * Reclaim a vnode so that it can be reused; after the inode is
116 * disassociated, the filesystem must manage it alone.
120 hammer2_vop_reclaim(struct vop_reclaim_args *ap)
122 struct hammer2_inode *ip;
123 struct hammer2_mount *hmp;
133 * Set SUBMODIFIED so we can detect and propagate the DESTROYED
134 * bit in the flush code.
136 hammer2_inode_lock_ex(ip);
139 if (ip->chain.flags & HAMMER2_CHAIN_DELETED) {
140 atomic_set_int(&ip->chain.flags, HAMMER2_CHAIN_DESTROYED |
141 HAMMER2_CHAIN_SUBMODIFIED);
143 hammer2_chain_flush(hmp, &ip->chain, 0);
144 hammer2_inode_unlock_ex(ip);
145 hammer2_chain_drop(hmp, &ip->chain); /* vp ref */
148 * XXX handle background sync when ip dirty, kernel will no longer
149 * notify us regarding this inode because there is no longer a
150 * vnode attached to it.
158 hammer2_vop_fsync(struct vop_fsync_args *ap)
160 struct hammer2_inode *ip;
161 struct hammer2_mount *hmp;
168 hammer2_inode_lock_ex(ip);
169 vfsync(vp, ap->a_waitfor, 1, NULL, NULL);
172 * Detect updates to the embedded data which may be synchronized by
173 * the strategy code. Simply mark the inode modified so it gets
174 * picked up by our normal flush.
176 if (ip->chain.flags & HAMMER2_CHAIN_DIRTYEMBED) {
177 atomic_clear_int(&ip->chain.flags, HAMMER2_CHAIN_DIRTYEMBED);
178 hammer2_chain_modify(hmp, &ip->chain, 0);
182 * Calling chain_flush here creates a lot of duplicative
183 * COW operations due to non-optimal vnode ordering.
185 * Only do it for an actual fsync() syscall. The other forms
186 * which call this function will eventually call chain_flush
187 * on the volume root as a catch-all, which is far more optimal.
189 if (ap->a_flags & VOP_FSYNC_SYSCALL)
190 hammer2_chain_flush(hmp, &ip->chain, 0);
191 hammer2_inode_unlock_ex(ip);
197 hammer2_vop_access(struct vop_access_args *ap)
199 hammer2_inode_t *ip = VTOI(ap->a_vp);
204 uid = hammer2_to_unix_xid(&ip->ip_data.uid);
205 gid = hammer2_to_unix_xid(&ip->ip_data.gid);
207 error = vop_helper_access(ap, uid, gid, ip->ip_data.mode,
214 hammer2_vop_getattr(struct vop_getattr_args *ap)
216 hammer2_pfsmount_t *pmp;
227 hammer2_inode_lock_sh(ip);
229 vap->va_fsid = pmp->mp->mnt_stat.f_fsid.val[0];
230 vap->va_fileid = ip->ip_data.inum;
231 vap->va_mode = ip->ip_data.mode;
232 vap->va_nlink = ip->ip_data.nlinks;
233 vap->va_uid = hammer2_to_unix_xid(&ip->ip_data.uid);
234 vap->va_gid = hammer2_to_unix_xid(&ip->ip_data.gid);
237 vap->va_size = ip->ip_data.size;
238 vap->va_blocksize = HAMMER2_PBUFSIZE;
239 vap->va_flags = ip->ip_data.uflags;
240 hammer2_time_to_timespec(ip->ip_data.ctime, &vap->va_ctime);
241 hammer2_time_to_timespec(ip->ip_data.mtime, &vap->va_mtime);
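/* atime is not maintained; report mtime in its place (see setattr below) */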
242 hammer2_time_to_timespec(ip->ip_data.mtime, &vap->va_atime);
244 vap->va_bytes = vap->va_size; /* XXX */
245 vap->va_type = hammer2_get_vtype(ip);
247 vap->va_uid_uuid = ip->ip_data.uid;
248 vap->va_gid_uuid = ip->ip_data.gid;
249 vap->va_vaflags = VA_UID_UUID_VALID | VA_GID_UUID_VALID |
252 hammer2_inode_unlock_sh(ip);
259 hammer2_vop_setattr(struct vop_setattr_args *ap)
261 hammer2_mount_t *hmp;
272 hammer2_update_time(&ctime);
280 hammer2_inode_lock_ex(ip);
283 if (vap->va_flags != VNOVAL) {
286 flags = ip->ip_data.uflags;
287 error = vop_helper_setattr_flags(&flags, vap->va_flags,
288 hammer2_to_unix_xid(&ip->ip_data.uid),
291 if (ip->ip_data.uflags != flags) {
292 hammer2_chain_modify(hmp, &ip->chain, 0);
293 ip->ip_data.uflags = flags;
294 ip->ip_data.ctime = ctime;
295 kflags |= NOTE_ATTRIB;
297 if (ip->ip_data.uflags & (IMMUTABLE | APPEND)) {
304 if (ip->ip_data.uflags & (IMMUTABLE | APPEND)) {
308 if (vap->va_uid != (uid_t)VNOVAL || vap->va_gid != (gid_t)VNOVAL) {
309 mode_t cur_mode = ip->ip_data.mode;
310 uid_t cur_uid = hammer2_to_unix_xid(&ip->ip_data.uid);
311 gid_t cur_gid = hammer2_to_unix_xid(&ip->ip_data.gid);
315 error = vop_helper_chown(ap->a_vp, vap->va_uid, vap->va_gid,
317 &cur_uid, &cur_gid, &cur_mode);
319 hammer2_guid_to_uuid(&uuid_uid, cur_uid);
320 hammer2_guid_to_uuid(&uuid_gid, cur_gid);
321 if (bcmp(&uuid_uid, &ip->ip_data.uid,
323 bcmp(&uuid_gid, &ip->ip_data.gid,
325 ip->ip_data.mode != cur_mode
327 hammer2_chain_modify(hmp, &ip->chain, 0);
328 ip->ip_data.uid = uuid_uid;
329 ip->ip_data.gid = uuid_gid;
330 ip->ip_data.mode = cur_mode;
331 ip->ip_data.ctime = ctime;
333 kflags |= NOTE_ATTRIB;
340 if (vap->va_size != VNOVAL && ip->ip_data.size != vap->va_size) {
343 if (vap->va_size == ip->ip_data.size)
345 if (vap->va_size < ip->ip_data.size) {
346 hammer2_truncate_file(ip, vap->va_size);
348 hammer2_extend_file(ip, vap->va_size);
358 /* atime not supported */
359 if (vap->va_atime.tv_sec != VNOVAL) {
360 hammer2_chain_modify(hmp, &ip->chain, 0);
361 ip->ip_data.atime = hammer2_timespec_to_time(&vap->va_atime);
362 kflags |= NOTE_ATTRIB;
365 if (vap->va_mtime.tv_sec != VNOVAL) {
366 hammer2_chain_modify(hmp, &ip->chain, 0);
367 ip->ip_data.mtime = hammer2_timespec_to_time(&vap->va_mtime);
368 kflags |= NOTE_ATTRIB;
370 if (vap->va_mode != (mode_t)VNOVAL) {
371 mode_t cur_mode = ip->ip_data.mode;
372 uid_t cur_uid = hammer2_to_unix_xid(&ip->ip_data.uid);
373 gid_t cur_gid = hammer2_to_unix_xid(&ip->ip_data.gid);
375 error = vop_helper_chmod(ap->a_vp, vap->va_mode, ap->a_cred,
376 cur_uid, cur_gid, &cur_mode);
377 if (error == 0 && ip->ip_data.mode != cur_mode) {
378 ip->ip_data.mode = cur_mode;
379 ip->ip_data.ctime = ctime;
380 kflags |= NOTE_ATTRIB;
384 hammer2_inode_unlock_ex(ip);
390 hammer2_vop_readdir(struct vop_readdir_args *ap)
392 hammer2_mount_t *hmp;
394 hammer2_inode_t *xip;
395 hammer2_chain_t *parent;
396 hammer2_chain_t *chain;
410 saveoff = uio->uio_offset;
413 * Set up directory entry cookies if requested
415 if (ap->a_ncookies) {
416 ncookies = uio->uio_resid / 16 + 1;
419 cookies = kmalloc(ncookies * sizeof(off_t), M_TEMP, M_WAITOK);
427 * Handle artificial entries. To ensure that only positive 64 bit
428 * quantities are returned to userland we always strip off bit 63.
429 * The hash code is designed such that codes 0x0000-0x7FFF are not
430 * used, allowing us to use these codes for artificial entries.
432 * Entry 0 is used for '.' and entry 1 is used for '..'. Do not
433 * allow '..' to cross the mount point into (e.g.) the super-root.
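/*
 * A minimal sketch of the cookie encoding described above, assuming the
 * directory hash key is an unsigned 64-bit quantity: artificial entries
 * reuse the reserved low codes (0 for '.', 1 for '..') while real entries
 * are masked with HAMMER2_DIRHASH_USERMSK so bit 63 is stripped and only
 * a positive offset is handed back to userland.
 */
#if 0
static __inline hammer2_key_t
readdir_cookie(hammer2_key_t key, int artificial_index)
{
	if (artificial_index >= 0)		/* 0 == '.', 1 == '..' */
		return ((hammer2_key_t)artificial_index);
	return (key & HAMMER2_DIRHASH_USERMSK);
}
#endif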
436 chain = (void *)(intptr_t)-1; /* non-NULL for early goto done case */
439 r = vop_write_dirent(&error, uio,
441 HAMMER2_DIRHASH_USERMSK,
446 cookies[cookie_index] = saveoff;
449 if (cookie_index == ncookies)
453 if (ip->pip == NULL || ip == ip->pmp->iroot)
458 r = vop_write_dirent(&error, uio,
460 HAMMER2_DIRHASH_USERMSK,
465 cookies[cookie_index] = saveoff;
468 if (cookie_index == ncookies)
472 lkey = saveoff | HAMMER2_DIRHASH_VISIBLE;
475 error = hammer2_chain_lock(hmp, parent, HAMMER2_RESOLVE_ALWAYS);
477 hammer2_chain_unlock(hmp, parent);
480 chain = hammer2_chain_lookup(hmp, &parent, lkey, lkey, 0);
482 chain = hammer2_chain_lookup(hmp, &parent,
483 lkey, (hammer2_key_t)-1, 0);
486 if (chain->bref.type == HAMMER2_BREF_TYPE_INODE) {
487 dtype = hammer2_get_dtype(chain->u.ip);
488 saveoff = chain->bref.key & HAMMER2_DIRHASH_USERMSK;
489 r = vop_write_dirent(&error, uio,
490 chain->u.ip->ip_data.inum &
491 HAMMER2_DIRHASH_USERMSK,
492 dtype, chain->u.ip->ip_data.name_len,
493 chain->u.ip->ip_data.filename);
497 cookies[cookie_index] = saveoff;
500 /* XXX chain error */
501 kprintf("bad chain type readdir %d\n",
506 * Keys may not be returned in order so once we have a
507 * placemarker (chain) the scan must allow the full range
508 * or some entries will be missed.
510 chain = hammer2_chain_next(hmp, &parent, chain,
511 HAMMER2_DIRHASH_VISIBLE,
512 (hammer2_key_t)-1, 0);
514 saveoff = (chain->bref.key &
515 HAMMER2_DIRHASH_USERMSK) + 1;
517 saveoff = (hammer2_key_t)-1;
519 if (cookie_index == ncookies)
523 hammer2_chain_unlock(hmp, chain);
524 hammer2_chain_unlock(hmp, parent);
527 *ap->a_eofflag = (chain == NULL);
528 uio->uio_offset = saveoff & ~HAMMER2_DIRHASH_VISIBLE;
529 if (error && cookie_index == 0) {
531 kfree(cookies, M_TEMP);
533 *ap->a_cookies = NULL;
537 *ap->a_ncookies = cookie_index;
538 *ap->a_cookies = cookies;
545 * hammer2_vop_readlink { vp, uio, cred }
549 hammer2_vop_readlink(struct vop_readlink_args *ap)
552 hammer2_mount_t *hmp;
557 if (vp->v_type != VLNK)
562 error = hammer2_read_file(ip, ap->a_uio, 0);
568 hammer2_vop_read(struct vop_read_args *ap)
571 hammer2_mount_t *hmp;
579 * Read operations supported on this vnode?
582 if (vp->v_type != VREG)
593 seqcount = ap->a_ioflag >> 16;
594 bigread = (uio->uio_resid > 100 * 1024 * 1024);
596 error = hammer2_read_file(ip, uio, seqcount);
602 hammer2_vop_write(struct vop_write_args *ap)
606 hammer2_mount_t *hmp;
614 * Write operations supported on this vnode?
617 if (vp->v_type != VREG)
630 seqcount = ap->a_ioflag >> 16;
631 bigwrite = (uio->uio_resid > 100 * 1024 * 1024);
634 * Check resource limit
636 if (uio->uio_resid > 0 && (td = uio->uio_td) != NULL && td->td_proc &&
637 uio->uio_offset + uio->uio_resid >
638 td->td_proc->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
639 lwpsignal(td->td_proc, td->td_lwp, SIGXFSZ);
643 bigwrite = (uio->uio_resid > 100 * 1024 * 1024);
646 * ip must be locked if extending the file.
647 * ip must be locked to avoid racing a truncation.
649 * ip must be marked modified, particularly because the write
650 * might wind up being copied into the embedded data area.
652 hammer2_inode_lock_ex(ip);
653 error = hammer2_write_file(ip, uio, ap->a_ioflag, seqcount);
655 hammer2_inode_unlock_ex(ip);
660 * Perform read operations on a file or symlink given an UNLOCKED
665 hammer2_read_file(hammer2_inode_t *ip, struct uio *uio, int seqcount)
675 while (uio->uio_resid > 0 && uio->uio_offset < ip->ip_data.size) {
682 lblksize = hammer2_calc_logical(ip, uio->uio_offset,
685 error = cluster_read(ip->vp, leof, lbase, lblksize,
686 uio->uio_resid, seqcount * BKVASIZE,
691 loff = (int)(uio->uio_offset - lbase);
693 if (n > uio->uio_resid)
695 if (n > ip->ip_data.size - uio->uio_offset)
696 n = (int)(ip->ip_data.size - uio->uio_offset);
697 bp->b_flags |= B_AGE;
698 uiomove((char *)bp->b_data + loff, n, uio);
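/*
 * Worked example of the clamping above, assuming 64KiB logical buffers:
 * uio_offset == 70000 lands in the buffer at lbase == 65536, so
 * loff == 4464; n is then limited to uio_resid and, near EOF, to
 * ip_data.size - uio_offset (30000 bytes for a 100000-byte file).
 */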
705 * Called with a locked (ip) to do the underlying write to a file or
706 * to build the symlink target.
710 hammer2_write_file(hammer2_inode_t *ip, struct uio *uio,
711 int ioflag, int seqcount)
713 hammer2_key_t old_eof;
722 if (ioflag & IO_APPEND)
723 uio->uio_offset = ip->ip_data.size;
728 * Extend the file if necessary. If the write fails at some point
729 * we will truncate it back down to cover as much as we were able
732 * Doing this now makes it easier to calculate buffer sizes in
735 old_eof = ip->ip_data.size;
736 if (uio->uio_offset + uio->uio_resid > ip->ip_data.size) {
738 hammer2_extend_file(ip, uio->uio_offset + uio->uio_resid);
739 kflags |= NOTE_EXTEND;
745 while (uio->uio_resid > 0) {
754 * Don't allow the buffer build to blow out the buffer
757 if ((ioflag & IO_RECURSE) == 0) {
759 * XXX should try to leave this unlocked through
762 hammer2_chain_unlock(ip->hmp, &ip->chain);
763 bwillwrite(HAMMER2_PBUFSIZE);
764 hammer2_chain_lock(ip->hmp, &ip->chain,
765 HAMMER2_RESOLVE_ALWAYS);
768 /* XXX bigwrite & signal check test */
771 * This nominally tells us how much we can cluster and
772 * what the logical buffer size needs to be. Currently
773 * we don't try to cluster the write and just handle one
776 lblksize = hammer2_calc_logical(ip, uio->uio_offset,
778 loff = (int)(uio->uio_offset - lbase);
781 * Calculate bytes to copy this transfer and whether the
782 * copy completely covers the buffer or not.
786 if (n > uio->uio_resid) {
788 if (uio->uio_offset + n == ip->ip_data.size)
790 } else if (loff == 0) {
797 if (uio->uio_segflg == UIO_NOCOPY) {
799 * Issuing a write with the same data backing the
800 * buffer. Instantiate the buffer to collect the
801 * backing vm pages, then read-in any missing bits.
803 * This case is used by vop_stdputpages().
805 bp = getblk(ip->vp, lbase, lblksize, GETBLK_BHEAVY, 0);
806 if ((bp->b_flags & B_CACHE) == 0) {
808 error = bread(ip->vp, lbase, lblksize, &bp);
810 } else if (trivial) {
812 * Even though we are entirely overwriting the buffer
813 * we may still have to zero it out to avoid a
814 * mmap/write visibility issue.
816 bp = getblk(ip->vp, lbase, lblksize, GETBLK_BHEAVY, 0);
817 if ((bp->b_flags & B_CACHE) == 0)
821 * Partial overwrite, read in any missing bits then
822 * replace the portion being written.
824 * (The strategy code will detect zero-fill physical
825 * blocks for this case).
827 error = bread(ip->vp, lbase, lblksize, &bp);
838 * We have to assign physical storage to the buffer we intend
839 * to dirty or write now to avoid deadlocks in the strategy
842 * This can return NOOFFSET for inode-embedded data. The
843 * strategy code will take care of it in that case.
845 bp->b_bio2.bio_offset =
846 hammer2_assign_physical(ip, lbase, lblksize, &error);
853 * Ok, copy the data in
855 hammer2_chain_unlock(ip->hmp, &ip->chain);
856 error = uiomove(bp->b_data + loff, n, uio);
857 hammer2_chain_lock(ip->hmp, &ip->chain, HAMMER2_RESOLVE_ALWAYS);
858 kflags |= NOTE_WRITE;
866 /* XXX update ip_data.mtime */
869 * Once we dirty a buffer any cached offset becomes invalid.
871 * NOTE: For cluster_write() always use the trailing block
872 * size, which is HAMMER2_PBUFSIZE. lblksize is the
873 * eof-straddling blocksize and is incorrect.
875 bp->b_flags |= B_AGE;
876 if (ioflag & IO_SYNC) {
878 } else if ((ioflag & IO_DIRECT) && loff + n == lblksize) {
879 bp->b_flags |= B_CLUSTEROK;
881 } else if (ioflag & IO_ASYNC) {
883 } else if (hammer2_cluster_enable) {
884 bp->b_flags |= B_CLUSTEROK;
885 cluster_write(bp, leof, HAMMER2_PBUFSIZE, seqcount);
887 bp->b_flags |= B_CLUSTEROK;
893 * Cleanup. If we extended the file EOF but failed to write through,
894 * the entire write is a failure and we have to back up.
896 if (error && ip->ip_data.size != old_eof) {
897 hammer2_truncate_file(ip, old_eof);
898 } else if (modified) {
899 hammer2_chain_modify(ip->hmp, &ip->chain, 0);
900 hammer2_update_time(&ip->ip_data.mtime);
902 hammer2_knote(ip->vp, kflags);
907 * Assign physical storage to a logical block.
909 * NOOFFSET is returned if the data is inode-embedded. In this case the
910 * strategy code will simply bcopy() the data into the inode.
912 * The inode's delta_dcount is adjusted.
916 hammer2_assign_physical(hammer2_inode_t *ip, hammer2_key_t lbase,
917 int lblksize, int *errorp)
919 hammer2_mount_t *hmp;
920 hammer2_chain_t *parent;
921 hammer2_chain_t *chain;
928 * Locate the chain associated with lbase, return a locked chain.
929 * However, do not instantiate any data reference (which utilizes a
930 * device buffer) because we will be using direct IO via the
931 * logical buffer cache buffer.
934 hammer2_chain_lock(hmp, parent, HAMMER2_RESOLVE_ALWAYS);
936 chain = hammer2_chain_lookup(hmp, &parent,
938 HAMMER2_LOOKUP_NODATA);
942 * We found a hole, create a new chain entry.
944 * NOTE: DATA chains are created without device backing
945 * store (nor do we want any).
947 chain = hammer2_chain_create(hmp, parent, NULL,
948 lbase, HAMMER2_PBUFRADIX,
949 HAMMER2_BREF_TYPE_DATA,
951 pbase = chain->bref.data_off & ~HAMMER2_OFF_MASK_RADIX;
952 ip->delta_dcount += lblksize;
954 switch (chain->bref.type) {
955 case HAMMER2_BREF_TYPE_INODE:
957 * The data is embedded in the inode. The
958 * caller is responsible for marking the inode
959 * modified and copying the data to the embedded
964 case HAMMER2_BREF_TYPE_DATA:
965 if (chain->bytes != lblksize) {
966 panic("hammer2_assign_physical: "
967 "size mismatch %d/%d\n",
968 lblksize, chain->bytes);
970 hammer2_chain_modify(hmp, chain,
971 HAMMER2_MODIFY_OPTDATA);
972 pbase = chain->bref.data_off & ~HAMMER2_OFF_MASK_RADIX;
975 panic("hammer2_assign_physical: bad type");
983 hammer2_chain_unlock(hmp, chain);
984 hammer2_chain_unlock(hmp, parent);
990 * Truncate the size of a file.
992 * This routine adjusts ip->ip_data.size smaller, destroying any related
993 * data beyond the new EOF and potentially resizing the block straddling
996 * The inode must be locked.
1000 hammer2_truncate_file(hammer2_inode_t *ip, hammer2_key_t nsize)
1002 hammer2_chain_t *parent;
1003 hammer2_chain_t *chain;
1004 hammer2_mount_t *hmp = ip->hmp;
1005 hammer2_key_t lbase;
1013 hammer2_chain_modify(hmp, &ip->chain, 0);
1017 * Destroy any logical buffer cache buffers beyond the file EOF.
1019 * We call nvtruncbuf() w/ trivial == 1 to prevent it from messing
1020 * around with the buffer straddling EOF, because we need to assign
1021 * a new physical offset to it.
1024 nvtruncbuf(ip->vp, nsize,
1025 HAMMER2_PBUFSIZE, (int)nsize & HAMMER2_PBUFMASK,
1030 * Setup for lookup/search
1032 parent = &ip->chain;
1033 error = hammer2_chain_lock(hmp, parent, HAMMER2_RESOLVE_ALWAYS);
1035 hammer2_chain_unlock(hmp, parent);
1036 /* XXX error reporting */
1041 * Handle the case where a chain/logical-buffer straddles the new
1042 * EOF. We told nvtruncbuf() above not to mess with the logical
1043 * buffer straddling the EOF because we need to reassign its storage
1044 * and can't let the strategy code do it for us.
1046 loff = (int)nsize & HAMMER2_PBUFMASK;
1047 if (loff && ip->vp) {
1048 oblksize = hammer2_calc_logical(ip, nsize, &lbase, &leof);
1049 error = bread(ip->vp, lbase, oblksize, &bp);
1050 KKASSERT(error == 0);
1052 ip->ip_data.size = nsize;
1053 nblksize = hammer2_calc_logical(ip, nsize, &lbase, &leof);
1056 * Fixup the chain element. If we have a logical buffer in-hand
1057 * we don't want to create a conflicting device buffer.
1060 chain = hammer2_chain_lookup(hmp, &parent, lbase, lbase,
1061 HAMMER2_LOOKUP_NODATA);
1063 allocbuf(bp, nblksize);
1064 switch(chain->bref.type) {
1065 case HAMMER2_BREF_TYPE_DATA:
1066 hammer2_chain_resize(ip, chain,
1067 hammer2_bytes_to_radix(nblksize),
1068 HAMMER2_MODIFY_OPTDATA);
1069 bzero(bp->b_data + loff, nblksize - loff);
1070 bp->b_bio2.bio_offset = chain->bref.data_off &
1073 case HAMMER2_BREF_TYPE_INODE:
1074 bzero(bp->b_data + loff, nblksize - loff);
1075 bp->b_bio2.bio_offset = NOOFFSET;
1078 panic("hammer2_truncate_file: bad type");
1081 hammer2_chain_unlock(hmp, chain);
1082 bp->b_flags |= B_CLUSTEROK;
1086 * Destroy clean buffer w/ wrong buffer size. Retain
1089 bp->b_flags |= B_RELBUF;
1090 KKASSERT(bp->b_bio2.bio_offset == NOOFFSET);
1091 KKASSERT((bp->b_flags & B_DIRTY) == 0);
1096 * WARNING: This utilizes a device buffer for the data.
1098 * XXX case should not occur
1100 panic("hammer2_truncate_file: non-zero truncation, no-vnode");
1101 chain = hammer2_chain_lookup(hmp, &parent, lbase, lbase, 0);
1103 switch(chain->bref.type) {
1104 case HAMMER2_BREF_TYPE_DATA:
1105 hammer2_chain_resize(ip, chain,
1106 hammer2_bytes_to_radix(nblksize),
1108 hammer2_chain_modify(hmp, chain, 0);
1109 bzero(chain->data->buf + loff, nblksize - loff);
1111 case HAMMER2_BREF_TYPE_INODE:
1112 if (loff < HAMMER2_EMBEDDED_BYTES) {
1113 hammer2_chain_modify(hmp, chain, 0);
1114 bzero(chain->data->ipdata.u.data + loff,
1115 HAMMER2_EMBEDDED_BYTES - loff);
1119 hammer2_chain_unlock(hmp, chain);
1124 * Clean up any fragmentary VM pages now that we have properly
1125 * resized the straddling buffer. These pages are no longer
1126 * part of the buffer.
1129 nvtruncbuf(ip->vp, nsize,
1130 nblksize, (int)nsize & (nblksize - 1),
1135 * Destroy any physical blocks after the new EOF point.
1137 lbase = (nsize + HAMMER2_PBUFMASK64) & ~HAMMER2_PBUFMASK64;
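/*
 * Round-up example, assuming 64KiB physical buffers: truncating to
 * nsize == 100000 yields lbase == 131072, so the buffer straddling the
 * new EOF (at 65536) is retained and only blocks at or beyond 131072
 * are deleted below.
 */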
1138 chain = hammer2_chain_lookup(hmp, &parent,
1139 lbase, (hammer2_key_t)-1,
1140 HAMMER2_LOOKUP_NODATA);
1143 * Degenerate embedded data case, nothing to loop on.
1145 if (chain->bref.type == HAMMER2_BREF_TYPE_INODE) {
1146 hammer2_chain_unlock(hmp, chain);
1151 * Delete physical data blocks past the file EOF.
1153 if (chain->bref.type == HAMMER2_BREF_TYPE_DATA) {
1154 ip->delta_dcount -= chain->bytes;
1155 hammer2_chain_delete(hmp, parent, chain);
1157 /* XXX check parent if empty indirect block & delete */
1158 chain = hammer2_chain_next(hmp, &parent, chain,
1159 lbase, (hammer2_key_t)-1,
1160 HAMMER2_LOOKUP_NODATA);
1162 hammer2_chain_unlock(hmp, parent);
1166 * Extend the size of a file. The inode must be locked.
1168 * We may have to resize the block straddling the old EOF.
1172 hammer2_extend_file(hammer2_inode_t *ip, hammer2_key_t nsize)
1174 hammer2_mount_t *hmp;
1175 hammer2_chain_t *parent;
1176 hammer2_chain_t *chain;
1178 hammer2_key_t osize;
1179 hammer2_key_t obase;
1180 hammer2_key_t nbase;
1190 hammer2_chain_modify(hmp, &ip->chain, 0);
1193 * Nothing to do if the direct-data case is still intact
1195 if ((ip->ip_data.op_flags & HAMMER2_OPFLAG_DIRECTDATA) &&
1196 nsize <= HAMMER2_EMBEDDED_BYTES) {
1197 ip->ip_data.size = nsize;
1202 * Calculate the blocksize at the original EOF and resize the block
1203 * if necessary. Adjust the file size in the inode.
1205 osize = ip->ip_data.size;
1206 oblksize = hammer2_calc_logical(ip, osize, &obase, &leof);
1207 ip->ip_data.size = nsize;
1208 nblksize = hammer2_calc_logical(ip, osize, &nbase, &leof);
1211 * Do all required vnode operations, but do not mess with the
1212 * buffer straddling the original EOF.
1215 ip->ip_data.size, nsize,
1217 0, (int)nsize & HAMMER2_PBUFMASK,
1221 * Early return if we have no more work to do.
1223 if (obase == nbase && oblksize == nblksize &&
1224 (ip->ip_data.op_flags & HAMMER2_OPFLAG_DIRECTDATA) == 0) {
1229 * We have work to do, including possibly resizing the buffer
1230 * at the EOF point and turning off DIRECTDATA mode.
1233 if (((int)osize & HAMMER2_PBUFMASK)) {
1234 error = bread(ip->vp, obase, oblksize, &bp);
1235 KKASSERT(error == 0);
1237 if (obase != nbase) {
1238 allocbuf(bp, HAMMER2_PBUFSIZE);
1240 allocbuf(bp, nblksize);
1246 * Disable direct-data mode by loading up a buffer cache buffer
1247 * with the data, then converting the inode data area into the
1248 * inode indirect block array area.
1250 if (ip->ip_data.op_flags & HAMMER2_OPFLAG_DIRECTDATA) {
1251 ip->ip_data.op_flags &= ~HAMMER2_OPFLAG_DIRECTDATA;
1252 bzero(&ip->ip_data.u.blockset, sizeof(ip->ip_data.u.blockset));
1256 * Resize the chain element at the old EOF.
1258 if (((int)osize & HAMMER2_PBUFMASK)) {
1259 parent = &ip->chain;
1260 error = hammer2_chain_lock(hmp, parent, HAMMER2_RESOLVE_ALWAYS);
1261 KKASSERT(error == 0);
1263 nradix = hammer2_bytes_to_radix(nblksize);
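/*
 * The radix is log2 of the block size, e.g. a 16384-byte block has radix
 * 14 and a full HAMMER2_PBUFSIZE block (assuming the usual 64KiB) has
 * radix 16; logical block sizes are powers of 2 throughout this file.
 */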
1265 chain = hammer2_chain_lookup(hmp, &parent,
1267 HAMMER2_LOOKUP_NODATA);
1268 if (chain == NULL) {
1269 chain = hammer2_chain_create(hmp, parent, NULL,
1271 HAMMER2_BREF_TYPE_DATA,
1273 ip->delta_dcount += nblksize;
1275 KKASSERT(chain->bref.type == HAMMER2_BREF_TYPE_DATA);
1276 hammer2_chain_resize(ip, chain, nradix,
1277 HAMMER2_MODIFY_OPTDATA);
1279 bp->b_bio2.bio_offset = chain->bref.data_off &
1281 hammer2_chain_unlock(hmp, chain);
1282 bp->b_flags |= B_CLUSTEROK;
1284 hammer2_chain_unlock(hmp, parent);
1290 hammer2_vop_nresolve(struct vop_nresolve_args *ap)
1292 hammer2_inode_t *dip;
1293 hammer2_inode_t *ip;
1294 hammer2_mount_t *hmp;
1295 hammer2_chain_t *parent;
1296 hammer2_chain_t *chain;
1297 struct namecache *ncp;
1298 const uint8_t *name;
1304 dip = VTOI(ap->a_dvp);
1306 ncp = ap->a_nch->ncp;
1307 name = ncp->nc_name;
1308 name_len = ncp->nc_nlen;
1309 lhc = hammer2_dirhash(name, name_len);
1312 * Note: In DragonFly the kernel handles '.' and '..'.
1314 parent = &dip->chain;
1315 hammer2_chain_lock(hmp, parent, HAMMER2_RESOLVE_ALWAYS);
1316 chain = hammer2_chain_lookup(hmp, &parent,
1317 lhc, lhc + HAMMER2_DIRHASH_LOMASK,
1320 if (chain->bref.type == HAMMER2_BREF_TYPE_INODE &&
1322 name_len == chain->data->ipdata.name_len &&
1323 bcmp(name, chain->data->ipdata.filename, name_len) == 0) {
1326 chain = hammer2_chain_next(hmp, &parent, chain,
1327 lhc, lhc + HAMMER2_DIRHASH_LOMASK,
1330 hammer2_chain_unlock(hmp, parent);
1333 * If the inode represents a forwarding entry for a hardlink we have
1334 * to locate the actual inode. The original ip is saved for possible
1335 * deconsolidation. (ip) will only be set to non-NULL when we have
1336 * to locate the real file via a hardlink. ip will be referenced but
1337 * not locked in that situation. chain is passed in locked and
1341 if (chain && chain->u.ip->ip_data.type == HAMMER2_OBJTYPE_HARDLINK) {
1342 kprintf("hammer2: need to find hardlink for %s\n",
1343 chain->u.ip->ip_data.filename);
1344 error = hammer2_hardlink_find(dip, &chain, &ip);
1347 hammer2_chain_unlock(hmp, chain);
1355 * Deconsolidate any hardlink whose nlinks == 1. Ignore errors.
1356 * If an error occurs chain and ip are left alone.
1358 if (ip && chain && chain->u.ip->ip_data.nlinks == 1 && !hmp->ronly) {
1359 kprintf("hammer2: need to unconsolidate hardlink for %s\n",
1360 chain->u.ip->ip_data.filename);
1361 hammer2_hardlink_deconsolidate(dip, &chain, &ip);
1365 * Acquire the related vnode
1368 vp = hammer2_igetv(chain->u.ip, &error);
1371 cache_setvp(ap->a_nch, vp);
1374 hammer2_chain_unlock(hmp, chain);
1378 cache_setvp(ap->a_nch, NULL);
1381 hammer2_inode_drop(ip);
1387 hammer2_vop_nlookupdotdot(struct vop_nlookupdotdot_args *ap)
1389 hammer2_inode_t *dip;
1390 hammer2_inode_t *ip;
1391 hammer2_mount_t *hmp;
1394 dip = VTOI(ap->a_dvp);
1397 if ((ip = dip->pip) == NULL) {
1401 hammer2_chain_lock(hmp, &ip->chain, HAMMER2_RESOLVE_ALWAYS);
1402 *ap->a_vpp = hammer2_igetv(ip, &error);
1403 hammer2_chain_unlock(hmp, &ip->chain);
1410 hammer2_vop_nmkdir(struct vop_nmkdir_args *ap)
1412 hammer2_mount_t *hmp;
1413 hammer2_inode_t *dip;
1414 hammer2_inode_t *nip;
1415 struct namecache *ncp;
1416 const uint8_t *name;
1420 dip = VTOI(ap->a_dvp);
1425 ncp = ap->a_nch->ncp;
1426 name = ncp->nc_name;
1427 name_len = ncp->nc_nlen;
1429 error = hammer2_inode_create(dip, ap->a_vap, ap->a_cred,
1430 name, name_len, &nip);
1432 KKASSERT(nip == NULL);
1436 *ap->a_vpp = hammer2_igetv(nip, &error);
1437 hammer2_chain_unlock(hmp, &nip->chain);
1440 cache_setunresolved(ap->a_nch);
1441 cache_setvp(ap->a_nch, *ap->a_vpp);
1447 * Return the largest contiguous physical disk range for the logical
1450 * (struct vnode *vp, off_t loffset, off_t *doffsetp, int *runp, int *runb)
1454 hammer2_vop_bmap(struct vop_bmap_args *ap)
1457 hammer2_mount_t *hmp;
1458 hammer2_inode_t *ip;
1459 hammer2_chain_t *parent;
1460 hammer2_chain_t *chain;
1464 hammer2_off_t pbytes;
1465 hammer2_off_t array[HAMMER2_BMAP_COUNT][2];
1470 * Only supported on regular files
1472 * Only supported for read operations (required for cluster_read).
1473 * The block allocation is delayed for write operations.
1476 if (vp->v_type != VREG)
1477 return (EOPNOTSUPP);
1478 if (ap->a_cmd != BUF_CMD_READ)
1479 return (EOPNOTSUPP);
1483 bzero(array, sizeof(array));
1486 * Calculate logical range
1488 KKASSERT((ap->a_loffset & HAMMER2_LBUFMASK64) == 0);
1489 lbeg = ap->a_loffset & HAMMER2_OFF_MASK_HI;
1490 lend = lbeg + HAMMER2_BMAP_COUNT * HAMMER2_PBUFSIZE - 1;
1493 loff = ap->a_loffset & HAMMER2_OFF_MASK_LO;
1495 parent = &ip->chain;
1496 hammer2_chain_lock(hmp, parent, HAMMER2_RESOLVE_ALWAYS);
1497 chain = hammer2_chain_lookup(hmp, &parent,
1499 HAMMER2_LOOKUP_NODATA);
1500 if (chain == NULL) {
1501 *ap->a_doffsetp = ZFOFFSET;
1502 hammer2_chain_unlock(hmp, parent);
1507 if (chain->bref.type == HAMMER2_BREF_TYPE_DATA) {
1508 ai = (chain->bref.key - lbeg) / HAMMER2_PBUFSIZE;
1509 KKASSERT(ai >= 0 && ai < HAMMER2_BMAP_COUNT);
1510 array[ai][0] = chain->bref.data_off & HAMMER2_OFF_MASK;
1511 array[ai][1] = chain->bytes;
1513 chain = hammer2_chain_next(hmp, &parent, chain,
1515 HAMMER2_LOOKUP_NODATA);
1517 hammer2_chain_unlock(hmp, parent);
1520 * If the requested loffset is not mappable physically we can't
1521 * bmap. The caller will have to access the file data via a
1524 if (array[0][0] == 0 || array[0][1] < loff + HAMMER2_LBUFSIZE) {
1525 *ap->a_doffsetp = NOOFFSET;
1530 * Calculate the physical disk offset range for array[0]
1532 pbeg = array[0][0] + loff;
1533 pbytes = array[0][1] - loff;
1535 for (ai = 1; ai < HAMMER2_BMAP_COUNT; ++ai) {
1536 if (array[ai][0] != pbeg + pbytes)
1538 pbytes += array[ai][1];
1541 *ap->a_doffsetp = pbeg;
1543 *ap->a_runp = pbytes;
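/*
 * Contiguity example, ignoring loff: if array[0] maps to device offset P
 * for 64KiB and array[1] begins exactly at P + 64KiB, the loop above
 * merges them into a single 128KiB run starting at pbeg; the first gap
 * terminates the run reported via *a_runp.
 */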
1549 hammer2_vop_open(struct vop_open_args *ap)
1551 return vop_stdopen(ap);
1555 * hammer2_vop_advlock { vp, id, op, fl, flags }
1559 hammer2_vop_advlock(struct vop_advlock_args *ap)
1561 hammer2_inode_t *ip = VTOI(ap->a_vp);
1563 return (lf_advlock(ap, &ip->advlock, ip->ip_data.size));
1569 hammer2_vop_close(struct vop_close_args *ap)
1571 return vop_stdclose(ap);
1575 * hammer2_vop_nlink { nch, dvp, vp, cred }
1577 * Create a hardlink from (vp) to {dvp, nch}.
1581 hammer2_vop_nlink(struct vop_nlink_args *ap)
1583 hammer2_inode_t *dip; /* target directory to create link in */
1584 hammer2_inode_t *ip; /* inode we are hardlinking to */
1585 hammer2_inode_t *oip;
1586 hammer2_mount_t *hmp;
1587 struct namecache *ncp;
1588 const uint8_t *name;
1592 dip = VTOI(ap->a_dvp);
1598 * (ip) is the inode we are linking to.
1600 ip = oip = VTOI(ap->a_vp);
1601 hammer2_inode_lock_nlinks(ip);
1603 ncp = ap->a_nch->ncp;
1604 name = ncp->nc_name;
1605 name_len = ncp->nc_nlen;
1608 * Create a consolidated real file for the hardlink, adjust (ip),
1609 * and move the nlinks lock if necessary. Tell the function to
1610 * bump the hardlink count on the consolidated file.
1612 error = hammer2_hardlink_consolidate(&ip, dip);
1617 * If the consolidation changed ip to a HARDLINK pointer we have
1618 * to adjust the vnode to point to the actual ip.
1620 * XXX this can race against concurrent vnode ops.
1623 hammer2_chain_ref(hmp, &ip->chain);
1624 hammer2_inode_lock_ex(ip);
1625 hammer2_inode_lock_ex(oip);
1627 ap->a_vp->v_data = ip;
1629 hammer2_inode_unlock_ex(oip);
1630 hammer2_inode_unlock_ex(ip);
1631 hammer2_chain_drop(hmp, &oip->chain);
1635 * The act of connecting the existing (ip) will properly bump the
1636 * nlinks count. However, vp will incorrectly point at the old
1637 * inode which has now been turned into an OBJTYPE_HARDLINK pointer.
1639 * We must reconnect the vp.
1641 hammer2_chain_lock(hmp, &ip->chain, HAMMER2_RESOLVE_ALWAYS);
1642 error = hammer2_inode_connect(dip, ip, name, name_len);
1643 hammer2_chain_unlock(hmp, &ip->chain);
1645 cache_setunresolved(ap->a_nch);
1646 cache_setvp(ap->a_nch, ap->a_vp);
1649 hammer2_inode_unlock_nlinks(ip);
1654 * hammer2_vop_ncreate { nch, dvp, vpp, cred, vap }
1656 * The operating system has already ensured that the directory entry
1657 * does not exist and done all appropriate namespace locking.
1661 hammer2_vop_ncreate(struct vop_ncreate_args *ap)
1663 hammer2_mount_t *hmp;
1664 hammer2_inode_t *dip;
1665 hammer2_inode_t *nip;
1666 struct namecache *ncp;
1667 const uint8_t *name;
1671 dip = VTOI(ap->a_dvp);
1676 ncp = ap->a_nch->ncp;
1677 name = ncp->nc_name;
1678 name_len = ncp->nc_nlen;
1680 error = hammer2_inode_create(dip, ap->a_vap, ap->a_cred,
1681 name, name_len, &nip);
1683 KKASSERT(nip == NULL);
1687 *ap->a_vpp = hammer2_igetv(nip, &error);
1688 hammer2_chain_unlock(hmp, &nip->chain);
1691 cache_setunresolved(ap->a_nch);
1692 cache_setvp(ap->a_nch, *ap->a_vpp);
1698 * hammer2_vop_nsymlink { nch, dvp, vpp, cred, vap, target }
1702 hammer2_vop_nsymlink(struct vop_nsymlink_args *ap)
1704 hammer2_mount_t *hmp;
1705 hammer2_inode_t *dip;
1706 hammer2_inode_t *nip;
1707 struct namecache *ncp;
1708 const uint8_t *name;
1712 dip = VTOI(ap->a_dvp);
1717 ncp = ap->a_nch->ncp;
1718 name = ncp->nc_name;
1719 name_len = ncp->nc_nlen;
1721 ap->a_vap->va_type = VLNK; /* enforce type */
1723 error = hammer2_inode_create(dip, ap->a_vap, ap->a_cred,
1724 name, name_len, &nip);
1726 KKASSERT(nip == NULL);
1730 *ap->a_vpp = hammer2_igetv(nip, &error);
1733 * Build the softlink (~like file data) and finalize the namecache.
1740 bytes = strlen(ap->a_target);
1742 if (bytes <= HAMMER2_EMBEDDED_BYTES) {
1743 KKASSERT(nip->ip_data.op_flags &
1744 HAMMER2_OPFLAG_DIRECTDATA);
1745 bcopy(ap->a_target, nip->ip_data.u.data, bytes);
1746 nip->ip_data.size = bytes;
1748 bzero(&auio, sizeof(auio));
1749 bzero(&aiov, sizeof(aiov));
1750 auio.uio_iov = &aiov;
1751 auio.uio_segflg = UIO_SYSSPACE;
1752 auio.uio_rw = UIO_WRITE;
1753 auio.uio_resid = bytes;
1754 auio.uio_iovcnt = 1;
1755 auio.uio_td = curthread;
1756 aiov.iov_base = ap->a_target;
1757 aiov.iov_len = bytes;
1758 error = hammer2_write_file(nip, &auio, IO_APPEND, 0);
1759 /* XXX handle error */
1763 hammer2_chain_unlock(hmp, &nip->chain);
1766 * Finalize namecache
1769 cache_setunresolved(ap->a_nch);
1770 cache_setvp(ap->a_nch, *ap->a_vpp);
1771 /* hammer2_knote(ap->a_dvp, NOTE_WRITE); */
1777 * hammer2_vop_nremove { nch, dvp, cred }
1781 hammer2_vop_nremove(struct vop_nremove_args *ap)
1783 hammer2_inode_t *dip;
1784 hammer2_mount_t *hmp;
1785 struct namecache *ncp;
1786 const uint8_t *name;
1790 dip = VTOI(ap->a_dvp);
1795 ncp = ap->a_nch->ncp;
1796 name = ncp->nc_name;
1797 name_len = ncp->nc_nlen;
1799 error = hammer2_unlink_file(dip, name, name_len, 0);
1802 cache_setunresolved(ap->a_nch);
1803 cache_setvp(ap->a_nch, NULL);
1809 * hammer2_vop_nrmdir { nch, dvp, cred }
1813 hammer2_vop_nrmdir(struct vop_nrmdir_args *ap)
1815 hammer2_inode_t *dip;
1816 hammer2_mount_t *hmp;
1817 struct namecache *ncp;
1818 const uint8_t *name;
1822 dip = VTOI(ap->a_dvp);
1827 ncp = ap->a_nch->ncp;
1828 name = ncp->nc_name;
1829 name_len = ncp->nc_nlen;
1831 error = hammer2_unlink_file(dip, name, name_len, 1);
1834 cache_setunresolved(ap->a_nch);
1835 cache_setvp(ap->a_nch, NULL);
1841 * hammer2_vop_nrename { fnch, tnch, fdvp, tdvp, cred }
1845 hammer2_vop_nrename(struct vop_nrename_args *ap)
1847 struct namecache *fncp;
1848 struct namecache *tncp;
1849 hammer2_inode_t *fdip;
1850 hammer2_inode_t *tdip;
1851 hammer2_inode_t *ip;
1852 hammer2_mount_t *hmp;
1853 const uint8_t *fname;
1855 const uint8_t *tname;
1859 if (ap->a_fdvp->v_mount != ap->a_tdvp->v_mount)
1861 if (ap->a_fdvp->v_mount != ap->a_fnch->ncp->nc_vp->v_mount)
1864 fdip = VTOI(ap->a_fdvp); /* source directory */
1865 tdip = VTOI(ap->a_tdvp); /* target directory */
1867 hmp = fdip->hmp; /* check read-only filesystem */
1871 fncp = ap->a_fnch->ncp; /* entry name in source */
1872 fname = fncp->nc_name;
1873 fname_len = fncp->nc_nlen;
1875 tncp = ap->a_tnch->ncp; /* entry name in target */
1876 tname = tncp->nc_name;
1877 tname_len = tncp->nc_nlen;
1880 * ip is the inode being renamed. If this is a hardlink then
1881 * ip represents the actual file and not the hardlink marker.
1883 ip = VTOI(fncp->nc_vp);
1886 * Keep a tight grip on the inode as removing it should disconnect
1887 * it and we don't want to destroy it.
1889 * NOTE: To avoid deadlocks we cannot lock (ip) while we are
1890 * unlinking elements from their directories. Locking
1891 * the nlinks field does not lock the whole inode.
1893 hammer2_inode_lock_nlinks(ip);
1896 * Remove target if it exists
1898 error = hammer2_unlink_file(tdip, tname, tname_len, -1);
1899 if (error && error != ENOENT)
1901 cache_setunresolved(ap->a_tnch);
1902 cache_setvp(ap->a_tnch, NULL);
1905 * Disconnect (fdip, fname) from the source directory. This will
1906 * disconnect (ip) if it represents a direct file. If (ip) represents
1907 * a hardlink the HARDLINK pointer object will be removed but the
1908 * hardlink will stay intact.
1910 * If (ip) is already hardlinked we have to resolve to a consolidated
1911 * file but we do not bump the nlinks count. (ip) must hold the nlinks
1912 * lock & ref for the operation. If the consolidated file has been
1913 * relocated (ip) will be adjusted and the related nlinks lock moved
1916 * If (ip) does not have multiple links we can just copy the physical
1917 * contents of the inode.
1919 if (ip->ip_data.nlinks > 1) {
1920 error = hammer2_hardlink_consolidate(&ip, tdip);
1924 error = hammer2_unlink_file(fdip, fname, fname_len, -1);
1929 * Reconnect ip to target directory.
1931 * WARNING: chain locks can lock buffer cache buffers; to avoid
1932 * deadlocks we want to unlock before issuing a cache_*()
1933 * op (that might have to lock a vnode).
1935 hammer2_chain_lock(hmp, &ip->chain, HAMMER2_RESOLVE_ALWAYS);
1936 error = hammer2_inode_connect(tdip, ip, tname, tname_len);
1937 hammer2_chain_unlock(hmp, &ip->chain);
1940 cache_rename(ap->a_fnch, ap->a_tnch);
1943 hammer2_inode_unlock_nlinks(ip);
1948 static int hammer2_strategy_read(struct vop_strategy_args *ap);
1949 static int hammer2_strategy_write(struct vop_strategy_args *ap);
1953 hammer2_vop_strategy(struct vop_strategy_args *ap)
1964 error = hammer2_strategy_read(ap);
1965 ++hammer2_iod_file_read;
1968 error = hammer2_strategy_write(ap);
1969 ++hammer2_iod_file_write;
1972 bp->b_error = error = EINVAL;
1973 bp->b_flags |= B_ERROR;
1983 hammer2_strategy_read(struct vop_strategy_args *ap)
1988 hammer2_mount_t *hmp;
1989 hammer2_inode_t *ip;
1990 hammer2_chain_t *parent;
1991 hammer2_chain_t *chain;
1992 hammer2_key_t lbase;
1996 ip = VTOI(ap->a_vp);
1998 nbio = push_bio(bio);
2000 lbase = bio->bio_offset;
2002 KKASSERT(((int)lbase & HAMMER2_PBUFMASK) == 0);
2005 * We must characterize the logical->physical translation if it
2006 * has not already been cached.
2008 * Physical data references < LBUFSIZE are never cached. This
2009 * includes both small-block allocations and inode-embedded data.
2011 if (nbio->bio_offset == NOOFFSET) {
2012 parent = &ip->chain;
2013 hammer2_chain_lock(hmp, parent, HAMMER2_RESOLVE_ALWAYS);
2015 chain = hammer2_chain_lookup(hmp, &parent, lbase, lbase,
2016 HAMMER2_LOOKUP_NODATA);
2017 if (chain == NULL) {
2021 nbio->bio_offset = ZFOFFSET;
2022 } else if (chain->bref.type == HAMMER2_BREF_TYPE_INODE) {
2024 * Data is embedded in the inode (do nothing)
2026 KKASSERT(chain == parent);
2027 hammer2_chain_unlock(hmp, chain);
2028 } else if (chain->bref.type == HAMMER2_BREF_TYPE_DATA) {
2032 KKASSERT(bp->b_bcount == chain->bytes);
2033 nbio->bio_offset = chain->bref.data_off &
2035 hammer2_chain_unlock(hmp, chain);
2036 KKASSERT(nbio->bio_offset != 0);
2038 panic("hammer2_strategy_read: unknown bref type");
2040 hammer2_chain_unlock(hmp, parent);
2043 if (hammer2_debug & 0x0020) {
2044 kprintf("read %016jx %016jx\n",
2045 bio->bio_offset, nbio->bio_offset);
2048 if (nbio->bio_offset == ZFOFFSET) {
2054 bzero(bp->b_data, bp->b_bcount);
2056 } else if (nbio->bio_offset != NOOFFSET) {
2058 * Forward direct IO to the device
2060 vn_strategy(hmp->devvp, nbio);
2063 * Data is embedded in inode.
2065 bcopy(chain->data->ipdata.u.data, bp->b_data,
2066 HAMMER2_EMBEDDED_BYTES);
2067 bzero(bp->b_data + HAMMER2_EMBEDDED_BYTES,
2068 bp->b_bcount - HAMMER2_EMBEDDED_BYTES);
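/*
 * In the embedded case the logical buffer is larger than the in-inode
 * data area, so only the first HAMMER2_EMBEDDED_BYTES are real data; the
 * bzero above guarantees the remainder of the buffer reads back as zeros.
 */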
2078 hammer2_strategy_write(struct vop_strategy_args *ap)
2083 hammer2_mount_t *hmp;
2084 hammer2_inode_t *ip;
2088 ip = VTOI(ap->a_vp);
2090 nbio = push_bio(bio);
2092 KKASSERT((bio->bio_offset & HAMMER2_PBUFMASK64) == 0);
2093 KKASSERT(nbio->bio_offset != 0 && nbio->bio_offset != ZFOFFSET);
2095 if (nbio->bio_offset == NOOFFSET) {
2097 * Must be embedded in the inode.
2099 KKASSERT(bio->bio_offset == 0);
2100 bcopy(bp->b_data, ip->ip_data.u.data, HAMMER2_EMBEDDED_BYTES);
2106 * This special flag does not follow the normal MODIFY rules
2107 * because we might deadlock on ip. Instead we depend on
2108 * VOP_FSYNC() to detect the case.
2110 atomic_set_int(&ip->chain.flags, HAMMER2_CHAIN_DIRTYEMBED);
2113 * Forward direct IO to the device
2115 vn_strategy(hmp->devvp, nbio);
2121 * hammer2_vop_ioctl { vp, command, data, fflag, cred }
2125 hammer2_vop_ioctl(struct vop_ioctl_args *ap)
2127 hammer2_mount_t *hmp;
2128 hammer2_inode_t *ip;
2131 ip = VTOI(ap->a_vp);
2134 error = hammer2_ioctl(ip, ap->a_command, (void *)ap->a_data,
2135 ap->a_fflag, ap->a_cred);
2141 hammer2_vop_mountctl(struct vop_mountctl_args *ap)
2144 hammer2_pfsmount_t *pmp;
2148 case (MOUNTCTL_SET_EXPORT):
2149 mp = ap->a_head.a_ops->head.vv_mount;
2152 if (ap->a_ctllen != sizeof(struct export_args))
2155 rc = vfs_export(mp, &pmp->export,
2156 (const struct export_args *)ap->a_ctl);
2159 rc = vop_stdmountctl(ap);
2165 struct vop_ops hammer2_vnode_vops = {
2166 .vop_default = vop_defaultop,
2167 .vop_fsync = hammer2_vop_fsync,
2168 .vop_getpages = vop_stdgetpages,
2169 .vop_putpages = vop_stdputpages,
2170 .vop_access = hammer2_vop_access,
2171 .vop_advlock = hammer2_vop_advlock,
2172 .vop_close = hammer2_vop_close,
2173 .vop_nlink = hammer2_vop_nlink,
2174 .vop_ncreate = hammer2_vop_ncreate,
2175 .vop_nsymlink = hammer2_vop_nsymlink,
2176 .vop_nremove = hammer2_vop_nremove,
2177 .vop_nrmdir = hammer2_vop_nrmdir,
2178 .vop_nrename = hammer2_vop_nrename,
2179 .vop_getattr = hammer2_vop_getattr,
2180 .vop_setattr = hammer2_vop_setattr,
2181 .vop_readdir = hammer2_vop_readdir,
2182 .vop_readlink = hammer2_vop_readlink,
2185 .vop_read = hammer2_vop_read,
2186 .vop_write = hammer2_vop_write,
2187 .vop_open = hammer2_vop_open,
2188 .vop_inactive = hammer2_vop_inactive,
2189 .vop_reclaim = hammer2_vop_reclaim,
2190 .vop_nresolve = hammer2_vop_nresolve,
2191 .vop_nlookupdotdot = hammer2_vop_nlookupdotdot,
2192 .vop_nmkdir = hammer2_vop_nmkdir,
2193 .vop_ioctl = hammer2_vop_ioctl,
2194 .vop_mountctl = hammer2_vop_mountctl,
2195 .vop_bmap = hammer2_vop_bmap,
2196 .vop_strategy = hammer2_vop_strategy,
2199 struct vop_ops hammer2_spec_vops = {
2203 struct vop_ops hammer2_fifo_vops = {