2 * Copyright (c) 2011-2012 The DragonFly Project. All rights reserved.
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@dragonflybsd.org>
6 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in
16 * the documentation and/or other materials provided with the
18 * 3. Neither the name of The DragonFly Project nor the names of its
19 * contributors may be used to endorse or promote products derived
20 * from this software without specific, prior written permission.
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
25 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
26 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
27 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
28 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
30 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
31 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
32 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/kernel.h>
38 #include <sys/fcntl.h>
41 #include <sys/namei.h>
42 #include <sys/mount.h>
43 #include <sys/vnode.h>
44 #include <sys/mountctl.h>
45 #include <sys/dirent.h>
50 #define ZFOFFSET (-2LL)
52 static int hammer2_read_file(hammer2_inode_t *ip, struct uio *uio,
54 static int hammer2_write_file(hammer2_inode_t *ip, struct uio *uio, int ioflag,
56 static hammer2_off_t hammer2_assign_physical(hammer2_inode_t *ip,
57 hammer2_key_t lbase, int lblksize, int *errorp);
58 static void hammer2_extend_file(hammer2_inode_t *ip, hammer2_key_t nsize);
59 static void hammer2_truncate_file(hammer2_inode_t *ip, hammer2_key_t nsize);
/*
 * NOTE(review): fragmentary excerpt -- interior lines of the original file
 * are missing from this view; only comments are added here.
 *
 * hammer2_vop_inactive -- invoked when the last reference to a vnode goes
 * away while the vnode is still cached.  Folds any strategy-code updates to
 * inode-embedded data into a normal chain modification so the regular flush
 * picks it up, then checks for deleted inodes to recycle.
 */
62 * Last reference to a vnode is going away but it is still cached.
66 hammer2_vop_inactive(struct vop_inactive_args *ap)
69 struct hammer2_inode *ip;
71 struct hammer2_mount *hmp;
86 * Detect updates to the embedded data which may be synchronized by
87 * the strategy code. Simply mark the inode modified so it gets
88 * picked up by our normal flush.
/*
 * Clear DIRTYEMBED and mark the chain modified under the exclusive
 * inode lock so the transfer is atomic with respect to the flusher.
 */
90 if (ip->chain.flags & HAMMER2_CHAIN_DIRTYEMBED) {
91 hammer2_inode_lock_ex(ip);
92 atomic_clear_int(&ip->chain.flags, HAMMER2_CHAIN_DIRTYEMBED);
93 hammer2_chain_modify(ip->hmp, &ip->chain, 0);
94 hammer2_inode_unlock_ex(ip);
98 * Check for deleted inodes and recycle immediately.
/* Recycle path for DELETED inodes follows (body not visible in excerpt). */
100 if (ip->chain.flags & HAMMER2_CHAIN_DELETED) {
/*
 * NOTE(review): fragmentary excerpt -- interior lines missing; comments only.
 *
 * hammer2_vop_reclaim -- detach the inode from the vnode so the vnode can be
 * reused.  Deleted inodes are tagged DESTROYED|SUBMODIFIED so the flusher can
 * detect and propagate the destruction, then the chain is flushed and the
 * vnode's reference on the chain is dropped.
 */
107 * Reclaim a vnode so that it can be reused; after the inode is
108 * disassociated, the filesystem must manage it alone.
112 hammer2_vop_reclaim(struct vop_reclaim_args *ap)
114 struct hammer2_inode *ip;
115 struct hammer2_mount *hmp;
125 * Set SUBMODIFIED so we can detect and propagate the DESTROYED
126 * bit in the flush code.
128 hammer2_inode_lock_ex(ip);
131 if (ip->chain.flags & HAMMER2_CHAIN_DELETED) {
132 atomic_set_int(&ip->chain.flags, HAMMER2_CHAIN_DESTROYED |
133 HAMMER2_CHAIN_SUBMODIFIED);
/* Flush while still locked, then release the vnode's chain reference. */
135 hammer2_chain_flush(hmp, &ip->chain);
136 hammer2_inode_unlock_ex(ip);
137 hammer2_chain_drop(hmp, &ip->chain); /* vp ref */
140 * XXX handle background sync when ip dirty, kernel will no longer
141 * notify us regarding this inode because there is no longer a
142 * vnode attached to it.
/*
 * NOTE(review): fragmentary excerpt -- interior lines missing; comments only.
 *
 * hammer2_vop_fsync -- synchronize a vnode's dirty buffers via vfsync(),
 * fold DIRTYEMBED state into a chain modification, and flush the chain
 * only for an actual fsync() syscall (see rationale below).
 */
150 hammer2_vop_fsync(struct vop_fsync_args *ap)
152 struct hammer2_inode *ip;
153 struct hammer2_mount *hmp;
160 hammer2_inode_lock_ex(ip);
161 vfsync(vp, ap->a_waitfor, 1, NULL, NULL);
164 * Detect updates to the embedded data which may be synchronized by
165 * the strategy code. Simply mark the inode modified so it gets
166 * picked up by our normal flush.
168 if (ip->chain.flags & HAMMER2_CHAIN_DIRTYEMBED) {
169 atomic_clear_int(&ip->chain.flags, HAMMER2_CHAIN_DIRTYEMBED);
170 hammer2_chain_modify(hmp, &ip->chain, 0);
174 * Calling chain_flush here creates a lot of duplicative
175 * COW operations due to non-optimal vnode ordering.
177 * Only do it for an actual fsync() syscall. The other forms
178 * which call this function will eventually call chain_flush
179 * on the volume root as a catch-all, which is far more optimal.
181 if (ap->a_flags & VOP_FSYNC_SYSCALL)
182 hammer2_chain_flush(hmp, &ip->chain);
183 hammer2_inode_unlock_ex(ip);
/*
 * hammer2_vop_access -- permission check.  Converts the on-media uuid
 * uid/gid to unix ids and defers the actual mode check to the kernel's
 * vop_helper_access().  (Excerpt is fragmentary; trailing arguments of the
 * helper call are not visible here.)
 */
189 hammer2_vop_access(struct vop_access_args *ap)
191 hammer2_inode_t *ip = VTOI(ap->a_vp);
196 uid = hammer2_to_unix_xid(&ip->ip_data.uid);
197 gid = hammer2_to_unix_xid(&ip->ip_data.gid);
199 error = vop_helper_access(ap, uid, gid, ip->ip_data.mode,
/*
 * hammer2_vop_getattr -- fill in a vattr from the media inode data under a
 * shared inode lock.  (Fragmentary excerpt; comments only.)
 */
206 hammer2_vop_getattr(struct vop_getattr_args *ap)
208 hammer2_pfsmount_t *pmp;
219 hammer2_inode_lock_sh(ip);
221 vap->va_fsid = pmp->mp->mnt_stat.f_fsid.val[0];
222 vap->va_fileid = ip->ip_data.inum;
223 vap->va_mode = ip->ip_data.mode;
224 vap->va_nlink = ip->ip_data.nlinks;
229 vap->va_size = ip->ip_data.size;
230 vap->va_blocksize = HAMMER2_PBUFSIZE;
231 vap->va_flags = ip->ip_data.uflags;
232 hammer2_time_to_timespec(ip->ip_data.ctime, &vap->va_ctime);
233 hammer2_time_to_timespec(ip->ip_data.mtime, &vap->va_mtime);
/*
 * NOTE(review): va_atime is filled from mtime -- presumably intentional
 * (no on-media atime tracked at this stage); confirm before "fixing".
 */
234 hammer2_time_to_timespec(ip->ip_data.mtime, &vap->va_atime);
236 vap->va_bytes = vap->va_size; /* XXX */
237 vap->va_type = hammer2_get_vtype(ip);
239 vap->va_uid_uuid = ip->ip_data.uid;
240 vap->va_gid_uuid = ip->ip_data.gid;
241 vap->va_vaflags = VA_UID_UUID_VALID | VA_GID_UUID_VALID |
244 hammer2_inode_unlock_sh(ip);
/*
 * hammer2_vop_setattr -- apply attribute changes (flags, size, ...) under an
 * exclusive inode lock.  Size changes dispatch to truncate/extend helpers.
 * (Fragmentary excerpt; uid/gid/mode/time handling is not visible here.)
 */
251 hammer2_vop_setattr(struct vop_setattr_args *ap)
253 hammer2_mount_t *hmp;
271 hammer2_inode_lock_ex(ip);
274 if (vap->va_flags != VNOVAL) {
277 flags = ip->ip_data.uflags;
278 error = vop_helper_setattr_flags(&flags, vap->va_flags,
279 hammer2_to_unix_xid(&ip->ip_data.uid),
/* Only dirty the chain when the flags actually change. */
282 if (ip->ip_data.uflags != flags) {
283 hammer2_chain_modify(hmp, &ip->chain, 0);
284 ip->ip_data.uflags = flags;
286 kflags |= NOTE_ATTRIB;
/* IMMUTABLE/APPEND restricts further attribute changes below. */
288 if (ip->ip_data.uflags & (IMMUTABLE | APPEND)) {
295 if (ip->ip_data.uflags & (IMMUTABLE | APPEND)) {
/*
 * Resize: shrink via truncate, grow via extend.  The inner equality
 * re-check presumably guards a race window; lines between are missing.
 */
304 if (vap->va_size != VNOVAL && ip->ip_data.size != vap->va_size) {
307 if (vap->va_size == ip->ip_data.size)
309 if (vap->va_size < ip->ip_data.size) {
310 hammer2_truncate_file(ip, vap->va_size);
312 hammer2_extend_file(ip, vap->va_size);
322 hammer2_inode_unlock_ex(ip);
/*
 * hammer2_vop_readdir -- emit directory entries.  Synthesizes '.' and '..'
 * as artificial entries, then scans the directory chain from the saved
 * offset, emitting one dirent per inode chain element and recording seek
 * cookies when requested.  (Fragmentary excerpt; comments only.)
 */
328 hammer2_vop_readdir(struct vop_readdir_args *ap)
330 hammer2_mount_t *hmp;
332 hammer2_inode_t *xip;
333 hammer2_chain_t *parent;
334 hammer2_chain_t *chain;
348 saveoff = uio->uio_offset;
351 * Setup cookies directory entry cookies if requested
/* Estimate one cookie per 16 bytes of uio space, plus one. */
353 if (ap->a_ncookies) {
354 ncookies = uio->uio_resid / 16 + 1;
357 cookies = kmalloc(ncookies * sizeof(off_t), M_TEMP, M_WAITOK);
365 * Handle artificial entries. To ensure that only positive 64 bit
366 * quantities are returned to userland we always strip off bit 63.
367 * The hash code is designed such that codes 0x0000-0x7FFF are not
368 * used, allowing us to use these codes for artificial entries.
370 * Entry 0 is used for '.' and entry 1 is used for '..'. Do not
371 * allow '..' to cross the mount point into (e.g.) the super-root.
/* Sentinel non-NULL chain so early 'goto done' paths see progress. */
374 chain = (void *)(intptr_t)-1; /* non-NULL for early goto done case */
377 r = vop_write_dirent(&error, uio,
379 HAMMER2_DIRHASH_USERMSK,
384 cookies[cookie_index] = saveoff;
387 if (cookie_index == ncookies)
/* '..' stops at the PFS root; do not walk into the super-root. */
391 if (ip->pip == NULL || ip == ip->pmp->iroot)
396 r = vop_write_dirent(&error, uio,
398 HAMMER2_DIRHASH_USERMSK,
403 cookies[cookie_index] = saveoff;
406 if (cookie_index == ncookies)
410 lkey = saveoff | HAMMER2_DIRHASH_VISIBLE;
413 error = hammer2_chain_lock(hmp, parent, HAMMER2_RESOLVE_ALWAYS);
415 hammer2_chain_unlock(hmp, parent);
418 chain = hammer2_chain_lookup(hmp, &parent, lkey, lkey, 0);
420 chain = hammer2_chain_lookup(hmp, &parent,
421 lkey, (hammer2_key_t)-1, 0);
424 if (chain->bref.type == HAMMER2_BREF_TYPE_INODE) {
425 dtype = hammer2_get_dtype(chain->u.ip);
426 saveoff = chain->bref.key & HAMMER2_DIRHASH_USERMSK;
427 r = vop_write_dirent(&error, uio,
428 chain->u.ip->ip_data.inum &
429 HAMMER2_DIRHASH_USERMSK,
430 dtype, chain->u.ip->ip_data.name_len,
431 chain->u.ip->ip_data.filename);
435 cookies[cookie_index] = saveoff;
438 /* XXX chain error */
439 kprintf("bad chain type readdir %d\n",
444 * Keys may not be returned in order so once we have a
445 * placemarker (chain) the scan must allow the full range
446 * or some entries will be missed.
448 chain = hammer2_chain_next(hmp, &parent, chain,
449 0, (hammer2_key_t)-1, 0);
451 saveoff = (chain->bref.key &
452 HAMMER2_DIRHASH_USERMSK) + 1;
454 saveoff = (hammer2_key_t)-1;
456 if (cookie_index == ncookies)
460 hammer2_chain_unlock(hmp, chain);
461 hammer2_chain_unlock(hmp, parent);
/* EOF when the scan ran off the end of the directory. */
464 *ap->a_eofflag = (chain == NULL);
465 uio->uio_offset = saveoff & ~HAMMER2_DIRHASH_VISIBLE;
/* On error with no entries emitted, discard any allocated cookies. */
466 if (error && cookie_index == 0) {
468 kfree(cookies, M_TEMP);
470 *ap->a_cookies = NULL;
474 *ap->a_ncookies = cookie_index;
475 *ap->a_cookies = cookies;
/*
 * hammer2_vop_readlink -- read a symlink target; symlink data is stored
 * like file data, so this simply delegates to hammer2_read_file().
 */
482 * hammer2_vop_readlink { vp, uio, cred }
486 hammer2_vop_readlink(struct vop_readlink_args *ap)
489 hammer2_mount_t *hmp;
494 if (vp->v_type != VLNK)
499 error = hammer2_read_file(ip, ap->a_uio, 0);
/*
 * hammer2_vop_read -- regular-file read.  Extracts the sequential-read
 * heuristic from the ioflag and delegates to hammer2_read_file().
 * NOTE(review): 'bigread' is computed here but its use is not visible in
 * this excerpt -- confirm against the full file.
 */
505 hammer2_vop_read(struct vop_read_args *ap)
508 hammer2_mount_t *hmp;
516 * Read operations supported on this vnode?
519 if (vp->v_type != VREG)
/* High 16 bits of a_ioflag carry the cluster read-ahead seqcount. */
530 seqcount = ap->a_ioflag >> 16;
531 bigread = (uio->uio_resid > 100 * 1024 * 1024);
533 error = hammer2_read_file(ip, uio, seqcount);
/*
 * hammer2_vop_write -- regular-file write.  Enforces RLIMIT_FSIZE, then
 * performs the write via hammer2_write_file() with the inode exclusively
 * locked and the chain pre-marked modified (the data may land in the
 * inode-embedded area).  (Fragmentary excerpt; comments only.)
 */
539 hammer2_vop_write(struct vop_write_args *ap)
543 hammer2_mount_t *hmp;
551 * Write operations supported on this vnode?
554 if (vp->v_type != VREG)
567 seqcount = ap->a_ioflag >> 16;
568 bigwrite = (uio->uio_resid > 100 * 1024 * 1024);
571 * Check resource limit
573 if (uio->uio_resid > 0 && (td = uio->uio_td) != NULL && td->td_proc &&
574 uio->uio_offset + uio->uio_resid >
575 td->td_proc->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
576 lwpsignal(td->td_proc, td->td_lwp, SIGXFSZ);
/* NOTE(review): duplicate of the assignment above (orig line 568). */
580 bigwrite = (uio->uio_resid > 100 * 1024 * 1024);
583 * ip must be locked if extending the file.
584 * ip must be locked to avoid racing a truncation.
586 * ip must be marked modified, particularly because the write
587 * might wind up being copied into the embedded data area.
589 hammer2_inode_lock_ex(ip);
590 hammer2_chain_modify(hmp, &ip->chain, 0);
591 error = hammer2_write_file(ip, uio, ap->a_ioflag, seqcount);
593 hammer2_inode_unlock_ex(ip);
/*
 * hammer2_read_file -- copy file/symlink data to the uio via cluster_read()
 * on the logical buffer cache, one logical block per iteration, clamping
 * each transfer to both the remaining uio and the file EOF.
 * (Fragmentary excerpt; buffer release after uiomove is not visible.)
 */
598 * Perform read operations on a file or symlink given an UNLOCKED
603 hammer2_read_file(hammer2_inode_t *ip, struct uio *uio, int seqcount)
613 while (uio->uio_resid > 0 && uio->uio_offset < ip->ip_data.size) {
620 lblksize = hammer2_calc_logical(ip, uio->uio_offset,
623 error = cluster_read(ip->vp, leof, lbase, lblksize,
624 uio->uio_resid, seqcount * BKVASIZE,
/* Offset of the request within the logical buffer. */
629 loff = (int)(uio->uio_offset - lbase);
631 if (n > uio->uio_resid)
/* Clamp the copy so we never read past EOF. */
633 if (n > ip->ip_data.size - uio->uio_offset)
634 n = (int)(ip->ip_data.size - uio->uio_offset);
635 bp->b_flags |= B_AGE;
636 uiomove((char *)bp->b_data + loff, n, uio);
/*
 * hammer2_write_file -- underlying write for files and symlink targets.
 * Caller holds (ip) locked.  Extends the file up-front for the full write
 * range, then loops one logical buffer at a time: instantiate/read the
 * buffer, assign physical storage, uiomove the data in, and dispatch the
 * buffer per the ioflag (sync/direct/async/clustered).  On error the file
 * is truncated back to the original EOF.  (Fragmentary excerpt.)
 */
643 * Called with a locked (ip) to do the underlying write to a file or
644 * to build the symlink target.
648 hammer2_write_file(hammer2_inode_t *ip, struct uio *uio,
649 int ioflag, int seqcount)
651 hammer2_key_t old_eof;
/* O_APPEND: position at current EOF before computing the extension. */
659 if (ioflag & IO_APPEND)
660 uio->uio_offset = ip->ip_data.size;
665 * Extend the file if necessary. If the write fails at some point
666 * we will truncate it back down to cover as much as we were able
669 * Doing this now makes it easier to calculate buffer sizes in
672 old_eof = ip->ip_data.size;
673 if (uio->uio_offset + uio->uio_resid > ip->ip_data.size) {
674 hammer2_extend_file(ip, uio->uio_offset + uio->uio_resid);
675 kflags |= NOTE_EXTEND;
681 while (uio->uio_resid > 0) {
690 * Don't allow the buffer build to blow out the buffer
/* bwillwrite() may block; drop the chain lock around it. */
693 if ((ioflag & IO_RECURSE) == 0) {
695 * XXX should try to leave this unlocked through
698 hammer2_chain_unlock(ip->hmp, &ip->chain);
699 bwillwrite(HAMMER2_PBUFSIZE);
700 hammer2_chain_lock(ip->hmp, &ip->chain,
701 HAMMER2_RESOLVE_ALWAYS);
704 /* XXX bigwrite & signal check test */
707 * This nominally tells us how much we can cluster and
708 * what the logical buffer size needs to be. Currently
709 * we don't try to cluster the write and just handle one
712 lblksize = hammer2_calc_logical(ip, uio->uio_offset,
714 loff = (int)(uio->uio_offset - lbase);
717 * Calculate bytes to copy this transfer and whether the
718 * copy completely covers the buffer or not.
722 if (n > uio->uio_resid) {
/* Write ending exactly at EOF still fully covers the buffer. */
724 if (uio->uio_offset + n == ip->ip_data.size)
726 } else if (loff == 0) {
733 if (uio->uio_segflg == UIO_NOCOPY) {
735 * Issuing a write with the same data backing the
736 * buffer. Instantiate the buffer to collect the
737 * backing vm pages, then read-in any missing bits.
739 * This case is used by vop_stdputpages().
741 bp = getblk(ip->vp, lbase, lblksize, GETBLK_BHEAVY, 0);
742 if ((bp->b_flags & B_CACHE) == 0) {
744 error = bread(ip->vp, lbase, lblksize, &bp);
746 } else if (trivial) {
748 * Even though we are entirely overwriting the buffer
749 * we may still have to zero it out to avoid a
750 * mmap/write visibility issue.
752 bp = getblk(ip->vp, lbase, lblksize, GETBLK_BHEAVY, 0);
753 if ((bp->b_flags & B_CACHE) == 0)
757 * Partial overwrite, read in any missing bits then
758 * replace the portion being written.
760 * (The strategy code will detect zero-fill physical
761 * blocks for this case).
763 error = bread(ip->vp, lbase, lblksize, &bp);
774 * We have to assign physical storage to the buffer we intend
775 * to dirty or write now to avoid deadlocks in the strategy
778 * This can return NOOFFSET for inode-embedded data. The
779 * strategy code will take care of it in that case.
781 bp->b_bio2.bio_offset =
782 hammer2_assign_physical(ip, lbase, lblksize, &error);
789 * Ok, copy the data in
/* uiomove can fault/block on user memory; drop the chain lock. */
791 hammer2_chain_unlock(ip->hmp, &ip->chain);
792 error = uiomove(bp->b_data + loff, n, uio);
793 hammer2_chain_lock(ip->hmp, &ip->chain, HAMMER2_RESOLVE_ALWAYS);
794 kflags |= NOTE_WRITE;
801 /* XXX update ino_data.mtime */
804 * Once we dirty a buffer any cached offset becomes invalid.
806 * NOTE: For cluster_write() always use the trailing block
807 * size, which is HAMMER2_PBUFSIZE. lblksize is the
808 * eof-straddling blocksize and is incorrect.
810 bp->b_flags |= B_AGE;
811 if (ioflag & IO_SYNC) {
813 } else if ((ioflag & IO_DIRECT) && loff + n == lblksize) {
814 bp->b_flags |= B_CLUSTEROK;
816 } else if (ioflag & IO_ASYNC) {
818 } else if (hammer2_cluster_enable) {
819 bp->b_flags |= B_CLUSTEROK;
820 cluster_write(bp, leof, HAMMER2_PBUFSIZE, seqcount);
822 bp->b_flags |= B_CLUSTEROK;
828 * Cleanup. If we extended the file EOF but failed to write through
829 * the entire write is a failure and we have to back-up.
831 if (error && ip->ip_data.size != old_eof)
832 hammer2_truncate_file(ip, old_eof);
833 /* hammer2_knote(ip->vp, kflags); */
/*
 * hammer2_assign_physical -- resolve/create the chain element backing a
 * logical block and return its physical device offset; returns NOOFFSET
 * for inode-embedded data (the strategy code bcopy()s in that case).
 * *errorp reports failures.  (Fragmentary excerpt; comments only.)
 */
838 * Assign physical storage to a logical block.
840 * NOOFFSET is returned if the data is inode-embedded. In this case the
841 * strategy code will simply bcopy() the data into the inode.
845 hammer2_assign_physical(hammer2_inode_t *ip, hammer2_key_t lbase,
846 int lblksize, int *errorp)
848 hammer2_mount_t *hmp;
849 hammer2_chain_t *parent;
850 hammer2_chain_t *chain;
857 * Locate the chain associated with lbase, return a locked chain.
858 * However, do not instantiate any data reference (which utilizes a
859 * device buffer) because we will be using direct IO via the
860 * logical buffer cache buffer.
863 hammer2_chain_lock(hmp, parent, HAMMER2_RESOLVE_ALWAYS);
865 chain = hammer2_chain_lookup(hmp, &parent,
867 HAMMER2_LOOKUP_NODATA);
871 * We found a hole, create a new chain entry.
873 * NOTE: DATA chains are created without device backing
874 * store (nor do we want any).
876 chain = hammer2_chain_create(hmp, parent, NULL,
877 lbase, HAMMER2_PBUFRADIX,
878 HAMMER2_BREF_TYPE_DATA,
/* Strip the radix bits to get the raw physical offset. */
880 pbase = chain->bref.data_off & ~HAMMER2_OFF_MASK_RADIX;
882 switch (chain->bref.type) {
883 case HAMMER2_BREF_TYPE_INODE:
885 * The data is embedded in the inode. The
886 * caller is responsible for marking the inode
887 * modified and copying the data to the embedded
892 case HAMMER2_BREF_TYPE_DATA:
/* Block size must match the logical size computed by the caller. */
893 if (chain->bytes != lblksize) {
894 panic("hammer2_assign_physical: "
895 "size mismatch %d/%d\n",
896 lblksize, chain->bytes);
/* Mark modified (COW may relocate); re-read the data offset after. */
898 hammer2_chain_modify(hmp, chain,
899 HAMMER2_MODIFY_OPTDATA);
900 pbase = chain->bref.data_off & ~HAMMER2_OFF_MASK_RADIX;
903 panic("hammer2_assign_physical: bad type");
911 hammer2_chain_unlock(hmp, chain);
912 hammer2_chain_unlock(hmp, parent);
/*
 * hammer2_truncate_file -- shrink a file to nsize with the inode locked.
 * Destroys logical buffers past the new EOF, reassigns/resizes the chain
 * element straddling the new EOF (keeping logical and device buffers from
 * conflicting), then deletes all physical data blocks past EOF.
 * (Fragmentary excerpt; comments only.)
 */
918 * Truncate the size of a file.
920 * This routine adjusts ip->ip_data.size smaller, destroying any related
921 * data beyond the new EOF and potentially resizing the block straddling
924 * The inode must be locked.
928 hammer2_truncate_file(hammer2_inode_t *ip, hammer2_key_t nsize)
930 hammer2_chain_t *parent;
931 hammer2_chain_t *chain;
932 hammer2_mount_t *hmp = ip->hmp;
941 hammer2_chain_modify(hmp, &ip->chain, 0);
945 * Destroy any logical buffer cache buffers beyond the file EOF.
947 * We call nvtruncbuf() w/ trivial == 1 to prevent it from messing
948 * around with the buffer straddling EOF, because we need to assign
949 * a new physical offset to it.
952 nvtruncbuf(ip->vp, nsize,
953 HAMMER2_PBUFSIZE, (int)nsize & HAMMER2_PBUFMASK,
958 * Setup for lookup/search
961 error = hammer2_chain_lock(hmp, parent, HAMMER2_RESOLVE_ALWAYS);
963 hammer2_chain_unlock(hmp, parent);
964 /* XXX error reporting */
969 * Handle the case where a chain/logical-buffer straddles the new
970 * EOF. We told nvtruncbuf() above not to mess with the logical
971 * buffer straddling the EOF because we need to reassign its storage
972 * and can't let the strategy code do it for us.
974 loff = (int)nsize & HAMMER2_PBUFMASK;
975 if (loff && ip->vp) {
/* Read old-size buffer, shrink the inode, recompute the new blocksize. */
976 oblksize = hammer2_calc_logical(ip, nsize, &lbase, &leof);
977 error = bread(ip->vp, lbase, oblksize, &bp);
978 KKASSERT(error == 0);
980 ip->ip_data.size = nsize;
981 nblksize = hammer2_calc_logical(ip, nsize, &lbase, &leof);
984 * Fixup the chain element. If we have a logical buffer in-hand
985 * we don't want to create a conflicting device buffer.
988 chain = hammer2_chain_lookup(hmp, &parent, lbase, lbase,
989 HAMMER2_LOOKUP_NODATA);
991 allocbuf(bp, nblksize);
992 switch(chain->bref.type) {
993 case HAMMER2_BREF_TYPE_DATA:
994 hammer2_chain_resize(hmp, chain,
995 hammer2_bytes_to_radix(nblksize),
996 HAMMER2_MODIFY_OPTDATA);
/* Zero the tail of the straddling buffer beyond the new EOF. */
997 bzero(bp->b_data + loff, nblksize - loff);
998 bp->b_bio2.bio_offset = chain->bref.data_off &
1001 case HAMMER2_BREF_TYPE_INODE:
1002 bzero(bp->b_data + loff, nblksize - loff);
/* Embedded data: no physical backing for the logical buffer. */
1003 bp->b_bio2.bio_offset = NOOFFSET;
1006 panic("hammer2_truncate_file: bad type");
1009 hammer2_chain_unlock(hmp, chain);
1010 bp->b_flags |= B_CLUSTEROK;
1014 * Destroy clean buffer w/ wrong buffer size. Retain
1017 bp->b_flags |= B_RELBUF;
1018 KKASSERT(bp->b_bio2.bio_offset == NOOFFSET);
1019 KKASSERT((bp->b_flags & B_DIRTY) == 0);
1024 * WARNING: This utilizes a device buffer for the data.
1026 * XXX case should not occur
1028 panic("hammer2_truncate_file: non-zero truncation, no-vnode");
1029 chain = hammer2_chain_lookup(hmp, &parent, lbase, lbase, 0);
1031 switch(chain->bref.type) {
1032 case HAMMER2_BREF_TYPE_DATA:
1033 hammer2_chain_resize(hmp, chain,
1034 hammer2_bytes_to_radix(nblksize),
1036 hammer2_chain_modify(hmp, chain, 0);
1037 bzero(chain->data->buf + loff, nblksize - loff);
1039 case HAMMER2_BREF_TYPE_INODE:
1040 if (loff < HAMMER2_EMBEDDED_BYTES) {
1041 hammer2_chain_modify(hmp, chain, 0);
1042 bzero(chain->data->ipdata.u.data + loff,
1043 HAMMER2_EMBEDDED_BYTES - loff);
1047 hammer2_chain_unlock(hmp, chain);
1052 * Clean up any fragmentory VM pages now that we have properly
1053 * resized the straddling buffer. These pages are no longer
1054 * part of the buffer.
1057 nvtruncbuf(ip->vp, nsize,
1058 nblksize, (int)nsize & (nblksize - 1),
1063 * Destroy any physical blocks after the new EOF point.
/* Round up to the first full block wholly past the new EOF. */
1065 lbase = (nsize + HAMMER2_PBUFMASK64) & ~HAMMER2_PBUFMASK64;
1066 chain = hammer2_chain_lookup(hmp, &parent,
1067 lbase, (hammer2_key_t)-1,
1068 HAMMER2_LOOKUP_NODATA);
1071 * Degenerate embedded data case, nothing to loop on.
1073 if (chain->bref.type == HAMMER2_BREF_TYPE_INODE) {
1074 hammer2_chain_unlock(hmp, chain);
1079 * Delete physical data blocks past the file EOF.
1081 if (chain->bref.type == HAMMER2_BREF_TYPE_DATA) {
1082 hammer2_chain_delete(hmp, parent, chain);
1084 /* XXX check parent if empty indirect block & delete */
1085 chain = hammer2_chain_next(hmp, &parent, chain,
1086 lbase, (hammer2_key_t)-1,
1087 HAMMER2_LOOKUP_NODATA);
1089 hammer2_chain_unlock(hmp, parent);
/*
 * hammer2_extend_file -- grow a file to nsize with the inode locked.
 * Handles the embedded direct-data fast path, resizes the logical buffer
 * straddling the old EOF, disables DIRECTDATA mode when the data no longer
 * fits, and resizes/creates the chain element at the old EOF.
 * (Fragmentary excerpt; comments only.)
 */
1093 * Extend the size of a file. The inode must be locked.
1095 * We may have to resize the block straddling the old EOF.
1099 hammer2_extend_file(hammer2_inode_t *ip, hammer2_key_t nsize)
1101 hammer2_mount_t *hmp;
1102 hammer2_chain_t *parent;
1103 hammer2_chain_t *chain;
1105 hammer2_key_t osize;
1106 hammer2_key_t obase;
1107 hammer2_key_t nbase;
1117 hammer2_chain_modify(hmp, &ip->chain, 0);
1120 * Nothing to do if the direct-data case is still intact
1122 if ((ip->ip_data.op_flags & HAMMER2_OPFLAG_DIRECTDATA) &&
1123 nsize <= HAMMER2_EMBEDDED_BYTES) {
1124 ip->ip_data.size = nsize;
1129 * Calculate the blocksize at the original EOF and resize the block
1130 * if necessary. Adjust the file size in the inode.
1132 osize = ip->ip_data.size;
1133 oblksize = hammer2_calc_logical(ip, osize, &obase, &leof);
1134 ip->ip_data.size = nsize;
/*
 * NOTE(review): nblksize is computed from osize, not nsize -- this
 * looks suspicious next to the preceding size assignment; confirm
 * against the full source whether nsize was intended here.
 */
1135 nblksize = hammer2_calc_logical(ip, osize, &nbase, &leof);
1138 * Do all required vnode operations, but do not mess with the
1139 * buffer straddling the original EOF.
1142 ip->ip_data.size, nsize,
1144 0, (int)nsize & HAMMER2_PBUFMASK,
1148 * Early return if we have no more work to do.
1150 if (obase == nbase && oblksize == nblksize &&
1151 (ip->ip_data.op_flags & HAMMER2_OPFLAG_DIRECTDATA) == 0) {
1156 * We have work to do, including possibly resizing the buffer
1157 * at the EOF point and turning off DIRECTDATA mode.
/* Only an EOF-straddling partial block requires a buffer resize. */
1160 if (((int)osize & HAMMER2_PBUFMASK)) {
1161 error = bread(ip->vp, obase, oblksize, &bp);
1162 KKASSERT(error == 0);
1164 if (obase != nbase) {
1165 allocbuf(bp, HAMMER2_PBUFSIZE);
1167 allocbuf(bp, nblksize);
1173 * Disable direct-data mode by loading up a buffer cache buffer
1174 * with the data, then converting the inode data area into the
1175 * inode indirect block array area.
1177 if (ip->ip_data.op_flags & HAMMER2_OPFLAG_DIRECTDATA) {
1178 ip->ip_data.op_flags &= ~HAMMER2_OPFLAG_DIRECTDATA;
1179 bzero(&ip->ip_data.u.blockset, sizeof(ip->ip_data.u.blockset));
1183 * Resize the chain element at the old EOF.
1185 if (((int)osize & HAMMER2_PBUFMASK)) {
1186 parent = &ip->chain;
1187 error = hammer2_chain_lock(hmp, parent, HAMMER2_RESOLVE_ALWAYS);
1188 KKASSERT(error == 0);
1190 nradix = hammer2_bytes_to_radix(nblksize);
1192 chain = hammer2_chain_lookup(hmp, &parent,
1194 HAMMER2_LOOKUP_NODATA);
/* Create a DATA chain for a hole, otherwise resize the existing one. */
1195 if (chain == NULL) {
1196 chain = hammer2_chain_create(hmp, parent, NULL,
1198 HAMMER2_BREF_TYPE_DATA,
1201 KKASSERT(chain->bref.type == HAMMER2_BREF_TYPE_DATA);
1202 hammer2_chain_resize(hmp, chain, nradix,
1203 HAMMER2_MODIFY_OPTDATA);
1205 bp->b_bio2.bio_offset = chain->bref.data_off &
1207 hammer2_chain_unlock(hmp, chain);
1208 bp->b_flags |= B_CLUSTEROK;
1210 hammer2_chain_unlock(hmp, parent);
/*
 * hammer2_vop_nresolve -- namecache lookup.  Hashes the name, scans the
 * directory's chain for an inode whose name matches exactly (hash
 * collisions are iterated), and binds the resulting vnode (or NULL on
 * miss) into the namecache.  (Fragmentary excerpt; comments only.)
 */
1216 hammer2_vop_nresolve(struct vop_nresolve_args *ap)
1218 hammer2_inode_t *dip;
1219 hammer2_mount_t *hmp;
1220 hammer2_chain_t *parent;
1221 hammer2_chain_t *chain;
1222 struct namecache *ncp;
1223 const uint8_t *name;
1229 dip = VTOI(ap->a_dvp);
1231 ncp = ap->a_nch->ncp;
1232 name = ncp->nc_name;
1233 name_len = ncp->nc_nlen;
1234 lhc = hammer2_dirhash(name, name_len);
1237 * Note: In DragonFly the kernel handles '.' and '..'.
1239 parent = &dip->chain;
1240 hammer2_chain_lock(hmp, parent, HAMMER2_RESOLVE_ALWAYS);
/* Scan the hash-collision range [lhc, lhc+LOMASK] for an exact match. */
1241 chain = hammer2_chain_lookup(hmp, &parent,
1242 lhc, lhc + HAMMER2_DIRHASH_LOMASK,
1245 if (chain->bref.type == HAMMER2_BREF_TYPE_INODE &&
1247 name_len == chain->data->ipdata.name_len &&
1248 bcmp(name, chain->data->ipdata.filename, name_len) == 0) {
1251 chain = hammer2_chain_next(hmp, &parent, chain,
1252 lhc, lhc + HAMMER2_DIRHASH_LOMASK,
1255 hammer2_chain_unlock(hmp, parent);
1258 vp = hammer2_igetv(chain->u.ip, &error);
1261 cache_setvp(ap->a_nch, vp);
1264 hammer2_chain_unlock(hmp, chain);
/* Negative-cache the miss. */
1267 cache_setvp(ap->a_nch, NULL);
/*
 * hammer2_vop_nlookupdotdot -- resolve '..' to the parent inode's vnode.
 * Fails (path not visible here) when the directory has no parent.
 */
1274 hammer2_vop_nlookupdotdot(struct vop_nlookupdotdot_args *ap)
1276 hammer2_inode_t *dip;
1277 hammer2_inode_t *ip;
1278 hammer2_mount_t *hmp;
1281 dip = VTOI(ap->a_dvp);
1284 if ((ip = dip->pip) == NULL) {
1288 hammer2_chain_lock(hmp, &ip->chain, HAMMER2_RESOLVE_ALWAYS);
1289 *ap->a_vpp = hammer2_igetv(ip, &error);
1290 hammer2_chain_unlock(hmp, &ip->chain);
/*
 * hammer2_vop_nmkdir -- create a directory via hammer2_inode_create(),
 * return its vnode, and finalize the namecache entry.
 * (Fragmentary excerpt; error-return path partially elided.)
 */
1297 hammer2_vop_nmkdir(struct vop_nmkdir_args *ap)
1299 hammer2_mount_t *hmp;
1300 hammer2_inode_t *dip;
1301 hammer2_inode_t *nip;
1302 struct namecache *ncp;
1303 const uint8_t *name;
1307 dip = VTOI(ap->a_dvp);
1312 ncp = ap->a_nch->ncp;
1313 name = ncp->nc_name;
1314 name_len = ncp->nc_nlen;
1316 error = hammer2_inode_create(dip, ap->a_vap, ap->a_cred,
1317 name, name_len, &nip);
/* On failure inode_create must not have returned an inode. */
1319 KKASSERT(nip == NULL);
1323 *ap->a_vpp = hammer2_igetv(nip, &error);
1324 hammer2_chain_unlock(hmp, &nip->chain);
1327 cache_setunresolved(ap->a_nch);
1328 cache_setvp(ap->a_nch, *ap->a_vpp);
/*
 * hammer2_vop_bmap -- map a logical offset to the largest contiguous
 * physical range, for cluster_read().  Collects up to HAMMER2_BMAP_COUNT
 * block entries into an array, then coalesces physically adjacent entries
 * starting at the requested offset.  Read-only: writes are mapped lazily.
 * (Fragmentary excerpt; comments only.)
 */
1334 * Return the largest contiguous physical disk range for the logical
1337 * (struct vnode *vp, off_t loffset, off_t *doffsetp, int *runp, int *runb)
1341 hammer2_vop_bmap(struct vop_bmap_args *ap)
1344 hammer2_mount_t *hmp;
1345 hammer2_inode_t *ip;
1346 hammer2_chain_t *parent;
1347 hammer2_chain_t *chain;
1351 hammer2_off_t pbytes;
1352 hammer2_off_t array[HAMMER2_BMAP_COUNT][2];
1357 * Only supported on regular files
1359 * Only supported for read operations (required for cluster_read).
1360 * The block allocation is delayed for write operations.
1363 if (vp->v_type != VREG)
1364 return (EOPNOTSUPP);
1365 if (ap->a_cmd != BUF_CMD_READ)
1366 return (EOPNOTSUPP);
1370 bzero(array, sizeof(array));
1373 * Calculate logical range
1375 KKASSERT((ap->a_loffset & HAMMER2_LBUFMASK64) == 0);
1376 lbeg = ap->a_loffset & HAMMER2_OFF_MASK_HI;
1377 lend = lbeg + HAMMER2_BMAP_COUNT * HAMMER2_PBUFSIZE - 1;
1380 loff = ap->a_loffset & HAMMER2_OFF_MASK_LO;
1382 parent = &ip->chain;
1383 hammer2_chain_lock(hmp, parent, HAMMER2_RESOLVE_ALWAYS);
1384 chain = hammer2_chain_lookup(hmp, &parent,
1386 HAMMER2_LOOKUP_NODATA);
/* No backing chain at all: report a zero-fill offset. */
1387 if (chain == NULL) {
1388 *ap->a_doffsetp = ZFOFFSET;
1389 hammer2_chain_unlock(hmp, parent);
/* Record each DATA chain's physical offset and length by slot index. */
1394 if (chain->bref.type == HAMMER2_BREF_TYPE_DATA) {
1395 ai = (chain->bref.key - lbeg) / HAMMER2_PBUFSIZE;
1396 KKASSERT(ai >= 0 && ai < HAMMER2_BMAP_COUNT);
1397 array[ai][0] = chain->bref.data_off & HAMMER2_OFF_MASK;
1398 array[ai][1] = chain->bytes;
1400 chain = hammer2_chain_next(hmp, &parent, chain,
1402 HAMMER2_LOOKUP_NODATA);
1404 hammer2_chain_unlock(hmp, parent);
1407 * If the requested loffset is not mappable physically we can't
1408 * bmap. The caller will have to access the file data via a
1411 if (array[0][0] == 0 || array[0][1] < loff + HAMMER2_LBUFSIZE) {
1412 *ap->a_doffsetp = NOOFFSET;
1417 * Calculate the physical disk offset range for array[0]
1419 pbeg = array[0][0] + loff;
1420 pbytes = array[0][1] - loff;
/* Extend the run while subsequent slots are physically contiguous. */
1422 for (ai = 1; ai < HAMMER2_BMAP_COUNT; ++ai) {
1423 if (array[ai][0] != pbeg + pbytes)
1425 pbytes += array[ai][1];
1428 *ap->a_doffsetp = pbeg;
1430 *ap->a_runp = pbytes;
/* hammer2_vop_open -- no fs-specific open handling; use the standard open. */
1436 hammer2_vop_open(struct vop_open_args *ap)
1438 return vop_stdopen(ap);
/*
 * hammer2_vop_advlock -- advisory (fcntl/flock) locking, delegated to the
 * kernel's lf_advlock() with per-inode lock state and the current file size.
 */
1442 * hammer2_vop_advlock { vp, id, op, fl, flags }
1446 hammer2_vop_advlock(struct vop_advlock_args *ap)
1448 hammer2_inode_t *ip = VTOI(ap->a_vp);
1450 return (lf_advlock(ap, &ip->advlock, ip->ip_data.size));
/* hammer2_vop_close -- no fs-specific close handling; use the standard close. */
1456 hammer2_vop_close(struct vop_close_args *ap)
1458 return vop_stdclose(ap);
/*
 * hammer2_vop_nlink -- create a hardlink to a_vp in directory a_dvp via
 * hammer2_hardlink_create(), then finalize the namecache entry.
 * (Fragmentary excerpt; error checks between lines are elided.)
 */
1462 * hammer2_vop_nlink { nch, dvp, vp, cred }
1464 * Create a hardlink to vp.
1468 hammer2_vop_nlink(struct vop_nlink_args *ap)
1470 hammer2_inode_t *dip;
1471 hammer2_inode_t *ip; /* inode we are hardlinking to */
1472 hammer2_mount_t *hmp;
1473 struct namecache *ncp;
1474 const uint8_t *name;
1478 dip = VTOI(ap->a_dvp);
1483 ip = VTOI(ap->a_vp);
1485 ncp = ap->a_nch->ncp;
1486 name = ncp->nc_name;
1487 name_len = ncp->nc_nlen;
1489 error = hammer2_hardlink_create(ip, dip, name, name_len);
1491 cache_setunresolved(ap->a_nch);
1492 cache_setvp(ap->a_nch, ap->a_vp);
/*
 * hammer2_vop_ncreate -- create a regular file; the kernel has already
 * verified the name does not exist and holds the namespace locks.
 * Mirrors nmkdir: inode_create, igetv, namecache finalize.
 */
1498 * hammer2_vop_ncreate { nch, dvp, vpp, cred, vap }
1500 * The operating system has already ensured that the directory entry
1501 * does not exist and done all appropriate namespace locking.
1505 hammer2_vop_ncreate(struct vop_ncreate_args *ap)
1507 hammer2_mount_t *hmp;
1508 hammer2_inode_t *dip;
1509 hammer2_inode_t *nip;
1510 struct namecache *ncp;
1511 const uint8_t *name;
1515 dip = VTOI(ap->a_dvp);
1520 ncp = ap->a_nch->ncp;
1521 name = ncp->nc_name;
1522 name_len = ncp->nc_nlen;
1524 error = hammer2_inode_create(dip, ap->a_vap, ap->a_cred,
1525 name, name_len, &nip);
/* On failure inode_create must not have returned an inode. */
1527 KKASSERT(nip == NULL);
1531 *ap->a_vpp = hammer2_igetv(nip, &error);
1532 hammer2_chain_unlock(hmp, &nip->chain);
1535 cache_setunresolved(ap->a_nch);
1536 cache_setvp(ap->a_nch, *ap->a_vpp);
/*
 * hammer2_vop_nsymlink -- create a symlink inode, then store the target
 * either in the inode-embedded data area (short targets) or through the
 * normal file-write path via a kernel-space uio (long targets).
 * (Fragmentary excerpt; comments only.)
 */
1542 * hammer2_vop_nsymlink { nch, dvp, vpp, cred, vap, target }
1546 hammer2_vop_nsymlink(struct vop_nsymlink_args *ap)
1548 hammer2_mount_t *hmp;
1549 hammer2_inode_t *dip;
1550 hammer2_inode_t *nip;
1551 struct namecache *ncp;
1552 const uint8_t *name;
1556 dip = VTOI(ap->a_dvp);
1561 ncp = ap->a_nch->ncp;
1562 name = ncp->nc_name;
1563 name_len = ncp->nc_nlen;
1565 ap->a_vap->va_type = VLNK; /* enforce type */
1567 error = hammer2_inode_create(dip, ap->a_vap, ap->a_cred,
1568 name, name_len, &nip);
1570 KKASSERT(nip == NULL);
1574 *ap->a_vpp = hammer2_igetv(nip, &error);
1577 * Build the softlink (~like file data) and finalize the namecache.
1584 bytes = strlen(ap->a_target);
/* Short target: copy straight into the embedded direct-data area. */
1586 if (bytes <= HAMMER2_EMBEDDED_BYTES) {
1587 KKASSERT(nip->ip_data.op_flags &
1588 HAMMER2_OPFLAG_DIRECTDATA);
1589 bcopy(ap->a_target, nip->ip_data.u.data, bytes);
1590 nip->ip_data.size = bytes;
/* Long target: write it through the regular file-write machinery. */
1592 bzero(&auio, sizeof(auio));
1593 bzero(&aiov, sizeof(aiov));
1594 auio.uio_iov = &aiov;
1595 auio.uio_segflg = UIO_SYSSPACE;
1596 auio.uio_rw = UIO_WRITE;
1597 auio.uio_resid = bytes;
1598 auio.uio_iovcnt = 1;
1599 auio.uio_td = curthread;
1600 aiov.iov_base = ap->a_target;
1601 aiov.iov_len = bytes;
1602 error = hammer2_write_file(nip, &auio, IO_APPEND, 0);
1603 /* XXX handle error */
1607 hammer2_chain_unlock(hmp, &nip->chain);
1610 * Finalize namecache
1613 cache_setunresolved(ap->a_nch);
1614 cache_setvp(ap->a_nch, *ap->a_vpp);
1615 /* hammer2_knote(ap->a_dvp, NOTE_WRITE); */
/*
 * hammer2_vop_nremove -- unlink a non-directory entry.
 * hammer2_unlink_file(..., 0, 1): isdir==0; the final argument presumably
 * adjusts the link count -- confirm against hammer2_unlink_file().
 */
1621 * hammer2_vop_nremove { nch, dvp, cred }
1625 hammer2_vop_nremove(struct vop_nremove_args *ap)
1627 hammer2_inode_t *dip;
1628 hammer2_mount_t *hmp;
1629 struct namecache *ncp;
1630 const uint8_t *name;
1634 dip = VTOI(ap->a_dvp);
1639 ncp = ap->a_nch->ncp;
1640 name = ncp->nc_name;
1641 name_len = ncp->nc_nlen;
1643 error = hammer2_unlink_file(dip, name, name_len, 0, 1);
/* On success, leave a negative namecache entry for the removed name. */
1646 cache_setunresolved(ap->a_nch);
1647 cache_setvp(ap->a_nch, NULL);
/*
 * hammer2_vop_nrmdir -- remove a directory entry.  Identical shape to
 * nremove but with isdir==1 in the hammer2_unlink_file() call.
 */
1653 * hammer2_vop_nrmdir { nch, dvp, cred }
1657 hammer2_vop_nrmdir(struct vop_nrmdir_args *ap)
1659 hammer2_inode_t *dip;
1660 hammer2_mount_t *hmp;
1661 struct namecache *ncp;
1662 const uint8_t *name;
1666 dip = VTOI(ap->a_dvp);
1671 ncp = ap->a_nch->ncp;
1672 name = ncp->nc_name;
1673 name_len = ncp->nc_nlen;
1675 error = hammer2_unlink_file(dip, name, name_len, 1, 1);
/* On success, leave a negative namecache entry for the removed name. */
1678 cache_setunresolved(ap->a_nch);
1679 cache_setvp(ap->a_nch, NULL);
/*
 * hammer2_vop_nrename { fnch, tnch, fdvp, tdvp, cred }
 *
 * Rename fnch (in directory fdvp) to tnch (in directory tdvp).
 * Sequence: (1) remove any pre-existing target entry, (2) disconnect
 * the source inode from its directory, (3) reconnect it under the
 * target name, (4) finalize the namecache with cache_rename().
 * Cross-mount renames are rejected up front.
 */
hammer2_vop_nrename(struct vop_nrename_args *ap)
	struct namecache *fncp;		/* source namecache entry */
	struct namecache *tncp;		/* target namecache entry */
	hammer2_inode_t *fdip;
	hammer2_inode_t *tdip;
	hammer2_inode_t *ip;
	hammer2_mount_t *hmp;
	const uint8_t *fname;
	const uint8_t *tname;

	/* Both directories and the source vnode must share one mount */
	if (ap->a_fdvp->v_mount != ap->a_tdvp->v_mount)

	if (ap->a_fdvp->v_mount != ap->a_fnch->ncp->nc_vp->v_mount)

	fdip = VTOI(ap->a_fdvp);	/* source directory */
	tdip = VTOI(ap->a_tdvp);	/* target directory */

	hmp = fdip->hmp;		/* check read-only filesystem */

	fncp = ap->a_fnch->ncp;		/* entry name in source */
	fname = fncp->nc_name;
	fname_len = fncp->nc_nlen;

	tncp = ap->a_tnch->ncp;		/* entry name in target */
	tname = tncp->nc_name;
	tname_len = tncp->nc_nlen;

	ip = VTOI(fncp->nc_vp);		/* inode being moved */

	/*
	 * Keep a tight grip on the inode as removing it should disconnect
	 * it and we don't want to destroy it.
	 *
	 * NOTE: To avoid deadlocks we cannot lock (ip) while we are
	 *	 unlinking elements from their directories.
	 */
	hammer2_chain_ref(hmp, &ip->chain);

	/*
	 * Remove target if it exists.  ENOENT is fine (no existing
	 * target); any other error aborts.
	 */
	error = hammer2_unlink_file(tdip, tname, tname_len, -1, 1);
	if (error && error != ENOENT)

	cache_setunresolved(ap->a_tnch);
	cache_setvp(ap->a_tnch, NULL);

	/*
	 * Disconnect ip from the source directory, do not adjust
	 * the link count.  Note that rename doesn't need to understand
	 * whether this is a hardlink or not, we can just rename the
	 * forwarding entry and don't even have to adjust the related
	 * hardlink's link count.
	 */
	error = hammer2_unlink_file(fdip, fname, fname_len, -1, 0);

	/* Sanity: the unlink above must have disconnected ip's chain */
	if (ip->chain.parent != NULL)
		panic("hammer2_vop_nrename(): rename source != ip!");

	/*
	 * Reconnect ip to target directory.
	 *
	 * WARNING: chain locks can lock buffer cache buffers, to avoid
	 *	    deadlocks we want to unlock before issuing a cache_*()
	 *	    op (that might have to lock a vnode).
	 */
	hammer2_chain_lock(hmp, &ip->chain, HAMMER2_RESOLVE_ALWAYS);
	error = hammer2_inode_connect(tdip, ip, tname, tname_len);
	hammer2_chain_unlock(hmp, &ip->chain);

	/* Finalize the namecache: atomically move fnch over tnch */
	cache_rename(ap->a_fnch, ap->a_tnch);

	hammer2_chain_drop(hmp, &ip->chain);	/* from ref up top */
/*
 * Buffer-cache I/O strategy support.  Forward declarations for the
 * read and write paths dispatched from hammer2_vop_strategy().
 */
static int hammer2_strategy_read(struct vop_strategy_args *ap);
static int hammer2_strategy_write(struct vop_strategy_args *ap);

/*
 * hammer2_vop_strategy -- dispatch a buffer I/O to the read or write
 * path (presumably switched on the buffer command -- the switch itself
 * is not visible here) and bump the matching statistics counter.
 * Unsupported commands fail the buffer with EINVAL/B_ERROR.
 */
hammer2_vop_strategy(struct vop_strategy_args *ap)
		/* Read path */
		error = hammer2_strategy_read(ap);
		++hammer2_iod_file_read;	/* read-op statistic */

		/* Write path */
		error = hammer2_strategy_write(ap);
		++hammer2_iod_file_write;	/* write-op statistic */

		/* Unsupported buffer command: fail the buffer */
		bp->b_error = error = EINVAL;
		bp->b_flags |= B_ERROR;
/*
 * Read strategy.  Resolve the buffer's logical offset to a device
 * offset, caching the translation in the pushed-down bio (nbio).
 * Three outcomes: a hole (zero-fill the buffer), a direct device
 * block (forward to devvp), or data embedded in the inode (copy out
 * of the chain's inode data).
 */
hammer2_strategy_read(struct vop_strategy_args *ap)
	hammer2_mount_t *hmp;
	hammer2_inode_t *ip;
	hammer2_chain_t *parent;
	hammer2_chain_t *chain;
	hammer2_key_t lbase;

	ip = VTOI(ap->a_vp);

	nbio = push_bio(bio);

	lbase = bio->bio_offset;

	/* Logical offsets must be physical-buffer aligned */
	KKASSERT(((int)lbase & HAMMER2_PBUFMASK) == 0);

	/*
	 * We must characterize the logical->physical translation if it
	 * has not already been cached.
	 *
	 * Physical data references < LBUFSIZE are never cached.  This
	 * includes both small-block allocations and inode-embedded data.
	 */
	if (nbio->bio_offset == NOOFFSET) {
		parent = &ip->chain;
		hammer2_chain_lock(hmp, parent, HAMMER2_RESOLVE_ALWAYS);

		chain = hammer2_chain_lookup(hmp, &parent, lbase, lbase,
					     HAMMER2_LOOKUP_NODATA);
		if (chain == NULL) {
			/* Hole in the file: mark for zero-fill below */
			nbio->bio_offset = ZFOFFSET;
		} else if (chain->bref.type == HAMMER2_BREF_TYPE_INODE) {
			/*
			 * Data is embedded in the inode (do nothing)
			 */
			KKASSERT(chain == parent);
			hammer2_chain_unlock(hmp, chain);
		} else if (chain->bref.type == HAMMER2_BREF_TYPE_DATA) {
			/* Direct data block: cache its physical offset */
			KKASSERT(bp->b_bcount == chain->bytes);
			nbio->bio_offset = chain->bref.data_off &
			hammer2_chain_unlock(hmp, chain);
			KKASSERT(nbio->bio_offset != 0);
			panic("hammer2_strategy_read: unknown bref type");
		hammer2_chain_unlock(hmp, parent);

	if (hammer2_debug & 0x0020) {
		kprintf("read %016jx %016jx\n",
			bio->bio_offset, nbio->bio_offset);

	if (nbio->bio_offset == ZFOFFSET) {
		/* Hole: satisfy the read with zeroes */
		bzero(bp->b_data, bp->b_bcount);
	} else if (nbio->bio_offset != NOOFFSET) {
		/*
		 * Forward direct IO to the device
		 */
		vn_strategy(hmp->devvp, nbio);
		/*
		 * Data is embedded in inode.
		 *
		 * NOTE(review): chain is dereferenced here after
		 * hammer2_chain_unlock() was called on it above --
		 * verify the lock/ref lifetime keeps chain->data valid.
		 */
		bcopy(chain->data->ipdata.u.data, bp->b_data,
		      HAMMER2_EMBEDDED_BYTES);
		bzero(bp->b_data + HAMMER2_EMBEDDED_BYTES,
		      bp->b_bcount - HAMMER2_EMBEDDED_BYTES);
/*
 * Write strategy.  Either the data lives in the inode's embedded area
 * (logical offset 0, no cached device offset) and is copied into
 * ip->ip_data, or the bio carries a cached device offset and is
 * forwarded directly to the device.
 */
hammer2_strategy_write(struct vop_strategy_args *ap)
	hammer2_mount_t *hmp;
	hammer2_inode_t *ip;

	ip = VTOI(ap->a_vp);

	nbio = push_bio(bio);

	/* Writes must be aligned and never target a zero-fill marker */
	KKASSERT((bio->bio_offset & HAMMER2_PBUFMASK64) == 0);
	KKASSERT(nbio->bio_offset != 0 && nbio->bio_offset != ZFOFFSET);

	if (nbio->bio_offset == NOOFFSET) {
		/*
		 * Must be embedded in the inode.
		 */
		KKASSERT(bio->bio_offset == 0);
		bcopy(bp->b_data, ip->ip_data.u.data, HAMMER2_EMBEDDED_BYTES);

		/*
		 * This special flag does not follow the normal MODIFY rules
		 * because we might deadlock on ip.  Instead we depend on
		 * VOP_FSYNC() to detect the case.
		 */
		atomic_set_int(&ip->chain.flags, HAMMER2_CHAIN_DIRTYEMBED);
		/*
		 * Forward direct IO to the device
		 */
		vn_strategy(hmp->devvp, nbio);
/*
 * hammer2_vop_ioctl { vp, command, data, fflag, cred }
 *
 * Thin wrapper: forwards the ioctl, with its command, argument data,
 * open flags and credentials, to hammer2_ioctl() on the vnode's inode.
 */
hammer2_vop_ioctl(struct vop_ioctl_args *ap)
	hammer2_mount_t *hmp;
	hammer2_inode_t *ip;

	ip = VTOI(ap->a_vp);

	error = hammer2_ioctl(ip, ap->a_command, (void *)ap->a_data,
			      ap->a_fflag, ap->a_cred);
/*
 * hammer2_vop_mountctl -- mount control operations.
 *
 * Handles MOUNTCTL_SET_EXPORT (NFS export configuration, validated for
 * size and handed to vfs_export()); all other operations fall through
 * to vop_stdmountctl().
 */
hammer2_vop_mountctl(struct vop_mountctl_args *ap)
	hammer2_pfsmount_t *pmp;

	case (MOUNTCTL_SET_EXPORT):
		mp = ap->a_head.a_ops->head.vv_mount;

		/* The control payload must be an export_args structure */
		if (ap->a_ctllen != sizeof(struct export_args))

		rc = vfs_export(mp, &pmp->export,
				(const struct export_args *)ap->a_ctl);

		/* Everything else: standard mountctl handling */
		rc = vop_stdmountctl(ap);
1994 struct vop_ops hammer2_vnode_vops = {
1995 .vop_default = vop_defaultop,
1996 .vop_fsync = hammer2_vop_fsync,
1997 .vop_getpages = vop_stdgetpages,
1998 .vop_putpages = vop_stdputpages,
1999 .vop_access = hammer2_vop_access,
2000 .vop_advlock = hammer2_vop_advlock,
2001 .vop_close = hammer2_vop_close,
2002 .vop_nlink = hammer2_vop_nlink,
2003 .vop_ncreate = hammer2_vop_ncreate,
2004 .vop_nsymlink = hammer2_vop_nsymlink,
2005 .vop_nremove = hammer2_vop_nremove,
2006 .vop_nrmdir = hammer2_vop_nrmdir,
2007 .vop_nrename = hammer2_vop_nrename,
2008 .vop_getattr = hammer2_vop_getattr,
2009 .vop_setattr = hammer2_vop_setattr,
2010 .vop_readdir = hammer2_vop_readdir,
2011 .vop_readlink = hammer2_vop_readlink,
2012 .vop_getpages = vop_stdgetpages,
2013 .vop_putpages = vop_stdputpages,
2014 .vop_read = hammer2_vop_read,
2015 .vop_write = hammer2_vop_write,
2016 .vop_open = hammer2_vop_open,
2017 .vop_inactive = hammer2_vop_inactive,
2018 .vop_reclaim = hammer2_vop_reclaim,
2019 .vop_nresolve = hammer2_vop_nresolve,
2020 .vop_nlookupdotdot = hammer2_vop_nlookupdotdot,
2021 .vop_nmkdir = hammer2_vop_nmkdir,
2022 .vop_ioctl = hammer2_vop_ioctl,
2023 .vop_mountctl = hammer2_vop_mountctl,
2024 .vop_bmap = hammer2_vop_bmap,
2025 .vop_strategy = hammer2_vop_strategy,
2028 struct vop_ops hammer2_spec_vops = {
2032 struct vop_ops hammer2_fifo_vops = {