 * Copyright (c) 2011-2014 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
 * by Daniel Flores (GSOC 2013 - mentored by Matthew Dillon, compression)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Kernel Filesystem interface
 *
 * NOTE! local ipdata pointers must be reloaded on any modifying operation
 *	 to the inode as its underlying chain may have changed.
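 *
 *	 For example, a sketch of the pattern used throughout this file
 *	 (the helpers named here are the ones used below, not new API):
 *
 *		ripdata = &hammer2_cluster_data(cluster)->ipdata;
 *		wipdata = hammer2_cluster_modify_ip(&trans, ip, cluster, 0);
 *		wipdata->ctime = ctime;
 *		(any cached ripdata is now stale and must be re-fetched)
 */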
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/fcntl.h>
#include <sys/namei.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/mountctl.h>
#include <sys/dirent.h>
#include <sys/objcache.h>
#include <sys/event.h>
#include <vfs/fifofs/fifo.h>

#include "hammer2_lz4.h"
#include "zlib/hammer2_zlib.h"

#define ZFOFFSET	(-2LL)
static int hammer2_read_file(hammer2_inode_t *ip, struct uio *uio,
				int seqcount);
static int hammer2_write_file(hammer2_inode_t *ip, struct uio *uio,
				int ioflag, int seqcount);
static void hammer2_extend_file(hammer2_inode_t *ip, hammer2_key_t nsize);
static void hammer2_truncate_file(hammer2_inode_t *ip, hammer2_key_t nsize);
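/*
 * Staging buffers for the compressed read and write paths; obtained and
 * returned via objcache_get()/objcache_put() in the callbacks below.
 */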
struct objcache *cache_buffer_read;
struct objcache *cache_buffer_write;
 * Callback used in read path in case that a block is compressed with LZ4.
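 *
 * On-media layout (inferred from the code below): the first sizeof(int)
 * bytes hold the compressed size, followed by the LZ4 payload itself.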
hammer2_decompress_LZ4_callback(const char *data, u_int bytes, struct bio *bio)
	char *compressed_buffer;

#if 0
	/*
	 * Disabled sketch: verify the check code before decompressing
	 * (return an error on mismatch).
	 */
	if (bio->bio_caller_info2.index &&
	    bio->bio_caller_info1.uvalue32 !=
	    crc32(bp->b_data, bp->b_bufsize))
		return error;
#endif

	KKASSERT(bp->b_bufsize <= HAMMER2_PBUFSIZE);
	compressed_size = *(const int *)data;
	KKASSERT(compressed_size <= bytes - sizeof(int));

	compressed_buffer = objcache_get(cache_buffer_read, M_INTWAIT);
	result = LZ4_decompress_safe(__DECONST(char *, &data[sizeof(int)]),
		kprintf("READ PATH: Error during decompression."
			(intmax_t)bio->bio_offset, bytes);
		/* make sure it isn't random garbage */
		bzero(compressed_buffer, bp->b_bufsize);
	KKASSERT(result <= bp->b_bufsize);
	bcopy(compressed_buffer, bp->b_data, bp->b_bufsize);
	if (result < bp->b_bufsize)
		bzero(bp->b_data + result, bp->b_bufsize - result);
	objcache_put(cache_buffer_read, compressed_buffer);
	bp->b_flags |= B_AGE;
 * Callback used in read path in case that a block is compressed with ZLIB.
 * It is almost identical to LZ4 callback, so in theory they can be unified,
 * but we didn't want to make changes in bio structure for that.
hammer2_decompress_ZLIB_callback(const char *data, u_int bytes, struct bio *bio)
	char *compressed_buffer;
	z_stream strm_decompress;

	KKASSERT(bp->b_bufsize <= HAMMER2_PBUFSIZE);
	strm_decompress.avail_in = 0;
	strm_decompress.next_in = Z_NULL;

	ret = inflateInit(&strm_decompress);
		kprintf("HAMMER2 ZLIB: Fatal error in inflateInit.\n");

	compressed_buffer = objcache_get(cache_buffer_read, M_INTWAIT);
	strm_decompress.next_in = __DECONST(char *, data);

	/* XXX supply proper size, subset of device bp */
	strm_decompress.avail_in = bytes;
	strm_decompress.next_out = compressed_buffer;
	strm_decompress.avail_out = bp->b_bufsize;

	ret = inflate(&strm_decompress, Z_FINISH);
	if (ret != Z_STREAM_END) {
		kprintf("HAMMER2 ZLIB: Fatal error during decompression.\n");
		bzero(compressed_buffer, bp->b_bufsize);
	bcopy(compressed_buffer, bp->b_data, bp->b_bufsize);
	result = bp->b_bufsize - strm_decompress.avail_out;
	if (result < bp->b_bufsize)
		bzero(bp->b_data + result, strm_decompress.avail_out);
	objcache_put(cache_buffer_read, compressed_buffer);
	ret = inflateEnd(&strm_decompress);

	bp->b_flags |= B_AGE;
hammer2_knote(struct vnode *vp, int flags)
		KNOTE(&vp->v_pollinfo.vpi_kqinfo.ki_note, flags);
 * Last reference to a vnode is going away but it is still cached.
hammer2_vop_inactive(struct vop_inactive_args *ap)
	hammer2_cluster_t *cparent;

	/*
	 * Detect updates to the embedded data which may be synchronized by
	 * the strategy code.  Simply mark the inode modified so it gets
	 * picked up by our normal flush.
	 */
	cparent = hammer2_inode_lock_ex(ip);

	/*
	 * Check for deleted inodes and recycle immediately.
	 */
	if (hammer2_cluster_unlinked(cparent)) {
		hammer2_inode_unlock_ex(ip, cparent);
	hammer2_inode_unlock_ex(ip, cparent);
 * Reclaim a vnode so that it can be reused; after the inode is
 * disassociated, the filesystem must manage it alone.
hammer2_vop_reclaim(struct vop_reclaim_args *ap)
	hammer2_cluster_t *cluster;
	hammer2_pfsmount_t *pmp;

	/*
	 * Inode must be locked for reclaim.
	 */
	cluster = hammer2_inode_lock_ex(ip);

	/*
	 * The final close of a deleted file or directory marks it for
	 * destruction.  The DELETED flag allows the flusher to shortcut
	 * any modified blocks still unflushed (that is, just ignore them).
	 *
	 * HAMMER2 usually does not try to optimize the freemap by returning
	 * deleted blocks to it as it does not usually know how many snapshots
	 * might be referencing portions of the file/dir.
	 */

	/*
	 * NOTE! We do not attempt to flush chains here, flushing is
	 *	 really fragile and could also deadlock.
	 */

	/*
	 * A reclaim can occur at any time so we cannot safely start a
	 * transaction to handle reclamation of unlinked files.  Instead,
	 * the ip is left with a reference and placed on a linked list and
	 * handled later (see hammer2_run_unlinkq()).
	 */
	if (hammer2_cluster_unlinked(cluster)) {
		hammer2_inode_unlink_t *ipul;

		ipul = kmalloc(sizeof(*ipul), pmp->minode, M_WAITOK | M_ZERO);

		spin_lock(&pmp->unlinkq_spin);
		TAILQ_INSERT_TAIL(&pmp->unlinkq, ipul, entry);
		spin_unlock(&pmp->unlinkq_spin);
		hammer2_inode_unlock_ex(ip, cluster);	/* unlock */
		/* retain ref from vp for ipul */
		hammer2_inode_unlock_ex(ip, cluster);	/* unlock */
		hammer2_inode_drop(ip);			/* vp ref */
	/* cluster no longer referenced */
	/* cluster = NULL; not needed */

	/*
	 * XXX handle background sync when ip dirty, kernel will no longer
	 * notify us regarding this inode because there is no longer a
	 * vnode attached to it.
	 */
hammer2_vop_fsync(struct vop_fsync_args *ap)
	hammer2_trans_t trans;
	hammer2_cluster_t *cluster;

#if 0
	/* XXX can't do this yet */
	hammer2_trans_init(&trans, ip->pmp, HAMMER2_TRANS_ISFLUSH);
	vfsync(vp, ap->a_waitfor, 1, NULL, NULL);
#endif
	hammer2_trans_init(&trans, ip->pmp, 0);
	vfsync(vp, ap->a_waitfor, 1, NULL, NULL);

	/*
	 * Calling chain_flush here creates a lot of duplicative
	 * COW operations due to non-optimal vnode ordering.
	 *
	 * Only do it for an actual fsync() syscall.  The other forms
	 * which call this function will eventually call chain_flush
	 * on the volume root as a catch-all, which is far more optimal.
	 */
	cluster = hammer2_inode_lock_ex(ip);
	atomic_clear_int(&ip->flags, HAMMER2_INODE_MODIFIED);
	if (ip->flags & (HAMMER2_INODE_RESIZED|HAMMER2_INODE_MTIME))
		hammer2_inode_fsync(&trans, ip, cluster);

	/*
	 * XXX creates discontinuity w/modify_tid
	 */
	if (ap->a_flags & VOP_FSYNC_SYSCALL) {
		hammer2_flush(&trans, cluster);
	hammer2_inode_unlock_ex(ip, cluster);
	hammer2_trans_done(&trans);
hammer2_vop_access(struct vop_access_args *ap)
	hammer2_inode_t *ip = VTOI(ap->a_vp);
	const hammer2_inode_data_t *ipdata;
	hammer2_cluster_t *cluster;

	cluster = hammer2_inode_lock_sh(ip);
	ipdata = &hammer2_cluster_data(cluster)->ipdata;
	uid = hammer2_to_unix_xid(&ipdata->uid);
	gid = hammer2_to_unix_xid(&ipdata->gid);
	error = vop_helper_access(ap, uid, gid, ipdata->mode, ipdata->uflags);
	hammer2_inode_unlock_sh(ip, cluster);
hammer2_vop_getattr(struct vop_getattr_args *ap)
	const hammer2_inode_data_t *ipdata;
	hammer2_cluster_t *cluster;
	hammer2_pfsmount_t *pmp;

	cluster = hammer2_inode_lock_sh(ip);
	ipdata = &hammer2_cluster_data(cluster)->ipdata;
	KKASSERT(hammer2_cluster_type(cluster) == HAMMER2_BREF_TYPE_INODE);

	vap->va_fsid = pmp->mp->mnt_stat.f_fsid.val[0];
	vap->va_fileid = ipdata->inum;
	vap->va_mode = ipdata->mode;
	vap->va_nlink = ipdata->nlinks;
	vap->va_uid = hammer2_to_unix_xid(&ipdata->uid);
	vap->va_gid = hammer2_to_unix_xid(&ipdata->gid);
	vap->va_size = ip->size;	/* protected by shared lock */
	vap->va_blocksize = HAMMER2_PBUFSIZE;
	vap->va_flags = ipdata->uflags;
	hammer2_time_to_timespec(ipdata->ctime, &vap->va_ctime);
	hammer2_time_to_timespec(ipdata->mtime, &vap->va_mtime);
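	/* atime is not maintained on-media (see setattr); report mtime */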
	hammer2_time_to_timespec(ipdata->mtime, &vap->va_atime);
	vap->va_bytes = vap->va_size;	/* XXX */
	vap->va_type = hammer2_get_vtype(ipdata);
	vap->va_uid_uuid = ipdata->uid;
	vap->va_gid_uuid = ipdata->gid;
	vap->va_vaflags = VA_UID_UUID_VALID | VA_GID_UUID_VALID |
	hammer2_inode_unlock_sh(ip, cluster);
hammer2_vop_setattr(struct vop_setattr_args *ap)
	const hammer2_inode_data_t *ripdata;
	hammer2_inode_data_t *wipdata;
	hammer2_cluster_t *cluster;
	hammer2_trans_t trans;

	hammer2_update_time(&ctime);

	hammer2_pfs_memory_wait(ip->pmp);
	hammer2_trans_init(&trans, ip->pmp, 0);
	cluster = hammer2_inode_lock_ex(ip);
	ripdata = &hammer2_cluster_data(cluster)->ipdata;

	if (vap->va_flags != VNOVAL) {
		flags = ripdata->uflags;
		error = vop_helper_setattr_flags(&flags, vap->va_flags,
				hammer2_to_unix_xid(&ripdata->uid),
		if (ripdata->uflags != flags) {
			wipdata = hammer2_cluster_modify_ip(&trans, ip,
			wipdata->uflags = flags;
			wipdata->ctime = ctime;
			kflags |= NOTE_ATTRIB;
		if (ripdata->uflags & (IMMUTABLE | APPEND)) {
	if (ripdata->uflags & (IMMUTABLE | APPEND)) {

	if (vap->va_uid != (uid_t)VNOVAL || vap->va_gid != (gid_t)VNOVAL) {
		mode_t cur_mode = ripdata->mode;
		uid_t cur_uid = hammer2_to_unix_xid(&ripdata->uid);
		gid_t cur_gid = hammer2_to_unix_xid(&ripdata->gid);

		error = vop_helper_chown(ap->a_vp, vap->va_uid, vap->va_gid,
					 &cur_uid, &cur_gid, &cur_mode);
			hammer2_guid_to_uuid(&uuid_uid, cur_uid);
			hammer2_guid_to_uuid(&uuid_gid, cur_gid);
			if (bcmp(&uuid_uid, &ripdata->uid, sizeof(uuid_uid)) ||
			    bcmp(&uuid_gid, &ripdata->gid, sizeof(uuid_gid)) ||
			    ripdata->mode != cur_mode
				wipdata = hammer2_cluster_modify_ip(&trans, ip,
				wipdata->uid = uuid_uid;
				wipdata->gid = uuid_gid;
				wipdata->mode = cur_mode;
				wipdata->ctime = ctime;
			kflags |= NOTE_ATTRIB;
	if (vap->va_size != VNOVAL && ip->size != vap->va_size) {
		if (vap->va_size == ip->size)
		hammer2_inode_unlock_ex(ip, cluster);
		if (vap->va_size < ip->size) {
			hammer2_truncate_file(ip, vap->va_size);
			hammer2_extend_file(ip, vap->va_size);
		cluster = hammer2_inode_lock_ex(ip);
		ripdata = &hammer2_cluster_data(cluster)->ipdata;

	/* atime not supported */
	if (vap->va_atime.tv_sec != VNOVAL) {
		wipdata = hammer2_cluster_modify_ip(&trans, ip, cluster, 0);
		wipdata->atime = hammer2_timespec_to_time(&vap->va_atime);
		kflags |= NOTE_ATTRIB;

	if (vap->va_mtime.tv_sec != VNOVAL) {
		wipdata = hammer2_cluster_modify_ip(&trans, ip, cluster, 0);
		wipdata->mtime = hammer2_timespec_to_time(&vap->va_mtime);
		kflags |= NOTE_ATTRIB;

	if (vap->va_mode != (mode_t)VNOVAL) {
		mode_t cur_mode = ripdata->mode;
		uid_t cur_uid = hammer2_to_unix_xid(&ripdata->uid);
		gid_t cur_gid = hammer2_to_unix_xid(&ripdata->gid);

		error = vop_helper_chmod(ap->a_vp, vap->va_mode, ap->a_cred,
					 cur_uid, cur_gid, &cur_mode);
		if (error == 0 && ripdata->mode != cur_mode) {
			wipdata = hammer2_cluster_modify_ip(&trans, ip,
			wipdata->mode = cur_mode;
			wipdata->ctime = ctime;
			kflags |= NOTE_ATTRIB;
	/*
	 * If a truncation occurred we must call inode_fsync() now in order
	 * to trim the related data chains, otherwise a later expansion can
	 * cause havoc.
	 */
	hammer2_cluster_modsync(cluster);
	hammer2_inode_fsync(&trans, ip, cluster);

	/*
	 * Cleanup.  If domtime is set an additional inode modification
	 * must be flagged.  All other modifications will have already
	 * set INODE_MODIFIED and called vsetisdirty().
	 */
		atomic_set_int(&ip->flags, HAMMER2_INODE_MODIFIED |
					   HAMMER2_INODE_MTIME);
	hammer2_cluster_modsync(cluster);
	hammer2_inode_unlock_ex(ip, cluster);
	hammer2_trans_done(&trans);
	hammer2_knote(ip->vp, kflags);
hammer2_vop_readdir(struct vop_readdir_args *ap)
	const hammer2_inode_data_t *ipdata;
	hammer2_inode_t *xip;
	hammer2_cluster_t *cparent;
	hammer2_cluster_t *cluster;
	hammer2_cluster_t *xcluster;
	hammer2_blockref_t bref;
	hammer2_key_t key_next;

	saveoff = uio->uio_offset;

	/*
	 * Set up directory entry cookies if requested
	 */
	if (ap->a_ncookies) {
		ncookies = uio->uio_resid / 16 + 1;
		cookies = kmalloc(ncookies * sizeof(off_t), M_TEMP, M_WAITOK);

	cparent = hammer2_inode_lock_sh(ip);
	ipdata = &hammer2_cluster_data(cparent)->ipdata;

	/*
	 * Handle artificial entries.  To ensure that only positive 64 bit
	 * quantities are returned to userland we always strip off bit 63.
	 * The hash code is designed such that codes 0x0000-0x7FFF are not
	 * used, allowing us to use these codes for artificial entries.
	 *
	 * Entry 0 is used for '.' and entry 1 is used for '..'.  Do not
	 * allow '..' to cross the mount point into (e.g.) the super-root.
	 */
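/*
 * Offset encoding used below (a summary of the code, not new behavior):
 *
 *	saveoff 0	->	"." (this directory)
 *	saveoff 1	->	".." (parent directory)
 *	saveoff >= 2	->	real entries, keyed by directory hash with
 *				HAMMER2_DIRHASH_VISIBLE set
 */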
	cluster = (void *)(intptr_t)-1;	/* non-NULL for early goto done case */

	inum = ipdata->inum & HAMMER2_DIRHASH_USERMSK;
	r = vop_write_dirent(&error, uio, inum, DT_DIR, 1, ".");
		cookies[cookie_index] = saveoff;
		if (cookie_index == ncookies)

	/*
	 * Be careful with lockorder when accessing ".."
	 *
	 * (ip is the current dir. xip is the parent dir).
	 */
	inum = ipdata->inum & HAMMER2_DIRHASH_USERMSK;
	while (ip->pip != NULL && ip != ip->pmp->iroot) {
		hammer2_inode_ref(xip);
		hammer2_inode_unlock_sh(ip, cparent);
		xcluster = hammer2_inode_lock_sh(xip);
		cparent = hammer2_inode_lock_sh(ip);
		hammer2_inode_drop(xip);
		ipdata = &hammer2_cluster_data(cparent)->ipdata;
		if (xip == ip->pip) {
			inum = hammer2_cluster_data(xcluster)->
				ipdata.inum & HAMMER2_DIRHASH_USERMSK;
			hammer2_inode_unlock_sh(xip, xcluster);
		hammer2_inode_unlock_sh(xip, xcluster);
	r = vop_write_dirent(&error, uio, inum, DT_DIR, 2, "..");
		cookies[cookie_index] = saveoff;
		if (cookie_index == ncookies)

	lkey = saveoff | HAMMER2_DIRHASH_VISIBLE;
	if (hammer2_debug & 0x0020)
		kprintf("readdir: lkey %016jx\n", lkey);

	/*
	 * parent is the inode cluster, already locked for us.  Don't
	 * double lock shared locks as this will screw up upgrades.
	 */
	cluster = hammer2_cluster_lookup(cparent, &key_next, lkey, lkey,
					 HAMMER2_LOOKUP_SHARED, &ddflag);
	if (cluster == NULL) {
		cluster = hammer2_cluster_lookup(cparent, &key_next,
						 lkey, (hammer2_key_t)-1,
						 HAMMER2_LOOKUP_SHARED, &ddflag);
		hammer2_cluster_bref(cluster, &bref);
		if (hammer2_debug & 0x0020)
			kprintf("readdir: p=%p chain=%p %016jx (next %016jx)\n",
				cparent->focus, cluster->focus,
		if (bref.type == HAMMER2_BREF_TYPE_INODE) {
			ipdata = &hammer2_cluster_data(cluster)->ipdata;
			dtype = hammer2_get_dtype(ipdata);
			saveoff = bref.key & HAMMER2_DIRHASH_USERMSK;
			r = vop_write_dirent(&error, uio,
					     HAMMER2_DIRHASH_USERMSK,
			cookies[cookie_index] = saveoff;
			/* XXX chain error */
			kprintf("bad chain type readdir %d\n", bref.type);

		/*
		 * Keys may not be returned in order so once we have a
		 * placemarker (cluster) the scan must allow the full range
		 * or some entries will be missed.
		 */
		cluster = hammer2_cluster_next(cparent, cluster, &key_next,
					       key_next, (hammer2_key_t)-1,
					       HAMMER2_LOOKUP_SHARED);
			hammer2_cluster_bref(cluster, &bref);
			saveoff = (bref.key & HAMMER2_DIRHASH_USERMSK) + 1;
			saveoff = (hammer2_key_t)-1;
		if (cookie_index == ncookies)
		hammer2_cluster_unlock(cluster);
	hammer2_inode_unlock_sh(ip, cparent);
	*ap->a_eofflag = (cluster == NULL);
	if (hammer2_debug & 0x0020)
		kprintf("readdir: done at %016jx\n", saveoff);
	uio->uio_offset = saveoff & ~HAMMER2_DIRHASH_VISIBLE;
	if (error && cookie_index == 0) {
			kfree(cookies, M_TEMP);
			*ap->a_cookies = NULL;
		*ap->a_ncookies = cookie_index;
		*ap->a_cookies = cookies;
/*
 * hammer2_vop_readlink { vp, uio, cred }
 */
hammer2_vop_readlink(struct vop_readlink_args *ap)
	if (vp->v_type != VLNK)
	error = hammer2_read_file(ip, ap->a_uio, 0);
hammer2_vop_read(struct vop_read_args *ap)
	/*
	 * Read operations supported on this vnode?
	 */
	if (vp->v_type != VREG)

	seqcount = ap->a_ioflag >> 16;
	bigread = (uio->uio_resid > 100 * 1024 * 1024);

	error = hammer2_read_file(ip, uio, seqcount);
hammer2_vop_write(struct vop_write_args *ap)
	hammer2_trans_t trans;

	/*
	 * Write operations supported on this vnode?
	 */
	if (vp->v_type != VREG)

	seqcount = ap->a_ioflag >> 16;
	bigwrite = (uio->uio_resid > 100 * 1024 * 1024);

	/*
	 * Check resource limit
	 */
	if (uio->uio_resid > 0 && (td = uio->uio_td) != NULL && td->td_proc &&
	    uio->uio_offset + uio->uio_resid >
	     td->td_proc->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
		lwpsignal(td->td_proc, td->td_lwp, SIGXFSZ);
	/*
	 * The transaction interlocks against flush initiations
	 * (note: but will run concurrently with the actual flush).
	 */
	hammer2_trans_init(&trans, ip->pmp, 0);
	error = hammer2_write_file(ip, uio, ap->a_ioflag, seqcount);
	hammer2_trans_done(&trans);
 * Perform read operations on a file or symlink given an UNLOCKED
 * inode and uio.
 *
 * The passed ip is not locked.
hammer2_read_file(hammer2_inode_t *ip, struct uio *uio, int seqcount)
	ccms_thread_lock(&ip->topo_cst, CCMS_STATE_EXCLUSIVE);
	ccms_thread_unlock(&ip->topo_cst);

	while (uio->uio_resid > 0 && uio->uio_offset < size) {
		lblksize = hammer2_calc_logical(ip, uio->uio_offset,
		error = cluster_read(ip->vp, leof, lbase, lblksize,
				     uio->uio_resid, seqcount * BKVASIZE,
		loff = (int)(uio->uio_offset - lbase);
		if (n > uio->uio_resid)
		if (n > size - uio->uio_offset)
			n = (int)(size - uio->uio_offset);
		bp->b_flags |= B_AGE;
		uiomove((char *)bp->b_data + loff, n, uio);
 * Write to the file represented by the inode via the logical buffer cache.
 * The inode may represent a regular file or a symlink.
 *
 * The inode must not be locked.
hammer2_write_file(hammer2_inode_t *ip,
		   struct uio *uio, int ioflag, int seqcount)
	hammer2_key_t old_eof;
	hammer2_key_t new_eof;

	ccms_thread_lock(&ip->topo_cst, CCMS_STATE_EXCLUSIVE);
	if (ioflag & IO_APPEND)
		uio->uio_offset = ip->size;
	ccms_thread_unlock(&ip->topo_cst);

	/*
	 * Extend the file if necessary.  If the write fails at some point
	 * we will truncate it back down to cover as much as we were able
	 * to write.
	 *
	 * Doing this now makes it easier to calculate buffer sizes in
	 * the loop.
	 */
	if (uio->uio_offset + uio->uio_resid > old_eof) {
		new_eof = uio->uio_offset + uio->uio_resid;
		hammer2_extend_file(ip, new_eof);
		kflags |= NOTE_EXTEND;

	while (uio->uio_resid > 0) {
		/*
		 * Don't allow the buffer build to blow out the buffer
		 * cache.
		 */
		if ((ioflag & IO_RECURSE) == 0)
			bwillwrite(HAMMER2_PBUFSIZE);

		/*
		 * This nominally tells us how much we can cluster and
		 * what the logical buffer size needs to be.  Currently
		 * we don't try to cluster the write and just handle one
		 * block at a time.
		 */
		lblksize = hammer2_calc_logical(ip, uio->uio_offset,
		loff = (int)(uio->uio_offset - lbase);
		KKASSERT(lblksize <= 65536);

		/*
		 * Calculate bytes to copy this transfer and whether the
		 * copy completely covers the buffer or not.
		 */
		n = lblksize - loff;
		if (n > uio->uio_resid) {
			if (loff == lbase && uio->uio_offset + n == new_eof)

		if (uio->uio_segflg == UIO_NOCOPY) {
			/*
			 * Issuing a write with the same data backing the
			 * buffer.  Instantiate the buffer to collect the
			 * backing vm pages, then read-in any missing bits.
			 *
			 * This case is used by vop_stdputpages().
			 */
			bp = getblk(ip->vp, lbase, lblksize, GETBLK_BHEAVY, 0);
			if ((bp->b_flags & B_CACHE) == 0) {
				error = bread(ip->vp, lbase, lblksize, &bp);
		} else if (trivial) {
			/*
			 * Even though we are entirely overwriting the buffer
			 * we may still have to zero it out to avoid a
			 * mmap/write visibility issue.
			 */
			bp = getblk(ip->vp, lbase, lblksize, GETBLK_BHEAVY, 0);
			if ((bp->b_flags & B_CACHE) == 0)
			/*
			 * Partial overwrite, read in any missing bits then
			 * replace the portion being written.
			 *
			 * (The strategy code will detect zero-fill physical
			 * blocks for this case).
			 */
			error = bread(ip->vp, lbase, lblksize, &bp);

		/*
		 * Ok, copy the data in
		 */
		error = uiomove(bp->b_data + loff, n, uio);
		kflags |= NOTE_WRITE;

		/*
		 * WARNING: Pageout daemon will issue UIO_NOCOPY writes
		 *	    with IO_SYNC or IO_ASYNC set.  These writes
		 *	    must be handled as the pageout daemon expects.
		 */
		if (ioflag & IO_SYNC) {
		} else if ((ioflag & IO_DIRECT) && endofblk) {
		} else if (ioflag & IO_ASYNC) {

	/*
	 * Cleanup.  If we extended the file EOF but failed to write through,
	 * the entire write is a failure and we have to back up.
	 */
	if (error && new_eof != old_eof) {
		hammer2_truncate_file(ip, old_eof);
	} else if (modified) {
		ccms_thread_lock(&ip->topo_cst, CCMS_STATE_EXCLUSIVE);
		hammer2_update_time(&ip->mtime);
		atomic_set_int(&ip->flags, HAMMER2_INODE_MTIME);
		ccms_thread_unlock(&ip->topo_cst);
	atomic_set_int(&ip->flags, HAMMER2_INODE_MODIFIED);
	hammer2_knote(ip->vp, kflags);
	vsetisdirty(ip->vp);
 * Truncate the size of a file.  The inode must not be locked.
 *
 * NOTE: Caller handles setting HAMMER2_INODE_MODIFIED
 */
hammer2_truncate_file(hammer2_inode_t *ip, hammer2_key_t nsize)
	hammer2_key_t lbase;

	nblksize = hammer2_calc_logical(ip, nsize, &lbase, NULL);
	nvtruncbuf(ip->vp, nsize,
		   nblksize, (int)nsize & (nblksize - 1),

	ccms_thread_lock(&ip->topo_cst, CCMS_STATE_EXCLUSIVE);
	atomic_set_int(&ip->flags, HAMMER2_INODE_RESIZED);
	ccms_thread_unlock(&ip->topo_cst);
 * Extend the size of a file.  The inode must not be locked.
 *
 * NOTE: Caller handles setting HAMMER2_INODE_MODIFIED
 */
hammer2_extend_file(hammer2_inode_t *ip, hammer2_key_t nsize)
	hammer2_key_t lbase;
	hammer2_key_t osize;

	ccms_thread_lock(&ip->topo_cst, CCMS_STATE_EXCLUSIVE);
	ccms_thread_unlock(&ip->topo_cst);

	oblksize = hammer2_calc_logical(ip, osize, &lbase, NULL);
	nblksize = hammer2_calc_logical(ip, nsize, &lbase, NULL);

	atomic_set_int(&ip->flags, HAMMER2_INODE_RESIZED);
hammer2_vop_nresolve(struct vop_nresolve_args *ap)
	hammer2_inode_t *ip;
	hammer2_inode_t *dip;
	hammer2_cluster_t *cparent;
	hammer2_cluster_t *cluster;
	const hammer2_inode_data_t *ipdata;
	hammer2_key_t key_next;
	struct namecache *ncp;
	const uint8_t *name;

	dip = VTOI(ap->a_dvp);
	ncp = ap->a_nch->ncp;
	name = ncp->nc_name;
	name_len = ncp->nc_nlen;
	lhc = hammer2_dirhash(name, name_len);

	/*
	 * Note: In DragonFly the kernel handles '.' and '..'.
	 */
	cparent = hammer2_inode_lock_sh(dip);
	cluster = hammer2_cluster_lookup(cparent, &key_next,
					 lhc, lhc + HAMMER2_DIRHASH_LOMASK,
					 HAMMER2_LOOKUP_SHARED, &ddflag);
		if (hammer2_cluster_type(cluster) == HAMMER2_BREF_TYPE_INODE) {
			ipdata = &hammer2_cluster_data(cluster)->ipdata;
			if (ipdata->name_len == name_len &&
			    bcmp(ipdata->filename, name, name_len) == 0) {
		cluster = hammer2_cluster_next(cparent, cluster, &key_next,
					       lhc + HAMMER2_DIRHASH_LOMASK,
					       HAMMER2_LOOKUP_SHARED);
	hammer2_inode_unlock_sh(dip, cparent);

	/*
	 * Resolve hardlink entries before acquiring the inode.
	 */
	ipdata = &hammer2_cluster_data(cluster)->ipdata;
	if (ipdata->type == HAMMER2_OBJTYPE_HARDLINK) {
		hammer2_tid_t inum = ipdata->inum;
		error = hammer2_hardlink_find(dip, cluster);
			kprintf("hammer2: unable to find hardlink "
				"0x%016jx\n", inum);
			hammer2_cluster_unlock(cluster);

	/*
	 * nresolve needs to resolve hardlinks, the original cluster is not
	 * sufficient.
	 */
	ip = hammer2_inode_get(dip->pmp, dip, cluster);
	ipdata = &hammer2_cluster_data(cluster)->ipdata;
	if (ipdata->type == HAMMER2_OBJTYPE_HARDLINK) {
		kprintf("nresolve: fixup hardlink\n");
		hammer2_inode_ref(ip);
		hammer2_inode_unlock_ex(ip, NULL);
		hammer2_cluster_unlock(cluster);
		cluster = hammer2_inode_lock_ex(ip);
		ipdata = &hammer2_cluster_data(cluster)->ipdata;
		kprintf("nresolve: fixup to type %02x\n", ipdata->type);
#if 0
	/*
	 * Deconsolidate any hardlink whose nlinks == 1.  Ignore errors.
	 * If an error occurs chain and ip are left alone.
	 *
	 * XXX upgrade shared lock?
	 * (Disabled: still references the pre-cluster chain/ochain API.)
	 */
	if (ochain && chain &&
	    chain->data->ipdata.nlinks == 1 && !dip->pmp->ronly) {
		kprintf("hammer2: need to unconsolidate hardlink for %s\n",
			chain->data->ipdata.filename);
		/* XXX retain shared lock on dip? (currently not held) */
		hammer2_trans_init(&trans, dip->pmp, 0);
		hammer2_hardlink_deconsolidate(&trans, dip, &chain, &ochain);
		hammer2_trans_done(&trans);
	}
#endif
	/*
	 * Acquire the related vnode
	 *
	 * NOTE: For error processing, only ENOENT resolves the namecache
	 *	 entry to NULL, otherwise we just return the error and
	 *	 leave the namecache unresolved.
	 *
	 * NOTE: multiple hammer2_inode structures can be aliased to the
	 *	 same chain element, for example for hardlinks.  This
	 *	 use case does not 'reattach' inode associations that
	 *	 might already exist, but always allocates a new one.
	 *
	 * WARNING: inode structure is locked exclusively via inode_get
	 *	    but chain was locked shared.  inode_unlock_ex()
	 *	    will handle it properly.
	 */
	vp = hammer2_igetv(ip, cluster, &error);
		cache_setvp(ap->a_nch, vp);
	} else if (error == ENOENT) {
		cache_setvp(ap->a_nch, NULL);
	hammer2_inode_unlock_ex(ip, cluster);

	/*
	 * The vp should not be released until after we've disposed
	 * of our locks, because it might cause vop_inactive() to
	 */
		cache_setvp(ap->a_nch, NULL);
	KASSERT(error || ap->a_nch->ncp->nc_vp != NULL,
		("resolve error %d/%p ap %p\n",
		 error, ap->a_nch->ncp->nc_vp, ap));
hammer2_vop_nlookupdotdot(struct vop_nlookupdotdot_args *ap)
	hammer2_inode_t *dip;
	hammer2_inode_t *ip;
	hammer2_cluster_t *cparent;

	dip = VTOI(ap->a_dvp);
	if ((ip = dip->pip) == NULL) {
	cparent = hammer2_inode_lock_ex(ip);
	*ap->a_vpp = hammer2_igetv(ip, cparent, &error);
	hammer2_inode_unlock_ex(ip, cparent);
hammer2_vop_nmkdir(struct vop_nmkdir_args *ap)
	hammer2_inode_t *dip;
	hammer2_inode_t *nip;
	hammer2_trans_t trans;
	hammer2_cluster_t *cluster;
	struct namecache *ncp;
	const uint8_t *name;

	dip = VTOI(ap->a_dvp);
	if (dip->pmp->ronly)

	ncp = ap->a_nch->ncp;
	name = ncp->nc_name;
	name_len = ncp->nc_nlen;

	hammer2_pfs_memory_wait(dip->pmp);
	hammer2_trans_init(&trans, dip->pmp, HAMMER2_TRANS_NEWINODE);
	nip = hammer2_inode_create(&trans, dip, ap->a_vap, ap->a_cred,
				   name, name_len, &cluster, &error);
	cluster->focus->inode_reason = 1;
		KKASSERT(nip == NULL);
		*ap->a_vpp = hammer2_igetv(nip, cluster, &error);
		hammer2_inode_unlock_ex(nip, cluster);
	hammer2_trans_done(&trans);

		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, *ap->a_vpp);
 * Return the largest contiguous physical disk range for the logical
 * request, in bytes.
 *
 * (struct vnode *vp, off_t loffset, off_t *doffsetp, int *runp, int *runb)
 *
 * Basically disabled, the logical buffer write thread has to deal with
 * buffers one-at-a-time.
 */
hammer2_vop_bmap(struct vop_bmap_args *ap)
	*ap->a_doffsetp = NOOFFSET;
	return (EOPNOTSUPP);
hammer2_vop_open(struct vop_open_args *ap)
	return vop_stdopen(ap);

/*
 * hammer2_vop_advlock { vp, id, op, fl, flags }
 */
hammer2_vop_advlock(struct vop_advlock_args *ap)
	hammer2_inode_t *ip = VTOI(ap->a_vp);
	const hammer2_inode_data_t *ipdata;
	hammer2_cluster_t *cparent;

	cparent = hammer2_inode_lock_sh(ip);
	ipdata = &hammer2_cluster_data(cparent)->ipdata;
	size = ipdata->size;
	hammer2_inode_unlock_sh(ip, cparent);
	return (lf_advlock(ap, &ip->advlock, size));

hammer2_vop_close(struct vop_close_args *ap)
	return vop_stdclose(ap);
/*
 * hammer2_vop_nlink { nch, dvp, vp, cred }
 *
 * Create a hardlink from (vp) to {dvp, nch}.
 */
hammer2_vop_nlink(struct vop_nlink_args *ap)
	hammer2_inode_t *fdip;	/* source directory of the file */
	hammer2_inode_t *tdip;	/* target directory to create link in */
	hammer2_inode_t *cdip;	/* common parent directory */
	hammer2_inode_t *ip;	/* inode we are hardlinking to */
	hammer2_cluster_t *cluster;
	hammer2_cluster_t *fdcluster;
	hammer2_cluster_t *tdcluster;
	hammer2_cluster_t *cdcluster;
	hammer2_trans_t trans;
	struct namecache *ncp;
	const uint8_t *name;

	tdip = VTOI(ap->a_dvp);
	if (tdip->pmp->ronly)

	ncp = ap->a_nch->ncp;
	name = ncp->nc_name;
	name_len = ncp->nc_nlen;

	/*
	 * ip represents the file being hardlinked.  The file could be a
	 * normal file or a hardlink target if it has already been hardlinked.
	 * If ip is a hardlinked target then ip->pip represents the location
	 * of the hardlinked target, NOT the location of the hardlink pointer.
	 *
	 * Bump nlinks and potentially also create or move the hardlink
	 * target in the parent directory common to (ip) and (tdip).  The
	 * consolidation code can modify ip->cluster and ip->pip.  The
	 * returned cluster is locked.
	 */
	ip = VTOI(ap->a_vp);
	hammer2_pfs_memory_wait(ip->pmp);
	hammer2_trans_init(&trans, ip->pmp, HAMMER2_TRANS_NEWINODE);

	/*
	 * The common parent directory must be locked first to avoid deadlocks.
	 * Also note that fdip and/or tdip might match cdip.
	 */
	cdip = hammer2_inode_common_parent(fdip, tdip);
	cdcluster = hammer2_inode_lock_ex(cdip);
	fdcluster = hammer2_inode_lock_ex(fdip);
	tdcluster = hammer2_inode_lock_ex(tdip);
	cluster = hammer2_inode_lock_ex(ip);
	error = hammer2_hardlink_consolidate(&trans, ip, &cluster,
					     cdip, cdcluster, 1);

	/*
	 * Create a directory entry connected to the specified cluster.
	 *
	 * WARNING! chain can get moved by the connect (indirectly due to
	 *	    potential indirect block creation).
	 */
	error = hammer2_inode_connect(&trans, &cluster, 1,
		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, ap->a_vp);
	hammer2_inode_unlock_ex(ip, cluster);
	hammer2_inode_unlock_ex(tdip, tdcluster);
	hammer2_inode_unlock_ex(fdip, fdcluster);
	hammer2_inode_unlock_ex(cdip, cdcluster);
	hammer2_trans_done(&trans);
/*
 * hammer2_vop_ncreate { nch, dvp, vpp, cred, vap }
 *
 * The operating system has already ensured that the directory entry
 * does not exist and done all appropriate namespace locking.
 */
hammer2_vop_ncreate(struct vop_ncreate_args *ap)
	hammer2_inode_t *dip;
	hammer2_inode_t *nip;
	hammer2_trans_t trans;
	hammer2_cluster_t *ncluster;
	struct namecache *ncp;
	const uint8_t *name;

	dip = VTOI(ap->a_dvp);
	if (dip->pmp->ronly)

	ncp = ap->a_nch->ncp;
	name = ncp->nc_name;
	name_len = ncp->nc_nlen;
	hammer2_pfs_memory_wait(dip->pmp);
	hammer2_trans_init(&trans, dip->pmp, HAMMER2_TRANS_NEWINODE);

	nip = hammer2_inode_create(&trans, dip, ap->a_vap, ap->a_cred,
				   name, name_len, &ncluster, &error);
	ncluster->focus->inode_reason = 2;
		KKASSERT(nip == NULL);
		*ap->a_vpp = hammer2_igetv(nip, ncluster, &error);
		hammer2_inode_unlock_ex(nip, ncluster);
	hammer2_trans_done(&trans);

		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, *ap->a_vpp);
/*
 * Make a device node (typically a fifo)
 */
hammer2_vop_nmknod(struct vop_nmknod_args *ap)
	hammer2_inode_t *dip;
	hammer2_inode_t *nip;
	hammer2_trans_t trans;
	hammer2_cluster_t *ncluster;
	struct namecache *ncp;
	const uint8_t *name;

	dip = VTOI(ap->a_dvp);
	if (dip->pmp->ronly)

	ncp = ap->a_nch->ncp;
	name = ncp->nc_name;
	name_len = ncp->nc_nlen;
	hammer2_pfs_memory_wait(dip->pmp);
	hammer2_trans_init(&trans, dip->pmp, HAMMER2_TRANS_NEWINODE);

	nip = hammer2_inode_create(&trans, dip, ap->a_vap, ap->a_cred,
				   name, name_len, &ncluster, &error);
	ncluster->focus->inode_reason = 3;
		KKASSERT(nip == NULL);
		*ap->a_vpp = hammer2_igetv(nip, ncluster, &error);
		hammer2_inode_unlock_ex(nip, ncluster);
	hammer2_trans_done(&trans);

		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, *ap->a_vpp);
/*
 * hammer2_vop_nsymlink { nch, dvp, vpp, cred, vap, target }
 */
hammer2_vop_nsymlink(struct vop_nsymlink_args *ap)
	hammer2_inode_t *dip;
	hammer2_inode_t *nip;
	hammer2_cluster_t *ncparent;
	hammer2_trans_t trans;
	struct namecache *ncp;
	const uint8_t *name;

	dip = VTOI(ap->a_dvp);
	if (dip->pmp->ronly)

	ncp = ap->a_nch->ncp;
	name = ncp->nc_name;
	name_len = ncp->nc_nlen;
	hammer2_pfs_memory_wait(dip->pmp);
	hammer2_trans_init(&trans, dip->pmp, HAMMER2_TRANS_NEWINODE);

	ap->a_vap->va_type = VLNK;	/* enforce type */

	nip = hammer2_inode_create(&trans, dip, ap->a_vap, ap->a_cred,
				   name, name_len, &ncparent, &error);
	ncparent->focus->inode_reason = 4;
		KKASSERT(nip == NULL);
		hammer2_trans_done(&trans);
	*ap->a_vpp = hammer2_igetv(nip, ncparent, &error);

	/*
	 * Build the softlink (~like file data) and finalize the namecache.
	 */
		hammer2_inode_data_t *nipdata;

		nipdata = &hammer2_cluster_wdata(ncparent)->ipdata;
		/* nipdata = &nip->chain->data->ipdata;XXX */
		bytes = strlen(ap->a_target);

		if (bytes <= HAMMER2_EMBEDDED_BYTES) {
			KKASSERT(nipdata->op_flags &
				 HAMMER2_OPFLAG_DIRECTDATA);
			bcopy(ap->a_target, nipdata->u.data, bytes);
			nipdata->size = bytes;
			hammer2_cluster_modsync(ncparent);
			hammer2_inode_unlock_ex(nip, ncparent);
			/* nipdata = NULL; not needed */
			hammer2_inode_unlock_ex(nip, ncparent);
			/* nipdata = NULL; not needed */
			bzero(&auio, sizeof(auio));
			bzero(&aiov, sizeof(aiov));
			auio.uio_iov = &aiov;
			auio.uio_segflg = UIO_SYSSPACE;
			auio.uio_rw = UIO_WRITE;
			auio.uio_resid = bytes;
			auio.uio_iovcnt = 1;
			auio.uio_td = curthread;
			aiov.iov_base = ap->a_target;
			aiov.iov_len = bytes;
			error = hammer2_write_file(nip, &auio, IO_APPEND, 0);
			/* XXX handle error */
		hammer2_inode_unlock_ex(nip, ncparent);
	hammer2_trans_done(&trans);

	/*
	 * Finalize namecache
	 */
		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, *ap->a_vpp);
		/* hammer2_knote(ap->a_dvp, NOTE_WRITE); */
/*
 * hammer2_vop_nremove { nch, dvp, cred }
 */
hammer2_vop_nremove(struct vop_nremove_args *ap)
	hammer2_inode_t *dip;
	hammer2_trans_t trans;
	struct namecache *ncp;
	const uint8_t *name;

	dip = VTOI(ap->a_dvp);
	if (dip->pmp->ronly)

	ncp = ap->a_nch->ncp;
	name = ncp->nc_name;
	name_len = ncp->nc_nlen;

	hammer2_pfs_memory_wait(dip->pmp);
	hammer2_trans_init(&trans, dip->pmp, 0);
	error = hammer2_unlink_file(&trans, dip, name, name_len,
				    0, NULL, ap->a_nch);
	hammer2_trans_done(&trans);
		cache_unlink(ap->a_nch);
/*
 * hammer2_vop_nrmdir { nch, dvp, cred }
 */
hammer2_vop_nrmdir(struct vop_nrmdir_args *ap)
	hammer2_inode_t *dip;
	hammer2_trans_t trans;
	struct namecache *ncp;
	const uint8_t *name;

	dip = VTOI(ap->a_dvp);
	if (dip->pmp->ronly)

	ncp = ap->a_nch->ncp;
	name = ncp->nc_name;
	name_len = ncp->nc_nlen;

	hammer2_pfs_memory_wait(dip->pmp);
	hammer2_trans_init(&trans, dip->pmp, 0);
	hammer2_run_unlinkq(&trans, dip->pmp);
	error = hammer2_unlink_file(&trans, dip, name, name_len,
				    1, NULL, ap->a_nch);
	hammer2_trans_done(&trans);
		cache_unlink(ap->a_nch);
/*
 * hammer2_vop_nrename { fnch, tnch, fdvp, tdvp, cred }
 */
hammer2_vop_nrename(struct vop_nrename_args *ap)
	struct namecache *fncp;
	struct namecache *tncp;
	hammer2_inode_t *cdip;
	hammer2_inode_t *fdip;
	hammer2_inode_t *tdip;
	hammer2_inode_t *ip;
	hammer2_cluster_t *cluster;
	hammer2_cluster_t *fdcluster;
	hammer2_cluster_t *tdcluster;
	hammer2_cluster_t *cdcluster;
	hammer2_trans_t trans;
	const uint8_t *fname;
	const uint8_t *tname;

	if (ap->a_fdvp->v_mount != ap->a_tdvp->v_mount)
	if (ap->a_fdvp->v_mount != ap->a_fnch->ncp->nc_vp->v_mount)

	fdip = VTOI(ap->a_fdvp);	/* source directory */
	tdip = VTOI(ap->a_tdvp);	/* target directory */

	if (fdip->pmp->ronly)

	fncp = ap->a_fnch->ncp;		/* entry name in source */
	fname = fncp->nc_name;
	fname_len = fncp->nc_nlen;

	tncp = ap->a_tnch->ncp;		/* entry name in target */
	tname = tncp->nc_name;
	tname_len = tncp->nc_nlen;

	hammer2_pfs_memory_wait(tdip->pmp);
	hammer2_trans_init(&trans, tdip->pmp, 0);

	/*
	 * ip is the inode being renamed.  If this is a hardlink then
	 * ip represents the actual file and not the hardlink marker.
	 */
	ip = VTOI(fncp->nc_vp);

	/*
	 * The common parent directory must be locked first to avoid deadlocks.
	 * Also note that fdip and/or tdip might match cdip.
	 *
	 * WARNING! fdip may not match ip->pip.  That is, if the source file
	 *	    is already a hardlink then what we are renaming is the
	 *	    hardlink pointer, not the hardlink itself.  The hardlink
	 *	    directory (ip->pip) will already be at a common parent
	 *
	 *	    Be sure to use ip->pip when finding the common parent
	 *	    against tdip or we might accidentally move the hardlink
	 *	    target into a subdirectory that makes it inaccessible to
	 */
	cdip = hammer2_inode_common_parent(ip->pip, tdip);
	cdcluster = hammer2_inode_lock_ex(cdip);
	fdcluster = hammer2_inode_lock_ex(fdip);
	tdcluster = hammer2_inode_lock_ex(tdip);

	/*
	 * Keep a tight grip on the inode so the temporary unlinking from
	 * the source location prior to linking to the target location
	 * does not cause the cluster to be destroyed.
	 *
	 * NOTE: To avoid deadlocks we cannot lock (ip) while we are
	 *	 unlinking elements from their directories.  Locking
	 *	 the nlinks field does not lock the whole inode.
	 */
	hammer2_inode_ref(ip);

	/*
	 * Remove target if it exists
	 */
	error = hammer2_unlink_file(&trans, tdip, tname, tname_len,
				    -1, NULL, ap->a_tnch);
	if (error && error != ENOENT)
	cache_setunresolved(ap->a_tnch);

	/*
	 * When renaming a hardlinked file we may have to re-consolidate
	 * the location of the hardlink target.  Also adjust nlinks by +1
	 * to counter-act the unlink below.
	 *
	 * If ip represents a regular file the consolidation code essentially
	 * does nothing other than return the same locked cluster that was
	 *
	 * The returned cluster will be locked.
	 *
	 * WARNING! We do not currently keep a local copy of ipdata; if we
	 *	    use one later, remember that it must be reloaded on any
	 *	    modification to the inode, including connects.
	 */
	cluster = hammer2_inode_lock_ex(ip);
	error = hammer2_hardlink_consolidate(&trans, ip, &cluster,
					     cdip, cdcluster, 1);

	/*
	 * Disconnect (fdip, fname) from the source directory.  This will
	 * disconnect (ip) if it represents a direct file.  If (ip) represents
	 * a hardlink the HARDLINK pointer object will be removed but the
	 * hardlink will stay intact.
	 *
	 * Always pass nch as NULL because we intend to reconnect the inode,
	 * so we don't want hammer2_unlink_file() to rename it to the hidden
	 * open-but-unlinked directory.
	 *
	 * The target cluster may be marked DELETED but will not be destroyed
	 * since we retain our hold on ip and cluster.
	 */
	error = hammer2_unlink_file(&trans, fdip, fname, fname_len,
	KKASSERT(error != EAGAIN);

	/*
	 * Reconnect ip to target directory using cluster.  Chains cannot
	 * actually be moved, so this will duplicate the cluster in the new
	 * spot and assign it to the ip, replacing the old cluster.
	 *
	 * WARNING: Because recursive locks are allowed and we unlinked the
	 *	    file that we have a cluster-in-hand for just above, the
	 *	    cluster might have been delete-duplicated.  We must
	 *	    refactor the cluster.
	 *
	 * WARNING: Chain locks can lock buffer cache buffers, to avoid
	 *	    deadlocks we want to unlock before issuing a cache_*()
	 *	    op (that might have to lock a vnode).
	 */
	hammer2_cluster_refactor(cluster);
	error = hammer2_inode_connect(&trans, &cluster, hlink,
				      tname, tname_len, 0);
	cluster->focus->inode_reason = 5;
		KKASSERT(cluster != NULL);
		hammer2_inode_repoint(ip, (hlink ? ip->pip : tdip), cluster);

	hammer2_inode_unlock_ex(ip, cluster);
	hammer2_inode_unlock_ex(tdip, tdcluster);
	hammer2_inode_unlock_ex(fdip, fdcluster);
	hammer2_inode_unlock_ex(cdip, cdcluster);
	hammer2_inode_drop(ip);
	hammer2_trans_done(&trans);

	/*
	 * Issue the namecache update after unlocking all the internal
	 * hammer structures, otherwise we might deadlock.
	 */
		cache_rename(ap->a_fnch, ap->a_tnch);
/*
 * WARNING: The strategy code cannot safely use hammer2 transactions
 *	    as this can deadlock against vfs_sync's vfsync() call
 *	    if multiple flushes are queued.
 */
static int hammer2_strategy_read(struct vop_strategy_args *ap);
static int hammer2_strategy_write(struct vop_strategy_args *ap);
static void hammer2_strategy_read_callback(hammer2_io_t *dio,
				hammer2_cluster_t *cluster,
				hammer2_chain_t *chain,
				void *arg_p, off_t arg_o);
hammer2_vop_strategy(struct vop_strategy_args *ap)
		error = hammer2_strategy_read(ap);
		++hammer2_iod_file_read;
		error = hammer2_strategy_write(ap);
		++hammer2_iod_file_write;
		bp->b_error = error = EINVAL;
		bp->b_flags |= B_ERROR;
hammer2_strategy_read(struct vop_strategy_args *ap)
	hammer2_inode_t *ip;
	hammer2_cluster_t *cparent;
	hammer2_cluster_t *cluster;
	hammer2_key_t key_dummy;
	hammer2_key_t lbase;

	ip = VTOI(ap->a_vp);
	nbio = push_bio(bio);

	lbase = bio->bio_offset;
	KKASSERT(((int)lbase & HAMMER2_PBUFMASK) == 0);

	cparent = hammer2_inode_lock_sh(ip);
	cluster = hammer2_cluster_lookup(cparent, &key_dummy,
					 HAMMER2_LOOKUP_NODATA |
					 HAMMER2_LOOKUP_SHARED,
	hammer2_inode_unlock_sh(ip, cparent);

	/*
	 * Data is zero-fill if no cluster could be found
	 * (XXX or EIO on a cluster failure).
	 */
	if (cluster == NULL) {
		bzero(bp->b_data, bp->b_bcount);

	/*
	 * Cluster elements must be type INODE or type DATA, but the
	 * compression mode (or not) for DATA chains can be different for
	 * each chain.  This will be handled by the callback.
	 */
	btype = hammer2_cluster_type(cluster);
	if (btype != HAMMER2_BREF_TYPE_INODE &&
	    btype != HAMMER2_BREF_TYPE_DATA) {
		panic("READ PATH: hammer2_strategy_read: unknown bref type");
	hammer2_chain_load_async(cluster, hammer2_strategy_read_callback, nbio);
 * Read callback for the block.  Handles both compressed and uncompressed
 * data by dispatching on the compression method encoded in the blockref.
hammer2_strategy_read_callback(hammer2_io_t *dio,
			       hammer2_cluster_t *cluster,
			       hammer2_chain_t *chain,
			       void *arg_p, off_t arg_o)
	struct bio *bio = arg_p;
	struct buf *bp = bio->bio_buf;

	/*
	 * Extract data and handle iteration on I/O failure.  arg_o is the
	 * cluster index for iteration.
	 */
	if (dio->bp->b_flags & B_ERROR) {
		if (i >= cluster->nchains) {
			bp->b_flags |= B_ERROR;
			bp->b_error = dio->bp->b_error;
			hammer2_cluster_unlock(cluster);
		chain = cluster->array[i];
		kprintf("hammer2: IO CHAIN-%d %p\n", i, chain);
		hammer2_adjreadcounter(&chain->bref,
		hammer2_io_breadcb(chain->hmp,
				   chain->bref.data_off,
				   hammer2_strategy_read_callback,

		data = hammer2_io_data(dio, chain->bref.data_off);
		data = (void *)chain->data;

	if (chain->bref.type == HAMMER2_BREF_TYPE_INODE) {
		/*
		 * Data is embedded in the inode (copy from inode).
		 */
		bcopy(((hammer2_inode_data_t *)data)->u.data,
		      bp->b_data, HAMMER2_EMBEDDED_BYTES);
		bzero(bp->b_data + HAMMER2_EMBEDDED_BYTES,
		      bp->b_bcount - HAMMER2_EMBEDDED_BYTES);
	} else if (chain->bref.type == HAMMER2_BREF_TYPE_DATA) {
		/*
		 * Data is on-media, issue device I/O and copy.
		 *
		 * XXX direct-IO shortcut could go here XXX.
		 */
		switch (HAMMER2_DEC_COMP(chain->bref.methods)) {
		case HAMMER2_COMP_LZ4:
			hammer2_decompress_LZ4_callback(data, chain->bytes,
		case HAMMER2_COMP_ZLIB:
			hammer2_decompress_ZLIB_callback(data, chain->bytes,
		case HAMMER2_COMP_NONE:
			KKASSERT(chain->bytes <= bp->b_bcount);
			bcopy(data, bp->b_data, chain->bytes);
			if (chain->bytes < bp->b_bcount) {
				bzero(bp->b_data + chain->bytes,
				      bp->b_bcount - chain->bytes);
			bp->b_flags |= B_NOTMETA;
			panic("hammer2_strategy_read: "
			      "unknown compression type");
		/* bqrelse the dio to help stabilize the call to panic() */
		hammer2_io_bqrelse(&dio);
		panic("hammer2_strategy_read: unknown bref type");
	hammer2_cluster_unlock(cluster);
hammer2_strategy_write(struct vop_strategy_args *ap)
	hammer2_pfsmount_t *pmp;
	hammer2_inode_t *ip;

	ip = VTOI(ap->a_vp);

	hammer2_lwinprog_ref(pmp);
	mtx_lock(&pmp->wthread_mtx);
	if (TAILQ_EMPTY(&pmp->wthread_bioq.queue)) {
		bioq_insert_tail(&pmp->wthread_bioq, ap->a_bio);
		mtx_unlock(&pmp->wthread_mtx);
		wakeup(&pmp->wthread_bioq);
		bioq_insert_tail(&pmp->wthread_bioq, ap->a_bio);
		mtx_unlock(&pmp->wthread_mtx);
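	/*
	 * Throttle against in-progress logical writes (assumption: pairs
	 * with the hammer2_lwinprog_ref() above to bound the backlog
	 * queued to the write thread).
	 */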
	hammer2_lwinprog_wait(pmp);
/*
 * hammer2_vop_ioctl { vp, command, data, fflag, cred }
 */
hammer2_vop_ioctl(struct vop_ioctl_args *ap)
	hammer2_inode_t *ip;

	ip = VTOI(ap->a_vp);
	error = hammer2_ioctl(ip, ap->a_command, (void *)ap->a_data,
			      ap->a_fflag, ap->a_cred);
hammer2_vop_mountctl(struct vop_mountctl_args *ap)
	hammer2_pfsmount_t *pmp;

	case (MOUNTCTL_SET_EXPORT):
		mp = ap->a_head.a_ops->head.vv_mount;
		if (ap->a_ctllen != sizeof(struct export_args))
		rc = vfs_export(mp, &pmp->export,
				(const struct export_args *)ap->a_ctl);
		rc = vop_stdmountctl(ap);
 * This handles unlinked open files after the vnode is finally dereferenced.
hammer2_run_unlinkq(hammer2_trans_t *trans, hammer2_pfsmount_t *pmp)
	hammer2_inode_unlink_t *ipul;
	hammer2_inode_t *ip;
	hammer2_cluster_t *cluster;

	if (TAILQ_EMPTY(&pmp->unlinkq))

	spin_lock(&pmp->unlinkq_spin);
	while ((ipul = TAILQ_FIRST(&pmp->unlinkq)) != NULL) {
		TAILQ_REMOVE(&pmp->unlinkq, ipul, entry);
		spin_unlock(&pmp->unlinkq_spin);
		kfree(ipul, pmp->minode);

		cluster = hammer2_inode_lock_ex(ip);
		KKASSERT(cluster->focus->flags & HAMMER2_CHAIN_UNLINKED);
		kprintf("hammer2: unlink on reclaim: %s\n",
			cluster->focus->data->ipdata.filename);
		hammer2_cluster_delete(trans, cluster, 0);
		hammer2_inode_unlock_ex(ip, cluster);	/* inode lock */
		hammer2_inode_drop(ip);			/* ipul ref */

		spin_lock(&pmp->unlinkq_spin);
	spin_unlock(&pmp->unlinkq_spin);
static void filt_hammer2detach(struct knote *kn);
static int filt_hammer2read(struct knote *kn, long hint);
static int filt_hammer2write(struct knote *kn, long hint);
static int filt_hammer2vnode(struct knote *kn, long hint);

static struct filterops hammer2read_filtops =
	{ FILTEROP_ISFD | FILTEROP_MPSAFE,
	  NULL, filt_hammer2detach, filt_hammer2read };
static struct filterops hammer2write_filtops =
	{ FILTEROP_ISFD | FILTEROP_MPSAFE,
	  NULL, filt_hammer2detach, filt_hammer2write };
static struct filterops hammer2vnode_filtops =
	{ FILTEROP_ISFD | FILTEROP_MPSAFE,
	  NULL, filt_hammer2detach, filt_hammer2vnode };
hammer2_vop_kqfilter(struct vop_kqfilter_args *ap)
	struct vnode *vp = ap->a_vp;
	struct knote *kn = ap->a_kn;

	switch (kn->kn_filter) {
		kn->kn_fop = &hammer2read_filtops;
		kn->kn_fop = &hammer2write_filtops;
		kn->kn_fop = &hammer2vnode_filtops;
		return (EOPNOTSUPP);

	kn->kn_hook = (caddr_t)vp;

	knote_insert(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn);
filt_hammer2detach(struct knote *kn)
	struct vnode *vp = (void *)kn->kn_hook;

	knote_remove(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn);

filt_hammer2read(struct knote *kn, long hint)
	struct vnode *vp = (void *)kn->kn_hook;
	hammer2_inode_t *ip = VTOI(vp);

	if (hint == NOTE_REVOKE) {
		kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
	off = ip->size - kn->kn_fp->f_offset;
	kn->kn_data = (off < INTPTR_MAX) ? off : INTPTR_MAX;
	if (kn->kn_sfflags & NOTE_OLDAPI)
	return (kn->kn_data != 0);

filt_hammer2write(struct knote *kn, long hint)
	if (hint == NOTE_REVOKE)
		kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);

filt_hammer2vnode(struct knote *kn, long hint)
	if (kn->kn_sfflags & hint)
		kn->kn_fflags |= hint;
	if (hint == NOTE_REVOKE) {
		kn->kn_flags |= (EV_EOF | EV_NODATA);
	return (kn->kn_fflags != 0);
hammer2_vop_markatime(struct vop_markatime_args *ap)
	hammer2_inode_t *ip;

hammer2_vop_fifokqfilter(struct vop_kqfilter_args *ap)
	error = VOCALL(&fifo_vnode_vops, &ap->a_head);
		error = hammer2_vop_kqfilter(ap);
struct vop_ops hammer2_vnode_vops = {
	.vop_default	= vop_defaultop,
	.vop_fsync	= hammer2_vop_fsync,
	.vop_getpages	= vop_stdgetpages,
	.vop_putpages	= vop_stdputpages,
	.vop_access	= hammer2_vop_access,
	.vop_advlock	= hammer2_vop_advlock,
	.vop_close	= hammer2_vop_close,
	.vop_nlink	= hammer2_vop_nlink,
	.vop_ncreate	= hammer2_vop_ncreate,
	.vop_nsymlink	= hammer2_vop_nsymlink,
	.vop_nremove	= hammer2_vop_nremove,
	.vop_nrmdir	= hammer2_vop_nrmdir,
	.vop_nrename	= hammer2_vop_nrename,
	.vop_getattr	= hammer2_vop_getattr,
	.vop_setattr	= hammer2_vop_setattr,
	.vop_readdir	= hammer2_vop_readdir,
	.vop_readlink	= hammer2_vop_readlink,
	.vop_read	= hammer2_vop_read,
	.vop_write	= hammer2_vop_write,
	.vop_open	= hammer2_vop_open,
	.vop_inactive	= hammer2_vop_inactive,
	.vop_reclaim	= hammer2_vop_reclaim,
	.vop_nresolve	= hammer2_vop_nresolve,
	.vop_nlookupdotdot = hammer2_vop_nlookupdotdot,
	.vop_nmkdir	= hammer2_vop_nmkdir,
	.vop_nmknod	= hammer2_vop_nmknod,
	.vop_ioctl	= hammer2_vop_ioctl,
	.vop_mountctl	= hammer2_vop_mountctl,
	.vop_bmap	= hammer2_vop_bmap,
	.vop_strategy	= hammer2_vop_strategy,
	.vop_kqfilter	= hammer2_vop_kqfilter
};

struct vop_ops hammer2_spec_vops = {
	.vop_default	= vop_defaultop,
	.vop_fsync	= hammer2_vop_fsync,
	.vop_read	= vop_stdnoread,
	.vop_write	= vop_stdnowrite,
	.vop_access	= hammer2_vop_access,
	.vop_close	= hammer2_vop_close,
	.vop_markatime	= hammer2_vop_markatime,
	.vop_getattr	= hammer2_vop_getattr,
	.vop_inactive	= hammer2_vop_inactive,
	.vop_reclaim	= hammer2_vop_reclaim,
	.vop_setattr	= hammer2_vop_setattr
};

struct vop_ops hammer2_fifo_vops = {
	.vop_default	= fifo_vnoperate,
	.vop_fsync	= hammer2_vop_fsync,
	.vop_read	= hammer2_vop_fiforead,
	.vop_write	= hammer2_vop_fifowrite,
	.vop_access	= hammer2_vop_access,
	.vop_close	= hammer2_vop_fifoclose,
	.vop_markatime	= hammer2_vop_markatime,
	.vop_getattr	= hammer2_vop_getattr,
	.vop_inactive	= hammer2_vop_inactive,
	.vop_reclaim	= hammer2_vop_reclaim,
	.vop_setattr	= hammer2_vop_setattr,
	.vop_kqfilter	= hammer2_vop_fifokqfilter
};