/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/fcntl.h>
#include <sys/namecache.h>
#include <sys/event.h>
#include <sys/dirent.h>
#include <vm/swap_pager.h>
#include <vfs/fifofs/fifo.h>

#include "hammer.h"

/*static int hammer_vop_vnoperate(struct vop_generic_args *);*/
static int hammer_vop_fsync(struct vop_fsync_args *);
static int hammer_vop_read(struct vop_read_args *);
static int hammer_vop_write(struct vop_write_args *);
static int hammer_vop_access(struct vop_access_args *);
static int hammer_vop_advlock(struct vop_advlock_args *);
static int hammer_vop_close(struct vop_close_args *);
static int hammer_vop_ncreate(struct vop_ncreate_args *);
static int hammer_vop_getattr(struct vop_getattr_args *);
static int hammer_vop_nresolve(struct vop_nresolve_args *);
static int hammer_vop_nlookupdotdot(struct vop_nlookupdotdot_args *);
static int hammer_vop_nlink(struct vop_nlink_args *);
static int hammer_vop_nmkdir(struct vop_nmkdir_args *);
static int hammer_vop_nmknod(struct vop_nmknod_args *);
static int hammer_vop_open(struct vop_open_args *);
static int hammer_vop_print(struct vop_print_args *);
static int hammer_vop_readdir(struct vop_readdir_args *);
static int hammer_vop_readlink(struct vop_readlink_args *);
static int hammer_vop_nremove(struct vop_nremove_args *);
static int hammer_vop_nrename(struct vop_nrename_args *);
static int hammer_vop_nrmdir(struct vop_nrmdir_args *);
static int hammer_vop_markatime(struct vop_markatime_args *);
static int hammer_vop_setattr(struct vop_setattr_args *);
static int hammer_vop_strategy(struct vop_strategy_args *);
static int hammer_vop_bmap(struct vop_bmap_args *ap);
static int hammer_vop_nsymlink(struct vop_nsymlink_args *);
static int hammer_vop_nwhiteout(struct vop_nwhiteout_args *);
static int hammer_vop_ioctl(struct vop_ioctl_args *);
static int hammer_vop_mountctl(struct vop_mountctl_args *);
static int hammer_vop_kqfilter (struct vop_kqfilter_args *);

static int hammer_vop_fifoclose (struct vop_close_args *);
static int hammer_vop_fiforead (struct vop_read_args *);
static int hammer_vop_fifowrite (struct vop_write_args *);
static int hammer_vop_fifokqfilter (struct vop_kqfilter_args *);

struct vop_ops hammer_vnode_vops = {
        .vop_default =          vop_defaultop,
        .vop_fsync =            hammer_vop_fsync,
        .vop_getpages =         vop_stdgetpages,
        .vop_putpages =         vop_stdputpages,
        .vop_read =             hammer_vop_read,
        .vop_write =            hammer_vop_write,
        .vop_access =           hammer_vop_access,
        .vop_advlock =          hammer_vop_advlock,
        .vop_close =            hammer_vop_close,
        .vop_ncreate =          hammer_vop_ncreate,
        .vop_getattr =          hammer_vop_getattr,
        .vop_inactive =         hammer_vop_inactive,
        .vop_reclaim =          hammer_vop_reclaim,
        .vop_nresolve =         hammer_vop_nresolve,
        .vop_nlookupdotdot =    hammer_vop_nlookupdotdot,
        .vop_nlink =            hammer_vop_nlink,
        .vop_nmkdir =           hammer_vop_nmkdir,
        .vop_nmknod =           hammer_vop_nmknod,
        .vop_open =             hammer_vop_open,
        .vop_pathconf =         vop_stdpathconf,
        .vop_print =            hammer_vop_print,
        .vop_readdir =          hammer_vop_readdir,
        .vop_readlink =         hammer_vop_readlink,
        .vop_nremove =          hammer_vop_nremove,
        .vop_nrename =          hammer_vop_nrename,
        .vop_nrmdir =           hammer_vop_nrmdir,
        .vop_markatime =        hammer_vop_markatime,
        .vop_setattr =          hammer_vop_setattr,
        .vop_bmap =             hammer_vop_bmap,
        .vop_strategy =         hammer_vop_strategy,
        .vop_nsymlink =         hammer_vop_nsymlink,
        .vop_nwhiteout =        hammer_vop_nwhiteout,
        .vop_ioctl =            hammer_vop_ioctl,
        .vop_mountctl =         hammer_vop_mountctl,
        .vop_kqfilter =         hammer_vop_kqfilter
};

struct vop_ops hammer_spec_vops = {
        .vop_default =          vop_defaultop,
        .vop_fsync =            hammer_vop_fsync,
        .vop_read =             vop_stdnoread,
        .vop_write =            vop_stdnowrite,
        .vop_access =           hammer_vop_access,
        .vop_close =            hammer_vop_close,
        .vop_markatime =        hammer_vop_markatime,
        .vop_getattr =          hammer_vop_getattr,
        .vop_inactive =         hammer_vop_inactive,
        .vop_reclaim =          hammer_vop_reclaim,
        .vop_setattr =          hammer_vop_setattr
};

struct vop_ops hammer_fifo_vops = {
        .vop_default =          fifo_vnoperate,
        .vop_fsync =            hammer_vop_fsync,
        .vop_read =             hammer_vop_fiforead,
        .vop_write =            hammer_vop_fifowrite,
        .vop_access =           hammer_vop_access,
        .vop_close =            hammer_vop_fifoclose,
        .vop_markatime =        hammer_vop_markatime,
        .vop_getattr =          hammer_vop_getattr,
        .vop_inactive =         hammer_vop_inactive,
        .vop_reclaim =          hammer_vop_reclaim,
        .vop_setattr =          hammer_vop_setattr,
        .vop_kqfilter =         hammer_vop_fifokqfilter
};

static __inline
void
hammer_knote(struct vnode *vp, int flags)
{
        if (flags)
                KNOTE(&vp->v_pollinfo.vpi_kqinfo.ki_note, flags);
}
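
/*
 * Example: a namespace-changing VOP below posts its events as
 *
 *      hammer_knote(ap->a_dvp, NOTE_WRITE | NOTE_LINK);
 *
 * A flags value of 0 makes the call a no-op.
 */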
#ifdef DEBUG_TRUNCATE
struct hammer_inode *HammerTruncIp;
#endif
static int hammer_dounlink(hammer_transaction_t trans, struct nchandle *nch,
                           struct vnode *dvp, struct ucred *cred,
                           int flags, int isdir);
static int hammer_vop_strategy_read(struct vop_strategy_args *ap);
static int hammer_vop_strategy_write(struct vop_strategy_args *ap);
#if 0
static
int
hammer_vop_vnoperate(struct vop_generic_args *ap)
{
        return (VOCALL(&hammer_vnode_vops, ap));
}
#endif
/*
 * hammer_vop_fsync { vp, waitfor }
 *
 * fsync() an inode to disk and wait for it to be completely committed
 * such that the information would not be undone if a crash occurred after
 * return.
 *
 * NOTE: HAMMER's fsync()'s are going to remain expensive until we implement
 *       a REDO log.  A sysctl is provided to relax HAMMER's fsync()
 *       semantics.
 *
 *       Ultimately the combination of a REDO log and use of fast storage
 *       to front-end cluster caches will make fsync fast, but it ain't
 *       here yet.  And, in any case, we need real transactional
 *       all-or-nothing features which are not restricted to a single file.
 */
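/*
 * Sketch of the relaxation knob (assuming the sysctl is named
 * vfs.hammer.fsync_mode, matching the hammer_fsync_mode variable
 * tested in the switch below):
 *
 *      sysctl vfs.hammer.fsync_mode=3
 *
 * selects REDO semantics with a relaxed asynchronous flush, while
 * mode 0 keeps a full synchronous flush on every fsync().
 */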
static
int
hammer_vop_fsync(struct vop_fsync_args *ap)
{
        hammer_inode_t ip = VTOI(ap->a_vp);
        hammer_mount_t hmp = ip->hmp;
        int waitfor = ap->a_waitfor;
        int mode;

        lwkt_gettoken(&hmp->fs_token);

        /*
         * Fsync rule relaxation (default is either full synchronous flush
         * or REDO semantics with synchronous flush).
         */
        if (ap->a_flags & VOP_FSYNC_SYSCALL) {
                switch(hammer_fsync_mode) {
                case 0:
mode0:
                        /* no REDO, full synchronous flush */
                        goto skip;
                case 1:
mode1:
                        /* no REDO, full asynchronous flush */
                        if (waitfor == MNT_WAIT)
                                waitfor = MNT_NOWAIT;
                        goto skip;
                case 2:
                        /* REDO semantics, synchronous flush */
                        if (hmp->version < HAMMER_VOL_VERSION_FOUR)
                                goto mode0;
                        mode = HAMMER_FLUSH_UNDOS_AUTO;
                        break;
                case 3:
                        /* REDO semantics, relaxed asynchronous flush */
                        if (hmp->version < HAMMER_VOL_VERSION_FOUR)
                                goto mode1;
                        mode = HAMMER_FLUSH_UNDOS_RELAXED;
                        if (waitfor == MNT_WAIT)
                                waitfor = MNT_NOWAIT;
                        break;
                case 4:
                        /* ignore the fsync() system call */
                        lwkt_reltoken(&hmp->fs_token);
                        return(0);
                default:
                        /* we have to do something */
                        mode = HAMMER_FLUSH_UNDOS_RELAXED;
                        if (waitfor == MNT_WAIT)
                                waitfor = MNT_NOWAIT;
                        break;
                }

                /*
                 * Fast fsync only needs to flush the UNDO/REDO fifo if
                 * HAMMER_INODE_REDO is non-zero and the only modifications
                 * made to the file are write or write-extends.
                 */
                if ((ip->flags & HAMMER_INODE_REDO) &&
                    (ip->flags & HAMMER_INODE_MODMASK_NOREDO) == 0) {
                        ++hammer_count_fsyncs;
                        hammer_flusher_flush_undos(hmp, mode);
                        if (ip->vp && (ip->flags & HAMMER_INODE_MODMASK) == 0)
                                vclrisdirty(ip->vp);
                        lwkt_reltoken(&hmp->fs_token);
                        return(0);
                }

                /*
                 * REDO is enabled by fsync(), the idea being we really only
                 * want to lay down REDO records when programs are using
                 * fsync() heavily.  The first fsync() on the file starts
                 * the gravy train going and later fsync()s keep it hot by
                 * resetting the redo_count.
                 *
                 * We weren't running REDOs before now so we have to fall
                 * through and do a full fsync of what we have.
                 */
                if (hmp->version >= HAMMER_VOL_VERSION_FOUR &&
                    (hmp->flags & HAMMER_MOUNT_REDO_RECOVERY_RUN) == 0) {
                        ip->flags |= HAMMER_INODE_REDO;
                        ip->redo_count = 0;
                }
        }
skip:
        /*
         * Do a full flush sequence.
         *
         * Attempt to release the vnode while waiting for the inode to
         * finish flushing.  This can really mess up inactive->reclaim
         * sequences so only do it if the vnode is active.
         *
         * WARNING! The VX lock functions must be used.  vn_lock() will
         *          fail when this is part of a VOP_RECLAIM sequence.
         */
        ++hammer_count_fsyncs;
        vfsync(ap->a_vp, waitfor, 1, NULL, NULL);
        hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
        if (waitfor == MNT_WAIT) {
                int dorelock;

                if ((ap->a_vp->v_flag & VRECLAIMED) == 0) {
                        vx_unlock(ap->a_vp);
                        dorelock = 1;
                } else {
                        dorelock = 0;
                }
                hammer_wait_inode(ip);
                if (dorelock)
                        vx_lock(ap->a_vp);
        }
        if (ip->vp && (ip->flags & HAMMER_INODE_MODMASK) == 0)
                vclrisdirty(ip->vp);
        lwkt_reltoken(&hmp->fs_token);
        return (ip->error);
}

/*
 * hammer_vop_read { vp, uio, ioflag, cred }
 *
 * MPSAFE (for the cache-safe path, which does not require fs_token)
 */
static
int
hammer_vop_read(struct vop_read_args *ap)
{
        struct hammer_transaction trans;
        hammer_inode_t ip;
        hammer_mount_t hmp;
        struct buf *bp;
        struct uio *uio;
        int offset;
        int error;
        int n;
        int seqcount;
        int ioseqcount;
        int blksize;
        int bigread;
        int got_trans;
        size_t resid;

        if (ap->a_vp->v_type != VREG)
                return (EINVAL);

        ip = VTOI(ap->a_vp);
        hmp = ip->hmp;
        error = 0;
        got_trans = 0;
        uio = ap->a_uio;
        /*
         * Attempt to shortcut directly to the VM object using lwbufs.
         * This is much faster than instantiating buffer cache buffers.
         */
        resid = uio->uio_resid;
        error = vop_helper_read_shortcut(ap);
        hammer_stats_file_read += resid - uio->uio_resid;
        if (error)
                return (error);
        if (uio->uio_resid == 0)
                goto finished;
        /*
         * Allow the UIO's size to override the sequential heuristic.
         */
        blksize = hammer_blocksize(uio->uio_offset);
        seqcount = (uio->uio_resid + (BKVASIZE - 1)) / BKVASIZE;
        ioseqcount = (ap->a_ioflag >> 16);
        if (seqcount < ioseqcount)
                seqcount = ioseqcount;
        /*
         * If reading or writing a huge amount of data we have to break
         * atomicity and allow the operation to be interrupted by a signal
         * or it can DOS the machine.
         */
        bigread = (uio->uio_resid > 100 * 1024 * 1024);
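
        /*
         * Example: a single 500MB read() exceeds the 100MB threshold
         * above, so bigread is set and hammer_signal_check() is polled
         * once per block in the loop below, letting a signal abort the
         * transfer between blocks instead of monopolizing the machine.
         */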
        /*
         * Access the data typically in HAMMER_BUFSIZE blocks via the
         * buffer cache, but HAMMER may use a variable block size based
         * on the offset.
         *
         * XXX Temporary hack, delay the start transaction while we remain
         *     MPSAFE.  NOTE: ino_data.size cannot change while vnode is
         *     locked.
         */
        while (uio->uio_resid > 0 && uio->uio_offset < ip->ino_data.size) {
                int64_t base_offset;
                int64_t file_limit;

                blksize = hammer_blocksize(uio->uio_offset);
                offset = (int)uio->uio_offset & (blksize - 1);
                base_offset = uio->uio_offset - offset;

                if (bigread && (error = hammer_signal_check(ip->hmp)) != 0)
                        break;

                /*
                 * MPSAFE
                 */
                bp = getblk(ap->a_vp, base_offset, blksize, 0, 0);
                if ((bp->b_flags & (B_INVAL | B_CACHE | B_RAM)) == B_CACHE) {
                        bp->b_flags &= ~B_AGE;
                        error = 0;
                        goto skip;
                }
                if (ap->a_ioflag & IO_NRDELAY) {
                        bqrelse(bp);
                        return (EWOULDBLOCK);
                }
                /*
                 * MPUNSAFE
                 */
                if (got_trans == 0) {
                        hammer_start_transaction(&trans, ip->hmp);
                        got_trans = 1;
                }

                /*
                 * NOTE: A valid bp has already been acquired, but was not
                 *       B_CACHE.
                 */
                if (hammer_cluster_enable) {
                        /*
                         * Use file_limit to prevent cluster_read() from
                         * creating buffers of the wrong block size past
                         * the demarc.
                         */
                        file_limit = ip->ino_data.size;
                        if (base_offset < HAMMER_XDEMARC &&
                            file_limit > HAMMER_XDEMARC) {
                                file_limit = HAMMER_XDEMARC;
                        }
                        error = cluster_readx(ap->a_vp,
                                              file_limit, base_offset,
                                              blksize, uio->uio_resid,
                                              seqcount * BKVASIZE, &bp);
                } else {
                        error = breadnx(ap->a_vp, base_offset, blksize,
                                        NULL, NULL, 0, &bp);
                }
                if (error) {
                        brelse(bp);
                        break;
                }
skip:
                if ((hammer_debug_io & 0x0001) && (bp->b_flags & B_IODEBUG)) {
                        kprintf("doff %016jx read file %016jx@%016jx\n",
                                (intmax_t)bp->b_bio2.bio_offset,
                                (intmax_t)ip->obj_id,
                                (intmax_t)bp->b_loffset);
                }
                bp->b_flags &= ~B_IODEBUG;
                if (blksize == HAMMER_XBUFSIZE)
                        bp->b_flags |= B_CLUSTEROK;
                n = blksize - offset;
                if (n > uio->uio_resid)
                        n = uio->uio_resid;
                if (n > ip->ino_data.size - uio->uio_offset)
                        n = (int)(ip->ino_data.size - uio->uio_offset);

                /*
                 * Set B_AGE, data has a lower priority than meta-data.
                 *
                 * Use a hold/unlock/drop sequence to run the uiomove
                 * with the buffer unlocked, avoiding deadlocks against
                 * read()s on mmap()'d spaces.
                 */
                bp->b_flags |= B_AGE;
                error = uiomovebp(bp, (char *)bp->b_data + offset, n, uio);
                bqrelse(bp);

                if (error)
                        break;
                hammer_stats_file_read += n;
        }
finished:

        /*
         * Try to update the atime with just the inode lock for maximum
         * concurrency.  If we can't shortcut it we have to get the full
         * blown transaction.
         */
        if (got_trans == 0 && hammer_update_atime_quick(ip) < 0) {
                hammer_start_transaction(&trans, ip->hmp);
                got_trans = 1;
        }

        if (got_trans) {
                if ((ip->flags & HAMMER_INODE_RO) == 0 &&
                    (ip->hmp->mp->mnt_flag & MNT_NOATIME) == 0) {
                        lwkt_gettoken(&hmp->fs_token);
                        ip->ino_data.atime = trans.time;
                        hammer_modify_inode(&trans, ip, HAMMER_INODE_ATIME);
                        hammer_done_transaction(&trans);
                        lwkt_reltoken(&hmp->fs_token);
                } else {
                        hammer_done_transaction(&trans);
                }
        }
        return (error);
}

/*
 * hammer_vop_write { vp, uio, ioflag, cred }
 */
static
int
hammer_vop_write(struct vop_write_args *ap)
{
        struct hammer_transaction trans;
        struct hammer_inode *ip;
        hammer_mount_t hmp;
        thread_t td;
        struct uio *uio;
        int offset;
        off_t base_offset;
        int64_t cluster_eof;
        struct buf *bp;
        int kflags;
        int error;
        int n;
        int flags;
        int seqcount;
        int bigwrite;

        if (ap->a_vp->v_type != VREG)
                return (EINVAL);

        ip = VTOI(ap->a_vp);
        hmp = ip->hmp;
        error = 0;
        kflags = 0;
        seqcount = ap->a_ioflag >> 16;

        if (ip->flags & HAMMER_INODE_RO)
                return (EROFS);
        /*
         * Create a transaction to cover the operations we perform.
         */
        hammer_start_transaction(&trans, hmp);
        uio = ap->a_uio;

        /*
         * Check append mode
         */
        if (ap->a_ioflag & IO_APPEND)
                uio->uio_offset = ip->ino_data.size;
        /*
         * Check for illegal write offsets.  Valid range is 0...2^63-1.
         *
         * NOTE: the base_off assignment is required to work around what
         *       I consider to be a GCC-4 optimization bug.
         */
        if (uio->uio_offset < 0) {
                hammer_done_transaction(&trans);
                return (EFBIG);
        }
        base_offset = uio->uio_offset + uio->uio_resid; /* work around gcc-4 */
        if (uio->uio_resid > 0 && base_offset <= uio->uio_offset) {
                hammer_done_transaction(&trans);
                return (EFBIG);
        }

        if (uio->uio_resid > 0 && (td = uio->uio_td) != NULL && td->td_proc &&
            base_offset > td->td_proc->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
                hammer_done_transaction(&trans);
                lwpsignal(td->td_proc, td->td_lwp, SIGXFSZ);
                return (EFBIG);
        }
        /*
         * If reading or writing a huge amount of data we have to break
         * atomicity and allow the operation to be interrupted by a signal
         * or it can DOS the machine.
         *
         * Preset redo_count so we stop generating REDOs earlier if the
         * limit is exceeded.
         *
         * redo_count is heuristical, SMP races are ok
         */
        bigwrite = (uio->uio_resid > 100 * 1024 * 1024);
        if ((ip->flags & HAMMER_INODE_REDO) &&
            ip->redo_count < hammer_limit_redo) {
                ip->redo_count += uio->uio_resid;
        }
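
        /*
         * Example (the limit value is whatever hammer_limit_redo is
         * tuned to): presetting redo_count with the full uio_resid means
         * a huge write trips the limit immediately, so the loop below
         * stops laying down REDO records and falls back to full
         * meta-data fsync semantics instead of logging every block.
         */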
        /*
         * Access the data typically in HAMMER_BUFSIZE blocks via the
         * buffer cache, but HAMMER may use a variable block size based
         * on the offset.
         */
        while (uio->uio_resid > 0) {
                int fixsize = 0;
                int blksize;
                int blkmask;
                int trivial;
                int endofblk;
                int64_t nsize;

                if ((error = hammer_checkspace(hmp, HAMMER_CHKSPC_WRITE)) != 0)
                        break;
                if (bigwrite && (error = hammer_signal_check(hmp)) != 0)
                        break;

                blksize = hammer_blocksize(uio->uio_offset);
                /*
                 * Control the number of pending records associated with
                 * this inode.  If too many have accumulated start a
                 * flush.  Try to maintain a pipeline with the flusher.
                 *
                 * NOTE: It is possible for other sources to grow the
                 *       records but not necessarily issue another flush,
                 *       so use a timeout and ensure that a re-flush occurs.
                 */
                if (ip->rsv_recs >= hammer_limit_inode_recs) {
                        lwkt_gettoken(&hmp->fs_token);
                        hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
                        while (ip->rsv_recs >= hammer_limit_inode_recs * 2) {
                                ip->flags |= HAMMER_INODE_RECSW;
                                tsleep(&ip->rsv_recs, 0, "hmrwww", hz);
                                hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
                        }
                        lwkt_reltoken(&hmp->fs_token);
                }
                /*
                 * Do not allow HAMMER to blow out the buffer cache.  Very
                 * large UIOs can lockout other processes due to bwillwrite()
                 * mechanics.
                 *
                 * The hammer inode is not locked during these operations.
                 * The vnode is locked which can interfere with the pageout
                 * daemon for non-UIO_NOCOPY writes but should not interfere
                 * with the buffer cache.  Even so, we cannot afford to
                 * allow the pageout daemon to build up too many dirty buffer
                 * cache buffers.
                 *
                 * Only call this if we aren't being recursively called from
                 * a virtual disk device (vn), else we may deadlock.
                 */
                if ((ap->a_ioflag & IO_RECURSE) == 0)
                        bwillwrite(blksize);
                /*
                 * Calculate the blocksize at the current offset and figure
                 * out how much we can actually write.
                 */
                blkmask = blksize - 1;
                offset = (int)uio->uio_offset & blkmask;
                base_offset = uio->uio_offset & ~(int64_t)blkmask;
                n = blksize - offset;
                if (n > uio->uio_resid) {
                        n = uio->uio_resid;
                        endofblk = 0;
                } else {
                        endofblk = 1;
                }
                nsize = uio->uio_offset + n;
                if (nsize > ip->ino_data.size) {
                        if (uio->uio_offset > ip->ino_data.size)
                                trivial = 0;
                        else
                                trivial = 1;
                        nvextendbuf(ap->a_vp,
                                    ip->ino_data.size,
                                    nsize,
                                    hammer_blocksize(ip->ino_data.size),
                                    hammer_blocksize(nsize),
                                    hammer_blockoff(ip->ino_data.size),
                                    hammer_blockoff(nsize),
                                    trivial);
                        fixsize = 1;
                        kflags |= NOTE_EXTEND;
                }
                if (uio->uio_segflg == UIO_NOCOPY) {
                        /*
                         * Issuing a write with the same data backing the
                         * buffer.  Instantiate the buffer to collect the
                         * backing vm pages, then read-in any missing bits.
                         *
                         * This case is used by vop_stdputpages().
                         */
                        bp = getblk(ap->a_vp, base_offset,
                                    blksize, GETBLK_BHEAVY, 0);
                        if ((bp->b_flags & B_CACHE) == 0) {
                                bqrelse(bp);
                                error = bread(ap->a_vp, base_offset,
                                              blksize, &bp);
                        }
                } else if (offset == 0 && uio->uio_resid >= blksize) {
                        /*
                         * Even though we are entirely overwriting the buffer
                         * we may still have to zero it out to avoid a
                         * mmap/write visibility issue.
                         */
                        bp = getblk(ap->a_vp, base_offset, blksize, GETBLK_BHEAVY, 0);
                        if ((bp->b_flags & B_CACHE) == 0)
                                vfs_bio_clrbuf(bp);
                } else if (base_offset >= ip->ino_data.size) {
                        /*
                         * If the base offset of the buffer is beyond the
                         * file EOF, we don't have to issue a read.
                         */
                        bp = getblk(ap->a_vp, base_offset,
                                    blksize, GETBLK_BHEAVY, 0);
                        vfs_bio_clrbuf(bp);
                } else {
                        /*
                         * Partial overwrite, read in any missing bits then
                         * replace the portion being written.
                         */
                        error = bread(ap->a_vp, base_offset, blksize, &bp);
                        if (error == 0)
                                bheavy(bp);
                }
                if (error == 0)
                        error = uiomovebp(bp, bp->b_data + offset, n, uio);
                lwkt_gettoken(&hmp->fs_token);
                /*
                 * Generate REDO records if enabled and redo_count will not
                 * exceed the limit.
                 *
                 * If redo_count exceeds the limit we stop generating records
                 * and clear HAMMER_INODE_REDO.  This will cause the next
                 * fsync() to do a full meta-data sync instead of just an
                 * UNDO/REDO fifo update.
                 *
                 * When clearing HAMMER_INODE_REDO any pre-existing REDOs
                 * will still be tracked.  The tracks will be terminated
                 * when the related meta-data (including possible data
                 * modifications which are not tracked via REDO) is
                 * flushed.
                 */
                if ((ip->flags & HAMMER_INODE_REDO) && error == 0) {
                        if (ip->redo_count < hammer_limit_redo) {
                                bp->b_flags |= B_VFSFLAG1;
                                error = hammer_generate_redo(&trans, ip,
                                                     base_offset + offset,
                                                     HAMMER_REDO_WRITE,
                                                     bp->b_data + offset,
                                                     (size_t)n);
                        } else {
                                ip->flags &= ~HAMMER_INODE_REDO;
                        }
                }
                /*
                 * If we screwed up we have to undo any VM size changes we
                 * made.
                 */
                if (error) {
                        brelse(bp);
                        if (fixsize) {
                                nvtruncbuf(ap->a_vp, ip->ino_data.size,
                                          hammer_blocksize(ip->ino_data.size),
                                          hammer_blockoff(ip->ino_data.size),
                                          0);
                        }
                        lwkt_reltoken(&hmp->fs_token);
                        break;
                }
                kflags |= NOTE_WRITE;
                hammer_stats_file_write += n;
                if (blksize == HAMMER_XBUFSIZE)
                        bp->b_flags |= B_CLUSTEROK;
                if (ip->ino_data.size < uio->uio_offset) {
                        ip->ino_data.size = uio->uio_offset;
                        flags = HAMMER_INODE_SDIRTY;
                } else {
                        flags = 0;
                }
                ip->ino_data.mtime = trans.time;
                flags |= HAMMER_INODE_MTIME | HAMMER_INODE_BUFS;
                hammer_modify_inode(&trans, ip, flags);

                /*
                 * Once we dirty the buffer any cached zone-X offset
                 * becomes invalid.  HAMMER NOTE: no-history mode cannot
                 * allow overwriting over the same data sector unless
                 * we provide UNDOs for the old data, which we don't.
                 */
                bp->b_bio2.bio_offset = NOOFFSET;

                lwkt_reltoken(&hmp->fs_token);
                /*
                 * Final buffer disposition.
                 *
                 * Because meta-data updates are deferred, HAMMER is
                 * especially sensitive to excessive bdwrite()s because
                 * the I/O stream is not broken up by disk reads.  So the
                 * buffer cache simply cannot keep up.
                 *
                 * WARNING!  blksize is variable.  cluster_write() is
                 *           expected to not blow up if it encounters
                 *           buffers that do not match the passed blksize.
                 *
                 * NOTE!  Hammer shouldn't need to bawrite()/cluster_write().
                 *        The ip->rsv_recs check should burst-flush the data.
                 *        If we queue it immediately the buf could be left
                 *        locked on the device queue for a very long time.
                 *
                 *        However, failing to flush a dirty buffer out when
                 *        issued from the pageout daemon can result in a low
                 *        memory deadlock against bio_page_alloc(), so we
                 *        have to bawrite() on IO_ASYNC as well.
                 *
                 * NOTE!  To avoid degenerate stalls due to mismatched block
                 *        sizes we only honor IO_DIRECT on the write which
                 *        abuts the end of the buffer.  However, we must
                 *        honor IO_SYNC in case someone is silly enough to
                 *        configure a HAMMER file as swap, or when HAMMER
                 *        is serving NFS (for commits).  Ick ick.
                 */
                bp->b_flags |= B_AGE;
                if (blksize == HAMMER_XBUFSIZE)
                        bp->b_flags |= B_CLUSTEROK;

                if (ap->a_ioflag & IO_SYNC) {
                        bwrite(bp);
                } else if ((ap->a_ioflag & IO_DIRECT) && endofblk) {
                        bawrite(bp);
                } else if (ap->a_ioflag & IO_ASYNC) {
                        bawrite(bp);
                } else if (hammer_cluster_enable &&
                           !(ap->a_vp->v_mount->mnt_flag & MNT_NOCLUSTERW)) {
                        if (base_offset < HAMMER_XDEMARC)
                                cluster_eof = hammer_blockdemarc(base_offset,
                                                        ip->ino_data.size);
                        else
                                cluster_eof = ip->ino_data.size;
                        cluster_write(bp, cluster_eof, blksize, seqcount);
                } else {
                        bdwrite(bp);
                }
        }
        hammer_done_transaction(&trans);
        hammer_knote(ap->a_vp, kflags);

        return (error);
}

/*
 * hammer_vop_access { vp, mode, cred }
 *
 * MPSAFE - does not require fs_token
 */
static
int
hammer_vop_access(struct vop_access_args *ap)
{
        struct hammer_inode *ip = VTOI(ap->a_vp);
        uid_t uid;
        gid_t gid;
        int error;

        ++hammer_stats_file_iopsr;
        uid = hammer_to_unix_xid(&ip->ino_data.uid);
        gid = hammer_to_unix_xid(&ip->ino_data.gid);

        error = vop_helper_access(ap, uid, gid, ip->ino_data.mode,
                                  ip->ino_data.uflags);
        return (error);
}

/*
 * hammer_vop_advlock { vp, id, op, fl, flags }
 *
 * MPSAFE - does not require fs_token
 */
static
int
hammer_vop_advlock(struct vop_advlock_args *ap)
{
        hammer_inode_t ip = VTOI(ap->a_vp);

        return (lf_advlock(ap, &ip->advlock, ip->ino_data.size));
}

/*
 * hammer_vop_close { vp, fflag }
 *
 * We can only sync-on-close for normal closes.  XXX disabled for now.
 */
static
int
hammer_vop_close(struct vop_close_args *ap)
{
#if 0
        struct vnode *vp = ap->a_vp;
        hammer_inode_t ip = VTOI(vp);
        int waitfor;

        if (ip->flags & (HAMMER_INODE_CLOSESYNC|HAMMER_INODE_CLOSEASYNC)) {
                if (vn_islocked(vp) == LK_EXCLUSIVE &&
                    (vp->v_flag & (VINACTIVE|VRECLAIMED)) == 0) {
                        if (ip->flags & HAMMER_INODE_CLOSESYNC)
                                waitfor = MNT_WAIT;
                        else
                                waitfor = MNT_NOWAIT;
                        ip->flags &= ~(HAMMER_INODE_CLOSESYNC |
                                       HAMMER_INODE_CLOSEASYNC);
                        VOP_FSYNC(vp, MNT_NOWAIT, waitfor);
                }
        }
#endif
        return (vop_stdclose(ap));
}

/*
 * hammer_vop_ncreate { nch, dvp, vpp, cred, vap }
 *
 * The operating system has already ensured that the directory entry
 * does not exist and done all appropriate namespace locking.
 */
static
int
hammer_vop_ncreate(struct vop_ncreate_args *ap)
{
        struct hammer_transaction trans;
        struct hammer_inode *dip;
        struct hammer_inode *nip;
        struct nchandle *nch;
        hammer_mount_t hmp;
        int error;

        nch = ap->a_nch;
        dip = VTOI(ap->a_dvp);
        hmp = dip->hmp;

        if (dip->flags & HAMMER_INODE_RO)
                return (EROFS);
        if ((error = hammer_checkspace(hmp, HAMMER_CHKSPC_CREATE)) != 0)
                return (error);

        /*
         * Create a transaction to cover the operations we perform.
         */
        lwkt_gettoken(&hmp->fs_token);
        hammer_start_transaction(&trans, hmp);
        ++hammer_stats_file_iopsw;

        /*
         * Create a new filesystem object of the requested type.  The
         * returned inode will be referenced and shared-locked to prevent
         * it from being moved to the flusher.
         */
        error = hammer_create_inode(&trans, ap->a_vap, ap->a_cred,
                                    dip, nch->ncp->nc_name, nch->ncp->nc_nlen,
                                    NULL, &nip);
        if (error) {
                hkprintf("hammer_create_inode error %d\n", error);
                hammer_done_transaction(&trans);
                *ap->a_vpp = NULL;
                lwkt_reltoken(&hmp->fs_token);
                return (error);
        }

        /*
         * Add the new filesystem object to the directory.  This will also
         * bump the inode's link count.
         */
        error = hammer_ip_add_directory(&trans, dip,
                                        nch->ncp->nc_name, nch->ncp->nc_nlen,
                                        nip);
        if (error)
                hkprintf("hammer_ip_add_directory error %d\n", error);

        /*
         * Finish up.
         */
        if (error) {
                hammer_rel_inode(nip, 0);
                hammer_done_transaction(&trans);
                *ap->a_vpp = NULL;
        } else {
                error = hammer_get_vnode(nip, ap->a_vpp);
                hammer_done_transaction(&trans);
                hammer_rel_inode(nip, 0);
                if (error == 0) {
                        cache_setunresolved(ap->a_nch);
                        cache_setvp(ap->a_nch, *ap->a_vpp);
                }
                hammer_knote(ap->a_dvp, NOTE_WRITE);
        }
        lwkt_reltoken(&hmp->fs_token);
        return (error);
}

/*
 * hammer_vop_getattr { vp, vap }
 *
 * Retrieve an inode's attribute information.  When accessing inodes
 * historically we fake the atime field to ensure consistent results.
 * The atime field is stored in the B-Tree element and allowed to be
 * updated without cycling the element.
 *
 * MPSAFE - does not require fs_token
 */
static
int
hammer_vop_getattr(struct vop_getattr_args *ap)
{
        struct hammer_inode *ip = VTOI(ap->a_vp);
        struct vattr *vap = ap->a_vap;

        /*
         * We want the fsid to be different when accessing a filesystem
         * with different as-of's so programs like diff don't think
         * the files are the same.
         *
         * We also want the fsid to be the same when comparing snapshots,
         * or when comparing mirrors (which might be backed by different
         * physical devices).  HAMMER fsids are based on the PFS's
         * shared_uuid field.
         *
         * XXX there is a chance of collision here.  The va_fsid reported
         * by stat is different from the more involved fsid used in the
         * mount structure.
         */
        ++hammer_stats_file_iopsr;
        hammer_lock_sh(&ip->lock);
        vap->va_fsid = ip->pfsm->fsid_udev ^ (u_int32_t)ip->obj_asof ^
                       (u_int32_t)(ip->obj_asof >> 32);

        vap->va_fileid = ip->ino_leaf.base.obj_id;
        vap->va_mode = ip->ino_data.mode;
        vap->va_nlink = ip->ino_data.nlinks;
        vap->va_uid = hammer_to_unix_xid(&ip->ino_data.uid);
        vap->va_gid = hammer_to_unix_xid(&ip->ino_data.gid);
        vap->va_rmajor = 0;
        vap->va_rminor = 0;
        vap->va_size = ip->ino_data.size;

        /*
         * Special case for @@PFS softlinks.  The actual size of the
         * expanded softlink is "@@0x%016llx:%05d" == 26 bytes,
         * or for MAX_TID is "@@-1:%05d" == 10 bytes.
         *
         * Note that userspace hammer command does not allow users to
         * create a @@PFS softlink under an existing other PFS (id!=0)
         * so the ip localization here for @@PFS softlink is always 0.
         */
        if (ip->ino_data.obj_type == HAMMER_OBJTYPE_SOFTLINK &&
            ip->ino_data.size == 10 &&
            ip->obj_asof == HAMMER_MAX_TID &&
            ip->obj_localization == 0 &&
            strncmp(ip->ino_data.ext.symlink, "@@PFS", 5) == 0) {
                if (ip->pfsm->pfsd.mirror_flags & HAMMER_PFSD_SLAVE)
                        vap->va_size = 26;
                else
                        vap->va_size = 10;
        }
        /*
         * We must provide a consistent atime and mtime for snapshots
         * so people can do a 'tar cf - ... | md5' on them and get
         * consistent results.
         */
        if (ip->flags & HAMMER_INODE_RO) {
                hammer_time_to_timespec(ip->ino_data.ctime, &vap->va_atime);
                hammer_time_to_timespec(ip->ino_data.ctime, &vap->va_mtime);
        } else {
                hammer_time_to_timespec(ip->ino_data.atime, &vap->va_atime);
                hammer_time_to_timespec(ip->ino_data.mtime, &vap->va_mtime);
        }
        hammer_time_to_timespec(ip->ino_data.ctime, &vap->va_ctime);
        vap->va_flags = ip->ino_data.uflags;
        vap->va_gen = 1;        /* hammer inums are unique for all time */
        vap->va_blocksize = HAMMER_BUFSIZE;
        if (ip->ino_data.size >= HAMMER_XDEMARC) {
                vap->va_bytes = (ip->ino_data.size + HAMMER_XBUFMASK64) &
                                ~HAMMER_XBUFMASK64;
        } else if (ip->ino_data.size > HAMMER_BUFSIZE / 2) {
                vap->va_bytes = (ip->ino_data.size + HAMMER_BUFMASK64) &
                                ~HAMMER_BUFMASK64;
        } else {
                vap->va_bytes = (ip->ino_data.size + 15) & ~15;
        }

        vap->va_type = hammer_get_vnode_type(ip->ino_data.obj_type);
        vap->va_filerev = 0;    /* XXX */
        vap->va_uid_uuid = ip->ino_data.uid;
        vap->va_gid_uuid = ip->ino_data.gid;
        vap->va_fsid_uuid = ip->hmp->fsid;
        vap->va_vaflags = VA_UID_UUID_VALID | VA_GID_UUID_VALID |
                          VA_FSID_UUID_VALID;

        switch (ip->ino_data.obj_type) {
        case HAMMER_OBJTYPE_CDEV:
        case HAMMER_OBJTYPE_BDEV:
                vap->va_rmajor = ip->ino_data.rmajor;
                vap->va_rminor = ip->ino_data.rminor;
                break;
        default:
                break;
        }
        hammer_unlock(&ip->lock);
        return (0);
}

/*
 * hammer_vop_nresolve { nch, dvp, cred }
 *
 * Locate the requested directory entry.
 */
static
int
hammer_vop_nresolve(struct vop_nresolve_args *ap)
{
        struct hammer_transaction trans;
        struct namecache *ncp;
        hammer_mount_t hmp;
        struct hammer_inode *dip;
        struct hammer_inode *ip;
        hammer_tid_t asof;
        struct hammer_cursor cursor;
        struct vnode *vp;
        int64_t namekey;
        int64_t obj_id;
        int nlen;
        int flags;
        int ispfs;
        int error;
        int i;
        u_int32_t localization;
        u_int32_t max_iterations;

        /*
         * Misc initialization, plus handle as-of name extensions.  Look for
         * the '@@' extension.  Note that as-of files and directories cannot
         * be accessed in a read-write fashion.
         */
        dip = VTOI(ap->a_dvp);
        ncp = ap->a_nch->ncp;
        asof = dip->obj_asof;
        localization = dip->obj_localization;  /* for code consistency */
        nlen = ncp->nc_nlen;
        flags = dip->flags & HAMMER_INODE_RO;
        hmp = dip->hmp;
        ispfs = 0;

        lwkt_gettoken(&hmp->fs_token);
        hammer_simple_transaction(&trans, hmp);
        ++hammer_stats_file_iopsr;
        for (i = 0; i < nlen; ++i) {
                if (ncp->nc_name[i] == '@' && ncp->nc_name[i+1] == '@') {
                        error = hammer_str_to_tid(ncp->nc_name + i + 2,
                                                  &ispfs, &asof, &localization);
                        if (error != 0) {
                                error = ENOENT;
                                goto done;
                        }
                        if (asof != HAMMER_MAX_TID)
                                flags |= HAMMER_INODE_RO;
                        break;
                }
        }
        nlen = i;
        /*
         * If this is a PFS softlink we dive into the PFS
         */
        if (ispfs && nlen == 0) {
                ip = hammer_get_inode(&trans, dip, HAMMER_OBJID_ROOT,
                                      asof, localization,
                                      flags, &error);
                if (error == 0) {
                        error = hammer_get_vnode(ip, &vp);
                        hammer_rel_inode(ip, 0);
                } else {
                        vp = NULL;
                }
                if (error == 0) {
                        vn_unlock(vp);
                        cache_setvp(ap->a_nch, vp);
                        vrele(vp);
                }
                goto done;
        }
        /*
         * If there is no path component the time extension is relative to dip.
         * e.g. "fubar/@@<snapshot>"
         *
         * "." is handled by the kernel, but ".@@<snapshot>" is not.
         * e.g. "fubar/.@@<snapshot>"
         *
         * ".." is handled by the kernel.  We do not currently handle
         * "..@<snapshot>".
         */
        if (nlen == 0 || (nlen == 1 && ncp->nc_name[0] == '.')) {
                ip = hammer_get_inode(&trans, dip, dip->obj_id,
                                      asof, dip->obj_localization,
                                      flags, &error);
                if (error == 0) {
                        error = hammer_get_vnode(ip, &vp);
                        hammer_rel_inode(ip, 0);
                } else {
                        vp = NULL;
                }
                if (error == 0) {
                        vn_unlock(vp);
                        cache_setvp(ap->a_nch, vp);
                        vrele(vp);
                }
                goto done;
        }
        /*
         * Calculate the namekey and setup the key range for the scan.  This
         * works kinda like a chained hash table where the lower 32 bits
         * of the namekey synthesize the chain.
         *
         * The key range is inclusive of both key_beg and key_end.
         */
        namekey = hammer_directory_namekey(dip, ncp->nc_name, nlen,
                                           &max_iterations);
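
        /*
         * Example of the chained-hash layout described above: two names
         * whose hashes collide produce records that differ only in the
         * low 32 iteration bits, so the scan below walks the inclusive
         * range [namekey, namekey + max_iterations] and compares the
         * stored name against each candidate record.
         */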
        error = hammer_init_cursor(&trans, &cursor, &dip->cache[1], dip);
        cursor.key_beg.localization = dip->obj_localization +
                                      hammer_dir_localization(dip);
        cursor.key_beg.obj_id = dip->obj_id;
        cursor.key_beg.key = namekey;
        cursor.key_beg.create_tid = 0;
        cursor.key_beg.delete_tid = 0;
        cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
        cursor.key_beg.obj_type = 0;

        cursor.key_end = cursor.key_beg;
        cursor.key_end.key += max_iterations;
        cursor.asof = asof;
        cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;
        /*
         * Scan all matching records (the chain), locate the one matching
         * the requested path component.
         *
         * The hammer_ip_*() functions merge in-memory records with on-disk
         * records for the purposes of the search.
         */
        obj_id = 0;
        localization = HAMMER_DEF_LOCALIZATION;

        if (error == 0) {
                error = hammer_ip_first(&cursor);
                while (error == 0) {
                        error = hammer_ip_resolve_data(&cursor);
                        if (error)
                                break;
                        if (nlen == cursor.leaf->data_len - HAMMER_ENTRY_NAME_OFF &&
                            bcmp(ncp->nc_name, cursor.data->entry.name, nlen) == 0) {
                                obj_id = cursor.data->entry.obj_id;
                                localization = cursor.data->entry.localization;
                                break;
                        }
                        error = hammer_ip_next(&cursor);
                }
        }
        hammer_done_cursor(&cursor);
        /*
         * Lookup the obj_id.  This should always succeed.  If it does not
         * the filesystem may be damaged and we return a dummy inode.
         */
        if (error == 0) {
                ip = hammer_get_inode(&trans, dip, obj_id,
                                      asof, localization,
                                      flags, &error);
                if (error == ENOENT) {
                        kprintf("HAMMER: WARNING: Missing "
                                "inode for dirent \"%s\"\n"
                                "\tobj_id = %016llx, asof=%016llx, lo=%08x\n",
                                ncp->nc_name,
                                (long long)obj_id, (long long)asof,
                                localization);
                        error = 0;
                        ip = hammer_get_dummy_inode(&trans, dip, obj_id,
                                                    asof, localization,
                                                    flags, &error);
                }
                if (error == 0) {
                        error = hammer_get_vnode(ip, &vp);
                        hammer_rel_inode(ip, 0);
                } else {
                        vp = NULL;
                }
                if (error == 0) {
                        vn_unlock(vp);
                        cache_setvp(ap->a_nch, vp);
                        vrele(vp);
                }
        } else if (error == ENOENT) {
                cache_setvp(ap->a_nch, NULL);
        }
done:
        hammer_done_transaction(&trans);
        lwkt_reltoken(&hmp->fs_token);
        return (error);
}

/*
 * hammer_vop_nlookupdotdot { dvp, vpp, cred }
 *
 * Locate the parent directory of a directory vnode.
 *
 * dvp is referenced but not locked.  *vpp must be returned referenced and
 * locked.  A parent_obj_id of 0 does not necessarily indicate that we are
 * at the root; instead it could indicate that the directory we were in was
 * removed.
 *
 * NOTE: as-of sequences are not linked into the directory structure.  If
 * we are at the root with a different asof than the mount point, reload
 * the same directory with the mount point's asof.  I'm not sure what this
 * will do to NFS.  We encode ASOF stamps in NFS file handles so it might not
 * get confused, but it hasn't been tested.
 */
static
int
hammer_vop_nlookupdotdot(struct vop_nlookupdotdot_args *ap)
{
        struct hammer_transaction trans;
        struct hammer_inode *dip;
        struct hammer_inode *ip;
        hammer_mount_t hmp;
        int64_t parent_obj_id;
        u_int32_t parent_obj_localization;
        hammer_tid_t asof;
        int error;

        dip = VTOI(ap->a_dvp);
        asof = dip->obj_asof;
        hmp = dip->hmp;

        /*
         * Who is our parent?  This could be the root of a pseudo-filesystem
         * whose parent is in another localization domain.
         */
        lwkt_gettoken(&hmp->fs_token);
        parent_obj_id = dip->ino_data.parent_obj_id;
        if (dip->obj_id == HAMMER_OBJID_ROOT)
                parent_obj_localization = dip->ino_data.ext.obj.parent_obj_localization;
        else
                parent_obj_localization = dip->obj_localization;

        /*
         * It's probably a PFS root when dip->ino_data.parent_obj_id is 0.
         */
        if (parent_obj_id == 0) {
                if (dip->obj_id == HAMMER_OBJID_ROOT &&
                    asof != hmp->asof) {
                        parent_obj_id = dip->obj_id;
                        asof = hmp->asof;
                        *ap->a_fakename = kmalloc(19, M_TEMP, M_WAITOK);
                        ksnprintf(*ap->a_fakename, 19, "0x%016llx",
                                  (long long)dip->obj_asof);
                } else {
                        *ap->a_vpp = NULL;
                        lwkt_reltoken(&hmp->fs_token);
                        return ENOENT;
                }
        }
        hammer_simple_transaction(&trans, hmp);
        ++hammer_stats_file_iopsr;

        ip = hammer_get_inode(&trans, dip, parent_obj_id,
                              asof, parent_obj_localization,
                              dip->flags, &error);
        if (ip) {
                error = hammer_get_vnode(ip, ap->a_vpp);
                hammer_rel_inode(ip, 0);
        } else {
                *ap->a_vpp = NULL;
        }
        hammer_done_transaction(&trans);
        lwkt_reltoken(&hmp->fs_token);
        return (error);
}

/*
 * hammer_vop_nlink { nch, dvp, vp, cred }
 */
static
int
hammer_vop_nlink(struct vop_nlink_args *ap)
{
        struct hammer_transaction trans;
        struct hammer_inode *dip;
        struct hammer_inode *ip;
        struct nchandle *nch;
        hammer_mount_t hmp;
        int error;

        if (ap->a_dvp->v_mount != ap->a_vp->v_mount)
                return(EXDEV);

        nch = ap->a_nch;
        dip = VTOI(ap->a_dvp);
        ip = VTOI(ap->a_vp);
        hmp = dip->hmp;

        if (dip->obj_localization != ip->obj_localization)
                return(EXDEV);

        if (dip->flags & HAMMER_INODE_RO)
                return (EROFS);
        if (ip->flags & HAMMER_INODE_RO)
                return (EROFS);
        if ((error = hammer_checkspace(hmp, HAMMER_CHKSPC_CREATE)) != 0)
                return (error);

        /*
         * Create a transaction to cover the operations we perform.
         */
        lwkt_gettoken(&hmp->fs_token);
        hammer_start_transaction(&trans, hmp);
        ++hammer_stats_file_iopsw;

        /*
         * Add the filesystem object to the directory.  Note that neither
         * dip nor ip are referenced or locked, but their vnodes are
         * referenced.  This function will bump the inode's link count.
         */
        error = hammer_ip_add_directory(&trans, dip,
                                        nch->ncp->nc_name, nch->ncp->nc_nlen,
                                        ip);

        /*
         * Finish up.
         */
        if (error == 0) {
                cache_setunresolved(nch);
                cache_setvp(nch, ap->a_vp);
        }
        hammer_done_transaction(&trans);
        hammer_knote(ap->a_vp, NOTE_LINK);
        hammer_knote(ap->a_dvp, NOTE_WRITE);
        lwkt_reltoken(&hmp->fs_token);
        return (error);
}

/*
 * hammer_vop_nmkdir { nch, dvp, vpp, cred, vap }
 *
 * The operating system has already ensured that the directory entry
 * does not exist and done all appropriate namespace locking.
 */
static
int
hammer_vop_nmkdir(struct vop_nmkdir_args *ap)
{
        struct hammer_transaction trans;
        struct hammer_inode *dip;
        struct hammer_inode *nip;
        struct nchandle *nch;
        hammer_mount_t hmp;
        int error;

        nch = ap->a_nch;
        dip = VTOI(ap->a_dvp);
        hmp = dip->hmp;

        if (dip->flags & HAMMER_INODE_RO)
                return (EROFS);
        if ((error = hammer_checkspace(hmp, HAMMER_CHKSPC_CREATE)) != 0)
                return (error);

        /*
         * Create a transaction to cover the operations we perform.
         */
        lwkt_gettoken(&hmp->fs_token);
        hammer_start_transaction(&trans, hmp);
        ++hammer_stats_file_iopsw;

        /*
         * Create a new filesystem object of the requested type.  The
         * returned inode will be referenced but not locked.
         */
        error = hammer_create_inode(&trans, ap->a_vap, ap->a_cred,
                                    dip, nch->ncp->nc_name, nch->ncp->nc_nlen,
                                    NULL, &nip);
        if (error) {
                hkprintf("hammer_mkdir error %d\n", error);
                hammer_done_transaction(&trans);
                *ap->a_vpp = NULL;
                lwkt_reltoken(&hmp->fs_token);
                return (error);
        }
        /*
         * Add the new filesystem object to the directory.  This will also
         * bump the inode's link count.
         */
        error = hammer_ip_add_directory(&trans, dip,
                                        nch->ncp->nc_name, nch->ncp->nc_nlen,
                                        nip);
        if (error)
                hkprintf("hammer_mkdir (add) error %d\n", error);

        /*
         * Finish up.
         */
        if (error) {
                hammer_rel_inode(nip, 0);
                *ap->a_vpp = NULL;
        } else {
                error = hammer_get_vnode(nip, ap->a_vpp);
                hammer_rel_inode(nip, 0);
                if (error == 0) {
                        cache_setunresolved(ap->a_nch);
                        cache_setvp(ap->a_nch, *ap->a_vpp);
                }
        }
        hammer_done_transaction(&trans);
        if (error == 0)
                hammer_knote(ap->a_dvp, NOTE_WRITE | NOTE_LINK);
        lwkt_reltoken(&hmp->fs_token);
        return (error);
}

/*
 * hammer_vop_nmknod { nch, dvp, vpp, cred, vap }
 *
 * The operating system has already ensured that the directory entry
 * does not exist and done all appropriate namespace locking.
 */
static
int
hammer_vop_nmknod(struct vop_nmknod_args *ap)
{
        struct hammer_transaction trans;
        struct hammer_inode *dip;
        struct hammer_inode *nip;
        struct nchandle *nch;
        hammer_mount_t hmp;
        int error;

        nch = ap->a_nch;
        dip = VTOI(ap->a_dvp);
        hmp = dip->hmp;

        if (dip->flags & HAMMER_INODE_RO)
                return (EROFS);
        if ((error = hammer_checkspace(hmp, HAMMER_CHKSPC_CREATE)) != 0)
                return (error);

        /*
         * Create a transaction to cover the operations we perform.
         */
        lwkt_gettoken(&hmp->fs_token);
        hammer_start_transaction(&trans, hmp);
        ++hammer_stats_file_iopsw;

        /*
         * Create a new filesystem object of the requested type.  The
         * returned inode will be referenced but not locked.
         *
         * If mknod specifies a directory a pseudo-fs is created.
         */
        error = hammer_create_inode(&trans, ap->a_vap, ap->a_cred,
                                    dip, nch->ncp->nc_name, nch->ncp->nc_nlen,
                                    NULL, &nip);
        if (error) {
                hammer_done_transaction(&trans);
                *ap->a_vpp = NULL;
                lwkt_reltoken(&hmp->fs_token);
                return (error);
        }

        /*
         * Add the new filesystem object to the directory.  This will also
         * bump the inode's link count.
         */
        error = hammer_ip_add_directory(&trans, dip,
                                        nch->ncp->nc_name, nch->ncp->nc_nlen,
                                        nip);

        /*
         * Finish up.
         */
        if (error) {
                hammer_rel_inode(nip, 0);
                *ap->a_vpp = NULL;
        } else {
                error = hammer_get_vnode(nip, ap->a_vpp);
                hammer_rel_inode(nip, 0);
                if (error == 0) {
                        cache_setunresolved(ap->a_nch);
                        cache_setvp(ap->a_nch, *ap->a_vpp);
                }
        }
        hammer_done_transaction(&trans);
        hammer_knote(ap->a_dvp, NOTE_WRITE);
        lwkt_reltoken(&hmp->fs_token);
        return (error);
}

/*
 * hammer_vop_open { vp, mode, cred, fp }
 *
 * MPSAFE (does not require fs_token)
 */
static
int
hammer_vop_open(struct vop_open_args *ap)
{
        hammer_inode_t ip;

        ++hammer_stats_file_iopsr;
        ip = VTOI(ap->a_vp);

        if ((ap->a_mode & FWRITE) && (ip->flags & HAMMER_INODE_RO))
                return (EROFS);
        return(vop_stdopen(ap));
}

/*
 * hammer_vop_print { vp }
 */
static
int
hammer_vop_print(struct vop_print_args *ap)
{
        return EOPNOTSUPP;
}

/*
 * hammer_vop_readdir { vp, uio, cred, *eofflag, *ncookies, off_t **cookies }
 */
static
int
hammer_vop_readdir(struct vop_readdir_args *ap)
{
        struct hammer_transaction trans;
        struct hammer_cursor cursor;
        struct hammer_inode *ip;
        hammer_mount_t hmp;
        struct uio *uio;
        hammer_base_elm_t base;
        int error;
        int cookie_index;
        int ncookies;
        off_t *cookies;
        off_t saveoff;
        int r;
        int dtype;

        ++hammer_stats_file_iopsr;
        ip = VTOI(ap->a_vp);
        uio = ap->a_uio;
        saveoff = uio->uio_offset;
        hmp = ip->hmp;

        if (ap->a_ncookies) {
                ncookies = uio->uio_resid / 16 + 1;
                if (ncookies > 1024)
                        ncookies = 1024;
                cookies = kmalloc(ncookies * sizeof(off_t), M_TEMP, M_WAITOK);
                cookie_index = 0;
        } else {
                ncookies = -1;
                cookies = NULL;
                cookie_index = 0;
        }
        lwkt_gettoken(&hmp->fs_token);
        hammer_simple_transaction(&trans, hmp);

        /*
         * Handle artificial entries
         *
         * It should be noted that the minimum value for a directory
         * hash key on-media is 0x0000000100000000, so we can use anything
         * less than that to represent our 'special' key space.
         */
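        /*
         * Example: "." and ".." are synthesized below at cookie offsets
         * 0 and 1 respectively, which can never collide with a real
         * entry because on-media directory hash keys start at
         * 0x0000000100000000.
         */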
        error = 0;
        if (saveoff == 0) {
                r = vop_write_dirent(&error, uio, ip->obj_id, DT_DIR, 1, ".");
                if (r)
                        goto done;
                if (cookies)
                        cookies[cookie_index] = saveoff;
                ++saveoff;
                ++cookie_index;
                if (cookie_index == ncookies)
                        goto done;
        }
        if (saveoff == 1) {
                if (ip->ino_data.parent_obj_id) {
                        r = vop_write_dirent(&error, uio,
                                             ip->ino_data.parent_obj_id,
                                             DT_DIR, 2, "..");
                } else {
                        r = vop_write_dirent(&error, uio,
                                             ip->obj_id, DT_DIR, 2, "..");
                }
                if (r)
                        goto done;
                if (cookies)
                        cookies[cookie_index] = saveoff;
                ++saveoff;
                ++cookie_index;
                if (cookie_index == ncookies)
                        goto done;
        }
        /*
         * Key range (begin and end inclusive) to scan.  Directory keys
         * directly translate to a 64 bit 'seek' position.
         */
        hammer_init_cursor(&trans, &cursor, &ip->cache[1], ip);
        cursor.key_beg.localization = ip->obj_localization +
                                      hammer_dir_localization(ip);
        cursor.key_beg.obj_id = ip->obj_id;
        cursor.key_beg.create_tid = 0;
        cursor.key_beg.delete_tid = 0;
        cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
        cursor.key_beg.obj_type = 0;
        cursor.key_beg.key = saveoff;

        cursor.key_end = cursor.key_beg;
        cursor.key_end.key = HAMMER_MAX_KEY;
        cursor.asof = ip->obj_asof;
        cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;
        error = hammer_ip_first(&cursor);

        while (error == 0) {
                error = hammer_ip_resolve_data(&cursor);
                if (error)
                        break;
                base = &cursor.leaf->base;
                saveoff = base->key;
                KKASSERT(cursor.leaf->data_len > HAMMER_ENTRY_NAME_OFF);

                if (base->obj_id != ip->obj_id)
                        panic("readdir: bad record at %p", cursor.node);

                /*
                 * Convert pseudo-filesystems into softlinks
                 */
                dtype = hammer_get_dtype(cursor.leaf->base.obj_type);
                r = vop_write_dirent(
                             &error, uio, cursor.data->entry.obj_id,
                             dtype,
                             cursor.leaf->data_len - HAMMER_ENTRY_NAME_OFF,
                             (void *)cursor.data->entry.name);
                if (r)
                        break;
                ++saveoff;
                if (cookies)
                        cookies[cookie_index] = base->key;
                ++cookie_index;
                if (cookie_index == ncookies)
                        break;
                error = hammer_ip_next(&cursor);
        }
        hammer_done_cursor(&cursor);
done:
        hammer_done_transaction(&trans);

        if (ap->a_eofflag)
                *ap->a_eofflag = (error == ENOENT);
        uio->uio_offset = saveoff;
        if (error && cookie_index == 0) {
                if (error == ENOENT)
                        error = 0;
                if (cookies) {
                        kfree(cookies, M_TEMP);
                        *ap->a_ncookies = 0;
                        *ap->a_cookies = NULL;
                }
        } else {
                if (error == ENOENT)
                        error = 0;
                if (cookies) {
                        *ap->a_ncookies = cookie_index;
                        *ap->a_cookies = cookies;
                }
        }
        lwkt_reltoken(&hmp->fs_token);
        return (error);
}

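/*
 * Note: the cookies returned above are the directory hash keys
 * themselves (base->key), so a later readdir can resume at an arbitrary
 * entry by seeding uio_offset with a saved cookie; saveoff feeds
 * cursor.key_beg.key directly.
 */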
/*
 * hammer_vop_readlink { vp, uio, cred }
 */
static
int
hammer_vop_readlink(struct vop_readlink_args *ap)
{
        struct hammer_transaction trans;
        struct hammer_cursor cursor;
        struct hammer_inode *ip;
        hammer_mount_t hmp;
        char buf[32];
        u_int32_t localization;
        hammer_pseudofs_inmem_t pfsm;
        int error;

        ip = VTOI(ap->a_vp);
        hmp = ip->hmp;

        lwkt_gettoken(&hmp->fs_token);
        /*
         * Shortcut if the symlink data was stuffed into ino_data.
         *
         * Also expand special "@@PFS%05d" softlinks (expansion only
         * occurs for non-historical (current) accesses made from the
         * primary filesystem).
         *
         * Note that userspace hammer command does not allow users to
         * create a @@PFS softlink under an existing other PFS (id!=0)
         * so the ip localization here for @@PFS softlink is always 0.
         */
        if (ip->ino_data.size <= HAMMER_INODE_BASESYMLEN) {
                char *ptr;
                int bytes;

                ptr = ip->ino_data.ext.symlink;
                bytes = (int)ip->ino_data.size;
                if (bytes == 10 &&
                    ip->obj_asof == HAMMER_MAX_TID &&
                    ip->obj_localization == 0 &&
                    strncmp(ptr, "@@PFS", 5) == 0) {
                        hammer_simple_transaction(&trans, hmp);
                        bcopy(ptr + 5, buf, 5);
                        buf[5] = 0;
                        localization = strtoul(buf, NULL, 10) << 16;
                        pfsm = hammer_load_pseudofs(&trans, localization,
                                                    &error);
                        if (error == 0) {
                                if (pfsm->pfsd.mirror_flags &
                                    HAMMER_PFSD_SLAVE) {
                                        /* vap->va_size == 26 */
                                        ksnprintf(buf, sizeof(buf),
                                                  "@@0x%016llx:%05d",
                                                  (long long)pfsm->pfsd.sync_end_tid,
                                                  localization >> 16);
                                } else {
                                        /* vap->va_size == 10 */
                                        ksnprintf(buf, sizeof(buf),
                                                  "@@-1:%05d",
                                                  localization >> 16);
                                }
                        } else {
                                ksnprintf(buf, sizeof(buf),
                                          "@@0x%016llx:%05d",
                                          (long long)HAMMER_MAX_TID,
                                          localization >> 16);
                        }
                        ptr = buf;
                        bytes = strlen(buf);
                        if (pfsm)
                                hammer_rel_pseudofs(hmp, pfsm);
                        hammer_done_transaction(&trans);
                }
                error = uiomove(ptr, bytes, ap->a_uio);
                lwkt_reltoken(&hmp->fs_token);
                return (error);
        }
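        /*
         * Example of the expansion above: a softlink whose body is
         * "@@PFS00001" reads back as "@@-1:00001" on a master (current
         * view) or as "@@0x<sync_end_tid>:00001" on a slave, producing
         * an as-of path the name lookup code can resolve.
         */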
        hammer_simple_transaction(&trans, hmp);
        ++hammer_stats_file_iopsr;
        hammer_init_cursor(&trans, &cursor, &ip->cache[1], ip);

        /*
         * Key range (begin and end inclusive) to scan.  Directory keys
         * directly translate to a 64 bit 'seek' position.
         */
        cursor.key_beg.localization = ip->obj_localization +
                                      HAMMER_LOCALIZE_MISC;
        cursor.key_beg.obj_id = ip->obj_id;
        cursor.key_beg.create_tid = 0;
        cursor.key_beg.delete_tid = 0;
        cursor.key_beg.rec_type = HAMMER_RECTYPE_FIX;
        cursor.key_beg.obj_type = 0;
        cursor.key_beg.key = HAMMER_FIXKEY_SYMLINK;
        cursor.asof = ip->obj_asof;
        cursor.flags |= HAMMER_CURSOR_ASOF;

        error = hammer_ip_lookup(&cursor);
        if (error == 0) {
                error = hammer_ip_resolve_data(&cursor);
                if (error == 0) {
                        KKASSERT(cursor.leaf->data_len >=
                                 HAMMER_SYMLINK_NAME_OFF);
                        error = uiomove(cursor.data->symlink.name,
                                        cursor.leaf->data_len -
                                                HAMMER_SYMLINK_NAME_OFF,
                                        ap->a_uio);
                }
        }
        hammer_done_cursor(&cursor);
        hammer_done_transaction(&trans);
        lwkt_reltoken(&hmp->fs_token);
        return (error);
}

/*
 * hammer_vop_nremove { nch, dvp, cred }
 */
static
int
hammer_vop_nremove(struct vop_nremove_args *ap)
{
        struct hammer_transaction trans;
        struct hammer_inode *dip;
        hammer_mount_t hmp;
        int error;

        dip = VTOI(ap->a_dvp);
        hmp = dip->hmp;

        if (hammer_nohistory(dip) == 0 &&
            (error = hammer_checkspace(hmp, HAMMER_CHKSPC_REMOVE)) != 0) {
                return (error);
        }

        lwkt_gettoken(&hmp->fs_token);
        hammer_start_transaction(&trans, hmp);
        ++hammer_stats_file_iopsw;
        error = hammer_dounlink(&trans, ap->a_nch, ap->a_dvp, ap->a_cred, 0, 0);
        hammer_done_transaction(&trans);
        if (error == 0)
                hammer_knote(ap->a_dvp, NOTE_WRITE);
        lwkt_reltoken(&hmp->fs_token);
        return (error);
}

/*
 * hammer_vop_nrename { fnch, tnch, fdvp, tdvp, cred }
 */
static
int
hammer_vop_nrename(struct vop_nrename_args *ap)
{
        struct hammer_transaction trans;
        struct namecache *fncp;
        struct namecache *tncp;
        struct hammer_inode *fdip;
        struct hammer_inode *tdip;
        struct hammer_inode *ip;
        hammer_mount_t hmp;
        struct hammer_cursor cursor;
        int64_t namekey;
        u_int32_t max_iterations;
        int nlen;
        int error;

        if (ap->a_fdvp->v_mount != ap->a_tdvp->v_mount)
                return(EXDEV);
        if (ap->a_fdvp->v_mount != ap->a_fnch->ncp->nc_vp->v_mount)
                return(EXDEV);

        fdip = VTOI(ap->a_fdvp);
        tdip = VTOI(ap->a_tdvp);
        fncp = ap->a_fnch->ncp;
        tncp = ap->a_tnch->ncp;
        ip = VTOI(fncp->nc_vp);
        KKASSERT(ip != NULL);

        hmp = ip->hmp;

        if (fdip->obj_localization != tdip->obj_localization)
                return(EXDEV);
        if (fdip->obj_localization != ip->obj_localization)
                return(EXDEV);

        if (fdip->flags & HAMMER_INODE_RO)
                return (EROFS);
        if (tdip->flags & HAMMER_INODE_RO)
                return (EROFS);
        if (ip->flags & HAMMER_INODE_RO)
                return (EROFS);
        if ((error = hammer_checkspace(hmp, HAMMER_CHKSPC_CREATE)) != 0)
                return (error);

        lwkt_gettoken(&hmp->fs_token);
        hammer_start_transaction(&trans, hmp);
        ++hammer_stats_file_iopsw;
        /*
         * Remove tncp from the target directory and then link ip as
         * tncp. XXX pass trans to dounlink
         *
         * Force the inode sync-time to match the transaction so it is
         * in-sync with the creation of the target directory entry.
         */
        error = hammer_dounlink(&trans, ap->a_tnch, ap->a_tdvp,
                                ap->a_cred, 0, -1);
        if (error == 0 || error == ENOENT) {
                error = hammer_ip_add_directory(&trans, tdip,
                                                tncp->nc_name, tncp->nc_nlen,
                                                ip);
                if (error == 0) {
                        ip->ino_data.parent_obj_id = tdip->obj_id;
                        ip->ino_data.ctime = trans.time;
                        hammer_modify_inode(&trans, ip, HAMMER_INODE_DDIRTY);
                }
        }
        if (error)
                goto failed; /* XXX */
        /*
         * Locate the record in the originating directory and remove it.
         *
         * Calculate the namekey and setup the key range for the scan.  This
         * works kinda like a chained hash table where the lower 32 bits
         * of the namekey synthesize the chain.
         *
         * The key range is inclusive of both key_beg and key_end.
         */
        namekey = hammer_directory_namekey(fdip, fncp->nc_name, fncp->nc_nlen,
                                           &max_iterations);
retry:
        hammer_init_cursor(&trans, &cursor, &fdip->cache[1], fdip);
        cursor.key_beg.localization = fdip->obj_localization +
                                      hammer_dir_localization(fdip);
        cursor.key_beg.obj_id = fdip->obj_id;
        cursor.key_beg.key = namekey;
        cursor.key_beg.create_tid = 0;
        cursor.key_beg.delete_tid = 0;
        cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
        cursor.key_beg.obj_type = 0;

        cursor.key_end = cursor.key_beg;
        cursor.key_end.key += max_iterations;
        cursor.asof = fdip->obj_asof;
        cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;
        /*
         * Scan all matching records (the chain), locate the one matching
         * the requested path component.
         *
         * The hammer_ip_*() functions merge in-memory records with on-disk
         * records for the purposes of the search.
         */
        error = hammer_ip_first(&cursor);
        while (error == 0) {
                if (hammer_ip_resolve_data(&cursor) != 0)
                        break;
                nlen = cursor.leaf->data_len - HAMMER_ENTRY_NAME_OFF;
                KKASSERT(nlen > 0);
                if (fncp->nc_nlen == nlen &&
                    bcmp(fncp->nc_name, cursor.data->entry.name, nlen) == 0) {
                        break;
                }
                error = hammer_ip_next(&cursor);
        }

        /*
         * If all is ok we have to get the inode so we can adjust nlinks.
         *
         * WARNING: hammer_ip_del_directory() may have to terminate the
         * cursor to avoid a recursion.  It's ok to call hammer_done_cursor()
         * twice.
         */
        if (error == 0)
                error = hammer_ip_del_directory(&trans, &cursor, fdip, ip);

        /*
         * XXX A deadlock here will break rename's atomicity for the
         * purposes of crash recovery.
         */
        if (error == EDEADLK) {
                hammer_done_cursor(&cursor);
                goto retry;
        }
        /*
         * Cleanup and tell the kernel that the rename succeeded.
         *
         * NOTE: ip->vp, if non-NULL, cannot be directly referenced
         *       without formally acquiring the vp since the vp might
         *       have zero refs on it, or be in the middle of a reclaim,
         *       etc.
         */
        hammer_done_cursor(&cursor);
        if (error == 0) {
                cache_rename(ap->a_fnch, ap->a_tnch);
                hammer_knote(ap->a_fdvp, NOTE_WRITE);
                hammer_knote(ap->a_tdvp, NOTE_WRITE);
                if (ip->vp) {
                        struct vnode *vp;

                        error = hammer_get_vnode(ip, &vp);
                        if (error == 0 && vp) {
                                vn_unlock(vp);
                                hammer_knote(ip->vp, NOTE_RENAME);
                                vrele(vp);
                        } else {
                                kprintf("Debug: HAMMER ip/vp race2 avoided\n");
                        }
                }
        }
failed:
        hammer_done_transaction(&trans);
        lwkt_reltoken(&hmp->fs_token);
        return (error);
}

/*
 * hammer_vop_nrmdir { nch, dvp, cred }
 */
static
int
hammer_vop_nrmdir(struct vop_nrmdir_args *ap)
{
        struct hammer_transaction trans;
        struct hammer_inode *dip;
        hammer_mount_t hmp;
        int error;

        dip = VTOI(ap->a_dvp);
        hmp = dip->hmp;

        if (hammer_nohistory(dip) == 0 &&
            (error = hammer_checkspace(hmp, HAMMER_CHKSPC_REMOVE)) != 0) {
                return (error);
        }

        lwkt_gettoken(&hmp->fs_token);
        hammer_start_transaction(&trans, hmp);
        ++hammer_stats_file_iopsw;
        error = hammer_dounlink(&trans, ap->a_nch, ap->a_dvp, ap->a_cred, 0, 1);
        hammer_done_transaction(&trans);
        if (error == 0)
                hammer_knote(ap->a_dvp, NOTE_WRITE | NOTE_LINK);
        lwkt_reltoken(&hmp->fs_token);
        return (error);
}

/*
 * hammer_vop_markatime { vp, cred }
 */
static
int
hammer_vop_markatime(struct vop_markatime_args *ap)
{
        struct hammer_transaction trans;
        struct hammer_inode *ip;
        hammer_mount_t hmp;

        ip = VTOI(ap->a_vp);
        if (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY)
                return (EROFS);
        if (ip->flags & HAMMER_INODE_RO)
                return (EROFS);
        hmp = ip->hmp;
        if (hmp->mp->mnt_flag & MNT_NOATIME)
                return (0);
        lwkt_gettoken(&hmp->fs_token);
        hammer_start_transaction(&trans, hmp);
        ++hammer_stats_file_iopsw;

        ip->ino_data.atime = trans.time;
        hammer_modify_inode(&trans, ip, HAMMER_INODE_ATIME);
        hammer_done_transaction(&trans);
        hammer_knote(ap->a_vp, NOTE_ATTRIB);
        lwkt_reltoken(&hmp->fs_token);
        return (0);
}

/*
 * hammer_vop_setattr { vp, vap, cred }
 */
static
int
hammer_vop_setattr(struct vop_setattr_args *ap)
{
        struct hammer_transaction trans;
        struct hammer_inode *ip;
        struct vattr *vap;
        hammer_mount_t hmp;
        int modflags;
        int error;
        int truncating;
        int blksize;
        int kflags;
        int64_t aligned_size;
        u_int32_t flags;

        vap = ap->a_vap;
        ip = ap->a_vp->v_data;
        modflags = 0;
        kflags = 0;
        hmp = ip->hmp;

        if (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY)
                return (EROFS);
        if (ip->flags & HAMMER_INODE_RO)
                return (EROFS);
        if (hammer_nohistory(ip) == 0 &&
            (error = hammer_checkspace(hmp, HAMMER_CHKSPC_REMOVE)) != 0) {
                return (error);
        }

        lwkt_gettoken(&hmp->fs_token);
        hammer_start_transaction(&trans, hmp);
        ++hammer_stats_file_iopsw;
        error = 0;
        if (vap->va_flags != VNOVAL) {
                flags = ip->ino_data.uflags;
                error = vop_helper_setattr_flags(&flags, vap->va_flags,
                                hammer_to_unix_xid(&ip->ino_data.uid),
                                ap->a_cred);
                if (error == 0) {
                        if (ip->ino_data.uflags != flags) {
                                ip->ino_data.uflags = flags;
                                ip->ino_data.ctime = trans.time;
                                modflags |= HAMMER_INODE_DDIRTY;
                                kflags |= NOTE_ATTRIB;
                        }
                        if (ip->ino_data.uflags & (IMMUTABLE | APPEND)) {
                                error = 0;
                                goto done;
                        }
                }
                goto done;
        }
        if (ip->ino_data.uflags & (IMMUTABLE | APPEND)) {
                error = EPERM;
                goto done;
        }
        if (vap->va_uid != (uid_t)VNOVAL || vap->va_gid != (gid_t)VNOVAL) {
                mode_t cur_mode = ip->ino_data.mode;
                uid_t cur_uid = hammer_to_unix_xid(&ip->ino_data.uid);
                gid_t cur_gid = hammer_to_unix_xid(&ip->ino_data.gid);
                uuid_t uuid_uid;
                uuid_t uuid_gid;

                error = vop_helper_chown(ap->a_vp, vap->va_uid, vap->va_gid,
                                         ap->a_cred,
                                         &cur_uid, &cur_gid, &cur_mode);
                if (error == 0) {
                        hammer_guid_to_uuid(&uuid_uid, cur_uid);
                        hammer_guid_to_uuid(&uuid_gid, cur_gid);
                        if (bcmp(&uuid_uid, &ip->ino_data.uid,
                                 sizeof(uuid_uid)) ||
                            bcmp(&uuid_gid, &ip->ino_data.gid,
                                 sizeof(uuid_gid)) ||
                            ip->ino_data.mode != cur_mode) {
                                ip->ino_data.uid = uuid_uid;
                                ip->ino_data.gid = uuid_gid;
                                ip->ino_data.mode = cur_mode;
                                ip->ino_data.ctime = trans.time;
                                modflags |= HAMMER_INODE_DDIRTY;
                        }
                        kflags |= NOTE_ATTRIB;
                }
        }
        while (vap->va_size != VNOVAL && ip->ino_data.size != vap->va_size) {
                switch(ap->a_vp->v_type) {
                case VREG:
                        if (vap->va_size == ip->ino_data.size)
                                break;

                        /*
                         * Log the operation if in fast-fsync mode or if
                         * there are unterminated redo write records present.
                         *
                         * The second check is needed so the recovery code
                         * properly truncates write redos even if nominal
                         * REDO operations are turned off due to excessive
                         * writes, because the related records might be
                         * destroyed and never lay down a TERM_WRITE.
                         */
                        if ((ip->flags & HAMMER_INODE_REDO) ||
                            (ip->flags & HAMMER_INODE_RDIRTY)) {
                                error = hammer_generate_redo(&trans, ip,
                                                             vap->va_size,
                                                             HAMMER_REDO_TRUNC,
                                                             NULL, 0);
                        }
                        blksize = hammer_blocksize(vap->va_size);

                        /*
                         * XXX break atomicity, we can deadlock the backend
                         * if we do not release the lock.  Probably not a
                         * big deal here.
                         */
                        if (vap->va_size < ip->ino_data.size) {
                                nvtruncbuf(ap->a_vp, vap->va_size,
                                           blksize,
                                           hammer_blockoff(vap->va_size),
                                           0);
                                truncating = 1;
                                kflags |= NOTE_WRITE;
                        } else {
                                nvextendbuf(ap->a_vp,
                                            ip->ino_data.size,
                                            vap->va_size,
                                            hammer_blocksize(ip->ino_data.size),
                                            hammer_blocksize(vap->va_size),
                                            hammer_blockoff(ip->ino_data.size),
                                            hammer_blockoff(vap->va_size),
                                            0);
                                truncating = 0;
                                kflags |= NOTE_WRITE | NOTE_EXTEND;
                        }
                        ip->ino_data.size = vap->va_size;
                        ip->ino_data.mtime = trans.time;
                        /* XXX safe to use SDIRTY instead of DDIRTY here? */
                        modflags |= HAMMER_INODE_MTIME | HAMMER_INODE_DDIRTY;

                        /*
                         * On-media truncation is cached in the inode until
                         * the inode is synchronized.  We must immediately
                         * handle any frontend records.
                         */
                        if (truncating) {
                                hammer_ip_frontend_trunc(ip, vap->va_size);
#ifdef DEBUG_TRUNCATE
                                if (HammerTruncIp == NULL)
                                        HammerTruncIp = ip;
#endif
                                if ((ip->flags & HAMMER_INODE_TRUNCATED) == 0) {
                                        ip->flags |= HAMMER_INODE_TRUNCATED;
                                        ip->trunc_off = vap->va_size;
                                        hammer_inode_dirty(ip);
#ifdef DEBUG_TRUNCATE
                                        if (ip == HammerTruncIp)
                                                kprintf("truncate1 %016llx\n",
                                                    (long long)ip->trunc_off);
#endif
                                } else if (ip->trunc_off > vap->va_size) {
                                        ip->trunc_off = vap->va_size;
#ifdef DEBUG_TRUNCATE
                                        if (ip == HammerTruncIp)
                                                kprintf("truncate2 %016llx\n",
                                                    (long long)ip->trunc_off);
#endif
                                } else {
#ifdef DEBUG_TRUNCATE
                                        if (ip == HammerTruncIp)
                                                kprintf("truncate3 %016llx "
                                                        "(ignored)\n",
                                                    (long long)vap->va_size);
#endif
                                }
                        }

                        /*
                         * When truncating, nvtruncbuf() may have cleaned out
                         * a portion of the last block on-disk in the buffer
                         * cache.  We must clean out any frontend records
                         * for blocks beyond the new last block.
                         */
                        aligned_size = (vap->va_size + (blksize - 1)) &
                                       ~(int64_t)(blksize - 1);
                        if (truncating && vap->va_size < aligned_size) {
                                aligned_size -= blksize;
                                hammer_ip_frontend_trunc(ip, aligned_size);
                        }
                        break;
                case VDATABASE:
                        if ((ip->flags & HAMMER_INODE_TRUNCATED) == 0) {
                                ip->flags |= HAMMER_INODE_TRUNCATED;
                                ip->trunc_off = vap->va_size;
                                hammer_inode_dirty(ip);
                        } else if (ip->trunc_off > vap->va_size) {
                                ip->trunc_off = vap->va_size;
                        }
                        hammer_ip_frontend_trunc(ip, vap->va_size);
                        ip->ino_data.size = vap->va_size;
                        ip->ino_data.mtime = trans.time;
                        modflags |= HAMMER_INODE_MTIME | HAMMER_INODE_DDIRTY;
                        kflags |= NOTE_ATTRIB;
                        break;
                default:
                        error = EINVAL;
                        goto done;
                }
                break;
        }
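
        /*
         * Worked example for the truncation path above (sizes assumed):
         * truncating a file to va_size = 100 with blksize = 16384 gives
         * aligned_size = 16384; since 100 < 16384 the last block was
         * partially cleaned by nvtruncbuf(), aligned_size drops back one
         * block, and hammer_ip_frontend_trunc() prunes the frontend
         * records beyond the new last block.
         */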
2392 if (vap->va_atime.tv_sec != VNOVAL) {
2393 ip->ino_data.atime = hammer_timespec_to_time(&vap->va_atime);
2394 modflags |= HAMMER_INODE_ATIME;
2395 kflags |= NOTE_ATTRIB;
2397 if (vap->va_mtime.tv_sec != VNOVAL) {
2398 ip->ino_data.mtime = hammer_timespec_to_time(&vap->va_mtime);
2399 modflags |= HAMMER_INODE_MTIME;
2400 kflags |= NOTE_ATTRIB;
2402 if (vap->va_mode != (mode_t)VNOVAL) {
2403 mode_t cur_mode = ip->ino_data.mode;
2404 uid_t cur_uid = hammer_to_unix_xid(&ip->ino_data.uid);
2405 gid_t cur_gid = hammer_to_unix_xid(&ip->ino_data.gid);
2407 error = vop_helper_chmod(ap->a_vp, vap->va_mode, ap->a_cred,
2408 cur_uid, cur_gid, &cur_mode);
2409 if (error == 0 && ip->ino_data.mode != cur_mode) {
2410 ip->ino_data.mode = cur_mode;
2411 ip->ino_data.ctime = trans.time;
2412 modflags |= HAMMER_INODE_DDIRTY;
2413 kflags |= NOTE_ATTRIB;
2418 hammer_modify_inode(&trans, ip, modflags);
2419 hammer_done_transaction(&trans);
2420 hammer_knote(ap->a_vp, kflags);
2421 lwkt_reltoken(&hmp->fs_token);
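/*
 * Illustrative sketch (not part of this file): the block size selection
 * that the nvtruncbuf()/nvextendbuf() calls in hammer_vop_setattr()
 * depend on.  The constants are assumptions based on typical HAMMER
 * builds -- 16KB buffers below a 1MB demarcation, 64KB extended buffers
 * beyond it; the real helpers are hammer_blocksize() and
 * hammer_blockoff().
 */
static __inline int
example_blocksize(int64_t file_offset)
{
	if (file_offset < ((int64_t)1 << 20))	/* assumed 1MB demarc */
		return (16 * 1024);
	return (64 * 1024);
}

static __inline int
example_blockoff(int64_t file_offset)
{
	return ((int)(file_offset & (example_blocksize(file_offset) - 1)));
}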
2426 * hammer_vop_nsymlink { nch, dvp, vpp, cred, vap, target }
2430 hammer_vop_nsymlink(struct vop_nsymlink_args *ap)
2432 struct hammer_transaction trans;
2433 struct hammer_inode *dip;
2434 struct hammer_inode *nip;
2435 hammer_record_t record;
2436 struct nchandle *nch;
2441 ap->a_vap->va_type = VLNK;
2444 dip = VTOI(ap->a_dvp);
2447 if (dip->flags & HAMMER_INODE_RO)
2449 if ((error = hammer_checkspace(hmp, HAMMER_CHKSPC_CREATE)) != 0)
2453 * Create a transaction to cover the operations we perform.
2455 lwkt_gettoken(&hmp->fs_token);
2456 hammer_start_transaction(&trans, hmp);
2457 ++hammer_stats_file_iopsw;
2460 * Create a new filesystem object of the requested type. The
2461 * returned inode will be referenced but not locked.
2464 error = hammer_create_inode(&trans, ap->a_vap, ap->a_cred,
2465 dip, nch->ncp->nc_name, nch->ncp->nc_nlen,
2468 hammer_done_transaction(&trans);
2470 lwkt_reltoken(&hmp->fs_token);
2475	 * Add a record representing the symlink.  A symlink stores the link
2476	 * as pure data, not a string, and is not \0 terminated.
2479 bytes = strlen(ap->a_target);
2481 if (bytes <= HAMMER_INODE_BASESYMLEN) {
2482 bcopy(ap->a_target, nip->ino_data.ext.symlink, bytes);
2484 record = hammer_alloc_mem_record(nip, bytes);
2485 record->type = HAMMER_MEM_RECORD_GENERAL;
2487 record->leaf.base.localization = nip->obj_localization +
2488 HAMMER_LOCALIZE_MISC;
2489 record->leaf.base.key = HAMMER_FIXKEY_SYMLINK;
2490 record->leaf.base.rec_type = HAMMER_RECTYPE_FIX;
2491 record->leaf.data_len = bytes;
2492 KKASSERT(HAMMER_SYMLINK_NAME_OFF == 0);
2493 bcopy(ap->a_target, record->data->symlink.name, bytes);
2494 error = hammer_ip_add_record(&trans, record);
2498 * Set the file size to the length of the link.
2501 nip->ino_data.size = bytes;
2502 hammer_modify_inode(&trans, nip, HAMMER_INODE_DDIRTY);
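	/*
	 * Illustrative example: a short target such as "usr" (3 bytes)
	 * is copied straight into ino_data.ext.symlink, while a longer
	 * path spills into a HAMMER_RECTYPE_FIX record keyed by
	 * HAMMER_FIXKEY_SYMLINK.  Either way ino_data.size carries the
	 * unterminated target length.  (That HAMMER_INODE_BASESYMLEN is
	 * sized to the inode's ext union is an assumption here.)
	 */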
2506 error = hammer_ip_add_directory(&trans, dip, nch->ncp->nc_name,
2507 nch->ncp->nc_nlen, nip);
2513 hammer_rel_inode(nip, 0);
2516 error = hammer_get_vnode(nip, ap->a_vpp);
2517 hammer_rel_inode(nip, 0);
2519 cache_setunresolved(ap->a_nch);
2520 cache_setvp(ap->a_nch, *ap->a_vpp);
2521 hammer_knote(ap->a_dvp, NOTE_WRITE);
2524 hammer_done_transaction(&trans);
2525 lwkt_reltoken(&hmp->fs_token);
2530 * hammer_vop_nwhiteout { nch, dvp, cred, flags }
2534 hammer_vop_nwhiteout(struct vop_nwhiteout_args *ap)
2536 struct hammer_transaction trans;
2537 struct hammer_inode *dip;
2541 dip = VTOI(ap->a_dvp);
2544 if (hammer_nohistory(dip) == 0 &&
2545 (error = hammer_checkspace(hmp, HAMMER_CHKSPC_CREATE)) != 0) {
2549 lwkt_gettoken(&hmp->fs_token);
2550 hammer_start_transaction(&trans, hmp);
2551 ++hammer_stats_file_iopsw;
2552 error = hammer_dounlink(&trans, ap->a_nch, ap->a_dvp,
2553 ap->a_cred, ap->a_flags, -1);
2554 hammer_done_transaction(&trans);
2555 lwkt_reltoken(&hmp->fs_token);
2561 * hammer_vop_ioctl { vp, command, data, fflag, cred }
2565 hammer_vop_ioctl(struct vop_ioctl_args *ap)
2567 struct hammer_inode *ip = ap->a_vp->v_data;
2568 hammer_mount_t hmp = ip->hmp;
2571 ++hammer_stats_file_iopsr;
2572 lwkt_gettoken(&hmp->fs_token);
2573 error = hammer_ioctl(ip, ap->a_command, ap->a_data,
2574 ap->a_fflag, ap->a_cred);
2575 lwkt_reltoken(&hmp->fs_token);
2581 hammer_vop_mountctl(struct vop_mountctl_args *ap)
2583 static const struct mountctl_opt extraopt[] = {
2584 { HMNT_NOHISTORY, "nohistory" },
2585 { HMNT_MASTERID, "master" },
2589 struct hammer_mount *hmp;
2596 mp = ap->a_head.a_ops->head.vv_mount;
2597 KKASSERT(mp->mnt_data != NULL);
2598 hmp = (struct hammer_mount *)mp->mnt_data;
2600 lwkt_gettoken(&hmp->fs_token);
2603 case MOUNTCTL_SET_EXPORT:
2604 if (ap->a_ctllen != sizeof(struct export_args))
2607 error = hammer_vfs_export(mp, ap->a_op,
2608 (const struct export_args *)ap->a_ctl);
2610 case MOUNTCTL_MOUNTFLAGS:
2613 * Call standard mountctl VOP function
2614 * so we get user mount flags.
2616 error = vop_stdmountctl(ap);
2620 usedbytes = *ap->a_res;
2622 if (usedbytes > 0 && usedbytes < ap->a_buflen) {
2623 usedbytes += vfs_flagstostr(hmp->hflags, extraopt,
2625 ap->a_buflen - usedbytes,
2629 *ap->a_res += usedbytes;
2633 error = vop_stdmountctl(ap);
2636 lwkt_reltoken(&hmp->fs_token);
2641 * hammer_vop_strategy { vp, bio }
2643 * Strategy call, used for regular file read & write only. Note that the
2644 * bp may represent a cluster.
2646 * To simplify operation and allow better optimizations in the future,
2647	 * this code does not make any assumptions with regard to buffer
	 * alignment or size.
2652 hammer_vop_strategy(struct vop_strategy_args *ap)
2657 bp = ap->a_bio->bio_buf;
2661 error = hammer_vop_strategy_read(ap);
2664 error = hammer_vop_strategy_write(ap);
2667 bp->b_error = error = EINVAL;
2668 bp->b_flags |= B_ERROR;
2673 /* hammer_dump_dedup_cache(((hammer_inode_t)ap->a_vp->v_data)->hmp); */
2679 * Read from a regular file. Iterate the related records and fill in the
2680 * BIO/BUF. Gaps are zero-filled.
2682 * The support code in hammer_object.c should be used to deal with mixed
2683 * in-memory and on-disk records.
2685 * NOTE: Can be called from the cluster code with an oversized buf.
2691 hammer_vop_strategy_read(struct vop_strategy_args *ap)
2693 struct hammer_transaction trans;
2694 struct hammer_inode *ip;
2695 struct hammer_inode *dip;
2697 struct hammer_cursor cursor;
2698 hammer_base_elm_t base;
2699 hammer_off_t disk_offset;
2714 ip = ap->a_vp->v_data;
2718 * The zone-2 disk offset may have been set by the cluster code via
2719 * a BMAP operation, or else should be NOOFFSET.
2721 * Checking the high bits for a match against zone-2 should suffice.
2723	 * In cases where a lot of data duplication is present it may be
2724	 * more beneficial to drop through and double-buffer through the
	 * device buffer cache.
2727 nbio = push_bio(bio);
2728 if ((nbio->bio_offset & HAMMER_OFF_ZONE_MASK) ==
2729 HAMMER_ZONE_LARGE_DATA) {
2730 if (hammer_double_buffer == 0) {
2731 lwkt_gettoken(&hmp->fs_token);
2732 error = hammer_io_direct_read(hmp, nbio, NULL);
2733 lwkt_reltoken(&hmp->fs_token);
2738 * Try to shortcut requests for double_buffer mode too.
2739	 * Since this mode runs through the device buffer cache,
2740 * only compatible buffer sizes (meaning those generated
2741 * by normal filesystem buffers) are legal.
2743 if (hammer_live_dedup == 0 && (bp->b_flags & B_PAGING) == 0) {
2744 lwkt_gettoken(&hmp->fs_token);
2745 error = hammer_io_indirect_read(hmp, nbio, NULL);
2746 lwkt_reltoken(&hmp->fs_token);
2752 * Well, that sucked. Do it the hard way. If all the stars are
2753 * aligned we may still be able to issue a direct-read.
2755 lwkt_gettoken(&hmp->fs_token);
2756 hammer_simple_transaction(&trans, hmp);
2757 hammer_init_cursor(&trans, &cursor, &ip->cache[1], ip);
2760	 * Key range (begin and end inclusive) to scan.  Note that the keys
2761 * stored in the actual records represent BASE+LEN, not BASE. The
2762 * first record containing bio_offset will have a key > bio_offset.
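	 *
	 * Worked example (illustrative): a data record holding bytes
	 * 0..16383 of the file carries key 16384 (BASE+LEN), so a BIO at
	 * bio_offset 0 seeds the scan at key 1; a record whose key equals
	 * bio_offset ends exactly at bio_offset and contributes nothing
	 * to this BIO.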
2764 cursor.key_beg.localization = ip->obj_localization +
2765 HAMMER_LOCALIZE_MISC;
2766 cursor.key_beg.obj_id = ip->obj_id;
2767 cursor.key_beg.create_tid = 0;
2768 cursor.key_beg.delete_tid = 0;
2769 cursor.key_beg.obj_type = 0;
2770 cursor.key_beg.key = bio->bio_offset + 1;
2771 cursor.asof = ip->obj_asof;
2772 cursor.flags |= HAMMER_CURSOR_ASOF;
2774 cursor.key_end = cursor.key_beg;
2775 KKASSERT(ip->ino_data.obj_type == HAMMER_OBJTYPE_REGFILE);
2777 if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DBFILE) {
2778 cursor.key_beg.rec_type = HAMMER_RECTYPE_DB;
2779 cursor.key_end.rec_type = HAMMER_RECTYPE_DB;
2780 cursor.key_end.key = 0x7FFFFFFFFFFFFFFFLL;
2784 ran_end = bio->bio_offset + bp->b_bufsize;
2785 cursor.key_beg.rec_type = HAMMER_RECTYPE_DATA;
2786 cursor.key_end.rec_type = HAMMER_RECTYPE_DATA;
2787 tmp64 = ran_end + MAXPHYS + 1; /* work-around GCC-4 bug */
2788 if (tmp64 < ran_end)
2789 cursor.key_end.key = 0x7FFFFFFFFFFFFFFFLL;
2791 cursor.key_end.key = ran_end + MAXPHYS + 1;
2793 cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE;
2796 * Set NOSWAPCACHE for cursor data extraction if double buffering
2797	 * is disabled, or if the file is not marked cacheable via chflags
2798	 * and vm.swapcache_use_chflags is enabled.
2800 if (hammer_double_buffer == 0 ||
2801 ((ap->a_vp->v_flag & VSWAPCACHE) == 0 &&
2802 vm_swapcache_use_chflags)) {
2803 cursor.flags |= HAMMER_CURSOR_NOSWAPCACHE;
2806 error = hammer_ip_first(&cursor);
2809 while (error == 0) {
2811 * Get the base file offset of the record. The key for
2812	 * data records is (base + bytes) rather than (base).
2814 base = &cursor.leaf->base;
2815 rec_offset = base->key - cursor.leaf->data_len;
2818 * Calculate the gap, if any, and zero-fill it.
2820	 * n is the offset of the start of the record versus our
2821 * current seek offset in the bio.
2823 n = (int)(rec_offset - (bio->bio_offset + boff));
2825 if (n > bp->b_bufsize - boff)
2826 n = bp->b_bufsize - boff;
2827 bzero((char *)bp->b_data + boff, n);
2833 * Calculate the data offset in the record and the number
2834 * of bytes we can copy.
2836 * There are two degenerate cases. First, boff may already
2837	 * be at bp->b_bufsize.  Second, the data offset within
2838 * the record may exceed the record's size.
2842 n = cursor.leaf->data_len - roff;
2844 kprintf("strategy_read: bad n=%d roff=%d\n", n, roff);
2846 } else if (n > bp->b_bufsize - boff) {
2847 n = bp->b_bufsize - boff;
2851 * Deal with cached truncations. This cool bit of code
2852 * allows truncate()/ftruncate() to avoid having to sync
2855 * If the frontend is truncated then all backend records are
2856 * subject to the frontend's truncation.
2858 * If the backend is truncated then backend records on-disk
2859 * (but not in-memory) are subject to the backend's
2860 * truncation. In-memory records owned by the backend
2861 * represent data written after the truncation point on the
2862 * backend and must not be truncated.
2864 * Truncate operations deal with frontend buffer cache
2865 * buffers and frontend-owned in-memory records synchronously.
2867 if (ip->flags & HAMMER_INODE_TRUNCATED) {
2868 if (hammer_cursor_ondisk(&cursor)/* ||
2869 cursor.iprec->flush_state == HAMMER_FST_FLUSH*/) {
2870 if (ip->trunc_off <= rec_offset)
2872 else if (ip->trunc_off < rec_offset + n)
2873 n = (int)(ip->trunc_off - rec_offset);
2876 if (ip->sync_flags & HAMMER_INODE_TRUNCATED) {
2877 if (hammer_cursor_ondisk(&cursor)) {
2878 if (ip->sync_trunc_off <= rec_offset)
2880 else if (ip->sync_trunc_off < rec_offset + n)
2881 n = (int)(ip->sync_trunc_off - rec_offset);
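		/*
		 * Worked example (illustrative): with a cached trunc_off
		 * of 10000, a record based at 8192 with n = 4096 is
		 * clamped to n = 1808 so no data past the pending
		 * truncation point is returned, while a record based at
		 * 12288 contributes nothing at all.
		 */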
2886 * Try to issue a direct read into our bio if possible,
2887	 * otherwise resolve the element data into a hammer_buffer
	 * and copy.
2890	 * The buffer on-disk should be zeroed past any real
2891 * truncation point, but may not be for any synthesized
2892 * truncation point from above.
2894	 * NOTE: disk_offset is only valid if the cursor data is
	 * on-disk.
2897 disk_offset = cursor.leaf->data_offset + roff;
2898 isdedupable = (boff == 0 && n == bp->b_bufsize &&
2899 hammer_cursor_ondisk(&cursor) &&
2900 ((int)disk_offset & HAMMER_BUFMASK) == 0);
2902 if (isdedupable && hammer_double_buffer == 0) {
2906 KKASSERT((disk_offset & HAMMER_OFF_ZONE_MASK) ==
2907 HAMMER_ZONE_LARGE_DATA);
2908 nbio->bio_offset = disk_offset;
2909 error = hammer_io_direct_read(hmp, nbio, cursor.leaf);
2910 if (hammer_live_dedup && error == 0)
2911 hammer_dedup_cache_add(ip, cursor.leaf);
2913 } else if (isdedupable) {
2915 * Async I/O case for reading from backing store
2916 * and copying the data to the filesystem buffer.
2917 * live-dedup has to verify the data anyway if it
2918	 * gets a hit later, so we can just add the entry
	 * now.
2921 KKASSERT((disk_offset & HAMMER_OFF_ZONE_MASK) ==
2922 HAMMER_ZONE_LARGE_DATA);
2923 nbio->bio_offset = disk_offset;
2924 if (hammer_live_dedup)
2925 hammer_dedup_cache_add(ip, cursor.leaf);
2926 error = hammer_io_indirect_read(hmp, nbio, cursor.leaf);
2929 error = hammer_ip_resolve_data(&cursor);
2931 if (hammer_live_dedup && isdedupable)
2932 hammer_dedup_cache_add(ip, cursor.leaf);
2933 bcopy((char *)cursor.data + roff,
2934 (char *)bp->b_data + boff, n);
2941 * We have to be sure that the only elements added to the
2942 * dedup cache are those which are already on-media.
2944 if (hammer_live_dedup && hammer_cursor_ondisk(&cursor))
2945 hammer_dedup_cache_add(ip, cursor.leaf);
2948 * Iterate until we have filled the request.
2951 if (boff == bp->b_bufsize)
2953 error = hammer_ip_next(&cursor);
2957 * There may have been a gap after the last record
2959 if (error == ENOENT)
2961 if (error == 0 && boff != bp->b_bufsize) {
2962 KKASSERT(boff < bp->b_bufsize);
2963 bzero((char *)bp->b_data + boff, bp->b_bufsize - boff);
2964 /* boff = bp->b_bufsize; */
2968 * Disallow swapcache operation on the vnode buffer if double
2969	 * buffering is enabled; the swapcache will get the data via
2970 * the block device buffer.
2972 if (hammer_double_buffer)
2973 bp->b_flags |= B_NOTMETA;
2979 bp->b_error = error;
2981 bp->b_flags |= B_ERROR;
2986 * Cache the b-tree node for the last data read in cache[1].
2988 * If we hit the file EOF then also cache the node in the
2989	 * governing directory's cache[3]; it will be used to initialize
2990 * the inode's cache[1] for any inodes looked up via the directory.
2992 * This doesn't reduce disk accesses since the B-Tree chain is
2993 * likely cached, but it does reduce cpu overhead when looking
2994 * up file offsets for cpdup/tar/cpio style iterations.
2997 hammer_cache_node(&ip->cache[1], cursor.node);
2998 if (ran_end >= ip->ino_data.size) {
2999 dip = hammer_find_inode(&trans, ip->ino_data.parent_obj_id,
3000 ip->obj_asof, ip->obj_localization);
3002 hammer_cache_node(&dip->cache[3], cursor.node);
3003 hammer_rel_inode(dip, 0);
3006 hammer_done_cursor(&cursor);
3007 hammer_done_transaction(&trans);
3008 lwkt_reltoken(&hmp->fs_token);
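/*
 * Illustrative sketch (hypothetical helper, exposition only): the
 * gap-fill rule used by the strategy_read loop above.  Bytes of the
 * BIO not backed by any record are returned as zeros.
 */
static __inline int
example_gap_bytes(int64_t rec_offset, int64_t bio_offset, int boff,
		  int bufsize)
{
	int64_t n = rec_offset - (bio_offset + boff);

	if (n <= 0)
		return (0);		/* record starts at or before us */
	if (n > bufsize - boff)
		n = bufsize - boff;	/* gap runs past the buffer */
	return ((int)n);		/* bytes to bzero() at boff */
}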
3013 * BMAP operation - used to support cluster_read() only.
3015 * (struct vnode *vp, off_t loffset, off_t *doffsetp, int *runp, int *runb)
3017	 * This routine may return EOPNOTSUPP if the operation is not supported for
3018 * the specified offset. The contents of the pointer arguments do not
3019 * need to be initialized in that case.
3021 * If a disk address is available and properly aligned return 0 with
3022 * *doffsetp set to the zone-2 address, and *runp / *runb set appropriately
3023	 * to the run-length relative to that offset.  Callers may assume that
3024	 * *doffsetp is valid even if *runp is not sufficiently large, so we
3025	 * must return EOPNOTSUPP rather than 0 when the run cannot cover a
	 * whole buffer.
3029 hammer_vop_bmap(struct vop_bmap_args *ap)
3031 struct hammer_transaction trans;
3032 struct hammer_inode *ip;
3034 struct hammer_cursor cursor;
3035 hammer_base_elm_t base;
3039 int64_t base_offset;
3040 int64_t base_disk_offset;
3041 int64_t last_offset;
3042 hammer_off_t last_disk_offset;
3043 hammer_off_t disk_offset;
3048 ++hammer_stats_file_iopsr;
3049 ip = ap->a_vp->v_data;
3053	 * We can only BMAP regular files; database files and other object
	 * types are not supported.
3056 if (ip->ino_data.obj_type != HAMMER_OBJTYPE_REGFILE)
3060 * bmap is typically called with runp/runb both NULL when used
3061 * for writing. We do not support BMAP for writing atm.
3063 if (ap->a_cmd != BUF_CMD_READ)
3067 * Scan the B-Tree to acquire blockmap addresses, then translate
3070 lwkt_gettoken(&hmp->fs_token);
3071 hammer_simple_transaction(&trans, hmp);
3073 kprintf("bmap_beg %016llx ip->cache %p\n",
3074 (long long)ap->a_loffset, ip->cache[1]);
3076 hammer_init_cursor(&trans, &cursor, &ip->cache[1], ip);
3079	 * Key range (begin and end inclusive) to scan.  Note that the keys
3080 * stored in the actual records represent BASE+LEN, not BASE. The
3081 * first record containing bio_offset will have a key > bio_offset.
3083 cursor.key_beg.localization = ip->obj_localization +
3084 HAMMER_LOCALIZE_MISC;
3085 cursor.key_beg.obj_id = ip->obj_id;
3086 cursor.key_beg.create_tid = 0;
3087 cursor.key_beg.delete_tid = 0;
3088 cursor.key_beg.obj_type = 0;
3090 cursor.key_beg.key = ap->a_loffset - MAXPHYS + 1;
3092 cursor.key_beg.key = ap->a_loffset + 1;
3093 if (cursor.key_beg.key < 0)
3094 cursor.key_beg.key = 0;
3095 cursor.asof = ip->obj_asof;
3096 cursor.flags |= HAMMER_CURSOR_ASOF;
3098 cursor.key_end = cursor.key_beg;
3099 KKASSERT(ip->ino_data.obj_type == HAMMER_OBJTYPE_REGFILE);
3101 ran_end = ap->a_loffset + MAXPHYS;
3102 cursor.key_beg.rec_type = HAMMER_RECTYPE_DATA;
3103 cursor.key_end.rec_type = HAMMER_RECTYPE_DATA;
3104 tmp64 = ran_end + MAXPHYS + 1; /* work-around GCC-4 bug */
3105 if (tmp64 < ran_end)
3106 cursor.key_end.key = 0x7FFFFFFFFFFFFFFFLL;
3108 cursor.key_end.key = ran_end + MAXPHYS + 1;
3110 cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE;
3112 error = hammer_ip_first(&cursor);
3113 base_offset = last_offset = 0;
3114 base_disk_offset = last_disk_offset = 0;
3116 while (error == 0) {
3118 * Get the base file offset of the record. The key for
3119	 * data records is (base + bytes) rather than (base).
3121 * NOTE: rec_offset + rec_len may exceed the end-of-file.
3122 * The extra bytes should be zero on-disk and the BMAP op
3123 * should still be ok.
3125 base = &cursor.leaf->base;
3126 rec_offset = base->key - cursor.leaf->data_len;
3127 rec_len = cursor.leaf->data_len;
3130 * Incorporate any cached truncation.
3132 * NOTE: Modifications to rec_len based on synthesized
3133 * truncation points remove the guarantee that any extended
3134 * data on disk is zero (since the truncations may not have
3135 * taken place on-media yet).
3137 if (ip->flags & HAMMER_INODE_TRUNCATED) {
3138 if (hammer_cursor_ondisk(&cursor) ||
3139 cursor.iprec->flush_state == HAMMER_FST_FLUSH) {
3140 if (ip->trunc_off <= rec_offset)
3142 else if (ip->trunc_off < rec_offset + rec_len)
3143 rec_len = (int)(ip->trunc_off - rec_offset);
3146 if (ip->sync_flags & HAMMER_INODE_TRUNCATED) {
3147 if (hammer_cursor_ondisk(&cursor)) {
3148 if (ip->sync_trunc_off <= rec_offset)
3150 else if (ip->sync_trunc_off < rec_offset + rec_len)
3151 rec_len = (int)(ip->sync_trunc_off - rec_offset);
3156 * Accumulate information. If we have hit a discontiguous
3157	 * block, reset base_offset unless we are already beyond the
3158 * requested offset. If we are, that's it, we stop.
3162 if (hammer_cursor_ondisk(&cursor)) {
3163 disk_offset = cursor.leaf->data_offset;
3164 if (rec_offset != last_offset ||
3165 disk_offset != last_disk_offset) {
3166 if (rec_offset > ap->a_loffset)
3168 base_offset = rec_offset;
3169 base_disk_offset = disk_offset;
3171 last_offset = rec_offset + rec_len;
3172 last_disk_offset = disk_offset + rec_len;
3174 if (hammer_live_dedup)
3175 hammer_dedup_cache_add(ip, cursor.leaf);
3178 error = hammer_ip_next(&cursor);
3182 kprintf("BMAP %016llx: %016llx - %016llx\n",
3183 (long long)ap->a_loffset,
3184 (long long)base_offset,
3185 (long long)last_offset);
3186 kprintf("BMAP %16s: %016llx - %016llx\n", "",
3187 (long long)base_disk_offset,
3188 (long long)last_disk_offset);
3192 hammer_cache_node(&ip->cache[1], cursor.node);
3194 kprintf("bmap_end2 %016llx ip->cache %p\n",
3195 (long long)ap->a_loffset, ip->cache[1]);
3198 hammer_done_cursor(&cursor);
3199 hammer_done_transaction(&trans);
3200 lwkt_reltoken(&hmp->fs_token);
3203 * If we couldn't find any records or the records we did find were
3204 * all behind the requested offset, return failure. A forward
3205 * truncation can leave a hole w/ no on-disk records.
3207 if (last_offset == 0 || last_offset < ap->a_loffset)
3208 return (EOPNOTSUPP);
3211 * Figure out the block size at the requested offset and adjust
3212 * our limits so the cluster_read() does not create inappropriately
3213 * sized buffer cache buffers.
3215 blksize = hammer_blocksize(ap->a_loffset);
3216 if (hammer_blocksize(base_offset) != blksize) {
3217 base_offset = hammer_blockdemarc(base_offset, ap->a_loffset);
3219 if (last_offset != ap->a_loffset &&
3220 hammer_blocksize(last_offset - 1) != blksize) {
3221 last_offset = hammer_blockdemarc(ap->a_loffset,
3226	 * Returning EOPNOTSUPP simply prevents the direct-IO optimization
	 * from being used.
3229 disk_offset = base_disk_offset + (ap->a_loffset - base_offset);
3231 if ((disk_offset & HAMMER_OFF_ZONE_MASK) != HAMMER_ZONE_LARGE_DATA) {
3233 * Only large-data zones can be direct-IOd
3236 } else if ((disk_offset & HAMMER_BUFMASK) ||
3237 (last_offset - ap->a_loffset) < blksize) {
3239 * doffsetp is not aligned or the forward run size does
3240 * not cover a whole buffer, disallow the direct I/O.
3247 *ap->a_doffsetp = disk_offset;
3249 *ap->a_runb = ap->a_loffset - base_offset;
3250 KKASSERT(*ap->a_runb >= 0);
3253 *ap->a_runp = last_offset - ap->a_loffset;
3254 KKASSERT(*ap->a_runp >= 0);
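/*
 * Illustrative sketch of the caller-side contract (hedged; not the
 * actual cluster code): on a 0 return from BMAP, any logical offset
 * within [loffset - *runb, loffset + *runp] translates linearly from
 * *doffsetp.
 */
static __inline hammer_off_t
example_bmap_xlate(hammer_off_t doffset, off_t loffset, off_t target)
{
	/* caller must ensure target lies inside the returned run */
	return (doffset + (target - loffset));
}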
3262 * Write to a regular file. Because this is a strategy call the OS is
3263 * trying to actually get data onto the media.
3267 hammer_vop_strategy_write(struct vop_strategy_args *ap)
3269 hammer_record_t record;
3274 int blksize __debugvar;
3280 ip = ap->a_vp->v_data;
3283 blksize = hammer_blocksize(bio->bio_offset);
3284 KKASSERT(bp->b_bufsize == blksize);
3286 if (ip->flags & HAMMER_INODE_RO) {
3287 bp->b_error = EROFS;
3288 bp->b_flags |= B_ERROR;
3293 lwkt_gettoken(&hmp->fs_token);
3296 * Disallow swapcache operation on the vnode buffer if double
3297	 * buffering is enabled; the swapcache will get the data via
3298 * the block device buffer.
3300 if (hammer_double_buffer)
3301 bp->b_flags |= B_NOTMETA;
3304 * Interlock with inode destruction (no in-kernel or directory
3305 * topology visibility). If we queue new IO while trying to
3306 * destroy the inode we can deadlock the vtrunc call in
3307 * hammer_inode_unloadable_check().
3309 * Besides, there's no point flushing a bp associated with an
3310 * inode that is being destroyed on-media and has no kernel
3313 if ((ip->flags | ip->sync_flags) &
3314 (HAMMER_INODE_DELETING|HAMMER_INODE_DELETED)) {
3317 lwkt_reltoken(&hmp->fs_token);
3322 * Reserve space and issue a direct-write from the front-end.
3323	 * NOTE: The direct_io code will hammer_bread/bcopy smaller
	 * fragments.
3326 * An in-memory record will be installed to reference the storage
3327 * until the flusher can get to it.
3329 * Since we own the high level bio the front-end will not try to
3330 * do a direct-read until the write completes.
3332	 * NOTE: The only time we do not reserve a full-sized buffer's
3333 * worth of data is if the file is small. We do not try to
3334 * allocate a fragment (from the small-data zone) at the end of
3335 * an otherwise large file as this can lead to wildly separated
3338 KKASSERT((bio->bio_offset & HAMMER_BUFMASK) == 0);
3339 KKASSERT(bio->bio_offset < ip->ino_data.size);
3340 if (bio->bio_offset || ip->ino_data.size > HAMMER_BUFSIZE / 2)
3341 bytes = bp->b_bufsize;
3343 bytes = ((int)ip->ino_data.size + 15) & ~15;
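	/*
	 * Worked example (illustrative; assumes HAMMER_BUFSIZE is
	 * 16384): a 100-byte file at offset 0 reserves
	 * ((100 + 15) & ~15) = 112 bytes from the small-data zone,
	 * while any file larger than 8192 bytes reserves the full
	 * bp->b_bufsize to avoid fragmenting large files.
	 */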
3345 record = hammer_ip_add_bulk(ip, bio->bio_offset, bp->b_data,
3349 * B_VFSFLAG1 indicates that a REDO_WRITE entry was generated
3350 * in hammer_vop_write(). We must flag the record so the proper
3351 * REDO_TERM_WRITE entry is generated during the flush.
3354 if (bp->b_flags & B_VFSFLAG1) {
3355 record->flags |= HAMMER_RECF_REDO;
3356 bp->b_flags &= ~B_VFSFLAG1;
3358 if (record->flags & HAMMER_RECF_DEDUPED) {
3360 hammer_ip_replace_bulk(hmp, record);
3363 hammer_io_direct_write(hmp, bio, record);
3365 if (ip->rsv_recs > 1 && hmp->rsv_recs > hammer_limit_recs)
3366 hammer_flush_inode(ip, 0);
3368 bp->b_bio2.bio_offset = NOOFFSET;
3369 bp->b_error = error;
3370 bp->b_flags |= B_ERROR;
3373 lwkt_reltoken(&hmp->fs_token);
3378 * dounlink - disconnect a directory entry
3380 * XXX whiteout support not really in yet
3383 hammer_dounlink(hammer_transaction_t trans, struct nchandle *nch,
3384 struct vnode *dvp, struct ucred *cred,
3385 int flags, int isdir)
3387 struct namecache *ncp;
3391 struct hammer_cursor cursor;
3393 u_int32_t max_iterations;
3397 * Calculate the namekey and setup the key range for the scan. This
3398 * works kinda like a chained hash table where the lower 32 bits
3399 * of the namekey synthesize the chain.
3401 * The key range is inclusive of both key_beg and key_end.
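	 *
	 * Illustrative example: two names hashing to the same namekey are
	 * disambiguated by scanning [namekey, namekey + max_iterations]
	 * and comparing each record's stored name, exactly as the scan
	 * loop below does.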
3407 if (dip->flags & HAMMER_INODE_RO)
3410 namekey = hammer_directory_namekey(dip, ncp->nc_name, ncp->nc_nlen,
3413 hammer_init_cursor(trans, &cursor, &dip->cache[1], dip);
3414 cursor.key_beg.localization = dip->obj_localization +
3415 hammer_dir_localization(dip);
3416 cursor.key_beg.obj_id = dip->obj_id;
3417 cursor.key_beg.key = namekey;
3418 cursor.key_beg.create_tid = 0;
3419 cursor.key_beg.delete_tid = 0;
3420 cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
3421 cursor.key_beg.obj_type = 0;
3423 cursor.key_end = cursor.key_beg;
3424 cursor.key_end.key += max_iterations;
3425 cursor.asof = dip->obj_asof;
3426 cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;
3429 * Scan all matching records (the chain), locate the one matching
3430 * the requested path component. info->last_error contains the
3431	 * error code on search termination and could be 0, ENOENT, or
	 * some other error.
3434 * The hammer_ip_*() functions merge in-memory records with on-disk
3435 * records for the purposes of the search.
3437 error = hammer_ip_first(&cursor);
3439 while (error == 0) {
3440 error = hammer_ip_resolve_data(&cursor);
3443 nlen = cursor.leaf->data_len - HAMMER_ENTRY_NAME_OFF;
3445 if (ncp->nc_nlen == nlen &&
3446 bcmp(ncp->nc_name, cursor.data->entry.name, nlen) == 0) {
3449 error = hammer_ip_next(&cursor);
3453 * If all is ok we have to get the inode so we can adjust nlinks.
3454 * To avoid a deadlock with the flusher we must release the inode
3455 * lock on the directory when acquiring the inode for the entry.
3457 * If the target is a directory, it must be empty.
3460 hammer_unlock(&cursor.ip->lock);
3461 ip = hammer_get_inode(trans, dip, cursor.data->entry.obj_id,
3463 cursor.data->entry.localization,
3465 hammer_lock_sh(&cursor.ip->lock);
3466 if (error == ENOENT) {
3467 kprintf("HAMMER: WARNING: Removing "
3468 "dirent w/missing inode \"%s\"\n"
3469 "\tobj_id = %016llx\n",
3471 (long long)cursor.data->entry.obj_id);
3476 * If isdir >= 0 we validate that the entry is or is not a
3477 * directory. If isdir < 0 we don't care.
3479 if (error == 0 && isdir >= 0 && ip) {
3481 ip->ino_data.obj_type != HAMMER_OBJTYPE_DIRECTORY) {
3483 } else if (isdir == 0 &&
3484 ip->ino_data.obj_type == HAMMER_OBJTYPE_DIRECTORY) {
3490	 * If we are trying to remove a directory the directory must
	 * be empty.
3493 * The check directory code can loop and deadlock/retry. Our
3494 * own cursor's node locks must be released to avoid a 3-way
3495	 * deadlock with the flusher if the check directory code
	 * blocks.
	 *
3498	 * If any changes whatsoever have been made to the cursor,
3499	 * set EDEADLK and retry.
3501 * WARNING: See warnings in hammer_unlock_cursor()
3504 if (error == 0 && ip && ip->ino_data.obj_type ==
3505 HAMMER_OBJTYPE_DIRECTORY) {
3506 hammer_unlock_cursor(&cursor);
3507 error = hammer_ip_check_directory_empty(trans, ip);
3508 hammer_lock_cursor(&cursor);
3509 if (cursor.flags & HAMMER_CURSOR_RETEST) {
3510 kprintf("HAMMER: Warning: avoided deadlock "
3518 * Delete the directory entry.
3520 * WARNING: hammer_ip_del_directory() may have to terminate
3521 * the cursor to avoid a deadlock. It is ok to call
3522 * hammer_done_cursor() twice.
3525 error = hammer_ip_del_directory(trans, &cursor,
3528 hammer_done_cursor(&cursor);
3531 * Tell the namecache that we are now unlinked.
3536 * NOTE: ip->vp, if non-NULL, cannot be directly
3537 * referenced without formally acquiring the
3538 * vp since the vp might have zero refs on it,
3539 * or in the middle of a reclaim, etc.
3541 * NOTE: The cache_setunresolved() can rip the vp
3542 * out from under us since the vp may not have
3543 * any refs, in which case ip->vp will be NULL
3546 while (ip && ip->vp) {
3549 error = hammer_get_vnode(ip, &vp);
3550 if (error == 0 && vp) {
3552 hammer_knote(ip->vp, NOTE_DELETE);
3555 * Don't do this, it can deadlock
3556 * on concurrent rm's of hardlinks.
3557 * Shouldn't be needed any more.
3559 cache_inval_vp(ip->vp, CINV_DESTROY);
3564 kprintf("Debug: HAMMER ip/vp race1 avoided\n");
3568 hammer_rel_inode(ip, 0);
3570 hammer_done_cursor(&cursor);
3572 if (error == EDEADLK)
3578 /************************************************************************
3579 * FIFO AND SPECFS OPS *
3580 ************************************************************************
3584 hammer_vop_fifoclose (struct vop_close_args *ap)
3586 /* XXX update itimes */
3587 return (VOCALL(&fifo_vnode_vops, &ap->a_head));
3591 hammer_vop_fiforead (struct vop_read_args *ap)
3595 error = VOCALL(&fifo_vnode_vops, &ap->a_head);
3596 /* XXX update access time */
3601 hammer_vop_fifowrite (struct vop_write_args *ap)
3605 error = VOCALL(&fifo_vnode_vops, &ap->a_head);
3606 /* XXX update access time */
3612 hammer_vop_fifokqfilter(struct vop_kqfilter_args *ap)
3616 error = VOCALL(&fifo_vnode_vops, &ap->a_head);
3618 error = hammer_vop_kqfilter(ap);
3622 /************************************************************************
 *				KQFILTER OPS				*
3624 ************************************************************************
3627 static void filt_hammerdetach(struct knote *kn);
3628 static int filt_hammerread(struct knote *kn, long hint);
3629 static int filt_hammerwrite(struct knote *kn, long hint);
3630 static int filt_hammervnode(struct knote *kn, long hint);
3632 static struct filterops hammerread_filtops =
3633 { FILTEROP_ISFD | FILTEROP_MPSAFE,
3634 NULL, filt_hammerdetach, filt_hammerread };
3635 static struct filterops hammerwrite_filtops =
3636 { FILTEROP_ISFD | FILTEROP_MPSAFE,
3637 NULL, filt_hammerdetach, filt_hammerwrite };
3638 static struct filterops hammervnode_filtops =
3639 { FILTEROP_ISFD | FILTEROP_MPSAFE,
3640 NULL, filt_hammerdetach, filt_hammervnode };
3644 hammer_vop_kqfilter(struct vop_kqfilter_args *ap)
3646 struct vnode *vp = ap->a_vp;
3647 struct knote *kn = ap->a_kn;
3649 switch (kn->kn_filter) {
3651 kn->kn_fop = &hammerread_filtops;
3654 kn->kn_fop = &hammerwrite_filtops;
3657 kn->kn_fop = &hammervnode_filtops;
3660 return (EOPNOTSUPP);
3663 kn->kn_hook = (caddr_t)vp;
3665 knote_insert(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn);
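/*
 * Illustrative userland sketch (not kernel code) of how the filters
 * registered above are exercised; the VOPs in this file post the
 * NOTE_* flags via hammer_knote():
 *
 *	struct kevent kev;
 *	int fd = open(path, O_RDONLY);
 *	int kq = kqueue();
 *
 *	EV_SET(&kev, fd, EVFILT_VNODE, EV_ADD | EV_CLEAR,
 *	       NOTE_WRITE | NOTE_EXTEND | NOTE_ATTRIB | NOTE_DELETE,
 *	       0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *	kevent(kq, NULL, 0, &kev, 1, NULL);
 */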
3671 filt_hammerdetach(struct knote *kn)
3673 struct vnode *vp = (void *)kn->kn_hook;
3675 knote_remove(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn);
3679 filt_hammerread(struct knote *kn, long hint)
3681 struct vnode *vp = (void *)kn->kn_hook;
3682 hammer_inode_t ip = VTOI(vp);
3683 hammer_mount_t hmp = ip->hmp;
3686 if (hint == NOTE_REVOKE) {
3687 kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
3690 lwkt_gettoken(&hmp->fs_token); /* XXX use per-ip-token */
3691 off = ip->ino_data.size - kn->kn_fp->f_offset;
3692 kn->kn_data = (off < INTPTR_MAX) ? off : INTPTR_MAX;
3693 lwkt_reltoken(&hmp->fs_token);
3694 if (kn->kn_sfflags & NOTE_OLDAPI)
3696 return (kn->kn_data != 0);
3700 filt_hammerwrite(struct knote *kn, long hint)
3702 if (hint == NOTE_REVOKE)
3703 kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
3709 filt_hammervnode(struct knote *kn, long hint)
3711 if (kn->kn_sfflags & hint)
3712 kn->kn_fflags |= hint;
3713 if (hint == NOTE_REVOKE) {
3714 kn->kn_flags |= (EV_EOF | EV_NODATA);
3717 return (kn->kn_fflags != 0);