2 * Copyright (c) 2007-2008 The DragonFly Project. All rights reserved.
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@backplane.com>
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * 3. Neither the name of The DragonFly Project nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific, prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * $DragonFly: src/sys/vfs/hammer/hammer_vnops.c,v 1.102 2008/10/16 17:24:16 dillon Exp $
37 #include <sys/param.h>
38 #include <sys/systm.h>
39 #include <sys/kernel.h>
40 #include <sys/fcntl.h>
41 #include <sys/namecache.h>
42 #include <sys/vnode.h>
43 #include <sys/lockf.h>
44 #include <sys/event.h>
46 #include <sys/dirent.h>
48 #include <vm/vm_extern.h>
49 #include <vfs/fifofs/fifo.h>
51 #include <sys/mplock2.h>
58 /*static int hammer_vop_vnoperate(struct vop_generic_args *);*/
59 static int hammer_vop_fsync(struct vop_fsync_args *);
60 static int hammer_vop_read(struct vop_read_args *);
61 static int hammer_vop_write(struct vop_write_args *);
62 static int hammer_vop_access(struct vop_access_args *);
63 static int hammer_vop_advlock(struct vop_advlock_args *);
64 static int hammer_vop_close(struct vop_close_args *);
65 static int hammer_vop_ncreate(struct vop_ncreate_args *);
66 static int hammer_vop_getattr(struct vop_getattr_args *);
67 static int hammer_vop_nresolve(struct vop_nresolve_args *);
68 static int hammer_vop_nlookupdotdot(struct vop_nlookupdotdot_args *);
69 static int hammer_vop_nlink(struct vop_nlink_args *);
70 static int hammer_vop_nmkdir(struct vop_nmkdir_args *);
71 static int hammer_vop_nmknod(struct vop_nmknod_args *);
72 static int hammer_vop_open(struct vop_open_args *);
73 static int hammer_vop_print(struct vop_print_args *);
74 static int hammer_vop_readdir(struct vop_readdir_args *);
75 static int hammer_vop_readlink(struct vop_readlink_args *);
76 static int hammer_vop_nremove(struct vop_nremove_args *);
77 static int hammer_vop_nrename(struct vop_nrename_args *);
78 static int hammer_vop_nrmdir(struct vop_nrmdir_args *);
79 static int hammer_vop_markatime(struct vop_markatime_args *);
80 static int hammer_vop_setattr(struct vop_setattr_args *);
81 static int hammer_vop_strategy(struct vop_strategy_args *);
82 static int hammer_vop_bmap(struct vop_bmap_args *ap);
83 static int hammer_vop_nsymlink(struct vop_nsymlink_args *);
84 static int hammer_vop_nwhiteout(struct vop_nwhiteout_args *);
85 static int hammer_vop_ioctl(struct vop_ioctl_args *);
86 static int hammer_vop_mountctl(struct vop_mountctl_args *);
87 static int hammer_vop_kqfilter (struct vop_kqfilter_args *);
89 static int hammer_vop_fifoclose (struct vop_close_args *);
90 static int hammer_vop_fiforead (struct vop_read_args *);
91 static int hammer_vop_fifowrite (struct vop_write_args *);
92 static int hammer_vop_fifokqfilter (struct vop_kqfilter_args *);
94 struct vop_ops hammer_vnode_vops = {
95 .vop_default = vop_defaultop,
96 .vop_fsync = hammer_vop_fsync,
97 .vop_getpages = vop_stdgetpages,
98 .vop_putpages = vop_stdputpages,
99 .vop_read = hammer_vop_read,
100 .vop_write = hammer_vop_write,
101 .vop_access = hammer_vop_access,
102 .vop_advlock = hammer_vop_advlock,
103 .vop_close = hammer_vop_close,
104 .vop_ncreate = hammer_vop_ncreate,
105 .vop_getattr = hammer_vop_getattr,
106 .vop_inactive = hammer_vop_inactive,
107 .vop_reclaim = hammer_vop_reclaim,
108 .vop_nresolve = hammer_vop_nresolve,
109 .vop_nlookupdotdot = hammer_vop_nlookupdotdot,
110 .vop_nlink = hammer_vop_nlink,
111 .vop_nmkdir = hammer_vop_nmkdir,
112 .vop_nmknod = hammer_vop_nmknod,
113 .vop_open = hammer_vop_open,
114 .vop_pathconf = vop_stdpathconf,
115 .vop_print = hammer_vop_print,
116 .vop_readdir = hammer_vop_readdir,
117 .vop_readlink = hammer_vop_readlink,
118 .vop_nremove = hammer_vop_nremove,
119 .vop_nrename = hammer_vop_nrename,
120 .vop_nrmdir = hammer_vop_nrmdir,
121 .vop_markatime = hammer_vop_markatime,
122 .vop_setattr = hammer_vop_setattr,
123 .vop_bmap = hammer_vop_bmap,
124 .vop_strategy = hammer_vop_strategy,
125 .vop_nsymlink = hammer_vop_nsymlink,
126 .vop_nwhiteout = hammer_vop_nwhiteout,
127 .vop_ioctl = hammer_vop_ioctl,
128 .vop_mountctl = hammer_vop_mountctl,
129 .vop_kqfilter = hammer_vop_kqfilter
132 struct vop_ops hammer_spec_vops = {
133 .vop_default = vop_defaultop,
134 .vop_fsync = hammer_vop_fsync,
135 .vop_read = vop_stdnoread,
136 .vop_write = vop_stdnowrite,
137 .vop_access = hammer_vop_access,
138 .vop_close = hammer_vop_close,
139 .vop_markatime = hammer_vop_markatime,
140 .vop_getattr = hammer_vop_getattr,
141 .vop_inactive = hammer_vop_inactive,
142 .vop_reclaim = hammer_vop_reclaim,
143 .vop_setattr = hammer_vop_setattr
146 struct vop_ops hammer_fifo_vops = {
147 .vop_default = fifo_vnoperate,
148 .vop_fsync = hammer_vop_fsync,
149 .vop_read = hammer_vop_fiforead,
150 .vop_write = hammer_vop_fifowrite,
151 .vop_access = hammer_vop_access,
152 .vop_close = hammer_vop_fifoclose,
153 .vop_markatime = hammer_vop_markatime,
154 .vop_getattr = hammer_vop_getattr,
155 .vop_inactive = hammer_vop_inactive,
156 .vop_reclaim = hammer_vop_reclaim,
157 .vop_setattr = hammer_vop_setattr,
158 .vop_kqfilter = hammer_vop_fifokqfilter
163 hammer_knote(struct vnode *vp, int flags)
166 KNOTE(&vp->v_pollinfo.vpi_kqinfo.ki_note, flags);
169 #ifdef DEBUG_TRUNCATE
170 struct hammer_inode *HammerTruncIp;
173 static int hammer_dounlink(hammer_transaction_t trans, struct nchandle *nch,
174 struct vnode *dvp, struct ucred *cred,
175 int flags, int isdir);
176 static int hammer_vop_strategy_read(struct vop_strategy_args *ap);
177 static int hammer_vop_strategy_write(struct vop_strategy_args *ap);
182 hammer_vop_vnoperate(struct vop_generic_args *)
184 return (VOCALL(&hammer_vnode_vops, ap));
189 * hammer_vop_fsync { vp, waitfor }
191 * fsync() an inode to disk and wait for it to be completely committed
192 * such that the information would not be undone if a crash occured after
195 * NOTE: HAMMER's fsync()'s are going to remain expensive until we implement
196 * a REDO log. A sysctl is provided to relax HAMMER's fsync()
199 * Ultimately the combination of a REDO log and use of fast storage
200 * to front-end cluster caches will make fsync fast, but it aint
201 * here yet. And, in anycase, we need real transactional
202 * all-or-nothing features which are not restricted to a single file.
206 hammer_vop_fsync(struct vop_fsync_args *ap)
208 hammer_inode_t ip = VTOI(ap->a_vp);
209 hammer_mount_t hmp = ip->hmp;
210 int waitfor = ap->a_waitfor;
213 lwkt_gettoken(&hmp->fs_token);
216 * Fsync rule relaxation (default is either full synchronous flush
217 * or REDO semantics with synchronous flush).
219 if (ap->a_flags & VOP_FSYNC_SYSCALL) {
220 switch(hammer_fsync_mode) {
223 /* no REDO, full synchronous flush */
227 /* no REDO, full asynchronous flush */
228 if (waitfor == MNT_WAIT)
229 waitfor = MNT_NOWAIT;
232 /* REDO semantics, synchronous flush */
233 if (hmp->version < HAMMER_VOL_VERSION_FOUR)
235 mode = HAMMER_FLUSH_UNDOS_AUTO;
238 /* REDO semantics, relaxed asynchronous flush */
239 if (hmp->version < HAMMER_VOL_VERSION_FOUR)
241 mode = HAMMER_FLUSH_UNDOS_RELAXED;
242 if (waitfor == MNT_WAIT)
243 waitfor = MNT_NOWAIT;
246 /* ignore the fsync() system call */
247 lwkt_reltoken(&hmp->fs_token);
250 /* we have to do something */
251 mode = HAMMER_FLUSH_UNDOS_RELAXED;
252 if (waitfor == MNT_WAIT)
253 waitfor = MNT_NOWAIT;
258 * Fast fsync only needs to flush the UNDO/REDO fifo if
259 * HAMMER_INODE_REDO is non-zero and the only modifications
260 * made to the file are write or write-extends.
262 if ((ip->flags & HAMMER_INODE_REDO) &&
263 (ip->flags & HAMMER_INODE_MODMASK_NOREDO) == 0
265 ++hammer_count_fsyncs;
266 hammer_flusher_flush_undos(hmp, mode);
268 lwkt_reltoken(&hmp->fs_token);
273 * REDO is enabled by fsync(), the idea being we really only
274 * want to lay down REDO records when programs are using
275 * fsync() heavily. The first fsync() on the file starts
276 * the gravy train going and later fsync()s keep it hot by
277 * resetting the redo_count.
279 * We weren't running REDOs before now so we have to fall
280 * through and do a full fsync of what we have.
282 if (hmp->version >= HAMMER_VOL_VERSION_FOUR &&
283 (hmp->flags & HAMMER_MOUNT_REDO_RECOVERY_RUN) == 0) {
284 ip->flags |= HAMMER_INODE_REDO;
291 * Do a full flush sequence.
293 ++hammer_count_fsyncs;
294 vfsync(ap->a_vp, waitfor, 1, NULL, NULL);
295 hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
296 if (waitfor == MNT_WAIT) {
298 hammer_wait_inode(ip);
299 vn_lock(ap->a_vp, LK_EXCLUSIVE | LK_RETRY);
301 lwkt_reltoken(&hmp->fs_token);
306 * hammer_vop_read { vp, uio, ioflag, cred }
308 * MPSAFE (for the cache safe does not require fs_token)
312 hammer_vop_read(struct vop_read_args *ap)
314 struct hammer_transaction trans;
328 if (ap->a_vp->v_type != VREG)
336 * Allow the UIO's size to override the sequential heuristic.
338 blksize = hammer_blocksize(uio->uio_offset);
339 seqcount = (uio->uio_resid + (BKVASIZE - 1)) / BKVASIZE;
340 ioseqcount = (ap->a_ioflag >> 16);
341 if (seqcount < ioseqcount)
342 seqcount = ioseqcount;
345 * If reading or writing a huge amount of data we have to break
346 * atomicy and allow the operation to be interrupted by a signal
347 * or it can DOS the machine.
349 bigread = (uio->uio_resid > 100 * 1024 * 1024);
353 * Access the data typically in HAMMER_BUFSIZE blocks via the
354 * buffer cache, but HAMMER may use a variable block size based
357 * XXX Temporary hack, delay the start transaction while we remain
358 * MPSAFE. NOTE: ino_data.size cannot change while vnode is
361 while (uio->uio_resid > 0 && uio->uio_offset < ip->ino_data.size) {
365 blksize = hammer_blocksize(uio->uio_offset);
366 offset = (int)uio->uio_offset & (blksize - 1);
367 base_offset = uio->uio_offset - offset;
369 if (bigread && (error = hammer_signal_check(ip->hmp)) != 0)
375 bp = getcacheblk(ap->a_vp, base_offset);
384 if (got_fstoken == 0) {
385 lwkt_gettoken(&hmp->fs_token);
387 hammer_start_transaction(&trans, ip->hmp);
390 if (hammer_cluster_enable) {
392 * Use file_limit to prevent cluster_read() from
393 * creating buffers of the wrong block size past
396 file_limit = ip->ino_data.size;
397 if (base_offset < HAMMER_XDEMARC &&
398 file_limit > HAMMER_XDEMARC) {
399 file_limit = HAMMER_XDEMARC;
401 error = cluster_read(ap->a_vp,
402 file_limit, base_offset,
403 blksize, uio->uio_resid,
404 seqcount * BKVASIZE, &bp);
406 error = bread(ap->a_vp, base_offset, blksize, &bp);
413 if ((hammer_debug_io & 0x0001) && (bp->b_flags & B_IODEBUG)) {
414 kprintf("doff %016jx read file %016jx@%016jx\n",
415 (intmax_t)bp->b_bio2.bio_offset,
416 (intmax_t)ip->obj_id,
417 (intmax_t)bp->b_loffset);
419 bp->b_flags &= ~B_IODEBUG;
421 /* bp->b_flags |= B_CLUSTEROK; temporarily disabled */
422 n = blksize - offset;
423 if (n > uio->uio_resid)
425 if (n > ip->ino_data.size - uio->uio_offset)
426 n = (int)(ip->ino_data.size - uio->uio_offset);
427 error = uiomove((char *)bp->b_data + offset, n, uio);
429 /* data has a lower priority then meta-data */
430 bp->b_flags |= B_AGE;
434 hammer_stats_file_read += n;
438 * XXX only update the atime if we had to get the MP lock.
439 * XXX hack hack hack, fixme.
442 if ((ip->flags & HAMMER_INODE_RO) == 0 &&
443 (ip->hmp->mp->mnt_flag & MNT_NOATIME) == 0) {
444 ip->ino_data.atime = trans.time;
445 hammer_modify_inode(&trans, ip, HAMMER_INODE_ATIME);
447 hammer_done_transaction(&trans);
448 lwkt_reltoken(&hmp->fs_token);
454 * hammer_vop_write { vp, uio, ioflag, cred }
458 hammer_vop_write(struct vop_write_args *ap)
460 struct hammer_transaction trans;
461 struct hammer_inode *ip;
474 if (ap->a_vp->v_type != VREG)
480 seqcount = ap->a_ioflag >> 16;
482 if (ip->flags & HAMMER_INODE_RO)
486 * Create a transaction to cover the operations we perform.
488 lwkt_gettoken(&hmp->fs_token);
489 hammer_start_transaction(&trans, hmp);
495 if (ap->a_ioflag & IO_APPEND)
496 uio->uio_offset = ip->ino_data.size;
499 * Check for illegal write offsets. Valid range is 0...2^63-1.
501 * NOTE: the base_off assignment is required to work around what
502 * I consider to be a GCC-4 optimization bug.
504 if (uio->uio_offset < 0) {
505 hammer_done_transaction(&trans);
506 lwkt_reltoken(&hmp->fs_token);
509 base_offset = uio->uio_offset + uio->uio_resid; /* work around gcc-4 */
510 if (uio->uio_resid > 0 && base_offset <= uio->uio_offset) {
511 hammer_done_transaction(&trans);
512 lwkt_reltoken(&hmp->fs_token);
517 * If reading or writing a huge amount of data we have to break
518 * atomicy and allow the operation to be interrupted by a signal
519 * or it can DOS the machine.
521 * Preset redo_count so we stop generating REDOs earlier if the
524 bigwrite = (uio->uio_resid > 100 * 1024 * 1024);
525 if ((ip->flags & HAMMER_INODE_REDO) &&
526 ip->redo_count < hammer_limit_redo) {
527 ip->redo_count += uio->uio_resid;
531 * Access the data typically in HAMMER_BUFSIZE blocks via the
532 * buffer cache, but HAMMER may use a variable block size based
535 while (uio->uio_resid > 0) {
543 if ((error = hammer_checkspace(hmp, HAMMER_CHKSPC_WRITE)) != 0)
545 if (bigwrite && (error = hammer_signal_check(hmp)) != 0)
548 blksize = hammer_blocksize(uio->uio_offset);
551 * Do not allow HAMMER to blow out the buffer cache. Very
552 * large UIOs can lockout other processes due to bwillwrite()
555 * The hammer inode is not locked during these operations.
556 * The vnode is locked which can interfere with the pageout
557 * daemon for non-UIO_NOCOPY writes but should not interfere
558 * with the buffer cache. Even so, we cannot afford to
559 * allow the pageout daemon to build up too many dirty buffer
562 * Only call this if we aren't being recursively called from
563 * a virtual disk device (vn), else we may deadlock.
565 if ((ap->a_ioflag & IO_RECURSE) == 0)
569 * Control the number of pending records associated with
570 * this inode. If too many have accumulated start a
571 * flush. Try to maintain a pipeline with the flusher.
573 if (ip->rsv_recs >= hammer_limit_inode_recs) {
574 hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
576 if (ip->rsv_recs >= hammer_limit_inode_recs * 2) {
577 while (ip->rsv_recs >= hammer_limit_inode_recs) {
578 tsleep(&ip->rsv_recs, 0, "hmrwww", hz);
580 hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
585 * Do not allow HAMMER to blow out system memory by
586 * accumulating too many records. Records are so well
587 * decoupled from the buffer cache that it is possible
588 * for userland to push data out to the media via
589 * direct-write, but build up the records queued to the
590 * backend faster then the backend can flush them out.
591 * HAMMER has hit its write limit but the frontend has
592 * no pushback to slow it down.
594 if (hmp->rsv_recs > hammer_limit_recs / 2) {
596 * Get the inode on the flush list
598 if (ip->rsv_recs >= 64)
599 hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
600 else if (ip->rsv_recs >= 16)
601 hammer_flush_inode(ip, 0);
604 * Keep the flusher going if the system keeps
607 delta = hmp->count_newrecords -
608 hmp->last_newrecords;
609 if (delta < 0 || delta > hammer_limit_recs / 2) {
610 hmp->last_newrecords = hmp->count_newrecords;
611 hammer_sync_hmp(hmp, MNT_NOWAIT);
615 * If we have gotten behind start slowing
618 delta = (hmp->rsv_recs - hammer_limit_recs) *
619 hz / hammer_limit_recs;
621 tsleep(&trans, 0, "hmrslo", delta);
626 * Calculate the blocksize at the current offset and figure
627 * out how much we can actually write.
629 blkmask = blksize - 1;
630 offset = (int)uio->uio_offset & blkmask;
631 base_offset = uio->uio_offset & ~(int64_t)blkmask;
632 n = blksize - offset;
633 if (n > uio->uio_resid) {
639 nsize = uio->uio_offset + n;
640 if (nsize > ip->ino_data.size) {
641 if (uio->uio_offset > ip->ino_data.size)
645 nvextendbuf(ap->a_vp,
648 hammer_blocksize(ip->ino_data.size),
649 hammer_blocksize(nsize),
650 hammer_blockoff(ip->ino_data.size),
651 hammer_blockoff(nsize),
654 kflags |= NOTE_EXTEND;
657 if (uio->uio_segflg == UIO_NOCOPY) {
659 * Issuing a write with the same data backing the
660 * buffer. Instantiate the buffer to collect the
661 * backing vm pages, then read-in any missing bits.
663 * This case is used by vop_stdputpages().
665 bp = getblk(ap->a_vp, base_offset,
666 blksize, GETBLK_BHEAVY, 0);
667 if ((bp->b_flags & B_CACHE) == 0) {
669 error = bread(ap->a_vp, base_offset,
672 } else if (offset == 0 && uio->uio_resid >= blksize) {
674 * Even though we are entirely overwriting the buffer
675 * we may still have to zero it out to avoid a
676 * mmap/write visibility issue.
678 bp = getblk(ap->a_vp, base_offset, blksize, GETBLK_BHEAVY, 0);
679 if ((bp->b_flags & B_CACHE) == 0)
681 } else if (base_offset >= ip->ino_data.size) {
683 * If the base offset of the buffer is beyond the
684 * file EOF, we don't have to issue a read.
686 bp = getblk(ap->a_vp, base_offset,
687 blksize, GETBLK_BHEAVY, 0);
691 * Partial overwrite, read in any missing bits then
692 * replace the portion being written.
694 error = bread(ap->a_vp, base_offset, blksize, &bp);
699 error = uiomove(bp->b_data + offset, n, uio);
702 * Generate REDO records if enabled and redo_count will not
703 * exceeded the limit.
705 * If redo_count exceeds the limit we stop generating records
706 * and clear HAMMER_INODE_REDO. This will cause the next
707 * fsync() to do a full meta-data sync instead of just an
708 * UNDO/REDO fifo update.
710 * When clearing HAMMER_INODE_REDO any pre-existing REDOs
711 * will still be tracked. The tracks will be terminated
712 * when the related meta-data (including possible data
713 * modifications which are not tracked via REDO) is
716 if ((ip->flags & HAMMER_INODE_REDO) && error == 0) {
717 if (ip->redo_count < hammer_limit_redo) {
718 bp->b_flags |= B_VFSFLAG1;
719 error = hammer_generate_redo(&trans, ip,
720 base_offset + offset,
725 ip->flags &= ~HAMMER_INODE_REDO;
730 * If we screwed up we have to undo any VM size changes we
736 nvtruncbuf(ap->a_vp, ip->ino_data.size,
737 hammer_blocksize(ip->ino_data.size),
738 hammer_blockoff(ip->ino_data.size));
742 kflags |= NOTE_WRITE;
743 hammer_stats_file_write += n;
744 /* bp->b_flags |= B_CLUSTEROK; temporarily disabled */
745 if (ip->ino_data.size < uio->uio_offset) {
746 ip->ino_data.size = uio->uio_offset;
747 flags = HAMMER_INODE_SDIRTY;
751 ip->ino_data.mtime = trans.time;
752 flags |= HAMMER_INODE_MTIME | HAMMER_INODE_BUFS;
753 hammer_modify_inode(&trans, ip, flags);
756 * Once we dirty the buffer any cached zone-X offset
757 * becomes invalid. HAMMER NOTE: no-history mode cannot
758 * allow overwriting over the same data sector unless
759 * we provide UNDOs for the old data, which we don't.
761 bp->b_bio2.bio_offset = NOOFFSET;
764 * Final buffer disposition.
766 * Because meta-data updates are deferred, HAMMER is
767 * especially sensitive to excessive bdwrite()s because
768 * the I/O stream is not broken up by disk reads. So the
769 * buffer cache simply cannot keep up.
771 * WARNING! blksize is variable. cluster_write() is
772 * expected to not blow up if it encounters
773 * buffers that do not match the passed blksize.
775 * NOTE! Hammer shouldn't need to bawrite()/cluster_write().
776 * The ip->rsv_recs check should burst-flush the data.
777 * If we queue it immediately the buf could be left
778 * locked on the device queue for a very long time.
780 * NOTE! To avoid degenerate stalls due to mismatched block
781 * sizes we only honor IO_DIRECT on the write which
782 * abuts the end of the buffer. However, we must
783 * honor IO_SYNC in case someone is silly enough to
784 * configure a HAMMER file as swap, or when HAMMER
785 * is serving NFS (for commits). Ick ick.
787 bp->b_flags |= B_AGE;
788 if (ap->a_ioflag & IO_SYNC) {
790 } else if ((ap->a_ioflag & IO_DIRECT) && endofblk) {
794 if (offset + n == blksize) {
795 if (hammer_cluster_enable == 0 ||
796 (ap->a_vp->v_mount->mnt_flag & MNT_NOCLUSTERW)) {
799 cluster_write(bp, ip->ino_data.size,
807 hammer_done_transaction(&trans);
808 hammer_knote(ap->a_vp, kflags);
809 lwkt_reltoken(&hmp->fs_token);
814 * hammer_vop_access { vp, mode, cred }
816 * MPSAFE - does not require fs_token
820 hammer_vop_access(struct vop_access_args *ap)
822 struct hammer_inode *ip = VTOI(ap->a_vp);
827 ++hammer_stats_file_iopsr;
828 uid = hammer_to_unix_xid(&ip->ino_data.uid);
829 gid = hammer_to_unix_xid(&ip->ino_data.gid);
831 error = vop_helper_access(ap, uid, gid, ip->ino_data.mode,
832 ip->ino_data.uflags);
837 * hammer_vop_advlock { vp, id, op, fl, flags }
839 * MPSAFE - does not require fs_token
843 hammer_vop_advlock(struct vop_advlock_args *ap)
845 hammer_inode_t ip = VTOI(ap->a_vp);
847 return (lf_advlock(ap, &ip->advlock, ip->ino_data.size));
851 * hammer_vop_close { vp, fflag }
853 * We can only sync-on-close for normal closes. XXX disabled for now.
857 hammer_vop_close(struct vop_close_args *ap)
860 struct vnode *vp = ap->a_vp;
861 hammer_inode_t ip = VTOI(vp);
863 if (ip->flags & (HAMMER_INODE_CLOSESYNC|HAMMER_INODE_CLOSEASYNC)) {
864 if (vn_islocked(vp) == LK_EXCLUSIVE &&
865 (vp->v_flag & (VINACTIVE|VRECLAIMED)) == 0) {
866 if (ip->flags & HAMMER_INODE_CLOSESYNC)
869 waitfor = MNT_NOWAIT;
870 ip->flags &= ~(HAMMER_INODE_CLOSESYNC |
871 HAMMER_INODE_CLOSEASYNC);
872 VOP_FSYNC(vp, MNT_NOWAIT, waitfor);
876 return (vop_stdclose(ap));
880 * hammer_vop_ncreate { nch, dvp, vpp, cred, vap }
882 * The operating system has already ensured that the directory entry
883 * does not exist and done all appropriate namespace locking.
887 hammer_vop_ncreate(struct vop_ncreate_args *ap)
889 struct hammer_transaction trans;
890 struct hammer_inode *dip;
891 struct hammer_inode *nip;
892 struct nchandle *nch;
897 dip = VTOI(ap->a_dvp);
900 if (dip->flags & HAMMER_INODE_RO)
902 if ((error = hammer_checkspace(hmp, HAMMER_CHKSPC_CREATE)) != 0)
906 * Create a transaction to cover the operations we perform.
908 lwkt_gettoken(&hmp->fs_token);
909 hammer_start_transaction(&trans, hmp);
910 ++hammer_stats_file_iopsw;
913 * Create a new filesystem object of the requested type. The
914 * returned inode will be referenced and shared-locked to prevent
915 * it from being moved to the flusher.
917 error = hammer_create_inode(&trans, ap->a_vap, ap->a_cred,
918 dip, nch->ncp->nc_name, nch->ncp->nc_nlen,
921 hkprintf("hammer_create_inode error %d\n", error);
922 hammer_done_transaction(&trans);
924 lwkt_reltoken(&hmp->fs_token);
929 * Add the new filesystem object to the directory. This will also
930 * bump the inode's link count.
932 error = hammer_ip_add_directory(&trans, dip,
933 nch->ncp->nc_name, nch->ncp->nc_nlen,
936 hkprintf("hammer_ip_add_directory error %d\n", error);
942 hammer_rel_inode(nip, 0);
943 hammer_done_transaction(&trans);
946 error = hammer_get_vnode(nip, ap->a_vpp);
947 hammer_done_transaction(&trans);
948 hammer_rel_inode(nip, 0);
950 cache_setunresolved(ap->a_nch);
951 cache_setvp(ap->a_nch, *ap->a_vpp);
953 hammer_knote(ap->a_dvp, NOTE_WRITE);
955 lwkt_reltoken(&hmp->fs_token);
960 * hammer_vop_getattr { vp, vap }
962 * Retrieve an inode's attribute information. When accessing inodes
963 * historically we fake the atime field to ensure consistent results.
964 * The atime field is stored in the B-Tree element and allowed to be
965 * updated without cycling the element.
967 * MPSAFE - does not require fs_token
971 hammer_vop_getattr(struct vop_getattr_args *ap)
973 struct hammer_inode *ip = VTOI(ap->a_vp);
974 struct vattr *vap = ap->a_vap;
977 * We want the fsid to be different when accessing a filesystem
978 * with different as-of's so programs like diff don't think
979 * the files are the same.
981 * We also want the fsid to be the same when comparing snapshots,
982 * or when comparing mirrors (which might be backed by different
983 * physical devices). HAMMER fsids are based on the PFS's
986 * XXX there is a chance of collision here. The va_fsid reported
987 * by stat is different from the more involved fsid used in the
990 ++hammer_stats_file_iopsr;
991 hammer_lock_sh(&ip->lock);
992 vap->va_fsid = ip->pfsm->fsid_udev ^ (u_int32_t)ip->obj_asof ^
993 (u_int32_t)(ip->obj_asof >> 32);
995 vap->va_fileid = ip->ino_leaf.base.obj_id;
996 vap->va_mode = ip->ino_data.mode;
997 vap->va_nlink = ip->ino_data.nlinks;
998 vap->va_uid = hammer_to_unix_xid(&ip->ino_data.uid);
999 vap->va_gid = hammer_to_unix_xid(&ip->ino_data.gid);
1002 vap->va_size = ip->ino_data.size;
1005 * Special case for @@PFS softlinks. The actual size of the
1006 * expanded softlink is "@@0x%016llx:%05d" == 26 bytes.
1007 * or for MAX_TID is "@@-1:%05d" == 10 bytes.
1009 if (ip->ino_data.obj_type == HAMMER_OBJTYPE_SOFTLINK &&
1010 ip->ino_data.size == 10 &&
1011 ip->obj_asof == HAMMER_MAX_TID &&
1012 ip->obj_localization == 0 &&
1013 strncmp(ip->ino_data.ext.symlink, "@@PFS", 5) == 0) {
1014 if (ip->pfsm->pfsd.mirror_flags & HAMMER_PFSD_SLAVE)
1021 * We must provide a consistent atime and mtime for snapshots
1022 * so people can do a 'tar cf - ... | md5' on them and get
1023 * consistent results.
1025 if (ip->flags & HAMMER_INODE_RO) {
1026 hammer_time_to_timespec(ip->ino_data.ctime, &vap->va_atime);
1027 hammer_time_to_timespec(ip->ino_data.ctime, &vap->va_mtime);
1029 hammer_time_to_timespec(ip->ino_data.atime, &vap->va_atime);
1030 hammer_time_to_timespec(ip->ino_data.mtime, &vap->va_mtime);
1032 hammer_time_to_timespec(ip->ino_data.ctime, &vap->va_ctime);
1033 vap->va_flags = ip->ino_data.uflags;
1034 vap->va_gen = 1; /* hammer inums are unique for all time */
1035 vap->va_blocksize = HAMMER_BUFSIZE;
1036 if (ip->ino_data.size >= HAMMER_XDEMARC) {
1037 vap->va_bytes = (ip->ino_data.size + HAMMER_XBUFMASK64) &
1039 } else if (ip->ino_data.size > HAMMER_BUFSIZE / 2) {
1040 vap->va_bytes = (ip->ino_data.size + HAMMER_BUFMASK64) &
1043 vap->va_bytes = (ip->ino_data.size + 15) & ~15;
1046 vap->va_type = hammer_get_vnode_type(ip->ino_data.obj_type);
1047 vap->va_filerev = 0; /* XXX */
1048 vap->va_uid_uuid = ip->ino_data.uid;
1049 vap->va_gid_uuid = ip->ino_data.gid;
1050 vap->va_fsid_uuid = ip->hmp->fsid;
1051 vap->va_vaflags = VA_UID_UUID_VALID | VA_GID_UUID_VALID |
1054 switch (ip->ino_data.obj_type) {
1055 case HAMMER_OBJTYPE_CDEV:
1056 case HAMMER_OBJTYPE_BDEV:
1057 vap->va_rmajor = ip->ino_data.rmajor;
1058 vap->va_rminor = ip->ino_data.rminor;
1063 hammer_unlock(&ip->lock);
1068 * hammer_vop_nresolve { nch, dvp, cred }
1070 * Locate the requested directory entry.
1074 hammer_vop_nresolve(struct vop_nresolve_args *ap)
1076 struct hammer_transaction trans;
1077 struct namecache *ncp;
1082 struct hammer_cursor cursor;
1091 u_int32_t localization;
1092 u_int32_t max_iterations;
1095 * Misc initialization, plus handle as-of name extensions. Look for
1096 * the '@@' extension. Note that as-of files and directories cannot
1099 dip = VTOI(ap->a_dvp);
1100 ncp = ap->a_nch->ncp;
1101 asof = dip->obj_asof;
1102 localization = dip->obj_localization; /* for code consistency */
1103 nlen = ncp->nc_nlen;
1104 flags = dip->flags & HAMMER_INODE_RO;
1108 lwkt_gettoken(&hmp->fs_token);
1109 hammer_simple_transaction(&trans, hmp);
1110 ++hammer_stats_file_iopsr;
1112 for (i = 0; i < nlen; ++i) {
1113 if (ncp->nc_name[i] == '@' && ncp->nc_name[i+1] == '@') {
1114 error = hammer_str_to_tid(ncp->nc_name + i + 2,
1115 &ispfs, &asof, &localization);
1120 if (asof != HAMMER_MAX_TID)
1121 flags |= HAMMER_INODE_RO;
1128 * If this is a PFS softlink we dive into the PFS
1130 if (ispfs && nlen == 0) {
1131 ip = hammer_get_inode(&trans, dip, HAMMER_OBJID_ROOT,
1135 error = hammer_get_vnode(ip, &vp);
1136 hammer_rel_inode(ip, 0);
1142 cache_setvp(ap->a_nch, vp);
1149 * If there is no path component the time extension is relative to dip.
1150 * e.g. "fubar/@@<snapshot>"
1152 * "." is handled by the kernel, but ".@@<snapshot>" is not.
1153 * e.g. "fubar/.@@<snapshot>"
1155 * ".." is handled by the kernel. We do not currently handle
1158 if (nlen == 0 || (nlen == 1 && ncp->nc_name[0] == '.')) {
1159 ip = hammer_get_inode(&trans, dip, dip->obj_id,
1160 asof, dip->obj_localization,
1163 error = hammer_get_vnode(ip, &vp);
1164 hammer_rel_inode(ip, 0);
1170 cache_setvp(ap->a_nch, vp);
1177 * Calculate the namekey and setup the key range for the scan. This
1178 * works kinda like a chained hash table where the lower 32 bits
1179 * of the namekey synthesize the chain.
1181 * The key range is inclusive of both key_beg and key_end.
1183 namekey = hammer_directory_namekey(dip, ncp->nc_name, nlen,
1186 error = hammer_init_cursor(&trans, &cursor, &dip->cache[1], dip);
1187 cursor.key_beg.localization = dip->obj_localization +
1188 hammer_dir_localization(dip);
1189 cursor.key_beg.obj_id = dip->obj_id;
1190 cursor.key_beg.key = namekey;
1191 cursor.key_beg.create_tid = 0;
1192 cursor.key_beg.delete_tid = 0;
1193 cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
1194 cursor.key_beg.obj_type = 0;
1196 cursor.key_end = cursor.key_beg;
1197 cursor.key_end.key += max_iterations;
1199 cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;
1202 * Scan all matching records (the chain), locate the one matching
1203 * the requested path component.
1205 * The hammer_ip_*() functions merge in-memory records with on-disk
1206 * records for the purposes of the search.
1209 localization = HAMMER_DEF_LOCALIZATION;
1212 error = hammer_ip_first(&cursor);
1213 while (error == 0) {
1214 error = hammer_ip_resolve_data(&cursor);
1217 if (nlen == cursor.leaf->data_len - HAMMER_ENTRY_NAME_OFF &&
1218 bcmp(ncp->nc_name, cursor.data->entry.name, nlen) == 0) {
1219 obj_id = cursor.data->entry.obj_id;
1220 localization = cursor.data->entry.localization;
1223 error = hammer_ip_next(&cursor);
1226 hammer_done_cursor(&cursor);
1229 * Lookup the obj_id. This should always succeed. If it does not
1230 * the filesystem may be damaged and we return a dummy inode.
1233 ip = hammer_get_inode(&trans, dip, obj_id,
1236 if (error == ENOENT) {
1237 kprintf("HAMMER: WARNING: Missing "
1238 "inode for dirent \"%s\"\n"
1239 "\tobj_id = %016llx, asof=%016llx, lo=%08x\n",
1241 (long long)obj_id, (long long)asof,
1244 ip = hammer_get_dummy_inode(&trans, dip, obj_id,
1249 error = hammer_get_vnode(ip, &vp);
1250 hammer_rel_inode(ip, 0);
1256 cache_setvp(ap->a_nch, vp);
1259 } else if (error == ENOENT) {
1260 cache_setvp(ap->a_nch, NULL);
1263 hammer_done_transaction(&trans);
1264 lwkt_reltoken(&hmp->fs_token);
1269 * hammer_vop_nlookupdotdot { dvp, vpp, cred }
1271 * Locate the parent directory of a directory vnode.
1273 * dvp is referenced but not locked. *vpp must be returned referenced and
1274 * locked. A parent_obj_id of 0 does not necessarily indicate that we are
1275 * at the root, instead it could indicate that the directory we were in was
1278 * NOTE: as-of sequences are not linked into the directory structure. If
1279 * we are at the root with a different asof then the mount point, reload
1280 * the same directory with the mount point's asof. I'm not sure what this
1281 * will do to NFS. We encode ASOF stamps in NFS file handles so it might not
1282 * get confused, but it hasn't been tested.
1286 hammer_vop_nlookupdotdot(struct vop_nlookupdotdot_args *ap)
1288 struct hammer_transaction trans;
1289 struct hammer_inode *dip;
1290 struct hammer_inode *ip;
1292 int64_t parent_obj_id;
1293 u_int32_t parent_obj_localization;
1297 dip = VTOI(ap->a_dvp);
1298 asof = dip->obj_asof;
1302 * Whos are parent? This could be the root of a pseudo-filesystem
1303 * whos parent is in another localization domain.
1305 lwkt_gettoken(&hmp->fs_token);
1306 parent_obj_id = dip->ino_data.parent_obj_id;
1307 if (dip->obj_id == HAMMER_OBJID_ROOT)
1308 parent_obj_localization = dip->ino_data.ext.obj.parent_obj_localization;
1310 parent_obj_localization = dip->obj_localization;
1312 if (parent_obj_id == 0) {
1313 if (dip->obj_id == HAMMER_OBJID_ROOT &&
1314 asof != hmp->asof) {
1315 parent_obj_id = dip->obj_id;
1317 *ap->a_fakename = kmalloc(19, M_TEMP, M_WAITOK);
1318 ksnprintf(*ap->a_fakename, 19, "0x%016llx",
1319 (long long)dip->obj_asof);
1322 lwkt_reltoken(&hmp->fs_token);
1327 hammer_simple_transaction(&trans, hmp);
1328 ++hammer_stats_file_iopsr;
1330 ip = hammer_get_inode(&trans, dip, parent_obj_id,
1331 asof, parent_obj_localization,
1332 dip->flags, &error);
1334 error = hammer_get_vnode(ip, ap->a_vpp);
1335 hammer_rel_inode(ip, 0);
1339 hammer_done_transaction(&trans);
1340 lwkt_reltoken(&hmp->fs_token);
1345 * hammer_vop_nlink { nch, dvp, vp, cred }
1349 hammer_vop_nlink(struct vop_nlink_args *ap)
1351 struct hammer_transaction trans;
1352 struct hammer_inode *dip;
1353 struct hammer_inode *ip;
1354 struct nchandle *nch;
1358 if (ap->a_dvp->v_mount != ap->a_vp->v_mount)
1362 dip = VTOI(ap->a_dvp);
1363 ip = VTOI(ap->a_vp);
1366 if (dip->obj_localization != ip->obj_localization)
1369 if (dip->flags & HAMMER_INODE_RO)
1371 if (ip->flags & HAMMER_INODE_RO)
1373 if ((error = hammer_checkspace(hmp, HAMMER_CHKSPC_CREATE)) != 0)
1377 * Create a transaction to cover the operations we perform.
1379 lwkt_gettoken(&hmp->fs_token);
1380 hammer_start_transaction(&trans, hmp);
1381 ++hammer_stats_file_iopsw;
1384 * Add the filesystem object to the directory. Note that neither
1385 * dip nor ip are referenced or locked, but their vnodes are
1386 * referenced. This function will bump the inode's link count.
1388 error = hammer_ip_add_directory(&trans, dip,
1389 nch->ncp->nc_name, nch->ncp->nc_nlen,
1396 cache_setunresolved(nch);
1397 cache_setvp(nch, ap->a_vp);
1399 hammer_done_transaction(&trans);
1400 hammer_knote(ap->a_vp, NOTE_LINK);
1401 hammer_knote(ap->a_dvp, NOTE_WRITE);
1402 lwkt_reltoken(&hmp->fs_token);
1407 * hammer_vop_nmkdir { nch, dvp, vpp, cred, vap }
1409 * The operating system has already ensured that the directory entry
1410 * does not exist and done all appropriate namespace locking.
1414 hammer_vop_nmkdir(struct vop_nmkdir_args *ap)
1416 struct hammer_transaction trans;
1417 struct hammer_inode *dip;
1418 struct hammer_inode *nip;
1419 struct nchandle *nch;
1424 dip = VTOI(ap->a_dvp);
1427 if (dip->flags & HAMMER_INODE_RO)
1429 if ((error = hammer_checkspace(hmp, HAMMER_CHKSPC_CREATE)) != 0)
1433 * Create a transaction to cover the operations we perform.
1435 lwkt_gettoken(&hmp->fs_token);
1436 hammer_start_transaction(&trans, hmp);
1437 ++hammer_stats_file_iopsw;
1440 * Create a new filesystem object of the requested type. The
1441 * returned inode will be referenced but not locked.
1443 error = hammer_create_inode(&trans, ap->a_vap, ap->a_cred,
1444 dip, nch->ncp->nc_name, nch->ncp->nc_nlen,
1447 hkprintf("hammer_mkdir error %d\n", error);
1448 hammer_done_transaction(&trans);
1450 lwkt_reltoken(&hmp->fs_token);
1454 * Add the new filesystem object to the directory. This will also
1455 * bump the inode's link count.
1457 error = hammer_ip_add_directory(&trans, dip,
1458 nch->ncp->nc_name, nch->ncp->nc_nlen,
1461 hkprintf("hammer_mkdir (add) error %d\n", error);
1467 hammer_rel_inode(nip, 0);
1470 error = hammer_get_vnode(nip, ap->a_vpp);
1471 hammer_rel_inode(nip, 0);
1473 cache_setunresolved(ap->a_nch);
1474 cache_setvp(ap->a_nch, *ap->a_vpp);
1477 hammer_done_transaction(&trans);
1479 hammer_knote(ap->a_dvp, NOTE_WRITE | NOTE_LINK);
1480 lwkt_reltoken(&hmp->fs_token);
1485 * hammer_vop_nmknod { nch, dvp, vpp, cred, vap }
1487 * The operating system has already ensured that the directory entry
1488 * does not exist and done all appropriate namespace locking.
1492 hammer_vop_nmknod(struct vop_nmknod_args *ap)
1494 struct hammer_transaction trans;
1495 struct hammer_inode *dip;
1496 struct hammer_inode *nip;
1497 struct nchandle *nch;
1502 dip = VTOI(ap->a_dvp);
1505 if (dip->flags & HAMMER_INODE_RO)
1507 if ((error = hammer_checkspace(hmp, HAMMER_CHKSPC_CREATE)) != 0)
1511 * Create a transaction to cover the operations we perform.
1513 lwkt_gettoken(&hmp->fs_token);
1514 hammer_start_transaction(&trans, hmp);
1515 ++hammer_stats_file_iopsw;
1518 * Create a new filesystem object of the requested type. The
1519 * returned inode will be referenced but not locked.
1521 * If mknod specifies a directory a pseudo-fs is created.
1523 error = hammer_create_inode(&trans, ap->a_vap, ap->a_cred,
1524 dip, nch->ncp->nc_name, nch->ncp->nc_nlen,
1527 hammer_done_transaction(&trans);
1529 lwkt_reltoken(&hmp->fs_token);
1534 * Add the new filesystem object to the directory. This will also
1535 * bump the inode's link count.
1537 error = hammer_ip_add_directory(&trans, dip,
1538 nch->ncp->nc_name, nch->ncp->nc_nlen,
1545 hammer_rel_inode(nip, 0);
1548 error = hammer_get_vnode(nip, ap->a_vpp);
1549 hammer_rel_inode(nip, 0);
1551 cache_setunresolved(ap->a_nch);
1552 cache_setvp(ap->a_nch, *ap->a_vpp);
1555 hammer_done_transaction(&trans);
1557 hammer_knote(ap->a_dvp, NOTE_WRITE);
1558 lwkt_reltoken(&hmp->fs_token);
1563 * hammer_vop_open { vp, mode, cred, fp }
1565 * MPSAFE (does not require fs_token)
1569 hammer_vop_open(struct vop_open_args *ap)
1573 ++hammer_stats_file_iopsr;
1574 ip = VTOI(ap->a_vp);
1576 if ((ap->a_mode & FWRITE) && (ip->flags & HAMMER_INODE_RO))
1578 return(vop_stdopen(ap));
1582 * hammer_vop_print { vp }
1586 hammer_vop_print(struct vop_print_args *ap)
1592 * hammer_vop_readdir { vp, uio, cred, *eofflag, *ncookies, off_t **cookies }
1596 hammer_vop_readdir(struct vop_readdir_args *ap)
1598 struct hammer_transaction trans;
1599 struct hammer_cursor cursor;
1600 struct hammer_inode *ip;
1603 hammer_base_elm_t base;
1612 ++hammer_stats_file_iopsr;
1613 ip = VTOI(ap->a_vp);
1615 saveoff = uio->uio_offset;
1618 if (ap->a_ncookies) {
1619 ncookies = uio->uio_resid / 16 + 1;
1620 if (ncookies > 1024)
1622 cookies = kmalloc(ncookies * sizeof(off_t), M_TEMP, M_WAITOK);
1630 lwkt_gettoken(&hmp->fs_token);
1631 hammer_simple_transaction(&trans, hmp);
1634 * Handle artificial entries
1636 * It should be noted that the minimum value for a directory
1637 * hash key on-media is 0x0000000100000000, so we can use anything
1638 * less then that to represent our 'special' key space.
1642 r = vop_write_dirent(&error, uio, ip->obj_id, DT_DIR, 1, ".");
1646 cookies[cookie_index] = saveoff;
1649 if (cookie_index == ncookies)
1653 if (ip->ino_data.parent_obj_id) {
1654 r = vop_write_dirent(&error, uio,
1655 ip->ino_data.parent_obj_id,
1658 r = vop_write_dirent(&error, uio,
1659 ip->obj_id, DT_DIR, 2, "..");
1664 cookies[cookie_index] = saveoff;
1667 if (cookie_index == ncookies)
1672 * Key range (begin and end inclusive) to scan. Directory keys
1673 * directly translate to a 64 bit 'seek' position.
1675 hammer_init_cursor(&trans, &cursor, &ip->cache[1], ip);
1676 cursor.key_beg.localization = ip->obj_localization +
1677 hammer_dir_localization(ip);
1678 cursor.key_beg.obj_id = ip->obj_id;
1679 cursor.key_beg.create_tid = 0;
1680 cursor.key_beg.delete_tid = 0;
1681 cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
1682 cursor.key_beg.obj_type = 0;
1683 cursor.key_beg.key = saveoff;
1685 cursor.key_end = cursor.key_beg;
1686 cursor.key_end.key = HAMMER_MAX_KEY;
1687 cursor.asof = ip->obj_asof;
1688 cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;
1690 error = hammer_ip_first(&cursor);
1692 while (error == 0) {
1693 error = hammer_ip_resolve_data(&cursor);
1696 base = &cursor.leaf->base;
1697 saveoff = base->key;
1698 KKASSERT(cursor.leaf->data_len > HAMMER_ENTRY_NAME_OFF);
1700 if (base->obj_id != ip->obj_id)
1701 panic("readdir: bad record at %p", cursor.node);
1704 * Convert pseudo-filesystems into softlinks
1706 dtype = hammer_get_dtype(cursor.leaf->base.obj_type);
1707 r = vop_write_dirent(
1708 &error, uio, cursor.data->entry.obj_id,
1710 cursor.leaf->data_len - HAMMER_ENTRY_NAME_OFF ,
1711 (void *)cursor.data->entry.name);
1716 cookies[cookie_index] = base->key;
1718 if (cookie_index == ncookies)
1720 error = hammer_ip_next(&cursor);
1722 hammer_done_cursor(&cursor);
1725 hammer_done_transaction(&trans);
1728 *ap->a_eofflag = (error == ENOENT);
1729 uio->uio_offset = saveoff;
1730 if (error && cookie_index == 0) {
1731 if (error == ENOENT)
1734 kfree(cookies, M_TEMP);
1735 *ap->a_ncookies = 0;
1736 *ap->a_cookies = NULL;
1739 if (error == ENOENT)
1742 *ap->a_ncookies = cookie_index;
1743 *ap->a_cookies = cookies;
1746 lwkt_reltoken(&hmp->fs_token);
1751 * hammer_vop_readlink { vp, uio, cred }
1755 hammer_vop_readlink(struct vop_readlink_args *ap)
1757 struct hammer_transaction trans;
1758 struct hammer_cursor cursor;
1759 struct hammer_inode *ip;
1762 u_int32_t localization;
1763 hammer_pseudofs_inmem_t pfsm;
1766 ip = VTOI(ap->a_vp);
1769 lwkt_gettoken(&hmp->fs_token);
1772 * Shortcut if the symlink data was stuffed into ino_data.
1774 * Also expand special "@@PFS%05d" softlinks (expansion only
1775 * occurs for non-historical (current) accesses made from the
1776 * primary filesystem).
1778 if (ip->ino_data.size <= HAMMER_INODE_BASESYMLEN) {
1782 ptr = ip->ino_data.ext.symlink;
1783 bytes = (int)ip->ino_data.size;
1785 ip->obj_asof == HAMMER_MAX_TID &&
1786 ip->obj_localization == 0 &&
1787 strncmp(ptr, "@@PFS", 5) == 0) {
1788 hammer_simple_transaction(&trans, hmp);
1789 bcopy(ptr + 5, buf, 5);
1791 localization = strtoul(buf, NULL, 10) << 16;
1792 pfsm = hammer_load_pseudofs(&trans, localization,
1795 if (pfsm->pfsd.mirror_flags &
1796 HAMMER_PFSD_SLAVE) {
1797 /* vap->va_size == 26 */
1798 ksnprintf(buf, sizeof(buf),
1800 (long long)pfsm->pfsd.sync_end_tid,
1801 localization >> 16);
1803 /* vap->va_size == 10 */
1804 ksnprintf(buf, sizeof(buf),
1806 localization >> 16);
1808 ksnprintf(buf, sizeof(buf),
1810 (long long)HAMMER_MAX_TID,
1811 localization >> 16);
1815 bytes = strlen(buf);
1818 hammer_rel_pseudofs(hmp, pfsm);
1819 hammer_done_transaction(&trans);
1821 error = uiomove(ptr, bytes, ap->a_uio);
1822 lwkt_reltoken(&hmp->fs_token);
1829 hammer_simple_transaction(&trans, hmp);
1830 ++hammer_stats_file_iopsr;
1831 hammer_init_cursor(&trans, &cursor, &ip->cache[1], ip);
1834 * Key range (begin and end inclusive) to scan. Directory keys
1835 * directly translate to a 64 bit 'seek' position.
1837 cursor.key_beg.localization = ip->obj_localization +
1838 HAMMER_LOCALIZE_MISC;
1839 cursor.key_beg.obj_id = ip->obj_id;
1840 cursor.key_beg.create_tid = 0;
1841 cursor.key_beg.delete_tid = 0;
1842 cursor.key_beg.rec_type = HAMMER_RECTYPE_FIX;
1843 cursor.key_beg.obj_type = 0;
1844 cursor.key_beg.key = HAMMER_FIXKEY_SYMLINK;
1845 cursor.asof = ip->obj_asof;
1846 cursor.flags |= HAMMER_CURSOR_ASOF;
1848 error = hammer_ip_lookup(&cursor);
1850 error = hammer_ip_resolve_data(&cursor);
1852 KKASSERT(cursor.leaf->data_len >=
1853 HAMMER_SYMLINK_NAME_OFF);
1854 error = uiomove(cursor.data->symlink.name,
1855 cursor.leaf->data_len -
1856 HAMMER_SYMLINK_NAME_OFF,
1860 hammer_done_cursor(&cursor);
1861 hammer_done_transaction(&trans);
1862 lwkt_reltoken(&hmp->fs_token);
1867 * hammer_vop_nremove { nch, dvp, cred }
1871 hammer_vop_nremove(struct vop_nremove_args *ap)
1873 struct hammer_transaction trans;
1874 struct hammer_inode *dip;
1878 dip = VTOI(ap->a_dvp);
1881 if (hammer_nohistory(dip) == 0 &&
1882 (error = hammer_checkspace(hmp, HAMMER_CHKSPC_REMOVE)) != 0) {
1886 lwkt_gettoken(&hmp->fs_token);
1887 hammer_start_transaction(&trans, hmp);
1888 ++hammer_stats_file_iopsw;
1889 error = hammer_dounlink(&trans, ap->a_nch, ap->a_dvp, ap->a_cred, 0, 0);
1890 hammer_done_transaction(&trans);
1892 hammer_knote(ap->a_dvp, NOTE_WRITE);
1893 lwkt_reltoken(&hmp->fs_token);
1898 * hammer_vop_nrename { fnch, tnch, fdvp, tdvp, cred }
1902 hammer_vop_nrename(struct vop_nrename_args *ap)
1904 struct hammer_transaction trans;
1905 struct namecache *fncp;
1906 struct namecache *tncp;
1907 struct hammer_inode *fdip;
1908 struct hammer_inode *tdip;
1909 struct hammer_inode *ip;
1911 struct hammer_cursor cursor;
1913 u_int32_t max_iterations;
1916 if (ap->a_fdvp->v_mount != ap->a_tdvp->v_mount)
1918 if (ap->a_fdvp->v_mount != ap->a_fnch->ncp->nc_vp->v_mount)
1921 fdip = VTOI(ap->a_fdvp);
1922 tdip = VTOI(ap->a_tdvp);
1923 fncp = ap->a_fnch->ncp;
1924 tncp = ap->a_tnch->ncp;
1925 ip = VTOI(fncp->nc_vp);
1926 KKASSERT(ip != NULL);
1930 if (fdip->obj_localization != tdip->obj_localization)
1932 if (fdip->obj_localization != ip->obj_localization)
1935 if (fdip->flags & HAMMER_INODE_RO)
1937 if (tdip->flags & HAMMER_INODE_RO)
1939 if (ip->flags & HAMMER_INODE_RO)
1941 if ((error = hammer_checkspace(hmp, HAMMER_CHKSPC_CREATE)) != 0)
1944 lwkt_gettoken(&hmp->fs_token);
1945 hammer_start_transaction(&trans, hmp);
1946 ++hammer_stats_file_iopsw;
1949 * Remove tncp from the target directory and then link ip as
1950 * tncp. XXX pass trans to dounlink
1952 * Force the inode sync-time to match the transaction so it is
1953 * in-sync with the creation of the target directory entry.
1955 error = hammer_dounlink(&trans, ap->a_tnch, ap->a_tdvp,
1957 if (error == 0 || error == ENOENT) {
1958 error = hammer_ip_add_directory(&trans, tdip,
1959 tncp->nc_name, tncp->nc_nlen,
1962 ip->ino_data.parent_obj_id = tdip->obj_id;
1963 ip->ino_data.ctime = trans.time;
1964 hammer_modify_inode(&trans, ip, HAMMER_INODE_DDIRTY);
1968 goto failed; /* XXX */
1971 * Locate the record in the originating directory and remove it.
1973 * Calculate the namekey and setup the key range for the scan. This
1974 * works kinda like a chained hash table where the lower 32 bits
1975 * of the namekey synthesize the chain.
1977 * The key range is inclusive of both key_beg and key_end.
1979 namekey = hammer_directory_namekey(fdip, fncp->nc_name, fncp->nc_nlen,
1982 hammer_init_cursor(&trans, &cursor, &fdip->cache[1], fdip);
1983 cursor.key_beg.localization = fdip->obj_localization +
1984 hammer_dir_localization(fdip);
1985 cursor.key_beg.obj_id = fdip->obj_id;
1986 cursor.key_beg.key = namekey;
1987 cursor.key_beg.create_tid = 0;
1988 cursor.key_beg.delete_tid = 0;
1989 cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
1990 cursor.key_beg.obj_type = 0;
1992 cursor.key_end = cursor.key_beg;
1993 cursor.key_end.key += max_iterations;
1994 cursor.asof = fdip->obj_asof;
1995 cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;
1998 * Scan all matching records (the chain), locate the one matching
1999 * the requested path component.
2001 * The hammer_ip_*() functions merge in-memory records with on-disk
2002 * records for the purposes of the search.
2004 error = hammer_ip_first(&cursor);
2005 while (error == 0) {
2006 if (hammer_ip_resolve_data(&cursor) != 0)
2008 nlen = cursor.leaf->data_len - HAMMER_ENTRY_NAME_OFF;
2010 if (fncp->nc_nlen == nlen &&
2011 bcmp(fncp->nc_name, cursor.data->entry.name, nlen) == 0) {
2014 error = hammer_ip_next(&cursor);
2018 * If all is ok we have to get the inode so we can adjust nlinks.
2020 * WARNING: hammer_ip_del_directory() may have to terminate the
2021 * cursor to avoid a recursion. It's ok to call hammer_done_cursor()
2025 error = hammer_ip_del_directory(&trans, &cursor, fdip, ip);
2028 * XXX A deadlock here will break rename's atomicy for the purposes
2029 * of crash recovery.
2031 if (error == EDEADLK) {
2032 hammer_done_cursor(&cursor);
2037 * Cleanup and tell the kernel that the rename succeeded.
2039 hammer_done_cursor(&cursor);
2041 cache_rename(ap->a_fnch, ap->a_tnch);
2042 hammer_knote(ap->a_fdvp, NOTE_WRITE);
2043 hammer_knote(ap->a_tdvp, NOTE_WRITE);
2045 hammer_knote(ip->vp, NOTE_RENAME);
2049 hammer_done_transaction(&trans);
2050 lwkt_reltoken(&hmp->fs_token);
2055 * hammer_vop_nrmdir { nch, dvp, cred }
2059 hammer_vop_nrmdir(struct vop_nrmdir_args *ap)
2061 struct hammer_transaction trans;
2062 struct hammer_inode *dip;
2066 dip = VTOI(ap->a_dvp);
2069 if (hammer_nohistory(dip) == 0 &&
2070 (error = hammer_checkspace(hmp, HAMMER_CHKSPC_REMOVE)) != 0) {
2074 lwkt_gettoken(&hmp->fs_token);
2075 hammer_start_transaction(&trans, hmp);
2076 ++hammer_stats_file_iopsw;
2077 error = hammer_dounlink(&trans, ap->a_nch, ap->a_dvp, ap->a_cred, 0, 1);
2078 hammer_done_transaction(&trans);
2080 hammer_knote(ap->a_dvp, NOTE_WRITE | NOTE_LINK);
2081 lwkt_reltoken(&hmp->fs_token);
2086 * hammer_vop_markatime { vp, cred }
2090 hammer_vop_markatime(struct vop_markatime_args *ap)
2092 struct hammer_transaction trans;
2093 struct hammer_inode *ip;
2096 ip = VTOI(ap->a_vp);
2097 if (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY)
2099 if (ip->flags & HAMMER_INODE_RO)
2102 if (hmp->mp->mnt_flag & MNT_NOATIME)
2104 lwkt_gettoken(&hmp->fs_token);
2105 hammer_start_transaction(&trans, hmp);
2106 ++hammer_stats_file_iopsw;
2108 ip->ino_data.atime = trans.time;
2109 hammer_modify_inode(&trans, ip, HAMMER_INODE_ATIME);
2110 hammer_done_transaction(&trans);
2111 hammer_knote(ap->a_vp, NOTE_ATTRIB);
2112 lwkt_reltoken(&hmp->fs_token);
2117 * hammer_vop_setattr { vp, vap, cred }
2121 hammer_vop_setattr(struct vop_setattr_args *ap)
2123 struct hammer_transaction trans;
2124 struct hammer_inode *ip;
2133 int64_t aligned_size;
2138 ip = ap->a_vp->v_data;
2143 if (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY)
2145 if (ip->flags & HAMMER_INODE_RO)
2147 if (hammer_nohistory(ip) == 0 &&
2148 (error = hammer_checkspace(hmp, HAMMER_CHKSPC_REMOVE)) != 0) {
2152 lwkt_gettoken(&hmp->fs_token);
2153 hammer_start_transaction(&trans, hmp);
2154 ++hammer_stats_file_iopsw;
2157 if (vap->va_flags != VNOVAL) {
2158 flags = ip->ino_data.uflags;
2159 error = vop_helper_setattr_flags(&flags, vap->va_flags,
2160 hammer_to_unix_xid(&ip->ino_data.uid),
2163 if (ip->ino_data.uflags != flags) {
2164 ip->ino_data.uflags = flags;
2165 ip->ino_data.ctime = trans.time;
2166 modflags |= HAMMER_INODE_DDIRTY;
2167 kflags |= NOTE_ATTRIB;
2169 if (ip->ino_data.uflags & (IMMUTABLE | APPEND)) {
2176 if (ip->ino_data.uflags & (IMMUTABLE | APPEND)) {
2180 if (vap->va_uid != (uid_t)VNOVAL || vap->va_gid != (gid_t)VNOVAL) {
2181 mode_t cur_mode = ip->ino_data.mode;
2182 uid_t cur_uid = hammer_to_unix_xid(&ip->ino_data.uid);
2183 gid_t cur_gid = hammer_to_unix_xid(&ip->ino_data.gid);
2187 error = vop_helper_chown(ap->a_vp, vap->va_uid, vap->va_gid,
2189 &cur_uid, &cur_gid, &cur_mode);
2191 hammer_guid_to_uuid(&uuid_uid, cur_uid);
2192 hammer_guid_to_uuid(&uuid_gid, cur_gid);
2193 if (bcmp(&uuid_uid, &ip->ino_data.uid,
2194 sizeof(uuid_uid)) ||
2195 bcmp(&uuid_gid, &ip->ino_data.gid,
2196 sizeof(uuid_gid)) ||
2197 ip->ino_data.mode != cur_mode
2199 ip->ino_data.uid = uuid_uid;
2200 ip->ino_data.gid = uuid_gid;
2201 ip->ino_data.mode = cur_mode;
2202 ip->ino_data.ctime = trans.time;
2203 modflags |= HAMMER_INODE_DDIRTY;
2205 kflags |= NOTE_ATTRIB;
2208 while (vap->va_size != VNOVAL && ip->ino_data.size != vap->va_size) {
2209 switch(ap->a_vp->v_type) {
2211 if (vap->va_size == ip->ino_data.size)
2215 * Log the operation if in fast-fsync mode or if
2216 * there are unterminated redo write records present.
2218 * The second check is needed so the recovery code
2219 * properly truncates write redos even if nominal
2220 * REDO operations is turned off due to excessive
2221 * writes, because the related records might be
2222 * destroyed and never lay down a TERM_WRITE.
2224 if ((ip->flags & HAMMER_INODE_REDO) ||
2225 (ip->flags & HAMMER_INODE_RDIRTY)) {
2226 error = hammer_generate_redo(&trans, ip,
2231 blksize = hammer_blocksize(vap->va_size);
2234 * XXX break atomicy, we can deadlock the backend
2235 * if we do not release the lock. Probably not a
2238 if (vap->va_size < ip->ino_data.size) {
2239 nvtruncbuf(ap->a_vp, vap->va_size,
2241 hammer_blockoff(vap->va_size));
2243 kflags |= NOTE_WRITE;
2245 nvextendbuf(ap->a_vp,
2248 hammer_blocksize(ip->ino_data.size),
2249 hammer_blocksize(vap->va_size),
2250 hammer_blockoff(ip->ino_data.size),
2251 hammer_blockoff(vap->va_size),
2254 kflags |= NOTE_WRITE | NOTE_EXTEND;
2256 ip->ino_data.size = vap->va_size;
2257 ip->ino_data.mtime = trans.time;
2258 /* XXX safe to use SDIRTY instead of DDIRTY here? */
2259 modflags |= HAMMER_INODE_MTIME | HAMMER_INODE_DDIRTY;
2262 * On-media truncation is cached in the inode until
2263 * the inode is synchronized. We must immediately
2264 * handle any frontend records.
2267 hammer_ip_frontend_trunc(ip, vap->va_size);
2268 #ifdef DEBUG_TRUNCATE
2269 if (HammerTruncIp == NULL)
2272 if ((ip->flags & HAMMER_INODE_TRUNCATED) == 0) {
2273 ip->flags |= HAMMER_INODE_TRUNCATED;
2274 ip->trunc_off = vap->va_size;
2275 #ifdef DEBUG_TRUNCATE
2276 if (ip == HammerTruncIp)
2277 kprintf("truncate1 %016llx\n",
2278 (long long)ip->trunc_off);
2280 } else if (ip->trunc_off > vap->va_size) {
2281 ip->trunc_off = vap->va_size;
2282 #ifdef DEBUG_TRUNCATE
2283 if (ip == HammerTruncIp)
2284 kprintf("truncate2 %016llx\n",
2285 (long long)ip->trunc_off);
2288 #ifdef DEBUG_TRUNCATE
2289 if (ip == HammerTruncIp)
2290 kprintf("truncate3 %016llx (ignored)\n",
2291 (long long)vap->va_size);
2298 * When truncating, nvtruncbuf() may have cleaned out
2299 * a portion of the last block on-disk in the buffer
2300 * cache. We must clean out any frontend records
2301 * for blocks beyond the new last block.
2303 aligned_size = (vap->va_size + (blksize - 1)) &
2304 ~(int64_t)(blksize - 1);
2305 if (truncating && vap->va_size < aligned_size) {
2306 aligned_size -= blksize;
2307 hammer_ip_frontend_trunc(ip, aligned_size);
2312 if ((ip->flags & HAMMER_INODE_TRUNCATED) == 0) {
2313 ip->flags |= HAMMER_INODE_TRUNCATED;
2314 ip->trunc_off = vap->va_size;
2315 } else if (ip->trunc_off > vap->va_size) {
2316 ip->trunc_off = vap->va_size;
2318 hammer_ip_frontend_trunc(ip, vap->va_size);
2319 ip->ino_data.size = vap->va_size;
2320 ip->ino_data.mtime = trans.time;
2321 modflags |= HAMMER_INODE_MTIME | HAMMER_INODE_DDIRTY;
2322 kflags |= NOTE_ATTRIB;
2330 if (vap->va_atime.tv_sec != VNOVAL) {
2331 ip->ino_data.atime = hammer_timespec_to_time(&vap->va_atime);
2332 modflags |= HAMMER_INODE_ATIME;
2333 kflags |= NOTE_ATTRIB;
2335 if (vap->va_mtime.tv_sec != VNOVAL) {
2336 ip->ino_data.mtime = hammer_timespec_to_time(&vap->va_mtime);
2337 modflags |= HAMMER_INODE_MTIME;
2338 kflags |= NOTE_ATTRIB;
2340 if (vap->va_mode != (mode_t)VNOVAL) {
2341 mode_t cur_mode = ip->ino_data.mode;
2342 uid_t cur_uid = hammer_to_unix_xid(&ip->ino_data.uid);
2343 gid_t cur_gid = hammer_to_unix_xid(&ip->ino_data.gid);
2345 error = vop_helper_chmod(ap->a_vp, vap->va_mode, ap->a_cred,
2346 cur_uid, cur_gid, &cur_mode);
2347 if (error == 0 && ip->ino_data.mode != cur_mode) {
2348 ip->ino_data.mode = cur_mode;
2349 ip->ino_data.ctime = trans.time;
2350 modflags |= HAMMER_INODE_DDIRTY;
2351 kflags |= NOTE_ATTRIB;
2356 hammer_modify_inode(&trans, ip, modflags);
2357 hammer_done_transaction(&trans);
2358 hammer_knote(ap->a_vp, kflags);
2359 lwkt_reltoken(&hmp->fs_token);
2364 * hammer_vop_nsymlink { nch, dvp, vpp, cred, vap, target }
2368 hammer_vop_nsymlink(struct vop_nsymlink_args *ap)
2370 struct hammer_transaction trans;
2371 struct hammer_inode *dip;
2372 struct hammer_inode *nip;
2373 hammer_record_t record;
2374 struct nchandle *nch;
2379 ap->a_vap->va_type = VLNK;
2382 dip = VTOI(ap->a_dvp);
2385 if (dip->flags & HAMMER_INODE_RO)
2387 if ((error = hammer_checkspace(hmp, HAMMER_CHKSPC_CREATE)) != 0)
2391 * Create a transaction to cover the operations we perform.
2393 lwkt_gettoken(&hmp->fs_token);
2394 hammer_start_transaction(&trans, hmp);
2395 ++hammer_stats_file_iopsw;
2398 * Create a new filesystem object of the requested type. The
2399 * returned inode will be referenced but not locked.
2402 error = hammer_create_inode(&trans, ap->a_vap, ap->a_cred,
2403 dip, nch->ncp->nc_name, nch->ncp->nc_nlen,
2406 hammer_done_transaction(&trans);
2408 lwkt_reltoken(&hmp->fs_token);
2413 * Add a record representing the symlink. symlink stores the link
2414 * as pure data, not a string, and is no \0 terminated.
2417 bytes = strlen(ap->a_target);
2419 if (bytes <= HAMMER_INODE_BASESYMLEN) {
2420 bcopy(ap->a_target, nip->ino_data.ext.symlink, bytes);
2422 record = hammer_alloc_mem_record(nip, bytes);
2423 record->type = HAMMER_MEM_RECORD_GENERAL;
2425 record->leaf.base.localization = nip->obj_localization +
2426 HAMMER_LOCALIZE_MISC;
2427 record->leaf.base.key = HAMMER_FIXKEY_SYMLINK;
2428 record->leaf.base.rec_type = HAMMER_RECTYPE_FIX;
2429 record->leaf.data_len = bytes;
2430 KKASSERT(HAMMER_SYMLINK_NAME_OFF == 0);
2431 bcopy(ap->a_target, record->data->symlink.name, bytes);
2432 error = hammer_ip_add_record(&trans, record);
2436 * Set the file size to the length of the link.
2439 nip->ino_data.size = bytes;
2440 hammer_modify_inode(&trans, nip, HAMMER_INODE_DDIRTY);
2444 error = hammer_ip_add_directory(&trans, dip, nch->ncp->nc_name,
2445 nch->ncp->nc_nlen, nip);
2451 hammer_rel_inode(nip, 0);
2454 error = hammer_get_vnode(nip, ap->a_vpp);
2455 hammer_rel_inode(nip, 0);
2457 cache_setunresolved(ap->a_nch);
2458 cache_setvp(ap->a_nch, *ap->a_vpp);
2459 hammer_knote(ap->a_dvp, NOTE_WRITE);
2462 hammer_done_transaction(&trans);
2463 lwkt_reltoken(&hmp->fs_token);
2468 * hammer_vop_nwhiteout { nch, dvp, cred, flags }
2472 hammer_vop_nwhiteout(struct vop_nwhiteout_args *ap)
2474 struct hammer_transaction trans;
2475 struct hammer_inode *dip;
2479 dip = VTOI(ap->a_dvp);
2482 if (hammer_nohistory(dip) == 0 &&
2483 (error = hammer_checkspace(hmp, HAMMER_CHKSPC_CREATE)) != 0) {
2487 lwkt_gettoken(&hmp->fs_token);
2488 hammer_start_transaction(&trans, hmp);
2489 ++hammer_stats_file_iopsw;
2490 error = hammer_dounlink(&trans, ap->a_nch, ap->a_dvp,
2491 ap->a_cred, ap->a_flags, -1);
2492 hammer_done_transaction(&trans);
2493 lwkt_reltoken(&hmp->fs_token);
2499 * hammer_vop_ioctl { vp, command, data, fflag, cred }
2503 hammer_vop_ioctl(struct vop_ioctl_args *ap)
2505 struct hammer_inode *ip = ap->a_vp->v_data;
2506 hammer_mount_t hmp = ip->hmp;
2509 ++hammer_stats_file_iopsr;
2510 lwkt_gettoken(&hmp->fs_token);
2511 error = hammer_ioctl(ip, ap->a_command, ap->a_data,
2512 ap->a_fflag, ap->a_cred);
2513 lwkt_reltoken(&hmp->fs_token);
2519 hammer_vop_mountctl(struct vop_mountctl_args *ap)
2521 static const struct mountctl_opt extraopt[] = {
2522 { HMNT_NOHISTORY, "nohistory" },
2523 { HMNT_MASTERID, "master" },
2527 struct hammer_mount *hmp;
2534 mp = ap->a_head.a_ops->head.vv_mount;
2535 KKASSERT(mp->mnt_data != NULL);
2536 hmp = (struct hammer_mount *)mp->mnt_data;
2538 lwkt_gettoken(&hmp->fs_token);
2541 case MOUNTCTL_SET_EXPORT:
2542 if (ap->a_ctllen != sizeof(struct export_args))
2545 error = hammer_vfs_export(mp, ap->a_op,
2546 (const struct export_args *)ap->a_ctl);
2548 case MOUNTCTL_MOUNTFLAGS:
2551 * Call standard mountctl VOP function
2552 * so we get user mount flags.
2554 error = vop_stdmountctl(ap);
2558 usedbytes = *ap->a_res;
2560 if (usedbytes > 0 && usedbytes < ap->a_buflen) {
2561 usedbytes += vfs_flagstostr(hmp->hflags, extraopt,
2563 ap->a_buflen - usedbytes,
2567 *ap->a_res += usedbytes;
2571 error = vop_stdmountctl(ap);
2574 lwkt_reltoken(&hmp->fs_token);
2579 * hammer_vop_strategy { vp, bio }
2581 * Strategy call, used for regular file read & write only. Note that the
2582 * bp may represent a cluster.
2584 * To simplify operation and allow better optimizations in the future,
2585 * this code does not make any assumptions with regards to buffer alignment
2590 hammer_vop_strategy(struct vop_strategy_args *ap)
2595 bp = ap->a_bio->bio_buf;
2599 error = hammer_vop_strategy_read(ap);
2602 error = hammer_vop_strategy_write(ap);
2605 bp->b_error = error = EINVAL;
2606 bp->b_flags |= B_ERROR;
2614 * Read from a regular file. Iterate the related records and fill in the
2615 * BIO/BUF. Gaps are zero-filled.
2617 * The support code in hammer_object.c should be used to deal with mixed
2618 * in-memory and on-disk records.
2620 * NOTE: Can be called from the cluster code with an oversized buf.
2626 hammer_vop_strategy_read(struct vop_strategy_args *ap)
2628 struct hammer_transaction trans;
2629 struct hammer_inode *ip;
2630 struct hammer_inode *dip;
2632 struct hammer_cursor cursor;
2633 hammer_base_elm_t base;
2634 hammer_off_t disk_offset;
2648 ip = ap->a_vp->v_data;
2652 * The zone-2 disk offset may have been set by the cluster code via
2653 * a BMAP operation, or else should be NOOFFSET.
2655 * Checking the high bits for a match against zone-2 should suffice.
2657 nbio = push_bio(bio);
2658 if ((nbio->bio_offset & HAMMER_OFF_ZONE_MASK) ==
2659 HAMMER_ZONE_LARGE_DATA) {
2660 lwkt_gettoken(&hmp->fs_token);
2661 error = hammer_io_direct_read(hmp, nbio, NULL);
2662 lwkt_reltoken(&hmp->fs_token);
2667 * Well, that sucked. Do it the hard way. If all the stars are
2668 * aligned we may still be able to issue a direct-read.
2670 lwkt_gettoken(&hmp->fs_token);
2671 hammer_simple_transaction(&trans, hmp);
2672 hammer_init_cursor(&trans, &cursor, &ip->cache[1], ip);
2675 * Key range (begin and end inclusive) to scan. Note that the key's
2676 * stored in the actual records represent BASE+LEN, not BASE. The
2677 * first record containing bio_offset will have a key > bio_offset.
2679 cursor.key_beg.localization = ip->obj_localization +
2680 HAMMER_LOCALIZE_MISC;
2681 cursor.key_beg.obj_id = ip->obj_id;
2682 cursor.key_beg.create_tid = 0;
2683 cursor.key_beg.delete_tid = 0;
2684 cursor.key_beg.obj_type = 0;
2685 cursor.key_beg.key = bio->bio_offset + 1;
2686 cursor.asof = ip->obj_asof;
2687 cursor.flags |= HAMMER_CURSOR_ASOF;
2689 cursor.key_end = cursor.key_beg;
2690 KKASSERT(ip->ino_data.obj_type == HAMMER_OBJTYPE_REGFILE);
2692 if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DBFILE) {
2693 cursor.key_beg.rec_type = HAMMER_RECTYPE_DB;
2694 cursor.key_end.rec_type = HAMMER_RECTYPE_DB;
2695 cursor.key_end.key = 0x7FFFFFFFFFFFFFFFLL;
2699 ran_end = bio->bio_offset + bp->b_bufsize;
2700 cursor.key_beg.rec_type = HAMMER_RECTYPE_DATA;
2701 cursor.key_end.rec_type = HAMMER_RECTYPE_DATA;
2702 tmp64 = ran_end + MAXPHYS + 1; /* work-around GCC-4 bug */
2703 if (tmp64 < ran_end)
2704 cursor.key_end.key = 0x7FFFFFFFFFFFFFFFLL;
2706 cursor.key_end.key = ran_end + MAXPHYS + 1;
2708 cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE;
2710 error = hammer_ip_first(&cursor);
2713 while (error == 0) {
2715 * Get the base file offset of the record. The key for
2716 * data records is (base + bytes) rather then (base).
2718 base = &cursor.leaf->base;
2719 rec_offset = base->key - cursor.leaf->data_len;
2722 * Calculate the gap, if any, and zero-fill it.
2724 * n is the offset of the start of the record verses our
2725 * current seek offset in the bio.
2727 n = (int)(rec_offset - (bio->bio_offset + boff));
2729 if (n > bp->b_bufsize - boff)
2730 n = bp->b_bufsize - boff;
2731 bzero((char *)bp->b_data + boff, n);
2737 * Calculate the data offset in the record and the number
2738 * of bytes we can copy.
2740 * There are two degenerate cases. First, boff may already
2741 * be at bp->b_bufsize. Secondly, the data offset within
2742 * the record may exceed the record's size.
2746 n = cursor.leaf->data_len - roff;
2748 kprintf("strategy_read: bad n=%d roff=%d\n", n, roff);
2750 } else if (n > bp->b_bufsize - boff) {
2751 n = bp->b_bufsize - boff;
2755 * Deal with cached truncations. This cool bit of code
2756 * allows truncate()/ftruncate() to avoid having to sync
2759 * If the frontend is truncated then all backend records are
2760 * subject to the frontend's truncation.
2762 * If the backend is truncated then backend records on-disk
2763 * (but not in-memory) are subject to the backend's
2764 * truncation. In-memory records owned by the backend
2765 * represent data written after the truncation point on the
2766 * backend and must not be truncated.
2768 * Truncate operations deal with frontend buffer cache
2769 * buffers and frontend-owned in-memory records synchronously.
2771 if (ip->flags & HAMMER_INODE_TRUNCATED) {
2772 if (hammer_cursor_ondisk(&cursor)/* ||
2773 cursor.iprec->flush_state == HAMMER_FST_FLUSH*/) {
2774 if (ip->trunc_off <= rec_offset)
2776 else if (ip->trunc_off < rec_offset + n)
2777 n = (int)(ip->trunc_off - rec_offset);
2780 if (ip->sync_flags & HAMMER_INODE_TRUNCATED) {
2781 if (hammer_cursor_ondisk(&cursor)) {
2782 if (ip->sync_trunc_off <= rec_offset)
2784 else if (ip->sync_trunc_off < rec_offset + n)
2785 n = (int)(ip->sync_trunc_off - rec_offset);
2790 * Try to issue a direct read into our bio if possible,
2791 * otherwise resolve the element data into a hammer_buffer
2794 * The buffer on-disk should be zerod past any real
2795 * truncation point, but may not be for any synthesized
2796 * truncation point from above.
2798 disk_offset = cursor.leaf->data_offset + roff;
2799 if (boff == 0 && n == bp->b_bufsize &&
2800 hammer_cursor_ondisk(&cursor) &&
2801 (disk_offset & HAMMER_BUFMASK) == 0) {
2802 KKASSERT((disk_offset & HAMMER_OFF_ZONE_MASK) ==
2803 HAMMER_ZONE_LARGE_DATA);
2804 nbio->bio_offset = disk_offset;
2805 error = hammer_io_direct_read(hmp, nbio, cursor.leaf);
2808 error = hammer_ip_resolve_data(&cursor);
2810 bcopy((char *)cursor.data + roff,
2811 (char *)bp->b_data + boff, n);
2818 * Iterate until we have filled the request.
2821 if (boff == bp->b_bufsize)
2823 error = hammer_ip_next(&cursor);
2827 * There may have been a gap after the last record
2829 if (error == ENOENT)
2831 if (error == 0 && boff != bp->b_bufsize) {
2832 KKASSERT(boff < bp->b_bufsize);
2833 bzero((char *)bp->b_data + boff, bp->b_bufsize - boff);
2834 /* boff = bp->b_bufsize; */
2837 bp->b_error = error;
2839 bp->b_flags |= B_ERROR;
2844 * Cache the b-tree node for the last data read in cache[1].
2846 * If we hit the file EOF then also cache the node in the
2847 * governing director's cache[3], it will be used to initialize
2848 * the inode's cache[1] for any inodes looked up via the directory.
2850 * This doesn't reduce disk accesses since the B-Tree chain is
2851 * likely cached, but it does reduce cpu overhead when looking
2852 * up file offsets for cpdup/tar/cpio style iterations.
2855 hammer_cache_node(&ip->cache[1], cursor.node);
2856 if (ran_end >= ip->ino_data.size) {
2857 dip = hammer_find_inode(&trans, ip->ino_data.parent_obj_id,
2858 ip->obj_asof, ip->obj_localization);
2860 hammer_cache_node(&dip->cache[3], cursor.node);
2861 hammer_rel_inode(dip, 0);
2864 hammer_done_cursor(&cursor);
2865 hammer_done_transaction(&trans);
2866 lwkt_reltoken(&hmp->fs_token);
2871 * BMAP operation - used to support cluster_read() only.
2873 * (struct vnode *vp, off_t loffset, off_t *doffsetp, int *runp, int *runb)
2875 * This routine may return EOPNOTSUPP if the opration is not supported for
2876 * the specified offset. The contents of the pointer arguments do not
2877 * need to be initialized in that case.
2879 * If a disk address is available and properly aligned return 0 with
2880 * *doffsetp set to the zone-2 address, and *runp / *runb set appropriately
2881 * to the run-length relative to that offset. Callers may assume that
2882 * *doffsetp is valid if 0 is returned, even if *runp is not sufficiently
2883 * large, so return EOPNOTSUPP if it is not sufficiently large.
2887 hammer_vop_bmap(struct vop_bmap_args *ap)
2889 struct hammer_transaction trans;
2890 struct hammer_inode *ip;
2892 struct hammer_cursor cursor;
2893 hammer_base_elm_t base;
2897 int64_t base_offset;
2898 int64_t base_disk_offset;
2899 int64_t last_offset;
2900 hammer_off_t last_disk_offset;
2901 hammer_off_t disk_offset;
2906 ++hammer_stats_file_iopsr;
2907 ip = ap->a_vp->v_data;
2911 * We can only BMAP regular files. We can't BMAP database files,
2914 if (ip->ino_data.obj_type != HAMMER_OBJTYPE_REGFILE)
2918 * bmap is typically called with runp/runb both NULL when used
2919 * for writing. We do not support BMAP for writing atm.
2921 if (ap->a_cmd != BUF_CMD_READ)
2925 * Scan the B-Tree to acquire blockmap addresses, then translate
2928 lwkt_gettoken(&hmp->fs_token);
2929 hammer_simple_transaction(&trans, hmp);
2931 kprintf("bmap_beg %016llx ip->cache %p\n",
2932 (long long)ap->a_loffset, ip->cache[1]);
2934 hammer_init_cursor(&trans, &cursor, &ip->cache[1], ip);
2937 * Key range (begin and end inclusive) to scan. Note that the key's
2938 * stored in the actual records represent BASE+LEN, not BASE. The
2939 * first record containing bio_offset will have a key > bio_offset.
2941 cursor.key_beg.localization = ip->obj_localization +
2942 HAMMER_LOCALIZE_MISC;
2943 cursor.key_beg.obj_id = ip->obj_id;
2944 cursor.key_beg.create_tid = 0;
2945 cursor.key_beg.delete_tid = 0;
2946 cursor.key_beg.obj_type = 0;
2948 cursor.key_beg.key = ap->a_loffset - MAXPHYS + 1;
2950 cursor.key_beg.key = ap->a_loffset + 1;
2951 if (cursor.key_beg.key < 0)
2952 cursor.key_beg.key = 0;
2953 cursor.asof = ip->obj_asof;
2954 cursor.flags |= HAMMER_CURSOR_ASOF;
2956 cursor.key_end = cursor.key_beg;
2957 KKASSERT(ip->ino_data.obj_type == HAMMER_OBJTYPE_REGFILE);
2959 ran_end = ap->a_loffset + MAXPHYS;
2960 cursor.key_beg.rec_type = HAMMER_RECTYPE_DATA;
2961 cursor.key_end.rec_type = HAMMER_RECTYPE_DATA;
2962 tmp64 = ran_end + MAXPHYS + 1; /* work-around GCC-4 bug */
2963 if (tmp64 < ran_end)
2964 cursor.key_end.key = 0x7FFFFFFFFFFFFFFFLL;
2966 cursor.key_end.key = ran_end + MAXPHYS + 1;
2968 cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE;
2970 error = hammer_ip_first(&cursor);
2971 base_offset = last_offset = 0;
2972 base_disk_offset = last_disk_offset = 0;
2974 while (error == 0) {
2976 * Get the base file offset of the record. The key for
2977 * data records is (base + bytes) rather then (base).
2979 * NOTE: rec_offset + rec_len may exceed the end-of-file.
2980 * The extra bytes should be zero on-disk and the BMAP op
2981 * should still be ok.
2983 base = &cursor.leaf->base;
2984 rec_offset = base->key - cursor.leaf->data_len;
2985 rec_len = cursor.leaf->data_len;
2988 * Incorporate any cached truncation.
2990 * NOTE: Modifications to rec_len based on synthesized
2991 * truncation points remove the guarantee that any extended
2992 * data on disk is zero (since the truncations may not have
2993 * taken place on-media yet).
2995 if (ip->flags & HAMMER_INODE_TRUNCATED) {
2996 if (hammer_cursor_ondisk(&cursor) ||
2997 cursor.iprec->flush_state == HAMMER_FST_FLUSH) {
2998 if (ip->trunc_off <= rec_offset)
3000 else if (ip->trunc_off < rec_offset + rec_len)
3001 rec_len = (int)(ip->trunc_off - rec_offset);
3004 if (ip->sync_flags & HAMMER_INODE_TRUNCATED) {
3005 if (hammer_cursor_ondisk(&cursor)) {
3006 if (ip->sync_trunc_off <= rec_offset)
3008 else if (ip->sync_trunc_off < rec_offset + rec_len)
3009 rec_len = (int)(ip->sync_trunc_off - rec_offset);
3014 * Accumulate information. If we have hit a discontiguous
3015 * block reset base_offset unless we are already beyond the
3016 * requested offset. If we are, that's it, we stop.
3020 if (hammer_cursor_ondisk(&cursor)) {
3021 disk_offset = cursor.leaf->data_offset;
3022 if (rec_offset != last_offset ||
3023 disk_offset != last_disk_offset) {
3024 if (rec_offset > ap->a_loffset)
3026 base_offset = rec_offset;
3027 base_disk_offset = disk_offset;
3029 last_offset = rec_offset + rec_len;
3030 last_disk_offset = disk_offset + rec_len;
3032 error = hammer_ip_next(&cursor);
3036 kprintf("BMAP %016llx: %016llx - %016llx\n",
3037 (long long)ap->a_loffset,
3038 (long long)base_offset,
3039 (long long)last_offset);
3040 kprintf("BMAP %16s: %016llx - %016llx\n", "",
3041 (long long)base_disk_offset,
3042 (long long)last_disk_offset);
3046 hammer_cache_node(&ip->cache[1], cursor.node);
3048 kprintf("bmap_end2 %016llx ip->cache %p\n",
3049 (long long)ap->a_loffset, ip->cache[1]);
3052 hammer_done_cursor(&cursor);
3053 hammer_done_transaction(&trans);
3054 lwkt_reltoken(&hmp->fs_token);
3057 * If we couldn't find any records or the records we did find were
3058 * all behind the requested offset, return failure. A forward
3059 * truncation can leave a hole w/ no on-disk records.
3061 if (last_offset == 0 || last_offset < ap->a_loffset)
3062 return (EOPNOTSUPP);
3065 * Figure out the block size at the requested offset and adjust
3066 * our limits so the cluster_read() does not create inappropriately
3067 * sized buffer cache buffers.
3069 blksize = hammer_blocksize(ap->a_loffset);
3070 if (hammer_blocksize(base_offset) != blksize) {
3071 base_offset = hammer_blockdemarc(base_offset, ap->a_loffset);
3073 if (last_offset != ap->a_loffset &&
3074 hammer_blocksize(last_offset - 1) != blksize) {
3075 last_offset = hammer_blockdemarc(ap->a_loffset,
3080 * Returning EOPNOTSUPP simply prevents the direct-IO optimization
3083 disk_offset = base_disk_offset + (ap->a_loffset - base_offset);
3085 if ((disk_offset & HAMMER_OFF_ZONE_MASK) != HAMMER_ZONE_LARGE_DATA) {
3087 * Only large-data zones can be direct-IOd
3090 } else if ((disk_offset & HAMMER_BUFMASK) ||
3091 (last_offset - ap->a_loffset) < blksize) {
3093 * doffsetp is not aligned or the forward run size does
3094 * not cover a whole buffer, disallow the direct I/O.
3101 *ap->a_doffsetp = disk_offset;
3103 *ap->a_runb = ap->a_loffset - base_offset;
3104 KKASSERT(*ap->a_runb >= 0);
3107 *ap->a_runp = last_offset - ap->a_loffset;
3108 KKASSERT(*ap->a_runp >= 0);
3116 * Write to a regular file. Because this is a strategy call the OS is
3117 * trying to actually get data onto the media.
3121 hammer_vop_strategy_write(struct vop_strategy_args *ap)
3123 hammer_record_t record;
3134 ip = ap->a_vp->v_data;
3137 blksize = hammer_blocksize(bio->bio_offset);
3138 KKASSERT(bp->b_bufsize == blksize);
3140 if (ip->flags & HAMMER_INODE_RO) {
3141 bp->b_error = EROFS;
3142 bp->b_flags |= B_ERROR;
3147 lwkt_gettoken(&hmp->fs_token);
3150 * Interlock with inode destruction (no in-kernel or directory
3151 * topology visibility). If we queue new IO while trying to
3152 * destroy the inode we can deadlock the vtrunc call in
3153 * hammer_inode_unloadable_check().
3155 * Besides, there's no point flushing a bp associated with an
3156 * inode that is being destroyed on-media and has no kernel
3159 if ((ip->flags | ip->sync_flags) &
3160 (HAMMER_INODE_DELETING|HAMMER_INODE_DELETED)) {
3163 lwkt_reltoken(&hmp->fs_token);
3168 * Reserve space and issue a direct-write from the front-end.
3169 * NOTE: The direct_io code will hammer_bread/bcopy smaller
3172 * An in-memory record will be installed to reference the storage
3173 * until the flusher can get to it.
3175 * Since we own the high level bio the front-end will not try to
3176 * do a direct-read until the write completes.
3178 * NOTE: The only time we do not reserve a full-sized buffers
3179 * worth of data is if the file is small. We do not try to
3180 * allocate a fragment (from the small-data zone) at the end of
3181 * an otherwise large file as this can lead to wildly separated
3184 KKASSERT((bio->bio_offset & HAMMER_BUFMASK) == 0);
3185 KKASSERT(bio->bio_offset < ip->ino_data.size);
3186 if (bio->bio_offset || ip->ino_data.size > HAMMER_BUFSIZE / 2)
3187 bytes = bp->b_bufsize;
3189 bytes = ((int)ip->ino_data.size + 15) & ~15;
3191 record = hammer_ip_add_bulk(ip, bio->bio_offset, bp->b_data,
3195 * B_VFSFLAG1 indicates that a REDO_WRITE entry was generated
3196 * in hammer_vop_write(). We must flag the record so the proper
3197 * REDO_TERM_WRITE entry is generated during the flush.
3200 if (bp->b_flags & B_VFSFLAG1) {
3201 record->flags |= HAMMER_RECF_REDO;
3202 bp->b_flags &= ~B_VFSFLAG1;
3204 hammer_io_direct_write(hmp, bio, record);
3205 if (ip->rsv_recs > 1 && hmp->rsv_recs > hammer_limit_recs)
3206 hammer_flush_inode(ip, 0);
3208 bp->b_bio2.bio_offset = NOOFFSET;
3209 bp->b_error = error;
3210 bp->b_flags |= B_ERROR;
3213 lwkt_reltoken(&hmp->fs_token);
3218 * dounlink - disconnect a directory entry
3220 * XXX whiteout support not really in yet
3223 hammer_dounlink(hammer_transaction_t trans, struct nchandle *nch,
3224 struct vnode *dvp, struct ucred *cred,
3225 int flags, int isdir)
3227 struct namecache *ncp;
3231 struct hammer_cursor cursor;
3233 u_int32_t max_iterations;
3237 * Calculate the namekey and setup the key range for the scan. This
3238 * works kinda like a chained hash table where the lower 32 bits
3239 * of the namekey synthesize the chain.
3241 * The key range is inclusive of both key_beg and key_end.
3247 if (dip->flags & HAMMER_INODE_RO)
3250 namekey = hammer_directory_namekey(dip, ncp->nc_name, ncp->nc_nlen,
3253 hammer_init_cursor(trans, &cursor, &dip->cache[1], dip);
3254 cursor.key_beg.localization = dip->obj_localization +
3255 hammer_dir_localization(dip);
3256 cursor.key_beg.obj_id = dip->obj_id;
3257 cursor.key_beg.key = namekey;
3258 cursor.key_beg.create_tid = 0;
3259 cursor.key_beg.delete_tid = 0;
3260 cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
3261 cursor.key_beg.obj_type = 0;
3263 cursor.key_end = cursor.key_beg;
3264 cursor.key_end.key += max_iterations;
3265 cursor.asof = dip->obj_asof;
3266 cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;
3269 * Scan all matching records (the chain), locate the one matching
3270 * the requested path component. info->last_error contains the
3271 * error code on search termination and could be 0, ENOENT, or
3274 * The hammer_ip_*() functions merge in-memory records with on-disk
3275 * records for the purposes of the search.
3277 error = hammer_ip_first(&cursor);
3279 while (error == 0) {
3280 error = hammer_ip_resolve_data(&cursor);
3283 nlen = cursor.leaf->data_len - HAMMER_ENTRY_NAME_OFF;
3285 if (ncp->nc_nlen == nlen &&
3286 bcmp(ncp->nc_name, cursor.data->entry.name, nlen) == 0) {
3289 error = hammer_ip_next(&cursor);
3293 * If all is ok we have to get the inode so we can adjust nlinks.
3294 * To avoid a deadlock with the flusher we must release the inode
3295 * lock on the directory when acquiring the inode for the entry.
3297 * If the target is a directory, it must be empty.
3300 hammer_unlock(&cursor.ip->lock);
3301 ip = hammer_get_inode(trans, dip, cursor.data->entry.obj_id,
3303 cursor.data->entry.localization,
3305 hammer_lock_sh(&cursor.ip->lock);
3306 if (error == ENOENT) {
3307 kprintf("HAMMER: WARNING: Removing "
3308 "dirent w/missing inode \"%s\"\n"
3309 "\tobj_id = %016llx\n",
3311 (long long)cursor.data->entry.obj_id);
3316 * If isdir >= 0 we validate that the entry is or is not a
3317 * directory. If isdir < 0 we don't care.
3319 if (error == 0 && isdir >= 0 && ip) {
3321 ip->ino_data.obj_type != HAMMER_OBJTYPE_DIRECTORY) {
3323 } else if (isdir == 0 &&
3324 ip->ino_data.obj_type == HAMMER_OBJTYPE_DIRECTORY) {
3330 * If we are trying to remove a directory the directory must
3333 * The check directory code can loop and deadlock/retry. Our
3334 * own cursor's node locks must be released to avoid a 3-way
3335 * deadlock with the flusher if the check directory code
3338 * If any changes whatsoever have been made to the cursor
3339 * set EDEADLK and retry.
3341 * WARNING: See warnings in hammer_unlock_cursor()
3344 if (error == 0 && ip && ip->ino_data.obj_type ==
3345 HAMMER_OBJTYPE_DIRECTORY) {
3346 hammer_unlock_cursor(&cursor);
3347 error = hammer_ip_check_directory_empty(trans, ip);
3348 hammer_lock_cursor(&cursor);
3349 if (cursor.flags & HAMMER_CURSOR_RETEST) {
3350 kprintf("HAMMER: Warning: avoided deadlock "
3358 * Delete the directory entry.
3360 * WARNING: hammer_ip_del_directory() may have to terminate
3361 * the cursor to avoid a deadlock. It is ok to call
3362 * hammer_done_cursor() twice.
3365 error = hammer_ip_del_directory(trans, &cursor,
3368 hammer_done_cursor(&cursor);
3370 cache_setunresolved(nch);
3371 cache_setvp(nch, NULL);
3374 * XXX locking. Note: ip->vp might get ripped out
3375 * when we setunresolved() the nch since we had
3376 * no other reference to it. In that case ip->vp
3380 hammer_knote(ip->vp, NOTE_DELETE);
3381 cache_inval_vp(ip->vp, CINV_DESTROY);
3385 hammer_rel_inode(ip, 0);
3387 hammer_done_cursor(&cursor);
3389 if (error == EDEADLK)
3395 /************************************************************************
3396 * FIFO AND SPECFS OPS *
3397 ************************************************************************
3401 hammer_vop_fifoclose (struct vop_close_args *ap)
3403 /* XXX update itimes */
3404 return (VOCALL(&fifo_vnode_vops, &ap->a_head));
3408 hammer_vop_fiforead (struct vop_read_args *ap)
3412 error = VOCALL(&fifo_vnode_vops, &ap->a_head);
3413 /* XXX update access time */
3418 hammer_vop_fifowrite (struct vop_write_args *ap)
3422 error = VOCALL(&fifo_vnode_vops, &ap->a_head);
3423 /* XXX update access time */
3429 hammer_vop_fifokqfilter(struct vop_kqfilter_args *ap)
3433 error = VOCALL(&fifo_vnode_vops, &ap->a_head);
3435 error = hammer_vop_kqfilter(ap);
3439 /************************************************************************
3441 ************************************************************************
3444 static void filt_hammerdetach(struct knote *kn);
3445 static int filt_hammerread(struct knote *kn, long hint);
3446 static int filt_hammerwrite(struct knote *kn, long hint);
3447 static int filt_hammervnode(struct knote *kn, long hint);
3449 static struct filterops hammerread_filtops =
3450 { FILTEROP_ISFD, NULL, filt_hammerdetach, filt_hammerread };
3451 static struct filterops hammerwrite_filtops =
3452 { FILTEROP_ISFD, NULL, filt_hammerdetach, filt_hammerwrite };
3453 static struct filterops hammervnode_filtops =
3454 { FILTEROP_ISFD, NULL, filt_hammerdetach, filt_hammervnode };
3458 hammer_vop_kqfilter(struct vop_kqfilter_args *ap)
3460 struct vnode *vp = ap->a_vp;
3461 struct knote *kn = ap->a_kn;
3463 switch (kn->kn_filter) {
3465 kn->kn_fop = &hammerread_filtops;
3468 kn->kn_fop = &hammerwrite_filtops;
3471 kn->kn_fop = &hammervnode_filtops;
3474 return (EOPNOTSUPP);
3477 kn->kn_hook = (caddr_t)vp;
3479 knote_insert(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn);
3485 filt_hammerdetach(struct knote *kn)
3487 struct vnode *vp = (void *)kn->kn_hook;
3489 knote_remove(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn);
3493 filt_hammerread(struct knote *kn, long hint)
3495 struct vnode *vp = (void *)kn->kn_hook;
3496 hammer_inode_t ip = VTOI(vp);
3497 hammer_mount_t hmp = ip->hmp;
3500 if (hint == NOTE_REVOKE) {
3501 kn->kn_flags |= (EV_EOF | EV_ONESHOT);
3504 lwkt_gettoken(&hmp->fs_token); /* XXX use per-ip-token */
3505 off = ip->ino_data.size - kn->kn_fp->f_offset;
3506 kn->kn_data = (off < INTPTR_MAX) ? off : INTPTR_MAX;
3507 lwkt_reltoken(&hmp->fs_token);
3508 if (kn->kn_sfflags & NOTE_OLDAPI)
3510 return (kn->kn_data != 0);
3514 filt_hammerwrite(struct knote *kn, long hint)
3516 if (hint == NOTE_REVOKE)
3517 kn->kn_flags |= (EV_EOF | EV_ONESHOT);
3523 filt_hammervnode(struct knote *kn, long hint)
3525 if (kn->kn_sfflags & hint)
3526 kn->kn_fflags |= hint;
3527 if (hint == NOTE_REVOKE) {
3528 kn->kn_flags |= EV_EOF;
3531 return (kn->kn_fflags != 0);