2 * Copyright (c) 2007-2008 The DragonFly Project. All rights reserved.
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@backplane.com>
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * 3. Neither the name of The DragonFly Project nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific, prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * $DragonFly: src/sys/vfs/hammer/hammer_vnops.c,v 1.102 2008/10/16 17:24:16 dillon Exp $
37 #include <sys/param.h>
38 #include <sys/systm.h>
39 #include <sys/kernel.h>
40 #include <sys/fcntl.h>
41 #include <sys/namecache.h>
42 #include <sys/vnode.h>
43 #include <sys/lockf.h>
44 #include <sys/event.h>
46 #include <sys/dirent.h>
48 #include <vm/vm_extern.h>
49 #include <vfs/fifofs/fifo.h>
51 #include <sys/mplock2.h>
58 /*static int hammer_vop_vnoperate(struct vop_generic_args *);*/
59 static int hammer_vop_fsync(struct vop_fsync_args *);
60 static int hammer_vop_read(struct vop_read_args *);
61 static int hammer_vop_write(struct vop_write_args *);
62 static int hammer_vop_access(struct vop_access_args *);
63 static int hammer_vop_advlock(struct vop_advlock_args *);
64 static int hammer_vop_close(struct vop_close_args *);
65 static int hammer_vop_ncreate(struct vop_ncreate_args *);
66 static int hammer_vop_getattr(struct vop_getattr_args *);
67 static int hammer_vop_nresolve(struct vop_nresolve_args *);
68 static int hammer_vop_nlookupdotdot(struct vop_nlookupdotdot_args *);
69 static int hammer_vop_nlink(struct vop_nlink_args *);
70 static int hammer_vop_nmkdir(struct vop_nmkdir_args *);
71 static int hammer_vop_nmknod(struct vop_nmknod_args *);
72 static int hammer_vop_open(struct vop_open_args *);
73 static int hammer_vop_print(struct vop_print_args *);
74 static int hammer_vop_readdir(struct vop_readdir_args *);
75 static int hammer_vop_readlink(struct vop_readlink_args *);
76 static int hammer_vop_nremove(struct vop_nremove_args *);
77 static int hammer_vop_nrename(struct vop_nrename_args *);
78 static int hammer_vop_nrmdir(struct vop_nrmdir_args *);
79 static int hammer_vop_markatime(struct vop_markatime_args *);
80 static int hammer_vop_setattr(struct vop_setattr_args *);
81 static int hammer_vop_strategy(struct vop_strategy_args *);
82 static int hammer_vop_bmap(struct vop_bmap_args *ap);
83 static int hammer_vop_nsymlink(struct vop_nsymlink_args *);
84 static int hammer_vop_nwhiteout(struct vop_nwhiteout_args *);
85 static int hammer_vop_ioctl(struct vop_ioctl_args *);
86 static int hammer_vop_mountctl(struct vop_mountctl_args *);
87 static int hammer_vop_kqfilter (struct vop_kqfilter_args *);
89 static int hammer_vop_fifoclose (struct vop_close_args *);
90 static int hammer_vop_fiforead (struct vop_read_args *);
91 static int hammer_vop_fifowrite (struct vop_write_args *);
92 static int hammer_vop_fifokqfilter (struct vop_kqfilter_args *);
94 struct vop_ops hammer_vnode_vops = {
95 .vop_default = vop_defaultop,
96 .vop_fsync = hammer_vop_fsync,
97 .vop_getpages = vop_stdgetpages,
98 .vop_putpages = vop_stdputpages,
99 .vop_read = hammer_vop_read,
100 .vop_write = hammer_vop_write,
101 .vop_access = hammer_vop_access,
102 .vop_advlock = hammer_vop_advlock,
103 .vop_close = hammer_vop_close,
104 .vop_ncreate = hammer_vop_ncreate,
105 .vop_getattr = hammer_vop_getattr,
106 .vop_inactive = hammer_vop_inactive,
107 .vop_reclaim = hammer_vop_reclaim,
108 .vop_nresolve = hammer_vop_nresolve,
109 .vop_nlookupdotdot = hammer_vop_nlookupdotdot,
110 .vop_nlink = hammer_vop_nlink,
111 .vop_nmkdir = hammer_vop_nmkdir,
112 .vop_nmknod = hammer_vop_nmknod,
113 .vop_open = hammer_vop_open,
114 .vop_pathconf = vop_stdpathconf,
115 .vop_print = hammer_vop_print,
116 .vop_readdir = hammer_vop_readdir,
117 .vop_readlink = hammer_vop_readlink,
118 .vop_nremove = hammer_vop_nremove,
119 .vop_nrename = hammer_vop_nrename,
120 .vop_nrmdir = hammer_vop_nrmdir,
121 .vop_markatime = hammer_vop_markatime,
122 .vop_setattr = hammer_vop_setattr,
123 .vop_bmap = hammer_vop_bmap,
124 .vop_strategy = hammer_vop_strategy,
125 .vop_nsymlink = hammer_vop_nsymlink,
126 .vop_nwhiteout = hammer_vop_nwhiteout,
127 .vop_ioctl = hammer_vop_ioctl,
128 .vop_mountctl = hammer_vop_mountctl,
129 .vop_kqfilter = hammer_vop_kqfilter
132 struct vop_ops hammer_spec_vops = {
133 .vop_default = vop_defaultop,
134 .vop_fsync = hammer_vop_fsync,
135 .vop_read = vop_stdnoread,
136 .vop_write = vop_stdnowrite,
137 .vop_access = hammer_vop_access,
138 .vop_close = hammer_vop_close,
139 .vop_markatime = hammer_vop_markatime,
140 .vop_getattr = hammer_vop_getattr,
141 .vop_inactive = hammer_vop_inactive,
142 .vop_reclaim = hammer_vop_reclaim,
143 .vop_setattr = hammer_vop_setattr
146 struct vop_ops hammer_fifo_vops = {
147 .vop_default = fifo_vnoperate,
148 .vop_fsync = hammer_vop_fsync,
149 .vop_read = hammer_vop_fiforead,
150 .vop_write = hammer_vop_fifowrite,
151 .vop_access = hammer_vop_access,
152 .vop_close = hammer_vop_fifoclose,
153 .vop_markatime = hammer_vop_markatime,
154 .vop_getattr = hammer_vop_getattr,
155 .vop_inactive = hammer_vop_inactive,
156 .vop_reclaim = hammer_vop_reclaim,
157 .vop_setattr = hammer_vop_setattr,
158 .vop_kqfilter = hammer_vop_fifokqfilter
163 hammer_knote(struct vnode *vp, int flags)
166 KNOTE(&vp->v_pollinfo.vpi_kqinfo.ki_note, flags);
169 #ifdef DEBUG_TRUNCATE
170 struct hammer_inode *HammerTruncIp;
173 static int hammer_dounlink(hammer_transaction_t trans, struct nchandle *nch,
174 struct vnode *dvp, struct ucred *cred,
175 int flags, int isdir);
176 static int hammer_vop_strategy_read(struct vop_strategy_args *ap);
177 static int hammer_vop_strategy_write(struct vop_strategy_args *ap);
182 hammer_vop_vnoperate(struct vop_generic_args *ap)
184 return (VOCALL(&hammer_vnode_vops, ap));
189 * hammer_vop_fsync { vp, waitfor }
191 * fsync() an inode to disk and wait for it to be completely committed
192 * such that the information would not be undone if a crash occurred after
195 * NOTE: HAMMER's fsync()s are going to remain expensive until we implement
196 * a REDO log. A sysctl is provided to relax HAMMER's fsync()
199 * Ultimately the combination of a REDO log and use of fast storage
200 * to front-end cluster caches will make fsync fast, but it ain't
201 * here yet. And, in any case, we need real transactional
202 * all-or-nothing features which are not restricted to a single file.
206 hammer_vop_fsync(struct vop_fsync_args *ap)
208 hammer_inode_t ip = VTOI(ap->a_vp);
209 hammer_mount_t hmp = ip->hmp;
210 int waitfor = ap->a_waitfor;
214 * Fsync rule relaxation (default is either full synchronous flush
215 * or REDO semantics with synchronous flush).
217 if (ap->a_flags & VOP_FSYNC_SYSCALL) {
218 switch(hammer_fsync_mode) {
221 /* no REDO, full synchronous flush */
225 /* no REDO, full asynchronous flush */
226 if (waitfor == MNT_WAIT)
227 waitfor = MNT_NOWAIT;
230 /* REDO semantics, synchronous flush */
231 if (hmp->version < HAMMER_VOL_VERSION_FOUR)
233 mode = HAMMER_FLUSH_UNDOS_AUTO;
236 /* REDO semantics, relaxed asynchronous flush */
237 if (hmp->version < HAMMER_VOL_VERSION_FOUR)
239 mode = HAMMER_FLUSH_UNDOS_RELAXED;
240 if (waitfor == MNT_WAIT)
241 waitfor = MNT_NOWAIT;
244 /* ignore the fsync() system call */
247 /* we have to do something */
248 mode = HAMMER_FLUSH_UNDOS_RELAXED;
249 if (waitfor == MNT_WAIT)
250 waitfor = MNT_NOWAIT;
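/*
 * Editorial sketch (not part of the original source): hammer_fsync_mode
 * selects one of the cases above and is expected to be a run-time
 * tunable, e.g. a sysctl along the lines of (name assumed):
 *
 *	sysctl vfs.hammer.fsync_mode=3
 *
 * which would request REDO semantics with a relaxed asynchronous flush
 * when the volume version supports it.
 */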
255 * Fast fsync only needs to flush the UNDO/REDO fifo if
256 * HAMMER_INODE_REDO is non-zero and the only modifications
257 * made to the file are write or write-extends.
259 if ((ip->flags & HAMMER_INODE_REDO) &&
260 (ip->flags & HAMMER_INODE_MODMASK_NOREDO) == 0
262 ++hammer_count_fsyncs;
263 hammer_flusher_flush_undos(hmp, mode);
269 * REDO is enabled by fsync(), the idea being we really only
270 * want to lay down REDO records when programs are using
271 * fsync() heavily. The first fsync() on the file starts
272 * the gravy train going and later fsync()s keep it hot by
273 * resetting the redo_count.
275 * We weren't running REDOs before now so we have to fall
276 * through and do a full fsync of what we have.
278 if (hmp->version >= HAMMER_VOL_VERSION_FOUR &&
279 (hmp->flags & HAMMER_MOUNT_REDO_RECOVERY_RUN) == 0) {
280 ip->flags |= HAMMER_INODE_REDO;
287 * Do a full flush sequence.
289 ++hammer_count_fsyncs;
290 vfsync(ap->a_vp, waitfor, 1, NULL, NULL);
291 hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
292 if (waitfor == MNT_WAIT) {
294 hammer_wait_inode(ip);
295 vn_lock(ap->a_vp, LK_EXCLUSIVE | LK_RETRY);
301 * hammer_vop_read { vp, uio, ioflag, cred }
307 hammer_vop_read(struct vop_read_args *ap)
309 struct hammer_transaction trans;
322 if (ap->a_vp->v_type != VREG)
329 * Allow the UIO's size to override the sequential heuristic.
331 blksize = hammer_blocksize(uio->uio_offset);
332 seqcount = (uio->uio_resid + (BKVASIZE - 1)) / BKVASIZE;
333 ioseqcount = (ap->a_ioflag >> 16);
334 if (seqcount < ioseqcount)
335 seqcount = ioseqcount;
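/*
 * Worked example (editorial sketch): with BKVASIZE = 64KB (the value is
 * platform dependent) a 1MB read gives
 *
 *	seqcount = (1048576 + 65535) / 65536 = 16
 *
 * which is then raised to the sequential hint carried in the upper bits
 * of a_ioflag if that hint is larger.
 */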
338 * Temporary hack until more of HAMMER can be made MPSAFE.
341 if (curthread->td_mpcount) {
343 hammer_start_transaction(&trans, ip->hmp);
348 hammer_start_transaction(&trans, ip->hmp);
353 * If reading or writing a huge amount of data we have to break
354 * atomicity and allow the operation to be interrupted by a signal
355 * or it can DOS the machine.
357 bigread = (uio->uio_resid > 100 * 1024 * 1024);
360 * Access the data typically in HAMMER_BUFSIZE blocks via the
361 * buffer cache, but HAMMER may use a variable block size based
364 * XXX Temporary hack, delay the start transaction while we remain
365 * MPSAFE. NOTE: ino_data.size cannot change while vnode is
368 while (uio->uio_resid > 0 && uio->uio_offset < ip->ino_data.size) {
372 blksize = hammer_blocksize(uio->uio_offset);
373 offset = (int)uio->uio_offset & (blksize - 1);
374 base_offset = uio->uio_offset - offset;
376 if (bigread && (error = hammer_signal_check(ip->hmp)) != 0)
382 bp = getcacheblk(ap->a_vp, base_offset);
391 if (got_mplock == 0) {
394 hammer_start_transaction(&trans, ip->hmp);
397 if (hammer_cluster_enable) {
399 * Use file_limit to prevent cluster_read() from
400 * creating buffers of the wrong block size past
403 file_limit = ip->ino_data.size;
404 if (base_offset < HAMMER_XDEMARC &&
405 file_limit > HAMMER_XDEMARC) {
406 file_limit = HAMMER_XDEMARC;
408 error = cluster_read(ap->a_vp,
409 file_limit, base_offset,
410 blksize, uio->uio_resid,
411 seqcount * BKVASIZE, &bp);
413 error = bread(ap->a_vp, base_offset, blksize, &bp);
420 if ((hammer_debug_io & 0x0001) && (bp->b_flags & B_IODEBUG)) {
421 kprintf("doff %016jx read file %016jx@%016jx\n",
422 (intmax_t)bp->b_bio2.bio_offset,
423 (intmax_t)ip->obj_id,
424 (intmax_t)bp->b_loffset);
426 bp->b_flags &= ~B_IODEBUG;
428 /* bp->b_flags |= B_CLUSTEROK; temporarily disabled */
429 n = blksize - offset;
430 if (n > uio->uio_resid)
432 if (n > ip->ino_data.size - uio->uio_offset)
433 n = (int)(ip->ino_data.size - uio->uio_offset);
434 error = uiomove((char *)bp->b_data + offset, n, uio);
436 /* data has a lower priority than meta-data */
437 bp->b_flags |= B_AGE;
441 hammer_stats_file_read += n;
445 * XXX only update the atime if we had to get the MP lock.
446 * XXX hack hack hack, fixme.
449 if ((ip->flags & HAMMER_INODE_RO) == 0 &&
450 (ip->hmp->mp->mnt_flag & MNT_NOATIME) == 0) {
451 ip->ino_data.atime = trans.time;
452 hammer_modify_inode(&trans, ip, HAMMER_INODE_ATIME);
454 hammer_done_transaction(&trans);
462 * hammer_vop_write { vp, uio, ioflag, cred }
466 hammer_vop_write(struct vop_write_args *ap)
468 struct hammer_transaction trans;
469 struct hammer_inode *ip;
482 if (ap->a_vp->v_type != VREG)
488 seqcount = ap->a_ioflag >> 16;
490 if (ip->flags & HAMMER_INODE_RO)
494 * Create a transaction to cover the operations we perform.
496 hammer_start_transaction(&trans, hmp);
502 if (ap->a_ioflag & IO_APPEND)
503 uio->uio_offset = ip->ino_data.size;
506 * Check for illegal write offsets. Valid range is 0...2^63-1.
508 * NOTE: the base_off assignment is required to work around what
509 * I consider to be a GCC-4 optimization bug.
511 if (uio->uio_offset < 0) {
512 hammer_done_transaction(&trans);
515 base_offset = uio->uio_offset + uio->uio_resid; /* work around gcc-4 */
516 if (uio->uio_resid > 0 && base_offset <= uio->uio_offset) {
517 hammer_done_transaction(&trans);
522 * If reading or writing a huge amount of data we have to break
523 * atomicity and allow the operation to be interrupted by a signal
524 * or it can DOS the machine.
526 * Preset redo_count so we stop generating REDOs earlier if the
529 bigwrite = (uio->uio_resid > 100 * 1024 * 1024);
530 if ((ip->flags & HAMMER_INODE_REDO) &&
531 ip->redo_count < hammer_limit_redo) {
532 ip->redo_count += uio->uio_resid;
536 * Access the data typically in HAMMER_BUFSIZE blocks via the
537 * buffer cache, but HAMMER may use a variable block size based
540 while (uio->uio_resid > 0) {
548 if ((error = hammer_checkspace(hmp, HAMMER_CHKSPC_WRITE)) != 0)
550 if (bigwrite && (error = hammer_signal_check(hmp)) != 0)
553 blksize = hammer_blocksize(uio->uio_offset);
556 * Do not allow HAMMER to blow out the buffer cache. Very
557 * large UIOs can lock out other processes due to bwillwrite()
560 * The hammer inode is not locked during these operations.
561 * The vnode is locked which can interfere with the pageout
562 * daemon for non-UIO_NOCOPY writes but should not interfere
563 * with the buffer cache. Even so, we cannot afford to
564 * allow the pageout daemon to build up too many dirty buffer
567 * Only call this if we aren't being recursively called from
568 * a virtual disk device (vn), else we may deadlock.
570 if ((ap->a_ioflag & IO_RECURSE) == 0)
574 * Control the number of pending records associated with
575 * this inode. If too many have accumulated start a
576 * flush. Try to maintain a pipeline with the flusher.
578 if (ip->rsv_recs >= hammer_limit_inode_recs) {
579 hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
581 if (ip->rsv_recs >= hammer_limit_inode_recs * 2) {
582 while (ip->rsv_recs >= hammer_limit_inode_recs) {
583 tsleep(&ip->rsv_recs, 0, "hmrwww", hz);
585 hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
590 * Do not allow HAMMER to blow out system memory by
591 * accumulating too many records. Records are so well
592 * decoupled from the buffer cache that it is possible
593 * for userland to push data out to the media via
594 * direct-write, but build up the records queued to the
595 * backend faster than the backend can flush them out.
596 * HAMMER has hit its write limit but the frontend has
597 * no pushback to slow it down.
599 if (hmp->rsv_recs > hammer_limit_recs / 2) {
601 * Get the inode on the flush list
603 if (ip->rsv_recs >= 64)
604 hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
605 else if (ip->rsv_recs >= 16)
606 hammer_flush_inode(ip, 0);
609 * Keep the flusher going if the system keeps
612 delta = hmp->count_newrecords -
613 hmp->last_newrecords;
614 if (delta < 0 || delta > hammer_limit_recs / 2) {
615 hmp->last_newrecords = hmp->count_newrecords;
616 hammer_sync_hmp(hmp, MNT_NOWAIT);
620 * If we have gotten behind start slowing
623 delta = (hmp->rsv_recs - hammer_limit_recs) *
624 hz / hammer_limit_recs;
626 tsleep(&trans, 0, "hmrslo", delta);
631 * Calculate the blocksize at the current offset and figure
632 * out how much we can actually write.
634 blkmask = blksize - 1;
635 offset = (int)uio->uio_offset & blkmask;
636 base_offset = uio->uio_offset & ~(int64_t)blkmask;
637 n = blksize - offset;
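/*
 * Worked example (editorial sketch): with a 16KB block size, a write
 * starting at uio_offset 70000 yields
 *
 *	blkmask     = 0x3fff
 *	offset      = 70000 & 0x3fff = 4464
 *	base_offset = 65536
 *	n           = 16384 - 4464 = 11920
 *
 * so at most 11920 bytes are written before the loop advances to the
 * next block boundary.
 */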
638 if (n > uio->uio_resid) {
644 nsize = uio->uio_offset + n;
645 if (nsize > ip->ino_data.size) {
646 if (uio->uio_offset > ip->ino_data.size)
650 nvextendbuf(ap->a_vp,
653 hammer_blocksize(ip->ino_data.size),
654 hammer_blocksize(nsize),
655 hammer_blockoff(ip->ino_data.size),
656 hammer_blockoff(nsize),
659 kflags |= NOTE_EXTEND;
662 if (uio->uio_segflg == UIO_NOCOPY) {
664 * Issuing a write with the same data backing the
665 * buffer. Instantiate the buffer to collect the
666 * backing vm pages, then read-in any missing bits.
668 * This case is used by vop_stdputpages().
670 bp = getblk(ap->a_vp, base_offset,
671 blksize, GETBLK_BHEAVY, 0);
672 if ((bp->b_flags & B_CACHE) == 0) {
674 error = bread(ap->a_vp, base_offset,
677 } else if (offset == 0 && uio->uio_resid >= blksize) {
679 * Even though we are entirely overwriting the buffer
680 * we may still have to zero it out to avoid a
681 * mmap/write visibility issue.
683 bp = getblk(ap->a_vp, base_offset, blksize, GETBLK_BHEAVY, 0);
684 if ((bp->b_flags & B_CACHE) == 0)
686 } else if (base_offset >= ip->ino_data.size) {
688 * If the base offset of the buffer is beyond the
689 * file EOF, we don't have to issue a read.
691 bp = getblk(ap->a_vp, base_offset,
692 blksize, GETBLK_BHEAVY, 0);
696 * Partial overwrite, read in any missing bits then
697 * replace the portion being written.
699 error = bread(ap->a_vp, base_offset, blksize, &bp);
704 error = uiomove(bp->b_data + offset, n, uio);
707 * Generate REDO records if enabled and redo_count will not
708 * exceed the limit.
710 * If redo_count exceeds the limit we stop generating records
711 * and clear HAMMER_INODE_REDO. This will cause the next
712 * fsync() to do a full meta-data sync instead of just an
713 * UNDO/REDO fifo update.
715 * When clearing HAMMER_INODE_REDO any pre-existing REDOs
716 * will still be tracked. The tracks will be terminated
717 * when the related meta-data (including possible data
718 * modifications which are not tracked via REDO) is
721 if ((ip->flags & HAMMER_INODE_REDO) && error == 0) {
722 if (ip->redo_count < hammer_limit_redo) {
723 bp->b_flags |= B_VFSFLAG1;
724 error = hammer_generate_redo(&trans, ip,
725 base_offset + offset,
730 ip->flags &= ~HAMMER_INODE_REDO;
735 * If we screwed up we have to undo any VM size changes we
741 nvtruncbuf(ap->a_vp, ip->ino_data.size,
742 hammer_blocksize(ip->ino_data.size),
743 hammer_blockoff(ip->ino_data.size));
747 kflags |= NOTE_WRITE;
748 hammer_stats_file_write += n;
749 /* bp->b_flags |= B_CLUSTEROK; temporarily disabled */
750 if (ip->ino_data.size < uio->uio_offset) {
751 ip->ino_data.size = uio->uio_offset;
752 flags = HAMMER_INODE_SDIRTY;
756 ip->ino_data.mtime = trans.time;
757 flags |= HAMMER_INODE_MTIME | HAMMER_INODE_BUFS;
758 hammer_modify_inode(&trans, ip, flags);
761 * Once we dirty the buffer any cached zone-X offset
762 * becomes invalid. HAMMER NOTE: no-history mode cannot
763 * allow overwriting over the same data sector unless
764 * we provide UNDOs for the old data, which we don't.
766 bp->b_bio2.bio_offset = NOOFFSET;
769 * Final buffer disposition.
771 * Because meta-data updates are deferred, HAMMER is
772 * especially sensitive to excessive bdwrite()s because
773 * the I/O stream is not broken up by disk reads. So the
774 * buffer cache simply cannot keep up.
776 * WARNING! blksize is variable. cluster_write() is
777 * expected to not blow up if it encounters
778 * buffers that do not match the passed blksize.
780 * NOTE! Hammer shouldn't need to bawrite()/cluster_write().
781 * The ip->rsv_recs check should burst-flush the data.
782 * If we queue it immediately the buf could be left
783 * locked on the device queue for a very long time.
785 * NOTE! To avoid degenerate stalls due to mismatched block
786 * sizes we only honor IO_DIRECT on the write which
787 * abuts the end of the buffer. However, we must
788 * honor IO_SYNC in case someone is silly enough to
789 * configure a HAMMER file as swap, or when HAMMER
790 * is serving NFS (for commits). Ick ick.
792 bp->b_flags |= B_AGE;
793 if (ap->a_ioflag & IO_SYNC) {
795 } else if ((ap->a_ioflag & IO_DIRECT) && endofblk) {
799 if (offset + n == blksize) {
800 if (hammer_cluster_enable == 0 ||
801 (ap->a_vp->v_mount->mnt_flag & MNT_NOCLUSTERW)) {
804 cluster_write(bp, ip->ino_data.size,
812 hammer_done_transaction(&trans);
813 hammer_knote(ap->a_vp, kflags);
818 * hammer_vop_access { vp, mode, cred }
822 hammer_vop_access(struct vop_access_args *ap)
824 struct hammer_inode *ip = VTOI(ap->a_vp);
829 ++hammer_stats_file_iopsr;
830 uid = hammer_to_unix_xid(&ip->ino_data.uid);
831 gid = hammer_to_unix_xid(&ip->ino_data.gid);
833 error = vop_helper_access(ap, uid, gid, ip->ino_data.mode,
834 ip->ino_data.uflags);
839 * hammer_vop_advlock { vp, id, op, fl, flags }
843 hammer_vop_advlock(struct vop_advlock_args *ap)
845 hammer_inode_t ip = VTOI(ap->a_vp);
847 return (lf_advlock(ap, &ip->advlock, ip->ino_data.size));
851 * hammer_vop_close { vp, fflag }
853 * We can only sync-on-close for normal closes.
857 hammer_vop_close(struct vop_close_args *ap)
860 struct vnode *vp = ap->a_vp;
861 hammer_inode_t ip = VTOI(vp);
863 if (ip->flags & (HAMMER_INODE_CLOSESYNC|HAMMER_INODE_CLOSEASYNC)) {
864 if (vn_islocked(vp) == LK_EXCLUSIVE &&
865 (vp->v_flag & (VINACTIVE|VRECLAIMED)) == 0) {
866 if (ip->flags & HAMMER_INODE_CLOSESYNC)
869 waitfor = MNT_NOWAIT;
870 ip->flags &= ~(HAMMER_INODE_CLOSESYNC |
871 HAMMER_INODE_CLOSEASYNC);
872 VOP_FSYNC(vp, MNT_NOWAIT, waitfor);
876 return (vop_stdclose(ap));
880 * hammer_vop_ncreate { nch, dvp, vpp, cred, vap }
882 * The operating system has already ensured that the directory entry
883 * does not exist and done all appropriate namespace locking.
887 hammer_vop_ncreate(struct vop_ncreate_args *ap)
889 struct hammer_transaction trans;
890 struct hammer_inode *dip;
891 struct hammer_inode *nip;
892 struct nchandle *nch;
896 dip = VTOI(ap->a_dvp);
898 if (dip->flags & HAMMER_INODE_RO)
900 if ((error = hammer_checkspace(dip->hmp, HAMMER_CHKSPC_CREATE)) != 0)
904 * Create a transaction to cover the operations we perform.
906 hammer_start_transaction(&trans, dip->hmp);
907 ++hammer_stats_file_iopsw;
910 * Create a new filesystem object of the requested type. The
911 * returned inode will be referenced and shared-locked to prevent
912 * it from being moved to the flusher.
914 error = hammer_create_inode(&trans, ap->a_vap, ap->a_cred,
915 dip, nch->ncp->nc_name, nch->ncp->nc_nlen,
918 hkprintf("hammer_create_inode error %d\n", error);
919 hammer_done_transaction(&trans);
925 * Add the new filesystem object to the directory. This will also
926 * bump the inode's link count.
928 error = hammer_ip_add_directory(&trans, dip,
929 nch->ncp->nc_name, nch->ncp->nc_nlen,
932 hkprintf("hammer_ip_add_directory error %d\n", error);
938 hammer_rel_inode(nip, 0);
939 hammer_done_transaction(&trans);
942 error = hammer_get_vnode(nip, ap->a_vpp);
943 hammer_done_transaction(&trans);
944 hammer_rel_inode(nip, 0);
946 cache_setunresolved(ap->a_nch);
947 cache_setvp(ap->a_nch, *ap->a_vpp);
949 hammer_knote(ap->a_dvp, NOTE_WRITE);
955 * hammer_vop_getattr { vp, vap }
957 * Retrieve an inode's attribute information. When accessing inodes
958 * historically we fake the atime field to ensure consistent results.
959 * The atime field is stored in the B-Tree element and allowed to be
960 * updated without cycling the element.
966 hammer_vop_getattr(struct vop_getattr_args *ap)
968 struct hammer_inode *ip = VTOI(ap->a_vp);
969 struct vattr *vap = ap->a_vap;
972 * We want the fsid to be different when accessing a filesystem
973 * with different as-of's so programs like diff don't think
974 * the files are the same.
976 * We also want the fsid to be the same when comparing snapshots,
977 * or when comparing mirrors (which might be backed by different
978 * physical devices). HAMMER fsids are based on the PFS's
981 * XXX there is a chance of collision here. The va_fsid reported
982 * by stat is different from the more involved fsid used in the
985 ++hammer_stats_file_iopsr;
986 hammer_lock_sh(&ip->lock);
987 vap->va_fsid = ip->pfsm->fsid_udev ^ (u_int32_t)ip->obj_asof ^
988 (u_int32_t)(ip->obj_asof >> 32);
990 vap->va_fileid = ip->ino_leaf.base.obj_id;
991 vap->va_mode = ip->ino_data.mode;
992 vap->va_nlink = ip->ino_data.nlinks;
993 vap->va_uid = hammer_to_unix_xid(&ip->ino_data.uid);
994 vap->va_gid = hammer_to_unix_xid(&ip->ino_data.gid);
997 vap->va_size = ip->ino_data.size;
1000 * Special case for @@PFS softlinks. The actual size of the
1001 * expanded softlink is "@@0x%016llx:%05d" == 26 bytes.
1002 * or for MAX_TID is "@@-1:%05d" == 10 bytes.
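 * Illustrative values (editorial example, not from the original
 * source): a slave PFS link might expand to
 * "@@0x00000001061a8c10:00003" (26 bytes) while a master or
 * MAX_TID link expands to "@@-1:00003" (10 bytes).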
1004 if (ip->ino_data.obj_type == HAMMER_OBJTYPE_SOFTLINK &&
1005 ip->ino_data.size == 10 &&
1006 ip->obj_asof == HAMMER_MAX_TID &&
1007 ip->obj_localization == 0 &&
1008 strncmp(ip->ino_data.ext.symlink, "@@PFS", 5) == 0) {
1009 if (ip->pfsm->pfsd.mirror_flags & HAMMER_PFSD_SLAVE)
1016 * We must provide a consistent atime and mtime for snapshots
1017 * so people can do a 'tar cf - ... | md5' on them and get
1018 * consistent results.
1020 if (ip->flags & HAMMER_INODE_RO) {
1021 hammer_time_to_timespec(ip->ino_data.ctime, &vap->va_atime);
1022 hammer_time_to_timespec(ip->ino_data.ctime, &vap->va_mtime);
1024 hammer_time_to_timespec(ip->ino_data.atime, &vap->va_atime);
1025 hammer_time_to_timespec(ip->ino_data.mtime, &vap->va_mtime);
1027 hammer_time_to_timespec(ip->ino_data.ctime, &vap->va_ctime);
1028 vap->va_flags = ip->ino_data.uflags;
1029 vap->va_gen = 1; /* hammer inums are unique for all time */
1030 vap->va_blocksize = HAMMER_BUFSIZE;
1031 if (ip->ino_data.size >= HAMMER_XDEMARC) {
1032 vap->va_bytes = (ip->ino_data.size + HAMMER_XBUFMASK64) &
1034 } else if (ip->ino_data.size > HAMMER_BUFSIZE / 2) {
1035 vap->va_bytes = (ip->ino_data.size + HAMMER_BUFMASK64) &
1038 vap->va_bytes = (ip->ino_data.size + 15) & ~15;
1041 vap->va_type = hammer_get_vnode_type(ip->ino_data.obj_type);
1042 vap->va_filerev = 0; /* XXX */
1043 vap->va_uid_uuid = ip->ino_data.uid;
1044 vap->va_gid_uuid = ip->ino_data.gid;
1045 vap->va_fsid_uuid = ip->hmp->fsid;
1046 vap->va_vaflags = VA_UID_UUID_VALID | VA_GID_UUID_VALID |
1049 switch (ip->ino_data.obj_type) {
1050 case HAMMER_OBJTYPE_CDEV:
1051 case HAMMER_OBJTYPE_BDEV:
1052 vap->va_rmajor = ip->ino_data.rmajor;
1053 vap->va_rminor = ip->ino_data.rminor;
1058 hammer_unlock(&ip->lock);
1063 * hammer_vop_nresolve { nch, dvp, cred }
1065 * Locate the requested directory entry.
1069 hammer_vop_nresolve(struct vop_nresolve_args *ap)
1071 struct hammer_transaction trans;
1072 struct namecache *ncp;
1076 struct hammer_cursor cursor;
1085 u_int32_t localization;
1086 u_int32_t max_iterations;
1089 * Misc initialization, plus handle as-of name extensions. Look for
1090 * the '@@' extension. Note that as-of files and directories cannot
1093 dip = VTOI(ap->a_dvp);
1094 ncp = ap->a_nch->ncp;
1095 asof = dip->obj_asof;
1096 localization = dip->obj_localization; /* for code consistency */
1097 nlen = ncp->nc_nlen;
1098 flags = dip->flags & HAMMER_INODE_RO;
1101 hammer_simple_transaction(&trans, dip->hmp);
1102 ++hammer_stats_file_iopsr;
1104 for (i = 0; i < nlen; ++i) {
1105 if (ncp->nc_name[i] == '@' && ncp->nc_name[i+1] == '@') {
1106 error = hammer_str_to_tid(ncp->nc_name + i + 2,
1107 &ispfs, &asof, &localization);
1112 if (asof != HAMMER_MAX_TID)
1113 flags |= HAMMER_INODE_RO;
1120 * If this is a PFS softlink we dive into the PFS
1122 if (ispfs && nlen == 0) {
1123 ip = hammer_get_inode(&trans, dip, HAMMER_OBJID_ROOT,
1127 error = hammer_get_vnode(ip, &vp);
1128 hammer_rel_inode(ip, 0);
1134 cache_setvp(ap->a_nch, vp);
1141 * If there is no path component the time extension is relative to dip.
1142 * e.g. "fubar/@@<snapshot>"
1144 * "." is handled by the kernel, but ".@@<snapshot>" is not.
1145 * e.g. "fubar/.@@<snapshot>"
1147 * ".." is handled by the kernel. We do not currently handle
1150 if (nlen == 0 || (nlen == 1 && ncp->nc_name[0] == '.')) {
1151 ip = hammer_get_inode(&trans, dip, dip->obj_id,
1152 asof, dip->obj_localization,
1155 error = hammer_get_vnode(ip, &vp);
1156 hammer_rel_inode(ip, 0);
1162 cache_setvp(ap->a_nch, vp);
1169 * Calculate the namekey and setup the key range for the scan. This
1170 * works kinda like a chained hash table where the lower 32 bits
1171 * of the namekey synthesize the chain.
1173 * The key range is inclusive of both key_beg and key_end.
1175 namekey = hammer_directory_namekey(dip, ncp->nc_name, nlen,
1178 error = hammer_init_cursor(&trans, &cursor, &dip->cache[1], dip);
1179 cursor.key_beg.localization = dip->obj_localization +
1180 hammer_dir_localization(dip);
1181 cursor.key_beg.obj_id = dip->obj_id;
1182 cursor.key_beg.key = namekey;
1183 cursor.key_beg.create_tid = 0;
1184 cursor.key_beg.delete_tid = 0;
1185 cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
1186 cursor.key_beg.obj_type = 0;
1188 cursor.key_end = cursor.key_beg;
1189 cursor.key_end.key += max_iterations;
1191 cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;
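/*
 * Editorial sketch: if the component name hashes to namekey K, the
 * inclusive scan window set up above is
 *
 *	[K, K + max_iterations]
 *
 * i.e. the "chain" of directory entries whose hashed keys collide in
 * the low bits, each of which is then compared byte-for-byte against
 * the requested name below.
 */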
1194 * Scan all matching records (the chain), locate the one matching
1195 * the requested path component.
1197 * The hammer_ip_*() functions merge in-memory records with on-disk
1198 * records for the purposes of the search.
1201 localization = HAMMER_DEF_LOCALIZATION;
1204 error = hammer_ip_first(&cursor);
1205 while (error == 0) {
1206 error = hammer_ip_resolve_data(&cursor);
1209 if (nlen == cursor.leaf->data_len - HAMMER_ENTRY_NAME_OFF &&
1210 bcmp(ncp->nc_name, cursor.data->entry.name, nlen) == 0) {
1211 obj_id = cursor.data->entry.obj_id;
1212 localization = cursor.data->entry.localization;
1215 error = hammer_ip_next(&cursor);
1218 hammer_done_cursor(&cursor);
1221 * Lookup the obj_id. This should always succeed. If it does not
1222 * the filesystem may be damaged and we return a dummy inode.
1225 ip = hammer_get_inode(&trans, dip, obj_id,
1228 if (error == ENOENT) {
1229 kprintf("HAMMER: WARNING: Missing "
1230 "inode for dirent \"%s\"\n"
1231 "\tobj_id = %016llx, asof=%016llx, lo=%08x\n",
1233 (long long)obj_id, (long long)asof,
1236 ip = hammer_get_dummy_inode(&trans, dip, obj_id,
1241 error = hammer_get_vnode(ip, &vp);
1242 hammer_rel_inode(ip, 0);
1248 cache_setvp(ap->a_nch, vp);
1251 } else if (error == ENOENT) {
1252 cache_setvp(ap->a_nch, NULL);
1255 hammer_done_transaction(&trans);
1260 * hammer_vop_nlookupdotdot { dvp, vpp, cred }
1262 * Locate the parent directory of a directory vnode.
1264 * dvp is referenced but not locked. *vpp must be returned referenced and
1265 * locked. A parent_obj_id of 0 does not necessarily indicate that we are
1266 * at the root; instead it could indicate that the directory we were in was
1269 * NOTE: as-of sequences are not linked into the directory structure. If
1270 * we are at the root with a different asof than the mount point, reload
1271 * the same directory with the mount point's asof. I'm not sure what this
1272 * will do to NFS. We encode ASOF stamps in NFS file handles so it might not
1273 * get confused, but it hasn't been tested.
1277 hammer_vop_nlookupdotdot(struct vop_nlookupdotdot_args *ap)
1279 struct hammer_transaction trans;
1280 struct hammer_inode *dip;
1281 struct hammer_inode *ip;
1282 int64_t parent_obj_id;
1283 u_int32_t parent_obj_localization;
1287 dip = VTOI(ap->a_dvp);
1288 asof = dip->obj_asof;
1291 * Who is our parent? This could be the root of a pseudo-filesystem
1292 * whose parent is in another localization domain.
1294 parent_obj_id = dip->ino_data.parent_obj_id;
1295 if (dip->obj_id == HAMMER_OBJID_ROOT)
1296 parent_obj_localization = dip->ino_data.ext.obj.parent_obj_localization;
1298 parent_obj_localization = dip->obj_localization;
1300 if (parent_obj_id == 0) {
1301 if (dip->obj_id == HAMMER_OBJID_ROOT &&
1302 asof != dip->hmp->asof) {
1303 parent_obj_id = dip->obj_id;
1304 asof = dip->hmp->asof;
1305 *ap->a_fakename = kmalloc(19, M_TEMP, M_WAITOK);
1306 ksnprintf(*ap->a_fakename, 19, "0x%016llx",
1307 (long long)dip->obj_asof);
1314 hammer_simple_transaction(&trans, dip->hmp);
1315 ++hammer_stats_file_iopsr;
1317 ip = hammer_get_inode(&trans, dip, parent_obj_id,
1318 asof, parent_obj_localization,
1319 dip->flags, &error);
1321 error = hammer_get_vnode(ip, ap->a_vpp);
1322 hammer_rel_inode(ip, 0);
1326 hammer_done_transaction(&trans);
1331 * hammer_vop_nlink { nch, dvp, vp, cred }
1335 hammer_vop_nlink(struct vop_nlink_args *ap)
1337 struct hammer_transaction trans;
1338 struct hammer_inode *dip;
1339 struct hammer_inode *ip;
1340 struct nchandle *nch;
1343 if (ap->a_dvp->v_mount != ap->a_vp->v_mount)
1347 dip = VTOI(ap->a_dvp);
1348 ip = VTOI(ap->a_vp);
1350 if (dip->obj_localization != ip->obj_localization)
1353 if (dip->flags & HAMMER_INODE_RO)
1355 if (ip->flags & HAMMER_INODE_RO)
1357 if ((error = hammer_checkspace(dip->hmp, HAMMER_CHKSPC_CREATE)) != 0)
1361 * Create a transaction to cover the operations we perform.
1363 hammer_start_transaction(&trans, dip->hmp);
1364 ++hammer_stats_file_iopsw;
1367 * Add the filesystem object to the directory. Note that neither
1368 * dip nor ip are referenced or locked, but their vnodes are
1369 * referenced. This function will bump the inode's link count.
1371 error = hammer_ip_add_directory(&trans, dip,
1372 nch->ncp->nc_name, nch->ncp->nc_nlen,
1379 cache_setunresolved(nch);
1380 cache_setvp(nch, ap->a_vp);
1382 hammer_done_transaction(&trans);
1383 hammer_knote(ap->a_vp, NOTE_LINK);
1384 hammer_knote(ap->a_dvp, NOTE_WRITE);
1389 * hammer_vop_nmkdir { nch, dvp, vpp, cred, vap }
1391 * The operating system has already ensured that the directory entry
1392 * does not exist and done all appropriate namespace locking.
1396 hammer_vop_nmkdir(struct vop_nmkdir_args *ap)
1398 struct hammer_transaction trans;
1399 struct hammer_inode *dip;
1400 struct hammer_inode *nip;
1401 struct nchandle *nch;
1405 dip = VTOI(ap->a_dvp);
1407 if (dip->flags & HAMMER_INODE_RO)
1409 if ((error = hammer_checkspace(dip->hmp, HAMMER_CHKSPC_CREATE)) != 0)
1413 * Create a transaction to cover the operations we perform.
1415 hammer_start_transaction(&trans, dip->hmp);
1416 ++hammer_stats_file_iopsw;
1419 * Create a new filesystem object of the requested type. The
1420 * returned inode will be referenced but not locked.
1422 error = hammer_create_inode(&trans, ap->a_vap, ap->a_cred,
1423 dip, nch->ncp->nc_name, nch->ncp->nc_nlen,
1426 hkprintf("hammer_mkdir error %d\n", error);
1427 hammer_done_transaction(&trans);
1432 * Add the new filesystem object to the directory. This will also
1433 * bump the inode's link count.
1435 error = hammer_ip_add_directory(&trans, dip,
1436 nch->ncp->nc_name, nch->ncp->nc_nlen,
1439 hkprintf("hammer_mkdir (add) error %d\n", error);
1445 hammer_rel_inode(nip, 0);
1448 error = hammer_get_vnode(nip, ap->a_vpp);
1449 hammer_rel_inode(nip, 0);
1451 cache_setunresolved(ap->a_nch);
1452 cache_setvp(ap->a_nch, *ap->a_vpp);
1455 hammer_done_transaction(&trans);
1457 hammer_knote(ap->a_dvp, NOTE_WRITE | NOTE_LINK);
1462 * hammer_vop_nmknod { nch, dvp, vpp, cred, vap }
1464 * The operating system has already ensured that the directory entry
1465 * does not exist and done all appropriate namespace locking.
1469 hammer_vop_nmknod(struct vop_nmknod_args *ap)
1471 struct hammer_transaction trans;
1472 struct hammer_inode *dip;
1473 struct hammer_inode *nip;
1474 struct nchandle *nch;
1478 dip = VTOI(ap->a_dvp);
1480 if (dip->flags & HAMMER_INODE_RO)
1482 if ((error = hammer_checkspace(dip->hmp, HAMMER_CHKSPC_CREATE)) != 0)
1486 * Create a transaction to cover the operations we perform.
1488 hammer_start_transaction(&trans, dip->hmp);
1489 ++hammer_stats_file_iopsw;
1492 * Create a new filesystem object of the requested type. The
1493 * returned inode will be referenced but not locked.
1495 * If mknod specifies a directory a pseudo-fs is created.
1497 error = hammer_create_inode(&trans, ap->a_vap, ap->a_cred,
1498 dip, nch->ncp->nc_name, nch->ncp->nc_nlen,
1501 hammer_done_transaction(&trans);
1507 * Add the new filesystem object to the directory. This will also
1508 * bump the inode's link count.
1510 error = hammer_ip_add_directory(&trans, dip,
1511 nch->ncp->nc_name, nch->ncp->nc_nlen,
1518 hammer_rel_inode(nip, 0);
1521 error = hammer_get_vnode(nip, ap->a_vpp);
1522 hammer_rel_inode(nip, 0);
1524 cache_setunresolved(ap->a_nch);
1525 cache_setvp(ap->a_nch, *ap->a_vpp);
1528 hammer_done_transaction(&trans);
1530 hammer_knote(ap->a_dvp, NOTE_WRITE);
1535 * hammer_vop_open { vp, mode, cred, fp }
1539 hammer_vop_open(struct vop_open_args *ap)
1543 ++hammer_stats_file_iopsr;
1544 ip = VTOI(ap->a_vp);
1546 if ((ap->a_mode & FWRITE) && (ip->flags & HAMMER_INODE_RO))
1548 return(vop_stdopen(ap));
1552 * hammer_vop_print { vp }
1556 hammer_vop_print(struct vop_print_args *ap)
1562 * hammer_vop_readdir { vp, uio, cred, *eofflag, *ncookies, off_t **cookies }
1566 hammer_vop_readdir(struct vop_readdir_args *ap)
1568 struct hammer_transaction trans;
1569 struct hammer_cursor cursor;
1570 struct hammer_inode *ip;
1572 hammer_base_elm_t base;
1581 ++hammer_stats_file_iopsr;
1582 ip = VTOI(ap->a_vp);
1584 saveoff = uio->uio_offset;
1586 if (ap->a_ncookies) {
1587 ncookies = uio->uio_resid / 16 + 1;
1588 if (ncookies > 1024)
1590 cookies = kmalloc(ncookies * sizeof(off_t), M_TEMP, M_WAITOK);
1598 hammer_simple_transaction(&trans, ip->hmp);
1601 * Handle artificial entries
1603 * It should be noted that the minimum value for a directory
1604 * hash key on-media is 0x0000000100000000, so we can use anything
1605 * less than that to represent our 'special' key space.
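/*
 * Editorial illustration: the synthesized "." and ".." entries below
 * use the reserved cookie/key values 0 and 1, which can never collide
 * with a real on-media directory entry since real keys start at
 * 0x0000000100000000.
 */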
1609 r = vop_write_dirent(&error, uio, ip->obj_id, DT_DIR, 1, ".");
1613 cookies[cookie_index] = saveoff;
1616 if (cookie_index == ncookies)
1620 if (ip->ino_data.parent_obj_id) {
1621 r = vop_write_dirent(&error, uio,
1622 ip->ino_data.parent_obj_id,
1625 r = vop_write_dirent(&error, uio,
1626 ip->obj_id, DT_DIR, 2, "..");
1631 cookies[cookie_index] = saveoff;
1634 if (cookie_index == ncookies)
1639 * Key range (begin and end inclusive) to scan. Directory keys
1640 * directly translate to a 64 bit 'seek' position.
1642 hammer_init_cursor(&trans, &cursor, &ip->cache[1], ip);
1643 cursor.key_beg.localization = ip->obj_localization +
1644 hammer_dir_localization(ip);
1645 cursor.key_beg.obj_id = ip->obj_id;
1646 cursor.key_beg.create_tid = 0;
1647 cursor.key_beg.delete_tid = 0;
1648 cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
1649 cursor.key_beg.obj_type = 0;
1650 cursor.key_beg.key = saveoff;
1652 cursor.key_end = cursor.key_beg;
1653 cursor.key_end.key = HAMMER_MAX_KEY;
1654 cursor.asof = ip->obj_asof;
1655 cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;
1657 error = hammer_ip_first(&cursor);
1659 while (error == 0) {
1660 error = hammer_ip_resolve_data(&cursor);
1663 base = &cursor.leaf->base;
1664 saveoff = base->key;
1665 KKASSERT(cursor.leaf->data_len > HAMMER_ENTRY_NAME_OFF);
1667 if (base->obj_id != ip->obj_id)
1668 panic("readdir: bad record at %p", cursor.node);
1671 * Convert pseudo-filesystems into softlinks
1673 dtype = hammer_get_dtype(cursor.leaf->base.obj_type);
1674 r = vop_write_dirent(
1675 &error, uio, cursor.data->entry.obj_id,
1677 cursor.leaf->data_len - HAMMER_ENTRY_NAME_OFF ,
1678 (void *)cursor.data->entry.name);
1683 cookies[cookie_index] = base->key;
1685 if (cookie_index == ncookies)
1687 error = hammer_ip_next(&cursor);
1689 hammer_done_cursor(&cursor);
1692 hammer_done_transaction(&trans);
1695 *ap->a_eofflag = (error == ENOENT);
1696 uio->uio_offset = saveoff;
1697 if (error && cookie_index == 0) {
1698 if (error == ENOENT)
1701 kfree(cookies, M_TEMP);
1702 *ap->a_ncookies = 0;
1703 *ap->a_cookies = NULL;
1706 if (error == ENOENT)
1709 *ap->a_ncookies = cookie_index;
1710 *ap->a_cookies = cookies;
1717 * hammer_vop_readlink { vp, uio, cred }
1721 hammer_vop_readlink(struct vop_readlink_args *ap)
1723 struct hammer_transaction trans;
1724 struct hammer_cursor cursor;
1725 struct hammer_inode *ip;
1727 u_int32_t localization;
1728 hammer_pseudofs_inmem_t pfsm;
1731 ip = VTOI(ap->a_vp);
1734 * Shortcut if the symlink data was stuffed into ino_data.
1736 * Also expand special "@@PFS%05d" softlinks (expansion only
1737 * occurs for non-historical (current) accesses made from the
1738 * primary filesystem).
1740 if (ip->ino_data.size <= HAMMER_INODE_BASESYMLEN) {
1744 ptr = ip->ino_data.ext.symlink;
1745 bytes = (int)ip->ino_data.size;
1747 ip->obj_asof == HAMMER_MAX_TID &&
1748 ip->obj_localization == 0 &&
1749 strncmp(ptr, "@@PFS", 5) == 0) {
1750 hammer_simple_transaction(&trans, ip->hmp);
1751 bcopy(ptr + 5, buf, 5);
1753 localization = strtoul(buf, NULL, 10) << 16;
1754 pfsm = hammer_load_pseudofs(&trans, localization,
1757 if (pfsm->pfsd.mirror_flags &
1758 HAMMER_PFSD_SLAVE) {
1759 /* vap->va_size == 26 */
1760 ksnprintf(buf, sizeof(buf),
1762 (long long)pfsm->pfsd.sync_end_tid,
1763 localization >> 16);
1765 /* vap->va_size == 10 */
1766 ksnprintf(buf, sizeof(buf),
1768 localization >> 16);
1770 ksnprintf(buf, sizeof(buf),
1772 (long long)HAMMER_MAX_TID,
1773 localization >> 16);
1777 bytes = strlen(buf);
1780 hammer_rel_pseudofs(trans.hmp, pfsm);
1781 hammer_done_transaction(&trans);
1783 error = uiomove(ptr, bytes, ap->a_uio);
1790 hammer_simple_transaction(&trans, ip->hmp);
1791 ++hammer_stats_file_iopsr;
1792 hammer_init_cursor(&trans, &cursor, &ip->cache[1], ip);
1795 * Key range (begin and end inclusive) to scan. Directory keys
1796 * directly translate to a 64 bit 'seek' position.
1798 cursor.key_beg.localization = ip->obj_localization +
1799 HAMMER_LOCALIZE_MISC;
1800 cursor.key_beg.obj_id = ip->obj_id;
1801 cursor.key_beg.create_tid = 0;
1802 cursor.key_beg.delete_tid = 0;
1803 cursor.key_beg.rec_type = HAMMER_RECTYPE_FIX;
1804 cursor.key_beg.obj_type = 0;
1805 cursor.key_beg.key = HAMMER_FIXKEY_SYMLINK;
1806 cursor.asof = ip->obj_asof;
1807 cursor.flags |= HAMMER_CURSOR_ASOF;
1809 error = hammer_ip_lookup(&cursor);
1811 error = hammer_ip_resolve_data(&cursor);
1813 KKASSERT(cursor.leaf->data_len >=
1814 HAMMER_SYMLINK_NAME_OFF);
1815 error = uiomove(cursor.data->symlink.name,
1816 cursor.leaf->data_len -
1817 HAMMER_SYMLINK_NAME_OFF,
1821 hammer_done_cursor(&cursor);
1822 hammer_done_transaction(&trans);
1827 * hammer_vop_nremove { nch, dvp, cred }
1831 hammer_vop_nremove(struct vop_nremove_args *ap)
1833 struct hammer_transaction trans;
1834 struct hammer_inode *dip;
1837 dip = VTOI(ap->a_dvp);
1839 if (hammer_nohistory(dip) == 0 &&
1840 (error = hammer_checkspace(dip->hmp, HAMMER_CHKSPC_REMOVE)) != 0) {
1844 hammer_start_transaction(&trans, dip->hmp);
1845 ++hammer_stats_file_iopsw;
1846 error = hammer_dounlink(&trans, ap->a_nch, ap->a_dvp, ap->a_cred, 0, 0);
1847 hammer_done_transaction(&trans);
1849 hammer_knote(ap->a_dvp, NOTE_WRITE);
1854 * hammer_vop_nrename { fnch, tnch, fdvp, tdvp, cred }
1858 hammer_vop_nrename(struct vop_nrename_args *ap)
1860 struct hammer_transaction trans;
1861 struct namecache *fncp;
1862 struct namecache *tncp;
1863 struct hammer_inode *fdip;
1864 struct hammer_inode *tdip;
1865 struct hammer_inode *ip;
1866 struct hammer_cursor cursor;
1868 u_int32_t max_iterations;
1871 if (ap->a_fdvp->v_mount != ap->a_tdvp->v_mount)
1873 if (ap->a_fdvp->v_mount != ap->a_fnch->ncp->nc_vp->v_mount)
1876 fdip = VTOI(ap->a_fdvp);
1877 tdip = VTOI(ap->a_tdvp);
1878 fncp = ap->a_fnch->ncp;
1879 tncp = ap->a_tnch->ncp;
1880 ip = VTOI(fncp->nc_vp);
1881 KKASSERT(ip != NULL);
1883 if (fdip->obj_localization != tdip->obj_localization)
1885 if (fdip->obj_localization != ip->obj_localization)
1888 if (fdip->flags & HAMMER_INODE_RO)
1890 if (tdip->flags & HAMMER_INODE_RO)
1892 if (ip->flags & HAMMER_INODE_RO)
1894 if ((error = hammer_checkspace(fdip->hmp, HAMMER_CHKSPC_CREATE)) != 0)
1897 hammer_start_transaction(&trans, fdip->hmp);
1898 ++hammer_stats_file_iopsw;
1901 * Remove tncp from the target directory and then link ip as
1902 * tncp. XXX pass trans to dounlink
1904 * Force the inode sync-time to match the transaction so it is
1905 * in-sync with the creation of the target directory entry.
1907 error = hammer_dounlink(&trans, ap->a_tnch, ap->a_tdvp,
1909 if (error == 0 || error == ENOENT) {
1910 error = hammer_ip_add_directory(&trans, tdip,
1911 tncp->nc_name, tncp->nc_nlen,
1914 ip->ino_data.parent_obj_id = tdip->obj_id;
1915 ip->ino_data.ctime = trans.time;
1916 hammer_modify_inode(&trans, ip, HAMMER_INODE_DDIRTY);
1920 goto failed; /* XXX */
1923 * Locate the record in the originating directory and remove it.
1925 * Calculate the namekey and setup the key range for the scan. This
1926 * works kinda like a chained hash table where the lower 32 bits
1927 * of the namekey synthesize the chain.
1929 * The key range is inclusive of both key_beg and key_end.
1931 namekey = hammer_directory_namekey(fdip, fncp->nc_name, fncp->nc_nlen,
1934 hammer_init_cursor(&trans, &cursor, &fdip->cache[1], fdip);
1935 cursor.key_beg.localization = fdip->obj_localization +
1936 hammer_dir_localization(fdip);
1937 cursor.key_beg.obj_id = fdip->obj_id;
1938 cursor.key_beg.key = namekey;
1939 cursor.key_beg.create_tid = 0;
1940 cursor.key_beg.delete_tid = 0;
1941 cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
1942 cursor.key_beg.obj_type = 0;
1944 cursor.key_end = cursor.key_beg;
1945 cursor.key_end.key += max_iterations;
1946 cursor.asof = fdip->obj_asof;
1947 cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;
1950 * Scan all matching records (the chain), locate the one matching
1951 * the requested path component.
1953 * The hammer_ip_*() functions merge in-memory records with on-disk
1954 * records for the purposes of the search.
1956 error = hammer_ip_first(&cursor);
1957 while (error == 0) {
1958 if (hammer_ip_resolve_data(&cursor) != 0)
1960 nlen = cursor.leaf->data_len - HAMMER_ENTRY_NAME_OFF;
1962 if (fncp->nc_nlen == nlen &&
1963 bcmp(fncp->nc_name, cursor.data->entry.name, nlen) == 0) {
1966 error = hammer_ip_next(&cursor);
1970 * If all is ok we have to get the inode so we can adjust nlinks.
1972 * WARNING: hammer_ip_del_directory() may have to terminate the
1973 * cursor to avoid a recursion. It's ok to call hammer_done_cursor()
1977 error = hammer_ip_del_directory(&trans, &cursor, fdip, ip);
1980 * XXX A deadlock here will break rename's atomicity for the purposes
1981 * of crash recovery.
1983 if (error == EDEADLK) {
1984 hammer_done_cursor(&cursor);
1989 * Cleanup and tell the kernel that the rename succeeded.
1991 hammer_done_cursor(&cursor);
1993 cache_rename(ap->a_fnch, ap->a_tnch);
1994 hammer_knote(ap->a_fdvp, NOTE_WRITE);
1995 hammer_knote(ap->a_tdvp, NOTE_WRITE);
1997 hammer_knote(ip->vp, NOTE_RENAME);
2001 hammer_done_transaction(&trans);
2006 * hammer_vop_nrmdir { nch, dvp, cred }
2010 hammer_vop_nrmdir(struct vop_nrmdir_args *ap)
2012 struct hammer_transaction trans;
2013 struct hammer_inode *dip;
2016 dip = VTOI(ap->a_dvp);
2018 if (hammer_nohistory(dip) == 0 &&
2019 (error = hammer_checkspace(dip->hmp, HAMMER_CHKSPC_REMOVE)) != 0) {
2023 hammer_start_transaction(&trans, dip->hmp);
2024 ++hammer_stats_file_iopsw;
2025 error = hammer_dounlink(&trans, ap->a_nch, ap->a_dvp, ap->a_cred, 0, 1);
2026 hammer_done_transaction(&trans);
2028 hammer_knote(ap->a_dvp, NOTE_WRITE | NOTE_LINK);
2033 * hammer_vop_markatime { vp, cred }
2037 hammer_vop_markatime(struct vop_markatime_args *ap)
2039 struct hammer_transaction trans;
2040 struct hammer_inode *ip;
2042 ip = VTOI(ap->a_vp);
2043 if (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY)
2045 if (ip->flags & HAMMER_INODE_RO)
2047 if (ip->hmp->mp->mnt_flag & MNT_NOATIME)
2049 hammer_start_transaction(&trans, ip->hmp);
2050 ++hammer_stats_file_iopsw;
2052 ip->ino_data.atime = trans.time;
2053 hammer_modify_inode(&trans, ip, HAMMER_INODE_ATIME);
2054 hammer_done_transaction(&trans);
2055 hammer_knote(ap->a_vp, NOTE_ATTRIB);
2060 * hammer_vop_setattr { vp, vap, cred }
2064 hammer_vop_setattr(struct vop_setattr_args *ap)
2066 struct hammer_transaction trans;
2068 struct hammer_inode *ip;
2075 int64_t aligned_size;
2080 ip = ap->a_vp->v_data;
2084 if (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY)
2086 if (ip->flags & HAMMER_INODE_RO)
2088 if (hammer_nohistory(ip) == 0 &&
2089 (error = hammer_checkspace(ip->hmp, HAMMER_CHKSPC_REMOVE)) != 0) {
2093 hammer_start_transaction(&trans, ip->hmp);
2094 ++hammer_stats_file_iopsw;
2097 if (vap->va_flags != VNOVAL) {
2098 flags = ip->ino_data.uflags;
2099 error = vop_helper_setattr_flags(&flags, vap->va_flags,
2100 hammer_to_unix_xid(&ip->ino_data.uid),
2103 if (ip->ino_data.uflags != flags) {
2104 ip->ino_data.uflags = flags;
2105 ip->ino_data.ctime = trans.time;
2106 modflags |= HAMMER_INODE_DDIRTY;
2107 kflags |= NOTE_ATTRIB;
2109 if (ip->ino_data.uflags & (IMMUTABLE | APPEND)) {
2116 if (ip->ino_data.uflags & (IMMUTABLE | APPEND)) {
2120 if (vap->va_uid != (uid_t)VNOVAL || vap->va_gid != (gid_t)VNOVAL) {
2121 mode_t cur_mode = ip->ino_data.mode;
2122 uid_t cur_uid = hammer_to_unix_xid(&ip->ino_data.uid);
2123 gid_t cur_gid = hammer_to_unix_xid(&ip->ino_data.gid);
2127 error = vop_helper_chown(ap->a_vp, vap->va_uid, vap->va_gid,
2129 &cur_uid, &cur_gid, &cur_mode);
2131 hammer_guid_to_uuid(&uuid_uid, cur_uid);
2132 hammer_guid_to_uuid(&uuid_gid, cur_gid);
2133 if (bcmp(&uuid_uid, &ip->ino_data.uid,
2134 sizeof(uuid_uid)) ||
2135 bcmp(&uuid_gid, &ip->ino_data.gid,
2136 sizeof(uuid_gid)) ||
2137 ip->ino_data.mode != cur_mode
2139 ip->ino_data.uid = uuid_uid;
2140 ip->ino_data.gid = uuid_gid;
2141 ip->ino_data.mode = cur_mode;
2142 ip->ino_data.ctime = trans.time;
2143 modflags |= HAMMER_INODE_DDIRTY;
2145 kflags |= NOTE_ATTRIB;
2148 while (vap->va_size != VNOVAL && ip->ino_data.size != vap->va_size) {
2149 switch(ap->a_vp->v_type) {
2151 if (vap->va_size == ip->ino_data.size)
2155 * Log the operation if in fast-fsync mode or if
2156 * there are unterminated redo write records present.
2158 * The second check is needed so the recovery code
2159 * properly truncates write redos even if nominal
2160 * REDO operations are turned off due to excessive
2161 * writes, because the related records might be
2162 * destroyed and never lay down a TERM_WRITE.
2164 if ((ip->flags & HAMMER_INODE_REDO) ||
2165 (ip->flags & HAMMER_INODE_RDIRTY)) {
2166 error = hammer_generate_redo(&trans, ip,
2171 blksize = hammer_blocksize(vap->va_size);
2174 * XXX break atomicity, we can deadlock the backend
2175 * if we do not release the lock. Probably not a
2178 if (vap->va_size < ip->ino_data.size) {
2179 nvtruncbuf(ap->a_vp, vap->va_size,
2181 hammer_blockoff(vap->va_size));
2183 kflags |= NOTE_WRITE;
2185 nvextendbuf(ap->a_vp,
2188 hammer_blocksize(ip->ino_data.size),
2189 hammer_blocksize(vap->va_size),
2190 hammer_blockoff(ip->ino_data.size),
2191 hammer_blockoff(vap->va_size),
2194 kflags |= NOTE_WRITE | NOTE_EXTEND;
2196 ip->ino_data.size = vap->va_size;
2197 ip->ino_data.mtime = trans.time;
2198 /* XXX safe to use SDIRTY instead of DDIRTY here? */
2199 modflags |= HAMMER_INODE_MTIME | HAMMER_INODE_DDIRTY;
2202 * On-media truncation is cached in the inode until
2203 * the inode is synchronized. We must immediately
2204 * handle any frontend records.
2207 hammer_ip_frontend_trunc(ip, vap->va_size);
2208 #ifdef DEBUG_TRUNCATE
2209 if (HammerTruncIp == NULL)
2212 if ((ip->flags & HAMMER_INODE_TRUNCATED) == 0) {
2213 ip->flags |= HAMMER_INODE_TRUNCATED;
2214 ip->trunc_off = vap->va_size;
2215 #ifdef DEBUG_TRUNCATE
2216 if (ip == HammerTruncIp)
2217 kprintf("truncate1 %016llx\n",
2218 (long long)ip->trunc_off);
2220 } else if (ip->trunc_off > vap->va_size) {
2221 ip->trunc_off = vap->va_size;
2222 #ifdef DEBUG_TRUNCATE
2223 if (ip == HammerTruncIp)
2224 kprintf("truncate2 %016llx\n",
2225 (long long)ip->trunc_off);
2228 #ifdef DEBUG_TRUNCATE
2229 if (ip == HammerTruncIp)
2230 kprintf("truncate3 %016llx (ignored)\n",
2231 (long long)vap->va_size);
2238 * When truncating, nvtruncbuf() may have cleaned out
2239 * a portion of the last block on-disk in the buffer
2240 * cache. We must clean out any frontend records
2241 * for blocks beyond the new last block.
2243 aligned_size = (vap->va_size + (blksize - 1)) &
2244 ~(int64_t)(blksize - 1);
2245 if (truncating && vap->va_size < aligned_size) {
2246 aligned_size -= blksize;
2247 hammer_ip_frontend_trunc(ip, aligned_size);
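/*
 * Worked example (editorial sketch): truncating to 70000 bytes with a
 * 16KB block size gives
 *
 *	aligned_size = (70000 + 16383) & ~16383 = 81920
 *
 * and since 70000 < 81920, aligned_size is pulled back one block to
 * 65536 before hammer_ip_frontend_trunc() is called, so the partially
 * valid last block is not discarded.
 */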
2252 if ((ip->flags & HAMMER_INODE_TRUNCATED) == 0) {
2253 ip->flags |= HAMMER_INODE_TRUNCATED;
2254 ip->trunc_off = vap->va_size;
2255 } else if (ip->trunc_off > vap->va_size) {
2256 ip->trunc_off = vap->va_size;
2258 hammer_ip_frontend_trunc(ip, vap->va_size);
2259 ip->ino_data.size = vap->va_size;
2260 ip->ino_data.mtime = trans.time;
2261 modflags |= HAMMER_INODE_MTIME | HAMMER_INODE_DDIRTY;
2262 kflags |= NOTE_ATTRIB;
2270 if (vap->va_atime.tv_sec != VNOVAL) {
2271 ip->ino_data.atime = hammer_timespec_to_time(&vap->va_atime);
2272 modflags |= HAMMER_INODE_ATIME;
2273 kflags |= NOTE_ATTRIB;
2275 if (vap->va_mtime.tv_sec != VNOVAL) {
2276 ip->ino_data.mtime = hammer_timespec_to_time(&vap->va_mtime);
2277 modflags |= HAMMER_INODE_MTIME;
2278 kflags |= NOTE_ATTRIB;
2280 if (vap->va_mode != (mode_t)VNOVAL) {
2281 mode_t cur_mode = ip->ino_data.mode;
2282 uid_t cur_uid = hammer_to_unix_xid(&ip->ino_data.uid);
2283 gid_t cur_gid = hammer_to_unix_xid(&ip->ino_data.gid);
2285 error = vop_helper_chmod(ap->a_vp, vap->va_mode, ap->a_cred,
2286 cur_uid, cur_gid, &cur_mode);
2287 if (error == 0 && ip->ino_data.mode != cur_mode) {
2288 ip->ino_data.mode = cur_mode;
2289 ip->ino_data.ctime = trans.time;
2290 modflags |= HAMMER_INODE_DDIRTY;
2291 kflags |= NOTE_ATTRIB;
2296 hammer_modify_inode(&trans, ip, modflags);
2297 hammer_done_transaction(&trans);
2298 hammer_knote(ap->a_vp, kflags);
2303 * hammer_vop_nsymlink { nch, dvp, vpp, cred, vap, target }
2307 hammer_vop_nsymlink(struct vop_nsymlink_args *ap)
2309 struct hammer_transaction trans;
2310 struct hammer_inode *dip;
2311 struct hammer_inode *nip;
2312 struct nchandle *nch;
2313 hammer_record_t record;
2317 ap->a_vap->va_type = VLNK;
2320 dip = VTOI(ap->a_dvp);
2322 if (dip->flags & HAMMER_INODE_RO)
2324 if ((error = hammer_checkspace(dip->hmp, HAMMER_CHKSPC_CREATE)) != 0)
2328 * Create a transaction to cover the operations we perform.
2330 hammer_start_transaction(&trans, dip->hmp);
2331 ++hammer_stats_file_iopsw;
2334 * Create a new filesystem object of the requested type. The
2335 * returned inode will be referenced but not locked.
2338 error = hammer_create_inode(&trans, ap->a_vap, ap->a_cred,
2339 dip, nch->ncp->nc_name, nch->ncp->nc_nlen,
2342 hammer_done_transaction(&trans);
2348 * Add a record representing the symlink. symlink stores the link
2349 * as pure data, not a string, and is not \0 terminated.
2352 bytes = strlen(ap->a_target);
2354 if (bytes <= HAMMER_INODE_BASESYMLEN) {
2355 bcopy(ap->a_target, nip->ino_data.ext.symlink, bytes);
2357 record = hammer_alloc_mem_record(nip, bytes);
2358 record->type = HAMMER_MEM_RECORD_GENERAL;
2360 record->leaf.base.localization = nip->obj_localization +
2361 HAMMER_LOCALIZE_MISC;
2362 record->leaf.base.key = HAMMER_FIXKEY_SYMLINK;
2363 record->leaf.base.rec_type = HAMMER_RECTYPE_FIX;
2364 record->leaf.data_len = bytes;
2365 KKASSERT(HAMMER_SYMLINK_NAME_OFF == 0);
2366 bcopy(ap->a_target, record->data->symlink.name, bytes);
2367 error = hammer_ip_add_record(&trans, record);
2371 * Set the file size to the length of the link.
2374 nip->ino_data.size = bytes;
2375 hammer_modify_inode(&trans, nip, HAMMER_INODE_DDIRTY);
2379 error = hammer_ip_add_directory(&trans, dip, nch->ncp->nc_name,
2380 nch->ncp->nc_nlen, nip);
2386 hammer_rel_inode(nip, 0);
2389 error = hammer_get_vnode(nip, ap->a_vpp);
2390 hammer_rel_inode(nip, 0);
2392 cache_setunresolved(ap->a_nch);
2393 cache_setvp(ap->a_nch, *ap->a_vpp);
2394 hammer_knote(ap->a_dvp, NOTE_WRITE);
2397 hammer_done_transaction(&trans);
2402 * hammer_vop_nwhiteout { nch, dvp, cred, flags }
2406 hammer_vop_nwhiteout(struct vop_nwhiteout_args *ap)
2408 struct hammer_transaction trans;
2409 struct hammer_inode *dip;
2412 dip = VTOI(ap->a_dvp);
2414 if (hammer_nohistory(dip) == 0 &&
2415 (error = hammer_checkspace(dip->hmp, HAMMER_CHKSPC_CREATE)) != 0) {
2419 hammer_start_transaction(&trans, dip->hmp);
2420 ++hammer_stats_file_iopsw;
2421 error = hammer_dounlink(&trans, ap->a_nch, ap->a_dvp,
2422 ap->a_cred, ap->a_flags, -1);
2423 hammer_done_transaction(&trans);
2429 * hammer_vop_ioctl { vp, command, data, fflag, cred }
2433 hammer_vop_ioctl(struct vop_ioctl_args *ap)
2435 struct hammer_inode *ip = ap->a_vp->v_data;
2437 ++hammer_stats_file_iopsr;
2438 return(hammer_ioctl(ip, ap->a_command, ap->a_data,
2439 ap->a_fflag, ap->a_cred));
2444 hammer_vop_mountctl(struct vop_mountctl_args *ap)
2446 static const struct mountctl_opt extraopt[] = {
2447 { HMNT_NOHISTORY, "nohistory" },
2448 { HMNT_MASTERID, "master" },
2452 struct hammer_mount *hmp;
2459 mp = ap->a_head.a_ops->head.vv_mount;
2460 KKASSERT(mp->mnt_data != NULL);
2461 hmp = (struct hammer_mount *)mp->mnt_data;
2465 case MOUNTCTL_SET_EXPORT:
2466 if (ap->a_ctllen != sizeof(struct export_args))
2469 error = hammer_vfs_export(mp, ap->a_op,
2470 (const struct export_args *)ap->a_ctl);
2472 case MOUNTCTL_MOUNTFLAGS:
2475 * Call standard mountctl VOP function
2476 * so we get user mount flags.
2478 error = vop_stdmountctl(ap);
2482 usedbytes = *ap->a_res;
2484 if (usedbytes > 0 && usedbytes < ap->a_buflen) {
2485 usedbytes += vfs_flagstostr(hmp->hflags, extraopt, ap->a_buf,
2486 ap->a_buflen - usedbytes,
2490 *ap->a_res += usedbytes;
2494 error = vop_stdmountctl(ap);
2501 * hammer_vop_strategy { vp, bio }
2503 * Strategy call, used for regular file read & write only. Note that the
2504 * bp may represent a cluster.
2506 * To simplify operation and allow better optimizations in the future,
2507 * this code does not make any assumptions with regard to buffer alignment
2512 hammer_vop_strategy(struct vop_strategy_args *ap)
2517 bp = ap->a_bio->bio_buf;
2521 error = hammer_vop_strategy_read(ap);
2524 error = hammer_vop_strategy_write(ap);
2527 bp->b_error = error = EINVAL;
2528 bp->b_flags |= B_ERROR;
2536 * Read from a regular file. Iterate the related records and fill in the
2537 * BIO/BUF. Gaps are zero-filled.
2539 * The support code in hammer_object.c should be used to deal with mixed
2540 * in-memory and on-disk records.
2542 * NOTE: Can be called from the cluster code with an oversized buf.
2548 hammer_vop_strategy_read(struct vop_strategy_args *ap)
2550 struct hammer_transaction trans;
2551 struct hammer_inode *ip;
2552 struct hammer_inode *dip;
2553 struct hammer_cursor cursor;
2554 hammer_base_elm_t base;
2555 hammer_off_t disk_offset;
2569 ip = ap->a_vp->v_data;
2572 * The zone-2 disk offset may have been set by the cluster code via
2573 * a BMAP operation, or else should be NOOFFSET.
2575 * Checking the high bits for a match against zone-2 should suffice.
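 *
 * In other words, if a prior BMAP already translated this file offset
 * into the large-data zone the bio can be handed straight to
 * hammer_io_direct_read() without consulting the B-Tree.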
2577 nbio = push_bio(bio);
2578 if ((nbio->bio_offset & HAMMER_OFF_ZONE_MASK) ==
2579 HAMMER_ZONE_LARGE_DATA) {
2580 error = hammer_io_direct_read(ip->hmp, nbio, NULL);
2585 * Well, that sucked. Do it the hard way. If all the stars are
2586 * aligned we may still be able to issue a direct-read.
2588 hammer_simple_transaction(&trans, ip->hmp);
2589 hammer_init_cursor(&trans, &cursor, &ip->cache[1], ip);
2592 * Key range (begin and end inclusive) to scan.  Note that the keys
2593 * stored in the actual records represent BASE+LEN, not BASE. The
2594 * first record containing bio_offset will have a key > bio_offset.
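 *
 * Worked example: a record covering file range [0x4000, 0x8000) is
 * keyed at 0x8000 (BASE+LEN), so a bio at offset 0x4000 begins its
 * scan at key 0x4001 (bio_offset + 1) and picks that record up, while
 * a record ending exactly at 0x4000 is excluded.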
2596 cursor.key_beg.localization = ip->obj_localization +
2597 HAMMER_LOCALIZE_MISC;
2598 cursor.key_beg.obj_id = ip->obj_id;
2599 cursor.key_beg.create_tid = 0;
2600 cursor.key_beg.delete_tid = 0;
2601 cursor.key_beg.obj_type = 0;
2602 cursor.key_beg.key = bio->bio_offset + 1;
2603 cursor.asof = ip->obj_asof;
2604 cursor.flags |= HAMMER_CURSOR_ASOF;
2606 cursor.key_end = cursor.key_beg;
2607 KKASSERT(ip->ino_data.obj_type == HAMMER_OBJTYPE_REGFILE);
2609 if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DBFILE) {
2610 cursor.key_beg.rec_type = HAMMER_RECTYPE_DB;
2611 cursor.key_end.rec_type = HAMMER_RECTYPE_DB;
2612 cursor.key_end.key = 0x7FFFFFFFFFFFFFFFLL;
2616 ran_end = bio->bio_offset + bp->b_bufsize;
2617 cursor.key_beg.rec_type = HAMMER_RECTYPE_DATA;
2618 cursor.key_end.rec_type = HAMMER_RECTYPE_DATA;
2619 tmp64 = ran_end + MAXPHYS + 1; /* work-around GCC-4 bug */
2620 if (tmp64 < ran_end)
2621 cursor.key_end.key = 0x7FFFFFFFFFFFFFFFLL;
2623 cursor.key_end.key = ran_end + MAXPHYS + 1;
2625 cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE;
2627 error = hammer_ip_first(&cursor);
2630 while (error == 0) {
2632 * Get the base file offset of the record. The key for
2633 * data records is (base + bytes) rather than (base).
2635 base = &cursor.leaf->base;
2636 rec_offset = base->key - cursor.leaf->data_len;
2639 * Calculate the gap, if any, and zero-fill it.
2641 * n is the offset of the start of the record versus our
2642 * current seek offset in the bio.
2644 n = (int)(rec_offset - (bio->bio_offset + boff));
2646 if (n > bp->b_bufsize - boff)
2647 n = bp->b_bufsize - boff;
2648 bzero((char *)bp->b_data + boff, n);
2654 * Calculate the data offset in the record and the number
2655 * of bytes we can copy.
2657 * There are two degenerate cases. First, boff may already
2658 * be at bp->b_bufsize.  Second, the data offset within
2659 * the record may exceed the record's size.
2663 n = cursor.leaf->data_len - roff;
2665 kprintf("strategy_read: bad n=%d roff=%d\n", n, roff);
2667 } else if (n > bp->b_bufsize - boff) {
2668 n = bp->b_bufsize - boff;
2672 * Deal with cached truncations. This cool bit of code
2673 * allows truncate()/ftruncate() to avoid having to sync
2676 * If the frontend is truncated then all backend records are
2677 * subject to the frontend's truncation.
2679 * If the backend is truncated then backend records on-disk
2680 * (but not in-memory) are subject to the backend's
2681 * truncation. In-memory records owned by the backend
2682 * represent data written after the truncation point on the
2683 * backend and must not be truncated.
2685 * Truncate operations deal with frontend buffer cache
2686 * buffers and frontend-owned in-memory records synchronously.
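 *
 * The net effect below is to clip the copy: if the applicable
 * truncation offset is at or before rec_offset the record contributes
 * no data, and if it falls within [rec_offset, rec_offset + n) then n
 * is reduced to (trunc_off - rec_offset).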
2688 if (ip->flags & HAMMER_INODE_TRUNCATED) {
2689 if (hammer_cursor_ondisk(&cursor)/* ||
2690 cursor.iprec->flush_state == HAMMER_FST_FLUSH*/) {
2691 if (ip->trunc_off <= rec_offset)
2693 else if (ip->trunc_off < rec_offset + n)
2694 n = (int)(ip->trunc_off - rec_offset);
2697 if (ip->sync_flags & HAMMER_INODE_TRUNCATED) {
2698 if (hammer_cursor_ondisk(&cursor)) {
2699 if (ip->sync_trunc_off <= rec_offset)
2701 else if (ip->sync_trunc_off < rec_offset + n)
2702 n = (int)(ip->sync_trunc_off - rec_offset);
2707 * Try to issue a direct read into our bio if possible,
2708 * otherwise resolve the element data into a hammer_buffer
2711 * The buffer on-disk should be zeroed past any real
2712 * truncation point, but may not be for any synthesized
2713 * truncation point from above.
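 *
 * The direct-read path below is only taken when the request covers
 * the entire buffer (boff == 0 and n == b_bufsize), the record is
 * on-disk, and disk_offset is HAMMER_BUFMASK aligned; anything else
 * is resolved into a hammer_buffer and copied out.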
2715 disk_offset = cursor.leaf->data_offset + roff;
2716 if (boff == 0 && n == bp->b_bufsize &&
2717 hammer_cursor_ondisk(&cursor) &&
2718 (disk_offset & HAMMER_BUFMASK) == 0) {
2719 KKASSERT((disk_offset & HAMMER_OFF_ZONE_MASK) ==
2720 HAMMER_ZONE_LARGE_DATA);
2721 nbio->bio_offset = disk_offset;
2722 error = hammer_io_direct_read(trans.hmp, nbio,
2726 error = hammer_ip_resolve_data(&cursor);
2728 bcopy((char *)cursor.data + roff,
2729 (char *)bp->b_data + boff, n);
2736 * Iterate until we have filled the request.
2739 if (boff == bp->b_bufsize)
2741 error = hammer_ip_next(&cursor);
2745 * There may have been a gap after the last record
2747 if (error == ENOENT)
2749 if (error == 0 && boff != bp->b_bufsize) {
2750 KKASSERT(boff < bp->b_bufsize);
2751 bzero((char *)bp->b_data + boff, bp->b_bufsize - boff);
2752 /* boff = bp->b_bufsize; */
2755 bp->b_error = error;
2757 bp->b_flags |= B_ERROR;
2762 * Cache the b-tree node for the last data read in cache[1].
2764 * If we hit the file EOF then also cache the node in the
2765 * governing directory's cache[3]; it will be used to initialize
2766 * the inode's cache[1] for any inodes looked up via the directory.
2768 * This doesn't reduce disk accesses since the B-Tree chain is
2769 * likely cached, but it does reduce cpu overhead when looking
2770 * up file offsets for cpdup/tar/cpio style iterations.
2773 hammer_cache_node(&ip->cache[1], cursor.node);
2774 if (ran_end >= ip->ino_data.size) {
2775 dip = hammer_find_inode(&trans, ip->ino_data.parent_obj_id,
2776 ip->obj_asof, ip->obj_localization);
2778 hammer_cache_node(&dip->cache[3], cursor.node);
2779 hammer_rel_inode(dip, 0);
2782 hammer_done_cursor(&cursor);
2783 hammer_done_transaction(&trans);
2788 * BMAP operation - used to support cluster_read() only.
2790 * (struct vnode *vp, off_t loffset, off_t *doffsetp, int *runp, int *runb)
2792 * This routine may return EOPNOTSUPP if the operation is not supported for
2793 * the specified offset. The contents of the pointer arguments do not
2794 * need to be initialized in that case.
2796 * If a disk address is available and properly aligned return 0 with
2797 * *doffsetp set to the zone-2 address, and *runp / *runb set appropriately
2798 * to the run-length relative to that offset.  Callers may assume that
2799 * *doffsetp is valid if 0 is returned; if *runp would not be
2800 * sufficiently large to be useful, EOPNOTSUPP is returned instead.
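 *
 * As a sketch of the arithmetic used below, an offset L falling inside
 * a contiguous extent [base_offset, last_offset) that is backed by
 * [base_disk_offset, last_disk_offset) yields, before the blocksize
 * demarcation adjustments:
 *
 *	*doffsetp = base_disk_offset + (L - base_offset)
 *	*runb     = L - base_offset
 *	*runp     = last_offset - L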
2804 hammer_vop_bmap(struct vop_bmap_args *ap)
2806 struct hammer_transaction trans;
2807 struct hammer_inode *ip;
2808 struct hammer_cursor cursor;
2809 hammer_base_elm_t base;
2813 int64_t base_offset;
2814 int64_t base_disk_offset;
2815 int64_t last_offset;
2816 hammer_off_t last_disk_offset;
2817 hammer_off_t disk_offset;
2822 ++hammer_stats_file_iopsr;
2823 ip = ap->a_vp->v_data;
2826 * We can only BMAP regular files. We can't BMAP database files,
2829 if (ip->ino_data.obj_type != HAMMER_OBJTYPE_REGFILE)
2833 * bmap is typically called with runp/runb both NULL when used
2834 * for writing. We do not support BMAP for writing atm.
2836 if (ap->a_cmd != BUF_CMD_READ)
2840 * Scan the B-Tree to acquire blockmap addresses, then translate
2843 hammer_simple_transaction(&trans, ip->hmp);
2845 kprintf("bmap_beg %016llx ip->cache %p\n",
2846 (long long)ap->a_loffset, ip->cache[1]);
2848 hammer_init_cursor(&trans, &cursor, &ip->cache[1], ip);
2851 * Key range (begin and end inclusive) to scan.  Note that the keys
2852 * stored in the actual records represent BASE+LEN, not BASE. The
2853 * first record containing bio_offset will have a key > bio_offset.
2855 cursor.key_beg.localization = ip->obj_localization +
2856 HAMMER_LOCALIZE_MISC;
2857 cursor.key_beg.obj_id = ip->obj_id;
2858 cursor.key_beg.create_tid = 0;
2859 cursor.key_beg.delete_tid = 0;
2860 cursor.key_beg.obj_type = 0;
2862 cursor.key_beg.key = ap->a_loffset - MAXPHYS + 1;
2864 cursor.key_beg.key = ap->a_loffset + 1;
2865 if (cursor.key_beg.key < 0)
2866 cursor.key_beg.key = 0;
2867 cursor.asof = ip->obj_asof;
2868 cursor.flags |= HAMMER_CURSOR_ASOF;
2870 cursor.key_end = cursor.key_beg;
2871 KKASSERT(ip->ino_data.obj_type == HAMMER_OBJTYPE_REGFILE);
2873 ran_end = ap->a_loffset + MAXPHYS;
2874 cursor.key_beg.rec_type = HAMMER_RECTYPE_DATA;
2875 cursor.key_end.rec_type = HAMMER_RECTYPE_DATA;
2876 tmp64 = ran_end + MAXPHYS + 1; /* work-around GCC-4 bug */
2877 if (tmp64 < ran_end)
2878 cursor.key_end.key = 0x7FFFFFFFFFFFFFFFLL;
2880 cursor.key_end.key = ran_end + MAXPHYS + 1;
2882 cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE;
2884 error = hammer_ip_first(&cursor);
2885 base_offset = last_offset = 0;
2886 base_disk_offset = last_disk_offset = 0;
2888 while (error == 0) {
2890 * Get the base file offset of the record. The key for
2891 * data records is (base + bytes) rather than (base).
2893 * NOTE: rec_offset + rec_len may exceed the end-of-file.
2894 * The extra bytes should be zero on-disk and the BMAP op
2895 * should still be ok.
2897 base = &cursor.leaf->base;
2898 rec_offset = base->key - cursor.leaf->data_len;
2899 rec_len = cursor.leaf->data_len;
2902 * Incorporate any cached truncation.
2904 * NOTE: Modifications to rec_len based on synthesized
2905 * truncation points remove the guarantee that any extended
2906 * data on disk is zero (since the truncations may not have
2907 * taken place on-media yet).
2909 if (ip->flags & HAMMER_INODE_TRUNCATED) {
2910 if (hammer_cursor_ondisk(&cursor) ||
2911 cursor.iprec->flush_state == HAMMER_FST_FLUSH) {
2912 if (ip->trunc_off <= rec_offset)
2914 else if (ip->trunc_off < rec_offset + rec_len)
2915 rec_len = (int)(ip->trunc_off - rec_offset);
2918 if (ip->sync_flags & HAMMER_INODE_TRUNCATED) {
2919 if (hammer_cursor_ondisk(&cursor)) {
2920 if (ip->sync_trunc_off <= rec_offset)
2922 else if (ip->sync_trunc_off < rec_offset + rec_len)
2923 rec_len = (int)(ip->sync_trunc_off - rec_offset);
2928 * Accumulate information. If we have hit a discontiguous
2929 * block reset base_offset unless we are already beyond the
2930 * requested offset. If we are, that's it, we stop.
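 *
 * A record extends the current run only if both its file offset and
 * its disk offset continue exactly where the previous record ended;
 * a discontinuity in either space restarts the run at this record.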
2934 if (hammer_cursor_ondisk(&cursor)) {
2935 disk_offset = cursor.leaf->data_offset;
2936 if (rec_offset != last_offset ||
2937 disk_offset != last_disk_offset) {
2938 if (rec_offset > ap->a_loffset)
2940 base_offset = rec_offset;
2941 base_disk_offset = disk_offset;
2943 last_offset = rec_offset + rec_len;
2944 last_disk_offset = disk_offset + rec_len;
2946 error = hammer_ip_next(&cursor);
2950 kprintf("BMAP %016llx: %016llx - %016llx\n",
2951 (long long)ap->a_loffset,
2952 (long long)base_offset,
2953 (long long)last_offset);
2954 kprintf("BMAP %16s: %016llx - %016llx\n", "",
2955 (long long)base_disk_offset,
2956 (long long)last_disk_offset);
2960 hammer_cache_node(&ip->cache[1], cursor.node);
2962 kprintf("bmap_end2 %016llx ip->cache %p\n",
2963 (long long)ap->a_loffset, ip->cache[1]);
2966 hammer_done_cursor(&cursor);
2967 hammer_done_transaction(&trans);
2970 * If we couldn't find any records or the records we did find were
2971 * all behind the requested offset, return failure. A forward
2972 * truncation can leave a hole w/ no on-disk records.
2974 if (last_offset == 0 || last_offset < ap->a_loffset)
2975 return (EOPNOTSUPP);
2978 * Figure out the block size at the requested offset and adjust
2979 * our limits so cluster_read() does not create inappropriately
2980 * sized buffer cache buffers.
2982 blksize = hammer_blocksize(ap->a_loffset);
2983 if (hammer_blocksize(base_offset) != blksize) {
2984 base_offset = hammer_blockdemarc(base_offset, ap->a_loffset);
2986 if (last_offset != ap->a_loffset &&
2987 hammer_blocksize(last_offset - 1) != blksize) {
2988 last_offset = hammer_blockdemarc(ap->a_loffset,
2993 * Returning EOPNOTSUPP simply prevents the direct-IO optimization
2996 disk_offset = base_disk_offset + (ap->a_loffset - base_offset);
2998 if ((disk_offset & HAMMER_OFF_ZONE_MASK) != HAMMER_ZONE_LARGE_DATA) {
3000 * Only large-data zones can be direct-IOd
3003 } else if ((disk_offset & HAMMER_BUFMASK) ||
3004 (last_offset - ap->a_loffset) < blksize) {
3006 * doffsetp is not aligned or the forward run size does
3007 * not cover a whole buffer, disallow the direct I/O.
3014 *ap->a_doffsetp = disk_offset;
3016 *ap->a_runb = ap->a_loffset - base_offset;
3017 KKASSERT(*ap->a_runb >= 0);
3020 *ap->a_runp = last_offset - ap->a_loffset;
3021 KKASSERT(*ap->a_runp >= 0);
3029 * Write to a regular file. Because this is a strategy call the OS is
3030 * trying to actually get data onto the media.
3034 hammer_vop_strategy_write(struct vop_strategy_args *ap)
3036 hammer_record_t record;
3047 ip = ap->a_vp->v_data;
3050 blksize = hammer_blocksize(bio->bio_offset);
3051 KKASSERT(bp->b_bufsize == blksize);
3053 if (ip->flags & HAMMER_INODE_RO) {
3054 bp->b_error = EROFS;
3055 bp->b_flags |= B_ERROR;
3061 * Interlock with inode destruction (no in-kernel or directory
3062 * topology visibility). If we queue new IO while trying to
3063 * destroy the inode we can deadlock the vtrunc call in
3064 * hammer_inode_unloadable_check().
3066 * Besides, there's no point flushing a bp associated with an
3067 * inode that is being destroyed on-media and has no kernel
3070 if ((ip->flags | ip->sync_flags) &
3071 (HAMMER_INODE_DELETING|HAMMER_INODE_DELETED)) {
3078 * Reserve space and issue a direct-write from the front-end.
3079 * NOTE: The direct_io code will hammer_bread/bcopy smaller
3082 * An in-memory record will be installed to reference the storage
3083 * until the flusher can get to it.
3085 * Since we own the high level bio the front-end will not try to
3086 * do a direct-read until the write completes.
3088 * NOTE: The only time we do not reserve a full-sized buffer's
3089 * worth of data is if the file is small. We do not try to
3090 * allocate a fragment (from the small-data zone) at the end of
3091 * an otherwise large file as this can lead to wildly separated
3094 KKASSERT((bio->bio_offset & HAMMER_BUFMASK) == 0);
3095 KKASSERT(bio->bio_offset < ip->ino_data.size);
3096 if (bio->bio_offset || ip->ino_data.size > HAMMER_BUFSIZE / 2)
3097 bytes = bp->b_bufsize;
3099 bytes = ((int)ip->ino_data.size + 15) & ~15;
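/*
 * The small-file case above simply rounds the file size up to a
 * 16-byte boundary instead of reserving a full buffer.
 */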
3101 record = hammer_ip_add_bulk(ip, bio->bio_offset, bp->b_data,
3105 * B_VFSFLAG1 indicates that a REDO_WRITE entry was generated
3106 * in hammer_vop_write(). We must flag the record so the proper
3107 * REDO_TERM_WRITE entry is generated during the flush.
3110 if (bp->b_flags & B_VFSFLAG1) {
3111 record->flags |= HAMMER_RECF_REDO;
3112 bp->b_flags &= ~B_VFSFLAG1;
3114 hammer_io_direct_write(hmp, bio, record);
3115 if (ip->rsv_recs > 1 && hmp->rsv_recs > hammer_limit_recs)
3116 hammer_flush_inode(ip, 0);
3118 bp->b_bio2.bio_offset = NOOFFSET;
3119 bp->b_error = error;
3120 bp->b_flags |= B_ERROR;
3127 * dounlink - disconnect a directory entry
3129 * XXX whiteout support not really in yet
3132 hammer_dounlink(hammer_transaction_t trans, struct nchandle *nch,
3133 struct vnode *dvp, struct ucred *cred,
3134 int flags, int isdir)
3136 struct namecache *ncp;
3139 struct hammer_cursor cursor;
3141 u_int32_t max_iterations;
3145 * Calculate the namekey and setup the key range for the scan. This
3146 * works kinda like a chained hash table where the lower 32 bits
3147 * of the namekey synthesize the chain.
3149 * The key range is inclusive of both key_beg and key_end.
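 *
 * The scan below therefore covers keys in [namekey, namekey +
 * max_iterations]; every directory entry whose name hashes onto this
 * chain falls in that range and is compared against the requested
 * name component.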
3154 if (dip->flags & HAMMER_INODE_RO)
3157 namekey = hammer_directory_namekey(dip, ncp->nc_name, ncp->nc_nlen,
3160 hammer_init_cursor(trans, &cursor, &dip->cache[1], dip);
3161 cursor.key_beg.localization = dip->obj_localization +
3162 hammer_dir_localization(dip);
3163 cursor.key_beg.obj_id = dip->obj_id;
3164 cursor.key_beg.key = namekey;
3165 cursor.key_beg.create_tid = 0;
3166 cursor.key_beg.delete_tid = 0;
3167 cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
3168 cursor.key_beg.obj_type = 0;
3170 cursor.key_end = cursor.key_beg;
3171 cursor.key_end.key += max_iterations;
3172 cursor.asof = dip->obj_asof;
3173 cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;
3176 * Scan all matching records (the chain), locate the one matching
3177 * the requested path component. info->last_error contains the
3178 * error code on search termination and could be 0, ENOENT, or
3181 * The hammer_ip_*() functions merge in-memory records with on-disk
3182 * records for the purposes of the search.
3184 error = hammer_ip_first(&cursor);
3186 while (error == 0) {
3187 error = hammer_ip_resolve_data(&cursor);
3190 nlen = cursor.leaf->data_len - HAMMER_ENTRY_NAME_OFF;
3192 if (ncp->nc_nlen == nlen &&
3193 bcmp(ncp->nc_name, cursor.data->entry.name, nlen) == 0) {
3196 error = hammer_ip_next(&cursor);
3200 * If all is ok we have to get the inode so we can adjust nlinks.
3201 * To avoid a deadlock with the flusher we must release the inode
3202 * lock on the directory when acquiring the inode for the entry.
3204 * If the target is a directory, it must be empty.
3207 hammer_unlock(&cursor.ip->lock);
3208 ip = hammer_get_inode(trans, dip, cursor.data->entry.obj_id,
3210 cursor.data->entry.localization,
3212 hammer_lock_sh(&cursor.ip->lock);
3213 if (error == ENOENT) {
3214 kprintf("HAMMER: WARNING: Removing "
3215 "dirent w/missing inode \"%s\"\n"
3216 "\tobj_id = %016llx\n",
3218 (long long)cursor.data->entry.obj_id);
3223 * If isdir >= 0 we validate that the entry is or is not a
3224 * directory. If isdir < 0 we don't care.
3226 if (error == 0 && isdir >= 0 && ip) {
3228 ip->ino_data.obj_type != HAMMER_OBJTYPE_DIRECTORY) {
3230 } else if (isdir == 0 &&
3231 ip->ino_data.obj_type == HAMMER_OBJTYPE_DIRECTORY) {
3237 * If we are trying to remove a directory the directory must
3240 * The check directory code can loop and deadlock/retry. Our
3241 * own cursor's node locks must be released to avoid a 3-way
3242 * deadlock with the flusher if the check directory code
3245 * If any changes whatsoever have been made to the cursor
3246 * set EDEADLK and retry.
3248 * WARNING: See warnings in hammer_unlock_cursor()
3251 if (error == 0 && ip && ip->ino_data.obj_type ==
3252 HAMMER_OBJTYPE_DIRECTORY) {
3253 hammer_unlock_cursor(&cursor);
3254 error = hammer_ip_check_directory_empty(trans, ip);
3255 hammer_lock_cursor(&cursor);
3256 if (cursor.flags & HAMMER_CURSOR_RETEST) {
3257 kprintf("HAMMER: Warning: avoided deadlock "
3265 * Delete the directory entry.
3267 * WARNING: hammer_ip_del_directory() may have to terminate
3268 * the cursor to avoid a deadlock. It is ok to call
3269 * hammer_done_cursor() twice.
3272 error = hammer_ip_del_directory(trans, &cursor,
3275 hammer_done_cursor(&cursor);
3277 cache_setunresolved(nch);
3278 cache_setvp(nch, NULL);
3281 * XXX locking. Note: ip->vp might get ripped out
3282 * when we setunresolved() the nch since we had
3283 * no other reference to it. In that case ip->vp
3287 hammer_knote(ip->vp, NOTE_DELETE);
3288 cache_inval_vp(ip->vp, CINV_DESTROY);
3292 hammer_rel_inode(ip, 0);
3294 hammer_done_cursor(&cursor);
3296 if (error == EDEADLK)
3302 /************************************************************************
3303 * FIFO AND SPECFS OPS *
3304 ************************************************************************
3309 hammer_vop_fifoclose (struct vop_close_args *ap)
3311 /* XXX update itimes */
3312 return (VOCALL(&fifo_vnode_vops, &ap->a_head));
3316 hammer_vop_fiforead (struct vop_read_args *ap)
3320 error = VOCALL(&fifo_vnode_vops, &ap->a_head);
3321 /* XXX update access time */
3326 hammer_vop_fifowrite (struct vop_write_args *ap)
3330 error = VOCALL(&fifo_vnode_vops, &ap->a_head);
3331 /* XXX update access time */
3337 hammer_vop_fifokqfilter(struct vop_kqfilter_args *ap)
3341 error = VOCALL(&fifo_vnode_vops, &ap->a_head);
3343 error = hammer_vop_kqfilter(ap);
3347 /************************************************************************
3349 ************************************************************************
3352 static void filt_hammerdetach(struct knote *kn);
3353 static int filt_hammerread(struct knote *kn, long hint);
3354 static int filt_hammerwrite(struct knote *kn, long hint);
3355 static int filt_hammervnode(struct knote *kn, long hint);
3357 static struct filterops hammerread_filtops =
3358 { FILTEROP_ISFD, NULL, filt_hammerdetach, filt_hammerread };
3359 static struct filterops hammerwrite_filtops =
3360 { FILTEROP_ISFD, NULL, filt_hammerdetach, filt_hammerwrite };
3361 static struct filterops hammervnode_filtops =
3362 { FILTEROP_ISFD, NULL, filt_hammerdetach, filt_hammervnode };
3366 hammer_vop_kqfilter(struct vop_kqfilter_args *ap)
3368 struct vnode *vp = ap->a_vp;
3369 struct knote *kn = ap->a_kn;
3371 switch (kn->kn_filter) {
3373 kn->kn_fop = &hammerread_filtops;
3376 kn->kn_fop = &hammerwrite_filtops;
3379 kn->kn_fop = &hammervnode_filtops;
3382 return (EOPNOTSUPP);
3385 kn->kn_hook = (caddr_t)vp;
3387 knote_insert(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn);
3393 filt_hammerdetach(struct knote *kn)
3395 struct vnode *vp = (void *)kn->kn_hook;
3397 knote_remove(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn);
3401 filt_hammerread(struct knote *kn, long hint)
3403 struct vnode *vp = (void *)kn->kn_hook;
3404 hammer_inode_t ip = VTOI(vp);
3407 if (hint == NOTE_REVOKE) {
3408 kn->kn_flags |= (EV_EOF | EV_ONESHOT);
3411 off = ip->ino_data.size - kn->kn_fp->f_offset;
3412 kn->kn_data = (off < INTPTR_MAX) ? off : INTPTR_MAX;
3413 if (kn->kn_sfflags & NOTE_OLDAPI)
3415 return (kn->kn_data != 0);
3419 filt_hammerwrite(struct knote *kn, long hint)
3421 if (hint == NOTE_REVOKE)
3422 kn->kn_flags |= (EV_EOF | EV_ONESHOT);
3428 filt_hammervnode(struct knote *kn, long hint)
3430 if (kn->kn_sfflags & hint)
3431 kn->kn_fflags |= hint;
3432 if (hint == NOTE_REVOKE) {
3433 kn->kn_flags |= EV_EOF;
3436 return (kn->kn_fflags != 0);