/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_vnops.c,v 1.102 2008/10/16 17:24:16 dillon Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/fcntl.h>
#include <sys/namecache.h>
#include <sys/vnode.h>
#include <sys/lockf.h>
#include <sys/event.h>
#include <sys/stat.h>
#include <sys/dirent.h>
#include <sys/file.h>
#include <vm/vm_extern.h>
#include <vfs/fifofs/fifo.h>

#include <sys/mplock2.h>

#include "hammer.h"
/*static int hammer_vop_vnoperate(struct vop_generic_args *);*/
static int hammer_vop_fsync(struct vop_fsync_args *);
static int hammer_vop_read(struct vop_read_args *);
static int hammer_vop_write(struct vop_write_args *);
static int hammer_vop_access(struct vop_access_args *);
static int hammer_vop_advlock(struct vop_advlock_args *);
static int hammer_vop_close(struct vop_close_args *);
static int hammer_vop_ncreate(struct vop_ncreate_args *);
static int hammer_vop_getattr(struct vop_getattr_args *);
static int hammer_vop_nresolve(struct vop_nresolve_args *);
static int hammer_vop_nlookupdotdot(struct vop_nlookupdotdot_args *);
static int hammer_vop_nlink(struct vop_nlink_args *);
static int hammer_vop_nmkdir(struct vop_nmkdir_args *);
static int hammer_vop_nmknod(struct vop_nmknod_args *);
static int hammer_vop_open(struct vop_open_args *);
static int hammer_vop_print(struct vop_print_args *);
static int hammer_vop_readdir(struct vop_readdir_args *);
static int hammer_vop_readlink(struct vop_readlink_args *);
static int hammer_vop_nremove(struct vop_nremove_args *);
static int hammer_vop_nrename(struct vop_nrename_args *);
static int hammer_vop_nrmdir(struct vop_nrmdir_args *);
static int hammer_vop_markatime(struct vop_markatime_args *);
static int hammer_vop_setattr(struct vop_setattr_args *);
static int hammer_vop_strategy(struct vop_strategy_args *);
static int hammer_vop_bmap(struct vop_bmap_args *ap);
static int hammer_vop_nsymlink(struct vop_nsymlink_args *);
static int hammer_vop_nwhiteout(struct vop_nwhiteout_args *);
static int hammer_vop_ioctl(struct vop_ioctl_args *);
static int hammer_vop_mountctl(struct vop_mountctl_args *);
static int hammer_vop_kqfilter (struct vop_kqfilter_args *);

static int hammer_vop_fifoclose (struct vop_close_args *);
static int hammer_vop_fiforead (struct vop_read_args *);
static int hammer_vop_fifowrite (struct vop_write_args *);
static int hammer_vop_fifokqfilter (struct vop_kqfilter_args *);
struct vop_ops hammer_vnode_vops = {
	.vop_default =		vop_defaultop,
	.vop_fsync =		hammer_vop_fsync,
	.vop_getpages =		vop_stdgetpages,
	.vop_putpages =		vop_stdputpages,
	.vop_read =		hammer_vop_read,
	.vop_write =		hammer_vop_write,
	.vop_access =		hammer_vop_access,
	.vop_advlock =		hammer_vop_advlock,
	.vop_close =		hammer_vop_close,
	.vop_ncreate =		hammer_vop_ncreate,
	.vop_getattr =		hammer_vop_getattr,
	.vop_inactive =		hammer_vop_inactive,
	.vop_reclaim =		hammer_vop_reclaim,
	.vop_nresolve =		hammer_vop_nresolve,
	.vop_nlookupdotdot =	hammer_vop_nlookupdotdot,
	.vop_nlink =		hammer_vop_nlink,
	.vop_nmkdir =		hammer_vop_nmkdir,
	.vop_nmknod =		hammer_vop_nmknod,
	.vop_open =		hammer_vop_open,
	.vop_pathconf =		vop_stdpathconf,
	.vop_print =		hammer_vop_print,
	.vop_readdir =		hammer_vop_readdir,
	.vop_readlink =		hammer_vop_readlink,
	.vop_nremove =		hammer_vop_nremove,
	.vop_nrename =		hammer_vop_nrename,
	.vop_nrmdir =		hammer_vop_nrmdir,
	.vop_markatime =	hammer_vop_markatime,
	.vop_setattr =		hammer_vop_setattr,
	.vop_bmap =		hammer_vop_bmap,
	.vop_strategy =		hammer_vop_strategy,
	.vop_nsymlink =		hammer_vop_nsymlink,
	.vop_nwhiteout =	hammer_vop_nwhiteout,
	.vop_ioctl =		hammer_vop_ioctl,
	.vop_mountctl =		hammer_vop_mountctl,
	.vop_kqfilter =		hammer_vop_kqfilter
};

struct vop_ops hammer_spec_vops = {
	.vop_default =		vop_defaultop,
	.vop_fsync =		hammer_vop_fsync,
	.vop_read =		vop_stdnoread,
	.vop_write =		vop_stdnowrite,
	.vop_access =		hammer_vop_access,
	.vop_close =		hammer_vop_close,
	.vop_markatime =	hammer_vop_markatime,
	.vop_getattr =		hammer_vop_getattr,
	.vop_inactive =		hammer_vop_inactive,
	.vop_reclaim =		hammer_vop_reclaim,
	.vop_setattr =		hammer_vop_setattr
};

struct vop_ops hammer_fifo_vops = {
	.vop_default =		fifo_vnoperate,
	.vop_fsync =		hammer_vop_fsync,
	.vop_read =		hammer_vop_fiforead,
	.vop_write =		hammer_vop_fifowrite,
	.vop_access =		hammer_vop_access,
	.vop_close =		hammer_vop_fifoclose,
	.vop_markatime =	hammer_vop_markatime,
	.vop_getattr =		hammer_vop_getattr,
	.vop_inactive =		hammer_vop_inactive,
	.vop_reclaim =		hammer_vop_reclaim,
	.vop_setattr =		hammer_vop_setattr,
	.vop_kqfilter =		hammer_vop_fifokqfilter
};
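/*
 * Note on the three tables above: the kernel dispatches through a vector
 * chosen by vnode type.  Regular files and directories use the full HAMMER
 * paths in hammer_vnode_vops; device special files get hammer_spec_vops
 * (reads/writes are rejected and go through the device layer) and fifos
 * get hammer_fifo_vops (I/O delegated to fifofs), while attribute, fsync,
 * inactive and reclaim handling stays with HAMMER in all three cases.
 */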
static __inline
void
hammer_knote(struct vnode *vp, int flags)
{
	if (flags)
		KNOTE(&vp->v_pollinfo.vpi_selinfo.si_note, flags);
}

#ifdef DEBUG_TRUNCATE
struct hammer_inode *HammerTruncIp;
#endif

static int hammer_dounlink(hammer_transaction_t trans, struct nchandle *nch,
			   struct vnode *dvp, struct ucred *cred,
			   int flags, int isdir);
static int hammer_vop_strategy_read(struct vop_strategy_args *ap);
static int hammer_vop_strategy_write(struct vop_strategy_args *ap);

#if 0
static
int
hammer_vop_vnoperate(struct vop_generic_args *ap)
{
	return (VOCALL(&hammer_vnode_vops, ap));
}
#endif
/*
 * hammer_vop_fsync { vp, waitfor }
 *
 * fsync() an inode to disk and wait for it to be completely committed
 * such that the information would not be undone if a crash occurred after
 * return.
 *
 * NOTE: HAMMER's fsync()'s are going to remain expensive until we implement
 *	 a REDO log.  A sysctl is provided to relax HAMMER's fsync()
 *	 operation.
 *
 *	 Ultimately the combination of a REDO log and use of fast storage
 *	 to front-end cluster caches will make fsync fast, but it ain't
 *	 here yet.  And, in any case, we need real transactional
 *	 all-or-nothing features which are not restricted to a single file.
 */
static
int
hammer_vop_fsync(struct vop_fsync_args *ap)
{
	hammer_inode_t ip = VTOI(ap->a_vp);
	hammer_mount_t hmp = ip->hmp;
	int waitfor = ap->a_waitfor;
	int mode;

	/*
	 * Fsync rule relaxation (default is either full synchronous flush
	 * or REDO semantics with synchronous flush).
	 */
	if (ap->a_flags & VOP_FSYNC_SYSCALL) {
		switch(hammer_fsync_mode) {
		case 0:
mode0:
			/* disable REDO, full synchronous flush */
			ip->redo_count = SIZE_T_MAX;
			goto skip;
		case 1:
mode1:
			/* disable REDO, full asynchronous flush */
			ip->redo_count = SIZE_T_MAX;
			if (waitfor == MNT_WAIT)
				waitfor = MNT_NOWAIT;
			goto skip;
		case 2:
			/* REDO semantics, synchronous flush */
			if (hmp->version < HAMMER_VOL_VERSION_FOUR)
				goto mode0;
			mode = HAMMER_FLUSH_UNDOS_AUTO;
			break;
		case 3:
			/* REDO semantics, relaxed asynchronous flush */
			if (hmp->version < HAMMER_VOL_VERSION_FOUR)
				goto mode1;
			mode = HAMMER_FLUSH_UNDOS_RELAXED;
			if (waitfor == MNT_WAIT)
				waitfor = MNT_NOWAIT;
			break;
		case 4:
			/* ignore the fsync() system call */
			return(0);
		default:
			/* we have to do something */
			mode = HAMMER_FLUSH_UNDOS_RELAXED;
			if (waitfor == MNT_WAIT)
				waitfor = MNT_NOWAIT;
			break;
		}

		/*
		 * redo_count is initialized to a maximal value and set
		 * to 0 after the first fsync() on a file, which enables
		 * REDO logging on the inode unless the number of bytes
		 * written exceeds the limit.
		 */
		if (ip->redo_count < hammer_limit_redo &&
		    (ip->flags & HAMMER_INODE_MODMASK_NOREDO) == 0
		) {
			++hammer_count_fsyncs;
			hammer_flusher_flush_undos(hmp, mode);
			ip->redo_count = 0;
			return(0);
		}
		ip->redo_count = 0;
	}
skip:

	/*
	 * Do a full flush sequence.
	 */
	++hammer_count_fsyncs;
	vfsync(ap->a_vp, waitfor, 1, NULL, NULL);
	hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
	if (waitfor == MNT_WAIT) {
		vn_unlock(ap->a_vp);
		hammer_wait_inode(ip);
		vn_lock(ap->a_vp, LK_EXCLUSIVE | LK_RETRY);
	}
	return (ip->error);
}
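/*
 * Illustration (behavior, not code): the mode switch above is driven by
 * the vfs.hammer.fsync_mode sysctl.  On a version-4+ filesystem a
 * relaxed, REDO-logged fsync() can be selected from userland with e.g.
 *
 *	sysctl vfs.hammer.fsync_mode=3
 *
 * Modes 0/1 disable REDO and do a full synchronous/asynchronous flush,
 * modes 2/3 log REDO records and defer the expensive UNDO flush, and
 * mode 4 ignores the fsync() system call entirely.
 */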
/*
 * hammer_vop_read { vp, uio, ioflag, cred }
 */
static
int
hammer_vop_read(struct vop_read_args *ap)
{
	struct hammer_transaction trans;
	struct hammer_inode *ip;
	off_t offset;
	struct buf *bp;
	struct uio *uio;
	int error;
	int n;
	int seqcount;
	int ioseqcount;
	int blksize;
	int bigread;
	int got_mplock;

	if (ap->a_vp->v_type != VREG)
		return (EINVAL);
	ip = VTOI(ap->a_vp);
	error = 0;
	uio = ap->a_uio;

	/*
	 * Allow the UIO's size to override the sequential heuristic.
	 */
	blksize = hammer_blocksize(uio->uio_offset);
	seqcount = (uio->uio_resid + (blksize - 1)) / blksize;
	ioseqcount = ap->a_ioflag >> 16;
	if (seqcount < ioseqcount)
		seqcount = ioseqcount;

	/*
	 * Temporary hack until more of HAMMER can be made MPSAFE.
	 */
#ifdef SMP
	if (curthread->td_mpcount) {
		got_mplock = -1;
		hammer_start_transaction(&trans, ip->hmp);
	} else {
		got_mplock = 0;
	}
#else
	hammer_start_transaction(&trans, ip->hmp);
	got_mplock = -1;
#endif

	/*
	 * If reading or writing a huge amount of data we have to break
	 * atomicity and allow the operation to be interrupted by a signal
	 * or it can DOS the machine.
	 */
	bigread = (uio->uio_resid > 100 * 1024 * 1024);

	/*
	 * Access the data typically in HAMMER_BUFSIZE blocks via the
	 * buffer cache, but HAMMER may use a variable block size based
	 * on the offset.
	 *
	 * XXX Temporary hack, delay the start transaction while we remain
	 *     MPSAFE.  NOTE: ino_data.size cannot change while vnode is
	 *     locked.
	 */
	while (uio->uio_resid > 0 && uio->uio_offset < ip->ino_data.size) {
		int64_t base_offset;
		int64_t file_limit;

		blksize = hammer_blocksize(uio->uio_offset);
		offset = (int)uio->uio_offset & (blksize - 1);
		base_offset = uio->uio_offset - offset;

		if (bigread && (error = hammer_signal_check(ip->hmp)) != 0)
			break;

		/*
		 * MPSAFE
		 */
		bp = getcacheblk(ap->a_vp, base_offset);
		if (bp) {
			error = 0;
			goto skip;
		}

		/*
		 * MPUNSAFE
		 */
		if (got_mplock == 0) {
			got_mplock = 1;
			get_mplock();
			hammer_start_transaction(&trans, ip->hmp);
		}

		if (hammer_cluster_enable) {
			/*
			 * Use file_limit to prevent cluster_read() from
			 * creating buffers of the wrong block size past
			 * the demarc.
			 */
			file_limit = ip->ino_data.size;
			if (base_offset < HAMMER_XDEMARC &&
			    file_limit > HAMMER_XDEMARC) {
				file_limit = HAMMER_XDEMARC;
			}
			error = cluster_read(ap->a_vp,
					     file_limit, base_offset,
					     blksize, MAXPHYS,
					     seqcount, &bp);
		} else {
			error = bread(ap->a_vp, base_offset, blksize, &bp);
		}
		if (error) {
			brelse(bp);
			break;
		}
skip:

		/* bp->b_flags |= B_CLUSTEROK; temporarily disabled */
		n = blksize - offset;
		if (n > uio->uio_resid)
			n = uio->uio_resid;
		if (n > ip->ino_data.size - uio->uio_offset)
			n = (int)(ip->ino_data.size - uio->uio_offset);
		error = uiomove((char *)bp->b_data + offset, n, uio);

		/* data has a lower priority than meta-data */
		bp->b_flags |= B_AGE;
		bqrelse(bp);
		if (error)
			break;
		hammer_stats_file_read += n;
	}

	/*
	 * XXX only update the atime if we had to get the MP lock.
	 * XXX hack hack hack, fixme.
	 */
	if (got_mplock) {
		if ((ip->flags & HAMMER_INODE_RO) == 0 &&
		    (ip->hmp->mp->mnt_flag & MNT_NOATIME) == 0) {
			ip->ino_data.atime = trans.time;
			hammer_modify_inode(ip, HAMMER_INODE_ATIME);
		}
		hammer_done_transaction(&trans);
		if (got_mplock > 0)
			rel_mplock();
	}
	return (error);
}
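/*
 * Illustration: hammer_blocksize() makes the buffer size offset-dependent.
 * Offsets below the demarc (HAMMER_XDEMARC) use the small HAMMER_BUFSIZE
 * (16K) buffers; offsets at or past it use the large HAMMER_XBUFSIZE
 * buffers.  This is why the loop above recomputes blksize every iteration
 * and why cluster_read() is clamped with file_limit at the demarc --
 * otherwise the cluster code could instantiate buffers of the wrong size
 * straddling the boundary.
 */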
/*
 * hammer_vop_write { vp, uio, ioflag, cred }
 */
static
int
hammer_vop_write(struct vop_write_args *ap)
{
	struct hammer_transaction trans;
	struct hammer_inode *ip;
	hammer_mount_t hmp;
	struct uio *uio;
	int offset;
	off_t base_offset;
	struct buf *bp;
	int kflags;
	int error;
	int n;
	int flags;
	int delta;
	int seqcount;
	int bigwrite;

	if (ap->a_vp->v_type != VREG)
		return (EINVAL);
	ip = VTOI(ap->a_vp);
	hmp = ip->hmp;
	uio = ap->a_uio;
	error = 0;
	kflags = 0;
	seqcount = ap->a_ioflag >> 16;

	if (ip->flags & HAMMER_INODE_RO)
		return (EROFS);

	/*
	 * Create a transaction to cover the operations we perform.
	 */
	hammer_start_transaction(&trans, hmp);

	/*
	 * Check append mode
	 */
	if (ap->a_ioflag & IO_APPEND)
		uio->uio_offset = ip->ino_data.size;

	/*
	 * Check for illegal write offsets.  Valid range is 0...2^63-1.
	 *
	 * NOTE: the base_off assignment is required to work around what
	 * I consider to be a GCC-4 optimization bug.
	 */
	if (uio->uio_offset < 0) {
		hammer_done_transaction(&trans);
		return (EFBIG);
	}
	base_offset = uio->uio_offset + uio->uio_resid; /* work around gcc-4 */
	if (uio->uio_resid > 0 && base_offset <= uio->uio_offset) {
		hammer_done_transaction(&trans);
		return (EFBIG);
	}

	/*
	 * If reading or writing a huge amount of data we have to break
	 * atomicity and allow the operation to be interrupted by a signal
	 * or it can DOS the machine.
	 *
	 * Adjust redo_count early to avoid generating unnecessary redos.
	 */
	bigwrite = (uio->uio_resid > 100 * 1024 * 1024);
	if (ip->redo_count < hammer_limit_redo)
		ip->redo_count += uio->uio_resid;

	/*
	 * Access the data typically in HAMMER_BUFSIZE blocks via the
	 * buffer cache, but HAMMER may use a variable block size based
	 * on the offset.
	 */
	while (uio->uio_resid > 0) {
		int fixsize = 0;
		int blksize;
		int blkmask;

		if ((error = hammer_checkspace(hmp, HAMMER_CHKSPC_WRITE)) != 0)
			break;
		if (bigwrite && (error = hammer_signal_check(hmp)) != 0)
			break;

		blksize = hammer_blocksize(uio->uio_offset);

		/*
		 * Do not allow HAMMER to blow out the buffer cache.  Very
		 * large UIOs can lock out other processes due to bwillwrite()
		 * mechanics.
		 *
		 * The hammer inode is not locked during these operations.
		 * The vnode is locked which can interfere with the pageout
		 * daemon for non-UIO_NOCOPY writes but should not interfere
		 * with the buffer cache.  Even so, we cannot afford to
		 * allow the pageout daemon to build up too many dirty buffer
		 * cache buffers.
		 *
		 * Only call this if we aren't being recursively called from
		 * a virtual disk device (vn), else we may deadlock.
		 */
		if ((ap->a_ioflag & IO_RECURSE) == 0)
			bwillwrite(blksize);

		/*
		 * Control the number of pending records associated with
		 * this inode.  If too many have accumulated start a
		 * flush.  Try to maintain a pipeline with the flusher.
		 */
		if (ip->rsv_recs >= hammer_limit_inode_recs) {
			hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
		}
		if (ip->rsv_recs >= hammer_limit_inode_recs * 2) {
			while (ip->rsv_recs >= hammer_limit_inode_recs) {
				tsleep(&ip->rsv_recs, 0, "hmrwww", hz);
			}
			hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
		}

		/*
		 * Do not allow HAMMER to blow out system memory by
		 * accumulating too many records.   Records are so well
		 * decoupled from the buffer cache that it is possible
		 * for userland to push data out to the media via
		 * direct-write, but build up the records queued to the
		 * backend faster than the backend can flush them out.
		 * HAMMER has hit its write limit but the frontend has
		 * no pushback to slow it down.
		 */
		if (hmp->rsv_recs > hammer_limit_recs / 2) {
			/*
			 * Get the inode on the flush list
			 */
			if (ip->rsv_recs >= 64)
				hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
			else if (ip->rsv_recs >= 16)
				hammer_flush_inode(ip, 0);

			/*
			 * Keep the flusher going if the system keeps
			 * queueing records.
			 */
			delta = hmp->count_newrecords -
				hmp->last_newrecords;
			if (delta < 0 || delta > hammer_limit_recs / 2) {
				hmp->last_newrecords = hmp->count_newrecords;
				hammer_sync_hmp(hmp, MNT_NOWAIT);
			}

			/*
			 * If we have gotten behind start slowing
			 * down the writers.
			 */
			delta = (hmp->rsv_recs - hammer_limit_recs) *
				hz / hammer_limit_recs;
			if (delta > 0)
				tsleep(&trans, 0, "hmrslo", delta);
		}

		/*
		 * Calculate the blocksize at the current offset and figure
		 * out how much we can actually write.
		 */
		blkmask = blksize - 1;
		offset = (int)uio->uio_offset & blkmask;
		base_offset = uio->uio_offset & ~(int64_t)blkmask;
		n = blksize - offset;
		if (n > uio->uio_resid)
			n = uio->uio_resid;
		if (uio->uio_offset + n > ip->ino_data.size) {
			vnode_pager_setsize(ap->a_vp, uio->uio_offset + n);
			fixsize = 1;
			kflags |= NOTE_EXTEND;
		}

		if (uio->uio_segflg == UIO_NOCOPY) {
			/*
			 * Issuing a write with the same data backing the
			 * buffer.  Instantiate the buffer to collect the
			 * backing vm pages, then read-in any missing bits.
			 *
			 * This case is used by vop_stdputpages().
			 */
			bp = getblk(ap->a_vp, base_offset,
				    blksize, GETBLK_BHEAVY, 0);
			if ((bp->b_flags & B_CACHE) == 0) {
				bqrelse(bp);
				error = bread(ap->a_vp, base_offset,
					      blksize, &bp);
			}
		} else if (offset == 0 && uio->uio_resid >= blksize) {
			/*
			 * Even though we are entirely overwriting the buffer
			 * we may still have to zero it out to avoid a
			 * mmap/write visibility issue.
			 */
			bp = getblk(ap->a_vp, base_offset, blksize, GETBLK_BHEAVY, 0);
			if ((bp->b_flags & B_CACHE) == 0)
				vfs_bio_clrbuf(bp);
		} else if (base_offset >= ip->ino_data.size) {
			/*
			 * If the base offset of the buffer is beyond the
			 * file EOF, we don't have to issue a read.
			 */
			bp = getblk(ap->a_vp, base_offset,
				    blksize, GETBLK_BHEAVY, 0);
			vfs_bio_clrbuf(bp);
		} else {
			/*
			 * Partial overwrite, read in any missing bits then
			 * replace the portion being written.
			 */
			error = bread(ap->a_vp, base_offset, blksize, &bp);
			if (error == 0)
				bheavy(bp);
		}
		if (error == 0)
			error = uiomove(bp->b_data + offset, n, uio);

		/*
		 * Generate REDO records while redo_count has not exceeded
		 * the limit.  Note that redo_count is initialized to a
		 * maximal value until the first fsync(), and zeroed on every
		 * fsync().  Thus at least one fsync() is required before we
		 * start generating REDO records for the ip.
		 */
		if (hmp->version >= HAMMER_VOL_VERSION_FOUR &&
		    ip->redo_count < hammer_limit_redo &&
		    error == 0) {
			hammer_sync_lock_sh(&trans);
			error = hammer_generate_redo(&trans, ip,
						     base_offset + offset,
						     HAMMER_REDO_WRITE,
						     bp->b_data + offset, n);
			hammer_sync_unlock(&trans);
		}

		/*
		 * If we screwed up we have to undo any VM size changes we
		 * made.
		 */
		if (error) {
			brelse(bp);
			if (fixsize) {
				vtruncbuf(ap->a_vp, ip->ino_data.size,
					  hammer_blocksize(ip->ino_data.size));
			}
			break;
		}
		kflags |= NOTE_WRITE;
		hammer_stats_file_write += n;
		/* bp->b_flags |= B_CLUSTEROK; temporarily disabled */
		if (ip->ino_data.size < uio->uio_offset) {
			ip->ino_data.size = uio->uio_offset;
			flags = HAMMER_INODE_SDIRTY;
			vnode_pager_setsize(ap->a_vp, ip->ino_data.size);
		} else {
			flags = 0;
		}
		ip->ino_data.mtime = trans.time;
		flags |= HAMMER_INODE_MTIME | HAMMER_INODE_BUFS;
		hammer_modify_inode(ip, flags);

		/*
		 * Once we dirty the buffer any cached zone-X offset
		 * becomes invalid.  HAMMER NOTE: no-history mode cannot
		 * allow overwriting over the same data sector unless
		 * we provide UNDOs for the old data, which we don't.
		 */
		bp->b_bio2.bio_offset = NOOFFSET;

		/*
		 * Final buffer disposition.
		 *
		 * Because meta-data updates are deferred, HAMMER is
		 * especially sensitive to excessive bdwrite()s because
		 * the I/O stream is not broken up by disk reads.  So the
		 * buffer cache simply cannot keep up.
		 *
		 * WARNING!  blksize is variable.  cluster_write() is
		 * expected to not blow up if it encounters buffers that
		 * do not match the passed blksize.
		 *
		 * NOTE!  Hammer shouldn't need to bawrite()/cluster_write().
		 *	  The ip->rsv_recs check should burst-flush the data.
		 *	  If we queue it immediately the buf could be left
		 *	  locked on the device queue for a very long time.
		 */
		bp->b_flags |= B_AGE;
		if (ap->a_ioflag & IO_SYNC) {
			bwrite(bp);
		} else if (ap->a_ioflag & IO_DIRECT) {
			bawrite(bp);
		} else if (offset + n == blksize) {
			if (hammer_cluster_enable == 0 ||
			    (ap->a_vp->v_mount->mnt_flag & MNT_NOCLUSTERW)) {
				bawrite(bp);
			} else {
				cluster_write(bp, ip->ino_data.size,
					      blksize, seqcount);
			}
		} else {
			bdwrite(bp);
		}
	}
	hammer_done_transaction(&trans);
	hammer_knote(ap->a_vp, kflags);
	return (error);
}
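/*
 * Note on the write path above: dirty data is carried by in-memory
 * records which the flusher pushes to the media asynchronously, so the
 * loop implements its own backpressure in three stages -- bwillwrite()
 * for the buffer cache, the ip->rsv_recs / hmp->rsv_recs checks for
 * per-inode and global record counts, and the "hmrslo" tsleep that
 * throttles writers once the backend falls behind.
 */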
/*
 * hammer_vop_access { vp, mode, cred }
 */
static
int
hammer_vop_access(struct vop_access_args *ap)
{
	struct hammer_inode *ip = VTOI(ap->a_vp);
	uid_t uid;
	gid_t gid;
	int error;

	++hammer_stats_file_iopsr;
	uid = hammer_to_unix_xid(&ip->ino_data.uid);
	gid = hammer_to_unix_xid(&ip->ino_data.gid);

	error = vop_helper_access(ap, uid, gid, ip->ino_data.mode,
				  ip->ino_data.uflags);
	return (error);
}
/*
 * hammer_vop_advlock { vp, id, op, fl, flags }
 */
static
int
hammer_vop_advlock(struct vop_advlock_args *ap)
{
	hammer_inode_t ip = VTOI(ap->a_vp);

	return (lf_advlock(ap, &ip->advlock, ip->ino_data.size));
}
/*
 * hammer_vop_close { vp, fflag }
 *
 * We can only sync-on-close for normal closes.
 */
static
int
hammer_vop_close(struct vop_close_args *ap)
{
#if 0
	struct vnode *vp = ap->a_vp;
	hammer_inode_t ip = VTOI(vp);
	int waitfor;

	if (ip->flags & (HAMMER_INODE_CLOSESYNC|HAMMER_INODE_CLOSEASYNC)) {
		if (vn_islocked(vp) == LK_EXCLUSIVE &&
		    (vp->v_flag & (VINACTIVE|VRECLAIMED)) == 0) {
			if (ip->flags & HAMMER_INODE_CLOSESYNC)
				waitfor = MNT_WAIT;
			else
				waitfor = MNT_NOWAIT;
			ip->flags &= ~(HAMMER_INODE_CLOSESYNC |
				       HAMMER_INODE_CLOSEASYNC);
			VOP_FSYNC(vp, MNT_NOWAIT, waitfor);
		}
	}
#endif
	return (vop_stdclose(ap));
}
/*
 * hammer_vop_ncreate { nch, dvp, vpp, cred, vap }
 *
 * The operating system has already ensured that the directory entry
 * does not exist and done all appropriate namespace locking.
 */
static
int
hammer_vop_ncreate(struct vop_ncreate_args *ap)
{
	struct hammer_transaction trans;
	struct hammer_inode *dip;
	struct hammer_inode *nip;
	struct nchandle *nch;
	int error;

	nch = ap->a_nch;
	dip = VTOI(ap->a_dvp);

	if (dip->flags & HAMMER_INODE_RO)
		return (EROFS);
	if ((error = hammer_checkspace(dip->hmp, HAMMER_CHKSPC_CREATE)) != 0)
		return (error);

	/*
	 * Create a transaction to cover the operations we perform.
	 */
	hammer_start_transaction(&trans, dip->hmp);
	++hammer_stats_file_iopsw;

	/*
	 * Create a new filesystem object of the requested type.  The
	 * returned inode will be referenced and shared-locked to prevent
	 * it from being moved to the flusher.
	 */
	error = hammer_create_inode(&trans, ap->a_vap, ap->a_cred,
				    dip, nch->ncp->nc_name, nch->ncp->nc_nlen,
				    NULL, &nip);
	if (error) {
		hkprintf("hammer_create_inode error %d\n", error);
		hammer_done_transaction(&trans);
		*ap->a_vpp = NULL;
		return (error);
	}

	/*
	 * Add the new filesystem object to the directory.  This will also
	 * bump the inode's link count.
	 */
	error = hammer_ip_add_directory(&trans, dip,
					nch->ncp->nc_name, nch->ncp->nc_nlen,
					nip);
	if (error)
		hkprintf("hammer_ip_add_directory error %d\n", error);

	/*
	 * Finish up.
	 */
	if (error) {
		hammer_rel_inode(nip, 0);
		hammer_done_transaction(&trans);
		*ap->a_vpp = NULL;
	} else {
		error = hammer_get_vnode(nip, ap->a_vpp);
		hammer_done_transaction(&trans);
		hammer_rel_inode(nip, 0);
		if (error == 0) {
			cache_setunresolved(ap->a_nch);
			cache_setvp(ap->a_nch, *ap->a_vpp);
		}
		hammer_knote(ap->a_dvp, NOTE_WRITE);
	}
	return (error);
}
/*
 * hammer_vop_getattr { vp, vap }
 *
 * Retrieve an inode's attribute information.  When accessing inodes
 * historically we fake the atime field to ensure consistent results.
 * The atime field is stored in the B-Tree element and allowed to be
 * updated without cycling the element.
 */
static
int
hammer_vop_getattr(struct vop_getattr_args *ap)
{
	struct hammer_inode *ip = VTOI(ap->a_vp);
	struct vattr *vap = ap->a_vap;

	/*
	 * We want the fsid to be different when accessing a filesystem
	 * with different as-of's so programs like diff don't think
	 * the files are the same.
	 *
	 * We also want the fsid to be the same when comparing snapshots,
	 * or when comparing mirrors (which might be backed by different
	 * physical devices).  HAMMER fsids are based on the PFS's
	 * shared_uuid field.
	 *
	 * XXX there is a chance of collision here.  The va_fsid reported
	 * by stat is different from the more involved fsid used in the
	 * mount structure.
	 */
	++hammer_stats_file_iopsr;
	hammer_lock_sh(&ip->lock);
	vap->va_fsid = ip->pfsm->fsid_udev ^ (u_int32_t)ip->obj_asof ^
		       (u_int32_t)(ip->obj_asof >> 32);

	vap->va_fileid = ip->ino_leaf.base.obj_id;
	vap->va_mode = ip->ino_data.mode;
	vap->va_nlink = ip->ino_data.nlinks;
	vap->va_uid = hammer_to_unix_xid(&ip->ino_data.uid);
	vap->va_gid = hammer_to_unix_xid(&ip->ino_data.gid);
	vap->va_rmajor = 0;
	vap->va_rminor = 0;
	vap->va_size = ip->ino_data.size;

	/*
	 * Special case for @@PFS softlinks.  The actual size of the
	 * expanded softlink is "@@0x%016llx:%05d" == 26 bytes.
	 * or for MAX_TID is    "@@-1:%05d"        == 10 bytes.
	 */
	if (ip->ino_data.obj_type == HAMMER_OBJTYPE_SOFTLINK &&
	    ip->ino_data.size == 10 &&
	    ip->obj_asof == HAMMER_MAX_TID &&
	    ip->obj_localization == 0 &&
	    strncmp(ip->ino_data.ext.symlink, "@@PFS", 5) == 0) {
		if (ip->pfsm->pfsd.mirror_flags & HAMMER_PFSD_SLAVE)
			vap->va_size = 26;
		else
			vap->va_size = 10;
	}

	/*
	 * We must provide a consistent atime and mtime for snapshots
	 * so people can do a 'tar cf - ... | md5' on them and get
	 * consistent results.
	 */
	if (ip->flags & HAMMER_INODE_RO) {
		hammer_time_to_timespec(ip->ino_data.ctime, &vap->va_atime);
		hammer_time_to_timespec(ip->ino_data.ctime, &vap->va_mtime);
	} else {
		hammer_time_to_timespec(ip->ino_data.atime, &vap->va_atime);
		hammer_time_to_timespec(ip->ino_data.mtime, &vap->va_mtime);
	}
	hammer_time_to_timespec(ip->ino_data.ctime, &vap->va_ctime);
	vap->va_flags = ip->ino_data.uflags;
	vap->va_gen = 1;	/* hammer inums are unique for all time */
	vap->va_blocksize = HAMMER_BUFSIZE;
	if (ip->ino_data.size >= HAMMER_XDEMARC) {
		vap->va_bytes = (ip->ino_data.size + HAMMER_XBUFMASK64) &
				~HAMMER_XBUFMASK64;
	} else if (ip->ino_data.size > HAMMER_BUFSIZE / 2) {
		vap->va_bytes = (ip->ino_data.size + HAMMER_BUFMASK64) &
				~HAMMER_BUFMASK64;
	} else {
		vap->va_bytes = (ip->ino_data.size + 15) & ~15;
	}

	vap->va_type = hammer_get_vnode_type(ip->ino_data.obj_type);
	vap->va_filerev = 0;	/* XXX */
	vap->va_uid_uuid = ip->ino_data.uid;
	vap->va_gid_uuid = ip->ino_data.gid;
	vap->va_fsid_uuid = ip->hmp->fsid;
	vap->va_vaflags = VA_UID_UUID_VALID | VA_GID_UUID_VALID |
			  VA_FSID_UUID_VALID;

	switch (ip->ino_data.obj_type) {
	case HAMMER_OBJTYPE_CDEV:
	case HAMMER_OBJTYPE_BDEV:
		vap->va_rmajor = ip->ino_data.rmajor;
		vap->va_rminor = ip->ino_data.rminor;
		break;
	default:
		break;
	}
	hammer_unlock(&ip->lock);
	return (0);
}
/*
 * hammer_vop_nresolve { nch, dvp, cred }
 *
 * Locate the requested directory entry.
 */
static
int
hammer_vop_nresolve(struct vop_nresolve_args *ap)
{
	struct hammer_transaction trans;
	struct namecache *ncp;
	hammer_inode_t dip;
	hammer_inode_t ip;
	hammer_tid_t asof;
	struct hammer_cursor cursor;
	struct vnode *vp;
	int64_t namekey;
	int64_t obj_id;
	int error;
	int i;
	int nlen;
	int flags;
	int ispfs;
	u_int32_t localization;
	u_int32_t max_iterations;

	/*
	 * Misc initialization, plus handle as-of name extensions.  Look for
	 * the '@@' extension.  Note that as-of files and directories cannot
	 * be modified.
	 */
	dip = VTOI(ap->a_dvp);
	ncp = ap->a_nch->ncp;
	asof = dip->obj_asof;
	localization = dip->obj_localization;	/* for code consistency */
	nlen = ncp->nc_nlen;
	flags = dip->flags & HAMMER_INODE_RO;
	ispfs = 0;

	hammer_simple_transaction(&trans, dip->hmp);
	++hammer_stats_file_iopsr;

	for (i = 0; i < nlen; ++i) {
		if (ncp->nc_name[i] == '@' && ncp->nc_name[i+1] == '@') {
			error = hammer_str_to_tid(ncp->nc_name + i + 2,
						  &ispfs, &asof, &localization);
			if (error != 0) {
				error = ENOENT;
				goto done;
			}
			if (asof != HAMMER_MAX_TID)
				flags |= HAMMER_INODE_RO;
			break;
		}
	}
	nlen = i;

	/*
	 * If this is a PFS softlink we dive into the PFS
	 */
	if (ispfs && nlen == 0) {
		ip = hammer_get_inode(&trans, dip, HAMMER_OBJID_ROOT,
				      asof, localization,
				      flags, &error);
		if (error == 0) {
			error = hammer_get_vnode(ip, &vp);
			hammer_rel_inode(ip, 0);
		} else {
			vp = NULL;
		}
		if (error == 0) {
			vn_unlock(vp);
			cache_setvp(ap->a_nch, vp);
			vrele(vp);
		}
		goto done;
	}

	/*
	 * If there is no path component the time extension is relative to dip.
	 * e.g. "fubar/@@<snapshot>"
	 *
	 * "." is handled by the kernel, but ".@@<snapshot>" is not.
	 * e.g. "fubar/.@@<snapshot>"
	 *
	 * ".." is handled by the kernel.  We do not currently handle
	 * "..@<snapshot>".
	 */
	if (nlen == 0 || (nlen == 1 && ncp->nc_name[0] == '.')) {
		ip = hammer_get_inode(&trans, dip, dip->obj_id,
				      asof, dip->obj_localization,
				      flags, &error);
		if (error == 0) {
			error = hammer_get_vnode(ip, &vp);
			hammer_rel_inode(ip, 0);
		} else {
			vp = NULL;
		}
		if (error == 0) {
			vn_unlock(vp);
			cache_setvp(ap->a_nch, vp);
			vrele(vp);
		}
		goto done;
	}

	/*
	 * Calculate the namekey and setup the key range for the scan.  This
	 * works kinda like a chained hash table where the lower 32 bits
	 * of the namekey synthesize the chain.
	 *
	 * The key range is inclusive of both key_beg and key_end.
	 */
	namekey = hammer_directory_namekey(dip, ncp->nc_name, nlen,
					   &max_iterations);

	error = hammer_init_cursor(&trans, &cursor, &dip->cache[1], dip);
	cursor.key_beg.localization = dip->obj_localization +
				      hammer_dir_localization(dip);
	cursor.key_beg.obj_id = dip->obj_id;
	cursor.key_beg.key = namekey;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
	cursor.key_beg.obj_type = 0;

	cursor.key_end = cursor.key_beg;
	cursor.key_end.key += max_iterations;
	cursor.asof = asof;
	cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;

	/*
	 * Scan all matching records (the chain), locate the one matching
	 * the requested path component.
	 *
	 * The hammer_ip_*() functions merge in-memory records with on-disk
	 * records for the purposes of the search.
	 */
	obj_id = 0;
	localization = HAMMER_DEF_LOCALIZATION;

	if (error == 0) {
		error = hammer_ip_first(&cursor);
		while (error == 0) {
			error = hammer_ip_resolve_data(&cursor);
			if (error)
				break;
			if (nlen == cursor.leaf->data_len - HAMMER_ENTRY_NAME_OFF &&
			    bcmp(ncp->nc_name, cursor.data->entry.name, nlen) == 0) {
				obj_id = cursor.data->entry.obj_id;
				localization = cursor.data->entry.localization;
				break;
			}
			error = hammer_ip_next(&cursor);
		}
	}
	hammer_done_cursor(&cursor);

	/*
	 * Lookup the obj_id.  This should always succeed.  If it does not
	 * the filesystem may be damaged and we return a dummy inode.
	 */
	if (error == 0) {
		ip = hammer_get_inode(&trans, dip, obj_id,
				      asof, localization,
				      flags, &error);
		if (error == ENOENT) {
			kprintf("HAMMER: WARNING: Missing "
				"inode for dirent \"%s\"\n"
				"\tobj_id = %016llx, asof=%016llx, lo=%08x\n",
				ncp->nc_name,
				(long long)obj_id, (long long)asof,
				localization);
			error = 0;
			ip = hammer_get_dummy_inode(&trans, dip, obj_id,
						    asof, localization,
						    flags, &error);
		}
		if (error == 0) {
			error = hammer_get_vnode(ip, &vp);
			hammer_rel_inode(ip, 0);
		} else {
			vp = NULL;
		}
		if (error == 0) {
			vn_unlock(vp);
			cache_setvp(ap->a_nch, vp);
			vrele(vp);
		}
	} else if (error == ENOENT) {
		cache_setvp(ap->a_nch, NULL);
	}
done:
	hammer_done_transaction(&trans);
	return (error);
}
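/*
 * Illustration: the '@@' parsing above is what implements HAMMER's as-of
 * lookups directly in the namespace.  Roughly, from userland:
 *
 *	ls -l fubar@@0x00000001061a8ba0		(file as-of a transaction id)
 *	cd .@@0x00000001061a8ba0		(historical version of cwd)
 *
 * Anything resolved with an explicit transaction id other than
 * HAMMER_MAX_TID comes back read-only, and a name carrying a PFS
 * localization suffix (as produced by @@PFS softlinks) sets ispfs and
 * dives into the PFS root instead.
 */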
/*
 * hammer_vop_nlookupdotdot { dvp, vpp, cred }
 *
 * Locate the parent directory of a directory vnode.
 *
 * dvp is referenced but not locked.  *vpp must be returned referenced and
 * locked.  A parent_obj_id of 0 does not necessarily indicate that we are
 * at the root, instead it could indicate that the directory we were in was
 * removed.
 *
 * NOTE: as-of sequences are not linked into the directory structure.  If
 * we are at the root with a different asof than the mount point, reload
 * the same directory with the mount point's asof.   I'm not sure what this
 * will do to NFS.  We encode ASOF stamps in NFS file handles so it might not
 * get confused, but it hasn't been tested.
 */
static
int
hammer_vop_nlookupdotdot(struct vop_nlookupdotdot_args *ap)
{
	struct hammer_transaction trans;
	struct hammer_inode *dip;
	struct hammer_inode *ip;
	int64_t parent_obj_id;
	u_int32_t parent_obj_localization;
	hammer_tid_t asof;
	int error;

	dip = VTOI(ap->a_dvp);
	asof = dip->obj_asof;

	/*
	 * Who is our parent?  This could be the root of a pseudo-filesystem
	 * whose parent is in another localization domain.
	 */
	parent_obj_id = dip->ino_data.parent_obj_id;
	if (dip->obj_id == HAMMER_OBJID_ROOT)
		parent_obj_localization = dip->ino_data.ext.obj.parent_obj_localization;
	else
		parent_obj_localization = dip->obj_localization;

	if (parent_obj_id == 0) {
		if (dip->obj_id == HAMMER_OBJID_ROOT &&
		    asof != dip->hmp->asof) {
			parent_obj_id = dip->obj_id;
			asof = dip->hmp->asof;
			*ap->a_fakename = kmalloc(19, M_TEMP, M_WAITOK);
			ksnprintf(*ap->a_fakename, 19, "0x%016llx",
				  (long long)dip->obj_asof);
		} else {
			*ap->a_vpp = NULL;
			return ENOENT;
		}
	}

	hammer_simple_transaction(&trans, dip->hmp);
	++hammer_stats_file_iopsr;

	ip = hammer_get_inode(&trans, dip, parent_obj_id,
			      asof, parent_obj_localization,
			      dip->flags, &error);
	if (ip) {
		error = hammer_get_vnode(ip, ap->a_vpp);
		hammer_rel_inode(ip, 0);
	} else {
		*ap->a_vpp = NULL;
	}
	hammer_done_transaction(&trans);
	return (error);
}
/*
 * hammer_vop_nlink { nch, dvp, vp, cred }
 */
static
int
hammer_vop_nlink(struct vop_nlink_args *ap)
{
	struct hammer_transaction trans;
	struct hammer_inode *dip;
	struct hammer_inode *ip;
	struct nchandle *nch;
	int error;

	if (ap->a_dvp->v_mount != ap->a_vp->v_mount)
		return(EXDEV);

	nch = ap->a_nch;
	dip = VTOI(ap->a_dvp);
	ip = VTOI(ap->a_vp);

	if (dip->obj_localization != ip->obj_localization)
		return(EXDEV);

	if (dip->flags & HAMMER_INODE_RO)
		return (EROFS);
	if (ip->flags & HAMMER_INODE_RO)
		return (EROFS);
	if ((error = hammer_checkspace(dip->hmp, HAMMER_CHKSPC_CREATE)) != 0)
		return (error);

	/*
	 * Create a transaction to cover the operations we perform.
	 */
	hammer_start_transaction(&trans, dip->hmp);
	++hammer_stats_file_iopsw;

	/*
	 * Add the filesystem object to the directory.  Note that neither
	 * dip nor ip are referenced or locked, but their vnodes are
	 * referenced.  This function will bump the inode's link count.
	 */
	error = hammer_ip_add_directory(&trans, dip,
					nch->ncp->nc_name, nch->ncp->nc_nlen,
					ip);

	/*
	 * Finish up.
	 */
	if (error == 0) {
		cache_setunresolved(nch);
		cache_setvp(nch, ap->a_vp);
	}
	hammer_done_transaction(&trans);
	hammer_knote(ap->a_vp, NOTE_LINK);
	hammer_knote(ap->a_dvp, NOTE_WRITE);
	return (error);
}
/*
 * hammer_vop_nmkdir { nch, dvp, vpp, cred, vap }
 *
 * The operating system has already ensured that the directory entry
 * does not exist and done all appropriate namespace locking.
 */
static
int
hammer_vop_nmkdir(struct vop_nmkdir_args *ap)
{
	struct hammer_transaction trans;
	struct hammer_inode *dip;
	struct hammer_inode *nip;
	struct nchandle *nch;
	int error;

	nch = ap->a_nch;
	dip = VTOI(ap->a_dvp);

	if (dip->flags & HAMMER_INODE_RO)
		return (EROFS);
	if ((error = hammer_checkspace(dip->hmp, HAMMER_CHKSPC_CREATE)) != 0)
		return (error);

	/*
	 * Create a transaction to cover the operations we perform.
	 */
	hammer_start_transaction(&trans, dip->hmp);
	++hammer_stats_file_iopsw;

	/*
	 * Create a new filesystem object of the requested type.  The
	 * returned inode will be referenced but not locked.
	 */
	error = hammer_create_inode(&trans, ap->a_vap, ap->a_cred,
				    dip, nch->ncp->nc_name, nch->ncp->nc_nlen,
				    NULL, &nip);
	if (error) {
		hkprintf("hammer_mkdir error %d\n", error);
		hammer_done_transaction(&trans);
		*ap->a_vpp = NULL;
		return (error);
	}
	/*
	 * Add the new filesystem object to the directory.  This will also
	 * bump the inode's link count.
	 */
	error = hammer_ip_add_directory(&trans, dip,
					nch->ncp->nc_name, nch->ncp->nc_nlen,
					nip);
	if (error)
		hkprintf("hammer_mkdir (add) error %d\n", error);

	/*
	 * Finish up.
	 */
	if (error) {
		hammer_rel_inode(nip, 0);
		*ap->a_vpp = NULL;
	} else {
		error = hammer_get_vnode(nip, ap->a_vpp);
		hammer_rel_inode(nip, 0);
		if (error == 0) {
			cache_setunresolved(ap->a_nch);
			cache_setvp(ap->a_nch, *ap->a_vpp);
		}
	}
	hammer_done_transaction(&trans);
	if (error == 0)
		hammer_knote(ap->a_dvp, NOTE_WRITE | NOTE_LINK);
	return (error);
}
/*
 * hammer_vop_nmknod { nch, dvp, vpp, cred, vap }
 *
 * The operating system has already ensured that the directory entry
 * does not exist and done all appropriate namespace locking.
 */
static
int
hammer_vop_nmknod(struct vop_nmknod_args *ap)
{
	struct hammer_transaction trans;
	struct hammer_inode *dip;
	struct hammer_inode *nip;
	struct nchandle *nch;
	int error;

	nch = ap->a_nch;
	dip = VTOI(ap->a_dvp);

	if (dip->flags & HAMMER_INODE_RO)
		return (EROFS);
	if ((error = hammer_checkspace(dip->hmp, HAMMER_CHKSPC_CREATE)) != 0)
		return (error);

	/*
	 * Create a transaction to cover the operations we perform.
	 */
	hammer_start_transaction(&trans, dip->hmp);
	++hammer_stats_file_iopsw;

	/*
	 * Create a new filesystem object of the requested type.  The
	 * returned inode will be referenced but not locked.
	 *
	 * If mknod specifies a directory a pseudo-fs is created.
	 */
	error = hammer_create_inode(&trans, ap->a_vap, ap->a_cred,
				    dip, nch->ncp->nc_name, nch->ncp->nc_nlen,
				    NULL, &nip);
	if (error) {
		hammer_done_transaction(&trans);
		*ap->a_vpp = NULL;
		return (error);
	}

	/*
	 * Add the new filesystem object to the directory.  This will also
	 * bump the inode's link count.
	 */
	error = hammer_ip_add_directory(&trans, dip,
					nch->ncp->nc_name, nch->ncp->nc_nlen,
					nip);

	/*
	 * Finish up.
	 */
	if (error) {
		hammer_rel_inode(nip, 0);
		*ap->a_vpp = NULL;
	} else {
		error = hammer_get_vnode(nip, ap->a_vpp);
		hammer_rel_inode(nip, 0);
		if (error == 0) {
			cache_setunresolved(ap->a_nch);
			cache_setvp(ap->a_nch, *ap->a_vpp);
		}
	}
	hammer_done_transaction(&trans);
	if (error == 0)
		hammer_knote(ap->a_dvp, NOTE_WRITE);
	return (error);
}
/*
 * hammer_vop_open { vp, mode, cred, fp }
 */
static
int
hammer_vop_open(struct vop_open_args *ap)
{
	hammer_inode_t ip;

	++hammer_stats_file_iopsr;
	ip = VTOI(ap->a_vp);

	if ((ap->a_mode & FWRITE) && (ip->flags & HAMMER_INODE_RO))
		return (EROFS);
	return(vop_stdopen(ap));
}
/*
 * hammer_vop_print { vp }
 */
static
int
hammer_vop_print(struct vop_print_args *ap)
{
	return EOPNOTSUPP;
}
/*
 * hammer_vop_readdir { vp, uio, cred, *eofflag, *ncookies, off_t **cookies }
 */
static
int
hammer_vop_readdir(struct vop_readdir_args *ap)
{
	struct hammer_transaction trans;
	struct hammer_cursor cursor;
	struct hammer_inode *ip;
	struct uio *uio;
	hammer_base_elm_t base;
	int error;
	int cookie_index;
	int ncookies;
	off_t *cookies;
	off_t saveoff;
	int r;
	int dtype;

	++hammer_stats_file_iopsr;
	ip = VTOI(ap->a_vp);
	uio = ap->a_uio;
	saveoff = uio->uio_offset;

	if (ap->a_ncookies) {
		ncookies = uio->uio_resid / 16 + 1;
		if (ncookies > 1024)
			ncookies = 1024;
		cookies = kmalloc(ncookies * sizeof(off_t), M_TEMP, M_WAITOK);
		cookie_index = 0;
	} else {
		ncookies = -1;
		cookies = NULL;
		cookie_index = 0;
	}

	hammer_simple_transaction(&trans, ip->hmp);

	/*
	 * Handle artificial entries
	 *
	 * It should be noted that the minimum value for a directory
	 * hash key on-media is 0x0000000100000000, so we can use anything
	 * less than that to represent our 'special' key space.
	 */
	error = 0;
	if (saveoff == 0) {
		r = vop_write_dirent(&error, uio, ip->obj_id, DT_DIR, 1, ".");
		if (r)
			goto done;
		if (cookies)
			cookies[cookie_index] = saveoff;
		++saveoff;
		++cookie_index;
		if (cookie_index == ncookies)
			goto done;
	}
	if (saveoff == 1) {
		if (ip->ino_data.parent_obj_id) {
			r = vop_write_dirent(&error, uio,
					     ip->ino_data.parent_obj_id,
					     DT_DIR, 2, "..");
		} else {
			r = vop_write_dirent(&error, uio,
					     ip->obj_id, DT_DIR, 2, "..");
		}
		if (r)
			goto done;
		if (cookies)
			cookies[cookie_index] = saveoff;
		++saveoff;
		++cookie_index;
		if (cookie_index == ncookies)
			goto done;
	}

	/*
	 * Key range (begin and end inclusive) to scan.  Directory keys
	 * directly translate to a 64 bit 'seek' position.
	 */
	hammer_init_cursor(&trans, &cursor, &ip->cache[1], ip);
	cursor.key_beg.localization = ip->obj_localization +
				      hammer_dir_localization(ip);
	cursor.key_beg.obj_id = ip->obj_id;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
	cursor.key_beg.obj_type = 0;
	cursor.key_beg.key = saveoff;

	cursor.key_end = cursor.key_beg;
	cursor.key_end.key = HAMMER_MAX_KEY;
	cursor.asof = ip->obj_asof;
	cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;

	error = hammer_ip_first(&cursor);

	while (error == 0) {
		error = hammer_ip_resolve_data(&cursor);
		if (error)
			break;
		base = &cursor.leaf->base;
		saveoff = base->key;
		KKASSERT(cursor.leaf->data_len > HAMMER_ENTRY_NAME_OFF);

		if (base->obj_id != ip->obj_id)
			panic("readdir: bad record at %p", cursor.node);

		/*
		 * Convert pseudo-filesystems into softlinks
		 */
		dtype = hammer_get_dtype(cursor.leaf->base.obj_type);
		r = vop_write_dirent(
			     &error, uio, cursor.data->entry.obj_id,
			     dtype,
			     cursor.leaf->data_len - HAMMER_ENTRY_NAME_OFF,
			     (void *)cursor.data->entry.name);
		if (r)
			break;
		++saveoff;
		if (cookies)
			cookies[cookie_index] = base->key;
		++cookie_index;
		if (cookie_index == ncookies)
			break;
		error = hammer_ip_next(&cursor);
	}
	hammer_done_cursor(&cursor);

done:
	hammer_done_transaction(&trans);

	if (ap->a_eofflag)
		*ap->a_eofflag = (error == ENOENT);
	uio->uio_offset = saveoff;
	if (error && cookie_index == 0) {
		if (error == ENOENT)
			error = 0;
		if (cookies) {
			kfree(cookies, M_TEMP);
			*ap->a_ncookies = 0;
			*ap->a_cookies = NULL;
		}
	} else {
		if (error == ENOENT)
			error = 0;
		if (cookies) {
			*ap->a_ncookies = cookie_index;
			*ap->a_cookies = cookies;
		}
	}
	return (error);
}
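/*
 * Note: because directory hash keys are 64 bit 'seek' positions, the
 * cookies handed back above are simply the B-Tree keys of the entries.
 * Offsets 0 and 1 are reserved for the artificial "." and ".." entries,
 * which is safe since (per the comment above) the minimum on-media
 * directory key is 0x0000000100000000.
 */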
/*
 * hammer_vop_readlink { vp, uio, cred }
 */
static
int
hammer_vop_readlink(struct vop_readlink_args *ap)
{
	struct hammer_transaction trans;
	struct hammer_cursor cursor;
	struct hammer_inode *ip;
	char buf[32];
	u_int32_t localization;
	hammer_pseudofs_inmem_t pfsm;
	int error;

	ip = VTOI(ap->a_vp);

	/*
	 * Shortcut if the symlink data was stuffed into ino_data.
	 *
	 * Also expand special "@@PFS%05d" softlinks (expansion only
	 * occurs for non-historical (current) accesses made from the
	 * primary filesystem).
	 */
	if (ip->ino_data.size <= HAMMER_INODE_BASESYMLEN) {
		char *ptr;
		int bytes;

		ptr = ip->ino_data.ext.symlink;
		bytes = (int)ip->ino_data.size;
		if (bytes == 10 &&
		    ip->obj_asof == HAMMER_MAX_TID &&
		    ip->obj_localization == 0 &&
		    strncmp(ptr, "@@PFS", 5) == 0) {
			hammer_simple_transaction(&trans, ip->hmp);
			bcopy(ptr + 5, buf, 5);
			buf[5] = 0;
			localization = strtoul(buf, NULL, 10) << 16;
			pfsm = hammer_load_pseudofs(&trans, localization,
						    &error);
			if (error == 0) {
				if (pfsm->pfsd.mirror_flags &
				    HAMMER_PFSD_SLAVE) {
					/* vap->va_size == 26 */
					ksnprintf(buf, sizeof(buf),
						  "@@0x%016llx:%05d",
						  (long long)pfsm->pfsd.sync_end_tid,
						  localization >> 16);
				} else {
					/* vap->va_size == 10 */
					ksnprintf(buf, sizeof(buf),
						  "@@-1:%05d",
						  localization >> 16);
#if 0
					ksnprintf(buf, sizeof(buf),
						  "@@0x%016llx:%05d",
						  (long long)HAMMER_MAX_TID,
						  localization >> 16);
#endif
				}
				ptr = buf;
				bytes = strlen(buf);
			}
			if (pfsm)
				hammer_rel_pseudofs(trans.hmp, pfsm);
			hammer_done_transaction(&trans);
		}
		error = uiomove(ptr, bytes, ap->a_uio);
		return(error);
	}

	/*
	 * Long version
	 */
	hammer_simple_transaction(&trans, ip->hmp);
	++hammer_stats_file_iopsr;
	hammer_init_cursor(&trans, &cursor, &ip->cache[1], ip);

	/*
	 * Key range (begin and end inclusive) to scan.  Directory keys
	 * directly translate to a 64 bit 'seek' position.
	 */
	cursor.key_beg.localization = ip->obj_localization +
				      HAMMER_LOCALIZE_MISC;
	cursor.key_beg.obj_id = ip->obj_id;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_FIX;
	cursor.key_beg.obj_type = 0;
	cursor.key_beg.key = HAMMER_FIXKEY_SYMLINK;
	cursor.asof = ip->obj_asof;
	cursor.flags |= HAMMER_CURSOR_ASOF;

	error = hammer_ip_lookup(&cursor);
	if (error == 0) {
		error = hammer_ip_resolve_data(&cursor);
		if (error == 0) {
			KKASSERT(cursor.leaf->data_len >=
				 HAMMER_SYMLINK_NAME_OFF);
			error = uiomove(cursor.data->symlink.name,
					cursor.leaf->data_len -
						HAMMER_SYMLINK_NAME_OFF,
					ap->a_uio);
		}
	}
	hammer_done_cursor(&cursor);
	hammer_done_transaction(&trans);
	return (error);
}
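/*
 * Illustration: a PFS softlink is stored as the 10-byte literal
 * "@@PFS%05d" and expanded at readlink() time.  On a master PFS the
 * link expands to a live "@@-1:%05d" name, while a slave expands to a
 * fixed "@@0x<sync_end_tid>:%05d" transaction id, pinning readers of
 * the slave to its last synchronized state.  The expanded name is then
 * resolved through the '@@' handling in hammer_vop_nresolve().
 */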
/*
 * hammer_vop_nremove { nch, dvp, cred }
 */
static
int
hammer_vop_nremove(struct vop_nremove_args *ap)
{
	struct hammer_transaction trans;
	struct hammer_inode *dip;
	int error;

	dip = VTOI(ap->a_dvp);

	if (hammer_nohistory(dip) == 0 &&
	    (error = hammer_checkspace(dip->hmp, HAMMER_CHKSPC_REMOVE)) != 0) {
		return (error);
	}

	hammer_start_transaction(&trans, dip->hmp);
	++hammer_stats_file_iopsw;
	error = hammer_dounlink(&trans, ap->a_nch, ap->a_dvp, ap->a_cred, 0, 0);
	hammer_done_transaction(&trans);
	if (error == 0)
		hammer_knote(ap->a_dvp, NOTE_WRITE);
	return (error);
}
/*
 * hammer_vop_nrename { fnch, tnch, fdvp, tdvp, cred }
 */
static
int
hammer_vop_nrename(struct vop_nrename_args *ap)
{
	struct hammer_transaction trans;
	struct namecache *fncp;
	struct namecache *tncp;
	struct hammer_inode *fdip;
	struct hammer_inode *tdip;
	struct hammer_inode *ip;
	struct hammer_cursor cursor;
	int64_t namekey;
	u_int32_t max_iterations;
	int nlen;
	int error;

	if (ap->a_fdvp->v_mount != ap->a_tdvp->v_mount)
		return(EXDEV);
	if (ap->a_fdvp->v_mount != ap->a_fnch->ncp->nc_vp->v_mount)
		return(EXDEV);

	fdip = VTOI(ap->a_fdvp);
	tdip = VTOI(ap->a_tdvp);
	fncp = ap->a_fnch->ncp;
	tncp = ap->a_tnch->ncp;
	ip = VTOI(fncp->nc_vp);
	KKASSERT(ip != NULL);

	if (fdip->obj_localization != tdip->obj_localization)
		return(EXDEV);
	if (fdip->obj_localization != ip->obj_localization)
		return(EXDEV);

	if (fdip->flags & HAMMER_INODE_RO)
		return (EROFS);
	if (tdip->flags & HAMMER_INODE_RO)
		return (EROFS);
	if (ip->flags & HAMMER_INODE_RO)
		return (EROFS);
	if ((error = hammer_checkspace(fdip->hmp, HAMMER_CHKSPC_CREATE)) != 0)
		return (error);

	hammer_start_transaction(&trans, fdip->hmp);
	++hammer_stats_file_iopsw;

	/*
	 * Remove tncp from the target directory and then link ip as
	 * tncp. XXX pass trans to dounlink
	 *
	 * Force the inode sync-time to match the transaction so it is
	 * in-sync with the creation of the target directory entry.
	 */
	error = hammer_dounlink(&trans, ap->a_tnch, ap->a_tdvp,
				ap->a_cred, 0, -1);
	if (error == 0 || error == ENOENT) {
		error = hammer_ip_add_directory(&trans, tdip,
						tncp->nc_name, tncp->nc_nlen,
						ip);
		if (error == 0) {
			ip->ino_data.parent_obj_id = tdip->obj_id;
			ip->ino_data.ctime = trans.time;
			hammer_modify_inode(ip, HAMMER_INODE_DDIRTY);
		}
	}
	if (error)
		goto failed; /* XXX */

	/*
	 * Locate the record in the originating directory and remove it.
	 *
	 * Calculate the namekey and setup the key range for the scan.  This
	 * works kinda like a chained hash table where the lower 32 bits
	 * of the namekey synthesize the chain.
	 *
	 * The key range is inclusive of both key_beg and key_end.
	 */
	namekey = hammer_directory_namekey(fdip, fncp->nc_name, fncp->nc_nlen,
					   &max_iterations);
retry:
	hammer_init_cursor(&trans, &cursor, &fdip->cache[1], fdip);
	cursor.key_beg.localization = fdip->obj_localization +
				      hammer_dir_localization(fdip);
	cursor.key_beg.obj_id = fdip->obj_id;
	cursor.key_beg.key = namekey;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
	cursor.key_beg.obj_type = 0;

	cursor.key_end = cursor.key_beg;
	cursor.key_end.key += max_iterations;
	cursor.asof = fdip->obj_asof;
	cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;

	/*
	 * Scan all matching records (the chain), locate the one matching
	 * the requested path component.
	 *
	 * The hammer_ip_*() functions merge in-memory records with on-disk
	 * records for the purposes of the search.
	 */
	error = hammer_ip_first(&cursor);
	while (error == 0) {
		if (hammer_ip_resolve_data(&cursor) != 0)
			break;
		nlen = cursor.leaf->data_len - HAMMER_ENTRY_NAME_OFF;
		KKASSERT(nlen > 0);
		if (fncp->nc_nlen == nlen &&
		    bcmp(fncp->nc_name, cursor.data->entry.name, nlen) == 0) {
			break;
		}
		error = hammer_ip_next(&cursor);
	}

	/*
	 * If all is ok we have to get the inode so we can adjust nlinks.
	 *
	 * WARNING: hammer_ip_del_directory() may have to terminate the
	 * cursor to avoid a recursion.  It's ok to call hammer_done_cursor()
	 * twice.
	 */
	if (error == 0)
		error = hammer_ip_del_directory(&trans, &cursor, fdip, ip);

	/*
	 * XXX A deadlock here will break rename's atomicity for the purposes
	 * of crash recovery.
	 */
	if (error == EDEADLK) {
		hammer_done_cursor(&cursor);
		goto retry;
	}

	/*
	 * Cleanup and tell the kernel that the rename succeeded.
	 */
	hammer_done_cursor(&cursor);
	if (error == 0) {
		cache_rename(ap->a_fnch, ap->a_tnch);
		hammer_knote(ap->a_fdvp, NOTE_WRITE);
		hammer_knote(ap->a_tdvp, NOTE_WRITE);
		if (ip->vp)
			hammer_knote(ip->vp, NOTE_RENAME);
	}

failed:
	hammer_done_transaction(&trans);
	return (error);
}
/*
 * hammer_vop_nrmdir { nch, dvp, cred }
 */
static
int
hammer_vop_nrmdir(struct vop_nrmdir_args *ap)
{
	struct hammer_transaction trans;
	struct hammer_inode *dip;
	int error;

	dip = VTOI(ap->a_dvp);

	if (hammer_nohistory(dip) == 0 &&
	    (error = hammer_checkspace(dip->hmp, HAMMER_CHKSPC_REMOVE)) != 0) {
		return (error);
	}

	hammer_start_transaction(&trans, dip->hmp);
	++hammer_stats_file_iopsw;
	error = hammer_dounlink(&trans, ap->a_nch, ap->a_dvp, ap->a_cred, 0, 1);
	hammer_done_transaction(&trans);
	if (error == 0)
		hammer_knote(ap->a_dvp, NOTE_WRITE | NOTE_LINK);
	return (error);
}
/*
 * hammer_vop_markatime { vp, cred }
 */
static
int
hammer_vop_markatime(struct vop_markatime_args *ap)
{
	struct hammer_transaction trans;
	struct hammer_inode *ip;

	ip = VTOI(ap->a_vp);
	if (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY)
		return (EROFS);
	if (ip->flags & HAMMER_INODE_RO)
		return (EROFS);
	if (ip->hmp->mp->mnt_flag & MNT_NOATIME)
		return (0);
	hammer_start_transaction(&trans, ip->hmp);
	++hammer_stats_file_iopsw;

	ip->ino_data.atime = trans.time;
	hammer_modify_inode(ip, HAMMER_INODE_ATIME);
	hammer_done_transaction(&trans);
	hammer_knote(ap->a_vp, NOTE_ATTRIB);
	return (0);
}
/*
 * hammer_vop_setattr { vp, vap, cred }
 */
static
int
hammer_vop_setattr(struct vop_setattr_args *ap)
{
	struct hammer_transaction trans;
	struct vattr *vap;
	struct hammer_inode *ip;
	int modflags;
	int error;
	int truncating;
	int blksize;
	int kflags;
	int64_t aligned_size;
	u_int32_t flags;

	vap = ap->a_vap;
	ip = ap->a_vp->v_data;
	modflags = 0;
	kflags = 0;

	if (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY)
		return(EROFS);
	if (ip->flags & HAMMER_INODE_RO)
		return (EROFS);
	if (hammer_nohistory(ip) == 0 &&
	    (error = hammer_checkspace(ip->hmp, HAMMER_CHKSPC_REMOVE)) != 0) {
		return (error);
	}

	hammer_start_transaction(&trans, ip->hmp);
	++hammer_stats_file_iopsw;
	error = 0;

	if (vap->va_flags != VNOVAL) {
		flags = ip->ino_data.uflags;
		error = vop_helper_setattr_flags(&flags, vap->va_flags,
					 hammer_to_unix_xid(&ip->ino_data.uid),
					 ap->a_cred);
		if (error == 0) {
			if (ip->ino_data.uflags != flags) {
				ip->ino_data.uflags = flags;
				ip->ino_data.ctime = trans.time;
				modflags |= HAMMER_INODE_DDIRTY;
				kflags |= NOTE_ATTRIB;
			}
			if (ip->ino_data.uflags & (IMMUTABLE | APPEND)) {
				error = 0;
				goto done;
			}
		}
		goto done;
	}
	if (ip->ino_data.uflags & (IMMUTABLE | APPEND)) {
		error = EPERM;
		goto done;
	}
	if (vap->va_uid != (uid_t)VNOVAL || vap->va_gid != (gid_t)VNOVAL) {
		mode_t cur_mode = ip->ino_data.mode;
		uid_t cur_uid = hammer_to_unix_xid(&ip->ino_data.uid);
		gid_t cur_gid = hammer_to_unix_xid(&ip->ino_data.gid);
		uuid_t uuid_uid;
		uuid_t uuid_gid;

		error = vop_helper_chown(ap->a_vp, vap->va_uid, vap->va_gid,
					 ap->a_cred,
					 &cur_uid, &cur_gid, &cur_mode);
		if (error == 0) {
			hammer_guid_to_uuid(&uuid_uid, cur_uid);
			hammer_guid_to_uuid(&uuid_gid, cur_gid);
			if (bcmp(&uuid_uid, &ip->ino_data.uid,
				 sizeof(uuid_uid)) ||
			    bcmp(&uuid_gid, &ip->ino_data.gid,
				 sizeof(uuid_gid)) ||
			    ip->ino_data.mode != cur_mode
			) {
				ip->ino_data.uid = uuid_uid;
				ip->ino_data.gid = uuid_gid;
				ip->ino_data.mode = cur_mode;
				ip->ino_data.ctime = trans.time;
				modflags |= HAMMER_INODE_DDIRTY;
			}
			kflags |= NOTE_ATTRIB;
		}
	}
	while (vap->va_size != VNOVAL && ip->ino_data.size != vap->va_size) {
		switch(ap->a_vp->v_type) {
		case VREG:
			if (vap->va_size == ip->ino_data.size)
				break;

			/*
			 * XXX break atomicity, we can deadlock the backend
			 * if we do not release the lock.  Probably not a
			 * big deal here.
			 */
			blksize = hammer_blocksize(vap->va_size);
			if (vap->va_size < ip->ino_data.size) {
				vtruncbuf(ap->a_vp, vap->va_size, blksize);
				truncating = 1;
				kflags |= NOTE_WRITE;
			} else {
				vnode_pager_setsize(ap->a_vp, vap->va_size);
				truncating = 0;
				kflags |= NOTE_WRITE | NOTE_EXTEND;
			}
			ip->ino_data.size = vap->va_size;
			ip->ino_data.mtime = trans.time;
			modflags |= HAMMER_INODE_MTIME | HAMMER_INODE_DDIRTY;

			/*
			 * on-media truncation is cached in the inode until
			 * the inode is synchronized.
			 */
			if (truncating) {
				hammer_ip_frontend_trunc(ip, vap->va_size);
#ifdef DEBUG_TRUNCATE
				if (HammerTruncIp == NULL)
					HammerTruncIp = ip;
#endif
				if ((ip->flags & HAMMER_INODE_TRUNCATED) == 0) {
					ip->flags |= HAMMER_INODE_TRUNCATED;
					ip->trunc_off = vap->va_size;
#ifdef DEBUG_TRUNCATE
					if (ip == HammerTruncIp)
						kprintf("truncate1 %016llx\n",
						    (long long)ip->trunc_off);
#endif
				} else if (ip->trunc_off > vap->va_size) {
					ip->trunc_off = vap->va_size;
#ifdef DEBUG_TRUNCATE
					if (ip == HammerTruncIp)
						kprintf("truncate2 %016llx\n",
						    (long long)ip->trunc_off);
#endif
				} else {
#ifdef DEBUG_TRUNCATE
					if (ip == HammerTruncIp)
						kprintf("truncate3 %016llx (ignored)\n",
						    (long long)vap->va_size);
#endif
				}
			}

			/*
			 * If truncating we have to clean out a portion of
			 * the last block on-disk.  We do this in the
			 * front-end buffer cache.
			 */
			aligned_size = (vap->va_size + (blksize - 1)) &
				       ~(int64_t)(blksize - 1);
			if (truncating && vap->va_size < aligned_size) {
				struct buf *bp;
				int offset;

				aligned_size -= blksize;

				offset = (int)vap->va_size & (blksize - 1);
				error = bread(ap->a_vp, aligned_size,
					      blksize, &bp);
				hammer_ip_frontend_trunc(ip, aligned_size);
				if (error == 0) {
					bzero(bp->b_data + offset,
					      blksize - offset);
					/* must de-cache direct-io offset */
					bp->b_bio2.bio_offset = NOOFFSET;
					bdwrite(bp);
				} else {
					kprintf("ERROR %d\n", error);
					brelse(bp);
				}
			}
			break;
		case VDATABASE:
			if ((ip->flags & HAMMER_INODE_TRUNCATED) == 0) {
				ip->flags |= HAMMER_INODE_TRUNCATED;
				ip->trunc_off = vap->va_size;
			} else if (ip->trunc_off > vap->va_size) {
				ip->trunc_off = vap->va_size;
			}
			hammer_ip_frontend_trunc(ip, vap->va_size);
			ip->ino_data.size = vap->va_size;
			ip->ino_data.mtime = trans.time;
			modflags |= HAMMER_INODE_MTIME | HAMMER_INODE_DDIRTY;
			kflags |= NOTE_ATTRIB;
			break;
		default:
			error = EINVAL;
			goto done;
		}
		break;
	}
	if (vap->va_atime.tv_sec != VNOVAL) {
		ip->ino_data.atime = hammer_timespec_to_time(&vap->va_atime);
		modflags |= HAMMER_INODE_ATIME;
		kflags |= NOTE_ATTRIB;
	}
	if (vap->va_mtime.tv_sec != VNOVAL) {
		ip->ino_data.mtime = hammer_timespec_to_time(&vap->va_mtime);
		modflags |= HAMMER_INODE_MTIME;
		kflags |= NOTE_ATTRIB;
	}
	if (vap->va_mode != (mode_t)VNOVAL) {
		mode_t cur_mode = ip->ino_data.mode;
		uid_t cur_uid = hammer_to_unix_xid(&ip->ino_data.uid);
		gid_t cur_gid = hammer_to_unix_xid(&ip->ino_data.gid);

		error = vop_helper_chmod(ap->a_vp, vap->va_mode, ap->a_cred,
					 cur_uid, cur_gid, &cur_mode);
		if (error == 0 && ip->ino_data.mode != cur_mode) {
			ip->ino_data.mode = cur_mode;
			ip->ino_data.ctime = trans.time;
			modflags |= HAMMER_INODE_DDIRTY;
			kflags |= NOTE_ATTRIB;
		}
	}
done:
	if (error == 0)
		hammer_modify_inode(ip, modflags);
	hammer_done_transaction(&trans);
	hammer_knote(ap->a_vp, kflags);
	return (error);
}
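/*
 * Note on the truncation handling above: shrinking a file does not
 * synchronously prune B-Tree records.  The new size is cached in
 * ip->trunc_off (with HAMMER_INODE_TRUNCATED set), the front-end buffer
 * cache is trimmed, and the media-side truncation happens when the inode
 * is synchronized; the strategy read path compensates by clipping
 * records against the cached truncation point.
 */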
/*
 * hammer_vop_nsymlink { nch, dvp, vpp, cred, vap, target }
 */
static
int
hammer_vop_nsymlink(struct vop_nsymlink_args *ap)
{
	struct hammer_transaction trans;
	struct hammer_inode *dip;
	struct hammer_inode *nip;
	struct nchandle *nch;
	hammer_record_t record;
	int error;
	int bytes;

	ap->a_vap->va_type = VLNK;

	nch = ap->a_nch;
	dip = VTOI(ap->a_dvp);

	if (dip->flags & HAMMER_INODE_RO)
		return (EROFS);
	if ((error = hammer_checkspace(dip->hmp, HAMMER_CHKSPC_CREATE)) != 0)
		return (error);

	/*
	 * Create a transaction to cover the operations we perform.
	 */
	hammer_start_transaction(&trans, dip->hmp);
	++hammer_stats_file_iopsw;

	/*
	 * Create a new filesystem object of the requested type.  The
	 * returned inode will be referenced but not locked.
	 */
	error = hammer_create_inode(&trans, ap->a_vap, ap->a_cred,
				    dip, nch->ncp->nc_name, nch->ncp->nc_nlen,
				    NULL, &nip);
	if (error) {
		hammer_done_transaction(&trans);
		*ap->a_vpp = NULL;
		return (error);
	}

	/*
	 * Add a record representing the symlink.  symlink stores the link
	 * as pure data, not a string, and is not \0-terminated.
	 */
	if (error == 0) {
		bytes = strlen(ap->a_target);

		if (bytes <= HAMMER_INODE_BASESYMLEN) {
			bcopy(ap->a_target, nip->ino_data.ext.symlink, bytes);
		} else {
			record = hammer_alloc_mem_record(nip, bytes);
			record->type = HAMMER_MEM_RECORD_GENERAL;

			record->leaf.base.localization = nip->obj_localization +
							 HAMMER_LOCALIZE_MISC;
			record->leaf.base.key = HAMMER_FIXKEY_SYMLINK;
			record->leaf.base.rec_type = HAMMER_RECTYPE_FIX;
			record->leaf.data_len = bytes;
			KKASSERT(HAMMER_SYMLINK_NAME_OFF == 0);
			bcopy(ap->a_target, record->data->symlink.name, bytes);
			error = hammer_ip_add_record(&trans, record);
		}

		/*
		 * Set the file size to the length of the link.
		 */
		if (error == 0) {
			nip->ino_data.size = bytes;
			hammer_modify_inode(nip, HAMMER_INODE_DDIRTY);
		}
	}
	if (error == 0)
		error = hammer_ip_add_directory(&trans, dip, nch->ncp->nc_name,
						nch->ncp->nc_nlen, nip);

	/*
	 * Finish up.
	 */
	if (error) {
		hammer_rel_inode(nip, 0);
		*ap->a_vpp = NULL;
	} else {
		error = hammer_get_vnode(nip, ap->a_vpp);
		hammer_rel_inode(nip, 0);
		if (error == 0) {
			cache_setunresolved(ap->a_nch);
			cache_setvp(ap->a_nch, *ap->a_vpp);
			hammer_knote(ap->a_dvp, NOTE_WRITE);
		}
	}
	hammer_done_transaction(&trans);
	return (error);
}
/*
 * hammer_vop_nwhiteout { nch, dvp, cred, flags }
 */
static
int
hammer_vop_nwhiteout(struct vop_nwhiteout_args *ap)
{
	struct hammer_transaction trans;
	struct hammer_inode *dip;
	int error;

	dip = VTOI(ap->a_dvp);

	if (hammer_nohistory(dip) == 0 &&
	    (error = hammer_checkspace(dip->hmp, HAMMER_CHKSPC_CREATE)) != 0) {
		return (error);
	}

	hammer_start_transaction(&trans, dip->hmp);
	++hammer_stats_file_iopsw;
	error = hammer_dounlink(&trans, ap->a_nch, ap->a_dvp,
				ap->a_cred, ap->a_flags, -1);
	hammer_done_transaction(&trans);

	return (error);
}
/*
 * hammer_vop_ioctl { vp, command, data, fflag, cred }
 */
static
int
hammer_vop_ioctl(struct vop_ioctl_args *ap)
{
	struct hammer_inode *ip = ap->a_vp->v_data;

	++hammer_stats_file_iopsr;
	return(hammer_ioctl(ip, ap->a_command, ap->a_data,
			    ap->a_fflag, ap->a_cred));
}
static
int
hammer_vop_mountctl(struct vop_mountctl_args *ap)
{
	static const struct mountctl_opt extraopt[] = {
		{ HMNT_NOHISTORY,	"nohistory" },
		{ HMNT_MASTERID,	"master" },
		{ 0, NULL}
	};
	struct hammer_mount *hmp;
	struct mount *mp;
	int usedbytes;
	int error;

	error = 0;
	usedbytes = 0;
	mp = ap->a_head.a_ops->head.vv_mount;
	KKASSERT(mp->mnt_data != NULL);
	hmp = (struct hammer_mount *)mp->mnt_data;

	switch(ap->a_op) {
	case MOUNTCTL_SET_EXPORT:
		if (ap->a_ctllen != sizeof(struct export_args))
			error = EINVAL;
		else
			error = hammer_vfs_export(mp, ap->a_op,
				      (const struct export_args *)ap->a_ctl);
		break;
	case MOUNTCTL_MOUNTFLAGS:
	{
		/*
		 * Call standard mountctl VOP function
		 * so we get user mount flags.
		 */
		error = vop_stdmountctl(ap);
		if (error)
			break;

		usedbytes = *ap->a_res;

		if (usedbytes > 0 && usedbytes < ap->a_buflen) {
			usedbytes += vfs_flagstostr(hmp->hflags, extraopt, ap->a_buf,
						    ap->a_buflen - usedbytes,
						    &error);
		}

		*ap->a_res += usedbytes;
		break;
	}
	default:
		error = vop_stdmountctl(ap);
		break;
	}
	return(error);
}
/*
 * hammer_vop_strategy { vp, bio }
 *
 * Strategy call, used for regular file read & write only.  Note that the
 * bp may represent a cluster.
 *
 * To simplify operation and allow better optimizations in the future,
 * this code does not make any assumptions with regards to buffer alignment
 * or size.
 */
static
int
hammer_vop_strategy(struct vop_strategy_args *ap)
{
	struct buf *bp;
	int error;

	bp = ap->a_bio->bio_buf;

	switch(bp->b_cmd) {
	case BUF_CMD_READ:
		error = hammer_vop_strategy_read(ap);
		break;
	case BUF_CMD_WRITE:
		error = hammer_vop_strategy_write(ap);
		break;
	default:
		bp->b_error = error = EINVAL;
		bp->b_flags |= B_ERROR;
		biodone(ap->a_bio);
		break;
	}
	return (error);
}
/*
 * Read from a regular file.  Iterate the related records and fill in the
 * BIO/BUF.  Gaps are zero-filled.
 *
 * The support code in hammer_object.c should be used to deal with mixed
 * in-memory and on-disk records.
 *
 * NOTE: Can be called from the cluster code with an oversized buf.
 *
 * XXX atime update
 */
static
int
hammer_vop_strategy_read(struct vop_strategy_args *ap)
{
	struct hammer_transaction trans;
	struct hammer_inode *ip;
	struct hammer_inode *dip;
	struct hammer_cursor cursor;
	hammer_base_elm_t base;
	hammer_off_t disk_offset;
	struct bio *bio;
	struct bio *nbio;
	struct buf *bp;
	int64_t rec_offset;
	int64_t ran_end;
	int64_t tmp64;
	int error;
	int boff;
	int roff;
	int n;

	bio = ap->a_bio;
	bp = bio->bio_buf;
	ip = ap->a_vp->v_data;

	/*
	 * The zone-2 disk offset may have been set by the cluster code via
	 * a BMAP operation, or else should be NOOFFSET.
	 *
	 * Checking the high bits for a match against zone-2 should suffice.
	 */
	nbio = push_bio(bio);
	if ((nbio->bio_offset & HAMMER_OFF_ZONE_MASK) ==
	    HAMMER_ZONE_LARGE_DATA) {
		error = hammer_io_direct_read(ip->hmp, nbio, NULL);
		return (error);
	}
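
	/*
	 * The fast path above relies on the offset encoding: the high bits
	 * of a HAMMER offset identify its zone, so masking with
	 * HAMMER_OFF_ZONE_MASK and comparing against HAMMER_ZONE_LARGE_DATA
	 * distinguishes a translated large-data address (left behind by a
	 * prior hammer_vop_bmap() call) from NOOFFSET or an untranslated
	 * logical offset, without consulting the B-Tree at all.
	 */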
	/*
	 * Well, that sucked.  Do it the hard way.  If all the stars are
	 * aligned we may still be able to issue a direct-read.
	 */
	hammer_simple_transaction(&trans, ip->hmp);
	hammer_init_cursor(&trans, &cursor, &ip->cache[1], ip);

	/*
	 * Key range (begin and end inclusive) to scan.  Note that the keys
	 * stored in the actual records represent BASE+LEN, not BASE.  The
	 * first record containing bio_offset will have a key > bio_offset.
	 */
	cursor.key_beg.localization = ip->obj_localization +
				      HAMMER_LOCALIZE_MISC;
	cursor.key_beg.obj_id = ip->obj_id;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.obj_type = 0;
	cursor.key_beg.key = bio->bio_offset + 1;
	cursor.asof = ip->obj_asof;
	cursor.flags |= HAMMER_CURSOR_ASOF;

	cursor.key_end = cursor.key_beg;
	KKASSERT(ip->ino_data.obj_type == HAMMER_OBJTYPE_REGFILE);
#if 0
	if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DBFILE) {
		cursor.key_beg.rec_type = HAMMER_RECTYPE_DB;
		cursor.key_end.rec_type = HAMMER_RECTYPE_DB;
		cursor.key_end.key = 0x7FFFFFFFFFFFFFFFLL;
	} else
#endif
	{
		ran_end = bio->bio_offset + bp->b_bufsize;
		cursor.key_beg.rec_type = HAMMER_RECTYPE_DATA;
		cursor.key_end.rec_type = HAMMER_RECTYPE_DATA;
		tmp64 = ran_end + MAXPHYS + 1;	/* work-around GCC-4 bug */
		if (tmp64 < ran_end)
			cursor.key_end.key = 0x7FFFFFFFFFFFFFFFLL;
		else
			cursor.key_end.key = ran_end + MAXPHYS + 1;
	}
	cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE;
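
	/*
	 * Worked example of the BASE+LEN keying above: a data record
	 * covering file offsets [0, 16384) carries key 16384.  A read
	 * starting at bio_offset 0 therefore begins its scan at
	 * key_beg.key = 1, which matches that record's key of 16384.
	 * Searching from bio_offset itself would also match a record that
	 * ends exactly at bio_offset and contributes no bytes, hence
	 * the +1.
	 */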
	error = hammer_ip_first(&cursor);
	boff = 0;

	while (error == 0) {
		/*
		 * Get the base file offset of the record.  The key for
		 * data records is (base + bytes) rather than (base).
		 */
		base = &cursor.leaf->base;
		rec_offset = base->key - cursor.leaf->data_len;

		/*
		 * Calculate the gap, if any, and zero-fill it.
		 *
		 * n is the offset of the start of the record versus our
		 * current seek offset in the bio.
		 */
		n = (int)(rec_offset - (bio->bio_offset + boff));
		if (n > 0) {
			if (n > bp->b_bufsize - boff)
				n = bp->b_bufsize - boff;
			bzero((char *)bp->b_data + boff, n);
			boff += n;
			n = 0;
		}

		/*
		 * Calculate the data offset in the record and the number
		 * of bytes we can copy.
		 *
		 * There are two degenerate cases.  First, boff may already
		 * be at bp->b_bufsize.  Secondly, the data offset within
		 * the record may exceed the record's size.
		 */
		roff = -n;
		rec_offset += roff;
		n = cursor.leaf->data_len - roff;
		if (n <= 0) {
			kprintf("strategy_read: bad n=%d roff=%d\n", n, roff);
			n = 0;
		} else if (n > bp->b_bufsize - boff) {
			n = bp->b_bufsize - boff;
		}
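
		/*
		 * Worked example of the roff/n computation: if the record
		 * covers file offsets [0, 16384) and the bio wants data
		 * starting at offset 4096, the gap computation leaves
		 * n = -4096, so roff = 4096.  The copy source then begins
		 * 4096 bytes into the record and n = 16384 - 4096 = 12288
		 * bytes are available, clipped above to whatever space
		 * remains in the buffer.
		 */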
		/*
		 * Deal with cached truncations.  This cool bit of code
		 * allows truncate()/ftruncate() to avoid having to sync
		 * the file.
		 *
		 * If the frontend is truncated then all backend records are
		 * subject to the frontend's truncation.
		 *
		 * If the backend is truncated then backend records on-disk
		 * (but not in-memory) are subject to the backend's
		 * truncation.  In-memory records owned by the backend
		 * represent data written after the truncation point on the
		 * backend and must not be truncated.
		 *
		 * Truncate operations deal with frontend buffer cache
		 * buffers and frontend-owned in-memory records synchronously.
		 */
		if (ip->flags & HAMMER_INODE_TRUNCATED) {
			if (hammer_cursor_ondisk(&cursor) ||
			    cursor.iprec->flush_state == HAMMER_FST_FLUSH) {
				if (ip->trunc_off <= rec_offset)
					n = 0;
				else if (ip->trunc_off < rec_offset + n)
					n = (int)(ip->trunc_off - rec_offset);
			}
		}
		if (ip->sync_flags & HAMMER_INODE_TRUNCATED) {
			if (hammer_cursor_ondisk(&cursor)) {
				if (ip->sync_trunc_off <= rec_offset)
					n = 0;
				else if (ip->sync_trunc_off < rec_offset + n)
					n = (int)(ip->sync_trunc_off - rec_offset);
			}
		}

		/*
		 * Try to issue a direct read into our bio if possible,
		 * otherwise resolve the element data into a hammer_buffer
		 * and copy.
		 *
		 * The buffer on-disk should be zeroed past any real
		 * truncation point, but may not be for any synthesized
		 * truncation point from above.
		 */
		disk_offset = cursor.leaf->data_offset + roff;
		if (boff == 0 && n == bp->b_bufsize &&
		    hammer_cursor_ondisk(&cursor) &&
		    (disk_offset & HAMMER_BUFMASK) == 0) {
			KKASSERT((disk_offset & HAMMER_OFF_ZONE_MASK) ==
				 HAMMER_ZONE_LARGE_DATA);
			nbio->bio_offset = disk_offset;
			error = hammer_io_direct_read(trans.hmp, nbio,
						      cursor.leaf);
			goto done;
		} else if (n) {
			error = hammer_ip_resolve_data(&cursor);
			if (error == 0) {
				bcopy((char *)cursor.data + roff,
				      (char *)bp->b_data + boff, n);
			}
		}
		if (error)
			break;

		/*
		 * Iterate until we have filled the request.
		 */
		boff += n;
		if (boff == bp->b_bufsize)
			break;
		error = hammer_ip_next(&cursor);
	}

	/*
	 * There may have been a gap after the last record.
	 */
	if (error == ENOENT)
		error = 0;
	if (error == 0 && boff != bp->b_bufsize) {
		KKASSERT(boff < bp->b_bufsize);
		bzero((char *)bp->b_data + boff, bp->b_bufsize - boff);
		/* boff = bp->b_bufsize; */
	}
	bp->b_resid = 0;
	bp->b_error = error;
	if (error)
		bp->b_flags |= B_ERROR;
	biodone(ap->a_bio);

done:
	/*
	 * Cache the B-Tree node for the last data read in cache[1].
	 *
	 * If we hit the file EOF then also cache the node in the
	 * governing directory's cache[3], it will be used to initialize
	 * the inode's cache[1] for any inodes looked up via the directory.
	 *
	 * This doesn't reduce disk accesses since the B-Tree chain is
	 * likely cached, but it does reduce cpu overhead when looking
	 * up file offsets for cpdup/tar/cpio style iterations.
	 */
	if (cursor.node)
		hammer_cache_node(&ip->cache[1], cursor.node);
	if (ran_end >= ip->ino_data.size) {
		dip = hammer_find_inode(&trans, ip->ino_data.parent_obj_id,
					ip->obj_asof, ip->obj_localization);
		if (dip) {
			hammer_cache_node(&dip->cache[3], cursor.node);
			hammer_rel_inode(dip, 0);
		}
	}
	hammer_done_cursor(&cursor);
	hammer_done_transaction(&trans);
	return(error);
}
/*
 * BMAP operation - used to support cluster_read() only.
 *
 * (struct vnode *vp, off_t loffset, off_t *doffsetp, int *runp, int *runb)
 *
 * This routine may return EOPNOTSUPP if the operation is not supported for
 * the specified offset.  The contents of the pointer arguments do not
 * need to be initialized in that case.
 *
 * If a disk address is available and properly aligned return 0 with
 * *doffsetp set to the zone-2 address, and *runp / *runb set appropriately
 * to the run-length relative to that offset.  Callers may assume that
 * *doffsetp is valid if 0 is returned, even if *runp is not sufficiently
 * large, so return EOPNOTSUPP if it is not sufficiently large.
 */
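
/*
 * Illustrative sketch of the caller's side, following the argument list
 * in the comment above plus the buf_cmd_t argument the implementation
 * checks (consult sys/vnode.h for the exact VOP_BMAP macro; error
 * handling omitted):
 *
 *	off_t doffset;
 *	int runp, runb;
 *
 *	if (VOP_BMAP(vp, loffset, &doffset, &runp, &runb,
 *		     BUF_CMD_READ) == 0) {
 *		// doffset is the zone-2 address backing loffset, and
 *		// [loffset - runb, loffset + runp) is contiguous on-media
 *		// and eligible for clustered / direct I/O.
 *	}
 */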
static
int
hammer_vop_bmap(struct vop_bmap_args *ap)
{
	struct hammer_transaction trans;
	struct hammer_inode *ip;
	struct hammer_cursor cursor;
	hammer_base_elm_t base;
	int64_t rec_offset;
	int64_t ran_end;
	int64_t tmp64;
	int64_t base_offset;
	int64_t base_disk_offset;
	int64_t last_offset;
	hammer_off_t last_disk_offset;
	hammer_off_t disk_offset;
	int rec_len;
	int error;
	int blksize;

	++hammer_stats_file_iopsr;
	ip = ap->a_vp->v_data;

	/*
	 * We can only BMAP regular files.  We can't BMAP database files.
	 */
	if (ip->ino_data.obj_type != HAMMER_OBJTYPE_REGFILE)
		return(EOPNOTSUPP);

	/*
	 * bmap is typically called with runp/runb both NULL when used
	 * for writing.  We do not support BMAP for writing atm.
	 */
	if (ap->a_cmd != BUF_CMD_READ)
		return(EOPNOTSUPP);

	/*
	 * Scan the B-Tree to acquire blockmap addresses, then translate
	 * to raw addresses.
	 */
	hammer_simple_transaction(&trans, ip->hmp);
#if 0
	kprintf("bmap_beg %016llx ip->cache %p\n",
		(long long)ap->a_loffset, ip->cache[1]);
#endif
	hammer_init_cursor(&trans, &cursor, &ip->cache[1], ip);

	/*
	 * Key range (begin and end inclusive) to scan.  Note that the keys
	 * stored in the actual records represent BASE+LEN, not BASE.  The
	 * first record containing bio_offset will have a key > bio_offset.
	 */
	cursor.key_beg.localization = ip->obj_localization +
				      HAMMER_LOCALIZE_MISC;
	cursor.key_beg.obj_id = ip->obj_id;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.obj_type = 0;
	if (ap->a_runb)
		cursor.key_beg.key = ap->a_loffset - MAXPHYS + 1;
	else
		cursor.key_beg.key = ap->a_loffset + 1;
	if (cursor.key_beg.key < 0)
		cursor.key_beg.key = 0;
	cursor.asof = ip->obj_asof;
	cursor.flags |= HAMMER_CURSOR_ASOF;

	cursor.key_end = cursor.key_beg;
	KKASSERT(ip->ino_data.obj_type == HAMMER_OBJTYPE_REGFILE);

	ran_end = ap->a_loffset + MAXPHYS;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_DATA;
	cursor.key_end.rec_type = HAMMER_RECTYPE_DATA;
	tmp64 = ran_end + MAXPHYS + 1;	/* work-around GCC-4 bug */
	if (tmp64 < ran_end)
		cursor.key_end.key = 0x7FFFFFFFFFFFFFFFLL;
	else
		cursor.key_end.key = ran_end + MAXPHYS + 1;

	cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE;

	error = hammer_ip_first(&cursor);
	base_offset = last_offset = 0;
	base_disk_offset = last_disk_offset = 0;

	while (error == 0) {
		/*
		 * Get the base file offset of the record.  The key for
		 * data records is (base + bytes) rather than (base).
		 *
		 * NOTE: rec_offset + rec_len may exceed the end-of-file.
		 * The extra bytes should be zero on-disk and the BMAP op
		 * should still be ok.
		 */
		base = &cursor.leaf->base;
		rec_offset = base->key - cursor.leaf->data_len;
		rec_len = cursor.leaf->data_len;

		/*
		 * Incorporate any cached truncation.
		 *
		 * NOTE: Modifications to rec_len based on synthesized
		 * truncation points remove the guarantee that any extended
		 * data on disk is zero (since the truncations may not have
		 * taken place on-media yet).
		 */
		if (ip->flags & HAMMER_INODE_TRUNCATED) {
			if (hammer_cursor_ondisk(&cursor) ||
			    cursor.iprec->flush_state == HAMMER_FST_FLUSH) {
				if (ip->trunc_off <= rec_offset)
					rec_len = 0;
				else if (ip->trunc_off < rec_offset + rec_len)
					rec_len = (int)(ip->trunc_off - rec_offset);
			}
		}
		if (ip->sync_flags & HAMMER_INODE_TRUNCATED) {
			if (hammer_cursor_ondisk(&cursor)) {
				if (ip->sync_trunc_off <= rec_offset)
					rec_len = 0;
				else if (ip->sync_trunc_off < rec_offset + rec_len)
					rec_len = (int)(ip->sync_trunc_off - rec_offset);
			}
		}

		/*
		 * Accumulate information.  If we have hit a discontiguous
		 * block reset base_offset unless we are already beyond the
		 * requested offset.  If we are, that's it, we stop.
		 */
		if (hammer_cursor_ondisk(&cursor)) {
			disk_offset = cursor.leaf->data_offset;
			if (rec_offset != last_offset ||
			    disk_offset != last_disk_offset) {
				if (rec_offset > ap->a_loffset)
					break;
				base_offset = rec_offset;
				base_disk_offset = disk_offset;
			}
			last_offset = rec_offset + rec_len;
			last_disk_offset = disk_offset + rec_len;
		}
		error = hammer_ip_next(&cursor);
	}

#if 0
	kprintf("BMAP %016llx: %016llx - %016llx\n",
		(long long)ap->a_loffset,
		(long long)base_offset,
		(long long)last_offset);
	kprintf("BMAP %16s: %016llx - %016llx\n", "",
		(long long)base_disk_offset,
		(long long)last_disk_offset);
#endif

	if (cursor.node) {
		hammer_cache_node(&ip->cache[1], cursor.node);
#if 0
		kprintf("bmap_end2 %016llx ip->cache %p\n",
			(long long)ap->a_loffset, ip->cache[1]);
#endif
	}
	hammer_done_cursor(&cursor);
	hammer_done_transaction(&trans);

	/*
	 * If we couldn't find any records or the records we did find were
	 * all behind the requested offset, return failure.  A forward
	 * truncation can leave a hole w/ no on-disk records.
	 */
	if (last_offset == 0 || last_offset < ap->a_loffset)
		return (EOPNOTSUPP);

	/*
	 * Figure out the block size at the requested offset and adjust
	 * our limits so the cluster_read() does not create inappropriately
	 * sized buffer cache buffers.
	 */
	blksize = hammer_blocksize(ap->a_loffset);
	if (hammer_blocksize(base_offset) != blksize) {
		base_offset = hammer_blockdemarc(base_offset, ap->a_loffset);
	}
	if (last_offset != ap->a_loffset &&
	    hammer_blocksize(last_offset - 1) != blksize) {
		last_offset = hammer_blockdemarc(ap->a_loffset,
						 last_offset - 1);
	}

	/*
	 * Returning EOPNOTSUPP simply prevents the direct-IO optimization
	 * from occurring.
	 */
	disk_offset = base_disk_offset + (ap->a_loffset - base_offset);

	if ((disk_offset & HAMMER_OFF_ZONE_MASK) != HAMMER_ZONE_LARGE_DATA) {
		/*
		 * Only large-data zones can be direct-IOd.
		 */
		error = EOPNOTSUPP;
	} else if ((disk_offset & HAMMER_BUFMASK) ||
		   (last_offset - ap->a_loffset) < blksize) {
		/*
		 * doffsetp is not aligned or the forward run size does
		 * not cover a whole buffer, disallow the direct I/O.
		 */
		error = EOPNOTSUPP;
	} else {
		/*
		 * We're good.
		 */
		*ap->a_doffsetp = disk_offset;
		if (ap->a_runb) {
			*ap->a_runb = ap->a_loffset - base_offset;
			KKASSERT(*ap->a_runb >= 0);
		}
		if (ap->a_runp) {
			*ap->a_runp = last_offset - ap->a_loffset;
			KKASSERT(*ap->a_runp >= 0);
		}
		error = 0;
	}
	return(error);
}
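
/*
 * Note on the blksize clipping above: hammer_blocksize() reports the
 * buffer cache block size HAMMER uses at a given file offset (smaller
 * buffers near the front of a file, larger ones past the demarcation
 * point), and hammer_blockdemarc() rounds an offset range inward so it
 * does not straddle the two regimes.  Clipping base_offset and
 * last_offset to the requested offset's regime keeps cluster_read()
 * from creating mixed-size buffers over the run.
 */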
/*
 * Write to a regular file.  Because this is a strategy call the OS is
 * trying to actually get data onto the media.
 */
static
int
hammer_vop_strategy_write(struct vop_strategy_args *ap)
{
	hammer_record_t record;
	hammer_mount_t hmp;
	hammer_inode_t ip;
	struct bio *bio;
	struct buf *bp;
	int blksize;
	int bytes;
	int error;

	bio = ap->a_bio;
	bp = bio->bio_buf;
	ip = ap->a_vp->v_data;
	hmp = ip->hmp;

	blksize = hammer_blocksize(bio->bio_offset);
	KKASSERT(bp->b_bufsize == blksize);

	if (ip->flags & HAMMER_INODE_RO) {
		bp->b_error = EROFS;
		bp->b_flags |= B_ERROR;
		biodone(ap->a_bio);
		return(EROFS);
	}

	/*
	 * Interlock with inode destruction (no in-kernel or directory
	 * topology visibility).  If we queue new IO while trying to
	 * destroy the inode we can deadlock the vtrunc call in
	 * hammer_inode_unloadable_check().
	 *
	 * Besides, there's no point flushing a bp associated with an
	 * inode that is being destroyed on-media and has no kernel
	 * references.
	 */
	if ((ip->flags | ip->sync_flags) &
	    (HAMMER_INODE_DELETING|HAMMER_INODE_DELETED)) {
		bp->b_resid = 0;
		biodone(ap->a_bio);
		return(0);
	}

	/*
	 * Reserve space and issue a direct-write from the front-end.
	 * NOTE: The direct_io code will hammer_bread/bcopy smaller
	 * allocations.
	 *
	 * An in-memory record will be installed to reference the storage
	 * until the flusher can get to it.
	 *
	 * Since we own the high level bio the front-end will not try to
	 * do a direct-read until the write completes.
	 *
	 * NOTE: The only time we do not reserve a full-sized buffer's
	 * worth of data is if the file is small.  We do not try to
	 * allocate a fragment (from the small-data zone) at the end of
	 * an otherwise large file as this can lead to wildly separated
	 * data.
	 */
	KKASSERT((bio->bio_offset & HAMMER_BUFMASK) == 0);
	KKASSERT(bio->bio_offset < ip->ino_data.size);
	if (bio->bio_offset || ip->ino_data.size > HAMMER_BUFSIZE / 2)
		bytes = bp->b_bufsize;
	else
		bytes = ((int)ip->ino_data.size + 15) & ~15;
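
	/*
	 * Example of the small-file case above: a 100 byte file written
	 * at offset 0 reserves bytes = (100 + 15) & ~15 = 112, i.e. the
	 * file size rounded up to a 16-byte boundary, rather than a full
	 * buffer's worth of storage.
	 */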
	record = hammer_ip_add_bulk(ip, bio->bio_offset, bp->b_data,
				    bytes, &error);
	if (record) {
		hammer_io_direct_write(hmp, record, bio);
		if (ip->rsv_recs > 1 && hmp->rsv_recs > hammer_limit_recs)
			hammer_flush_inode(ip, 0);
	} else {
		bp->b_bio2.bio_offset = NOOFFSET;
		bp->b_error = error;
		bp->b_flags |= B_ERROR;
		biodone(ap->a_bio);
	}
	return(error);
}
/*
 * dounlink - disconnect a directory entry
 *
 * XXX whiteout support not really in yet
 */
static int
hammer_dounlink(hammer_transaction_t trans, struct nchandle *nch,
		struct vnode *dvp, struct ucred *cred,
		int flags, int isdir)
{
	struct namecache *ncp;
	hammer_inode_t dip;
	hammer_inode_t ip;
	struct hammer_cursor cursor;
	int64_t namekey;
	u_int32_t max_iterations;
	int nlen, error;

	/*
	 * Calculate the namekey and setup the key range for the scan.  This
	 * works kinda like a chained hash table where the lower 32 bits
	 * of the namekey synthesize the chain.
	 *
	 * The key range is inclusive of both key_beg and key_end.
	 */
	dip = VTOI(dvp);
	ncp = nch->ncp;

	if (dip->flags & HAMMER_INODE_RO)
		return (EROFS);

	namekey = hammer_directory_namekey(dip, ncp->nc_name, ncp->nc_nlen,
					   &max_iterations);
retry:
	hammer_init_cursor(trans, &cursor, &dip->cache[1], dip);
	cursor.key_beg.localization = dip->obj_localization +
				      hammer_dir_localization(dip);
	cursor.key_beg.obj_id = dip->obj_id;
	cursor.key_beg.key = namekey;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
	cursor.key_beg.obj_type = 0;

	cursor.key_end = cursor.key_beg;
	cursor.key_end.key += max_iterations;
	cursor.asof = dip->obj_asof;
	cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;
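
	/*
	 * Illustrative example of the chained-hash layout above: two
	 * names whose hashes collide in the upper bits land at namekey,
	 * namekey + 1, and so on, so the scan below visits at most
	 * max_iterations keys (as computed by hammer_directory_namekey())
	 * and compares each stored name against the path component to
	 * pick the right entry out of the chain.
	 */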
	/*
	 * Scan all matching records (the chain), locate the one matching
	 * the requested path component.  info->last_error contains the
	 * error code on search termination and could be 0, ENOENT, or
	 * something else.
	 *
	 * The hammer_ip_*() functions merge in-memory records with on-disk
	 * records for the purposes of the search.
	 */
	error = hammer_ip_first(&cursor);

	while (error == 0) {
		error = hammer_ip_resolve_data(&cursor);
		if (error)
			break;
		nlen = cursor.leaf->data_len - HAMMER_ENTRY_NAME_OFF;
		KKASSERT(nlen > 0);
		if (ncp->nc_nlen == nlen &&
		    bcmp(ncp->nc_name, cursor.data->entry.name, nlen) == 0) {
			break;
		}
		error = hammer_ip_next(&cursor);
	}

	/*
	 * If all is ok we have to get the inode so we can adjust nlinks.
	 * To avoid a deadlock with the flusher we must release the inode
	 * lock on the directory when acquiring the inode for the entry.
	 *
	 * If the target is a directory, it must be empty.
	 */
	if (error == 0) {
		hammer_unlock(&cursor.ip->lock);
		ip = hammer_get_inode(trans, dip, cursor.data->entry.obj_id,
				      dip->hmp->asof,
				      cursor.data->entry.localization,
				      0, &error);
		hammer_lock_sh(&cursor.ip->lock);
		if (error == ENOENT) {
			kprintf("HAMMER: WARNING: Removing "
				"dirent w/missing inode \"%s\"\n"
				"\tobj_id = %016llx\n",
				ncp->nc_name,
				(long long)cursor.data->entry.obj_id);
			error = 0;
		}

		/*
		 * If isdir >= 0 we validate that the entry is or is not a
		 * directory.  If isdir < 0 we don't care.
		 */
		if (error == 0 && isdir >= 0 && ip) {
			if (isdir &&
			    ip->ino_data.obj_type != HAMMER_OBJTYPE_DIRECTORY) {
				error = ENOTDIR;
			} else if (isdir == 0 &&
			    ip->ino_data.obj_type == HAMMER_OBJTYPE_DIRECTORY) {
				error = EISDIR;
			}
		}

		/*
		 * If we are trying to remove a directory the directory must
		 * be empty.
		 *
		 * The check directory code can loop and deadlock/retry.  Our
		 * own cursor's node locks must be released to avoid a 3-way
		 * deadlock with the flusher if the check directory code
		 * blocks.
		 *
		 * If any changes whatsoever have been made to the cursor
		 * set EDEADLK and retry.
		 *
		 * WARNING: See warnings in hammer_unlock_cursor()
		 *	    function.
		 */
		if (error == 0 && ip && ip->ino_data.obj_type ==
				        HAMMER_OBJTYPE_DIRECTORY) {
			hammer_unlock_cursor(&cursor);
			error = hammer_ip_check_directory_empty(trans, ip);
			hammer_lock_cursor(&cursor);
			if (cursor.flags & HAMMER_CURSOR_RETEST) {
				kprintf("HAMMER: Warning: avoided deadlock "
					"on rmdir '%s'\n",
					ncp->nc_name);
				error = EDEADLK;
			}
		}

		/*
		 * Delete the directory entry.
		 *
		 * WARNING: hammer_ip_del_directory() may have to terminate
		 * the cursor to avoid a deadlock.  It is ok to call
		 * hammer_done_cursor() twice.
		 */
		if (error == 0) {
			error = hammer_ip_del_directory(trans, &cursor,
							dip, ip);
		}
		hammer_done_cursor(&cursor);
		if (error == 0) {
			cache_setunresolved(nch);
			cache_setvp(nch, NULL);
			/* XXX locking */
			if (ip && ip->vp) {
				hammer_knote(ip->vp, NOTE_DELETE);
				cache_inval_vp(ip->vp, CINV_DESTROY);
			}
		}
		if (ip)
			hammer_rel_inode(ip, 0);
	} else {
		hammer_done_cursor(&cursor);
	}
	if (error == EDEADLK)
		goto retry;

	return (error);
}
/************************************************************************
 *			FIFO AND SPECFS OPS				*
 ************************************************************************
 *
 */
static int
hammer_vop_fifoclose (struct vop_close_args *ap)
{
	/* XXX update itimes */
	return (VOCALL(&fifo_vnode_vops, &ap->a_head));
}

static int
hammer_vop_fiforead (struct vop_read_args *ap)
{
	int error;

	error = VOCALL(&fifo_vnode_vops, &ap->a_head);
	/* XXX update access time */
	return (error);
}

static int
hammer_vop_fifowrite (struct vop_write_args *ap)
{
	int error;

	error = VOCALL(&fifo_vnode_vops, &ap->a_head);
	/* XXX update access time */
	return (error);
}

static
int
hammer_vop_fifokqfilter(struct vop_kqfilter_args *ap)
{
	int error;

	error = VOCALL(&fifo_vnode_vops, &ap->a_head);
	if (error)
		error = hammer_vop_kqfilter(ap);
	return(error);
}
/************************************************************************
 *			    KQFILTER OPS				*
 ************************************************************************
 *
 */
static void filt_hammerdetach(struct knote *kn);
static int filt_hammerread(struct knote *kn, long hint);
static int filt_hammerwrite(struct knote *kn, long hint);
static int filt_hammervnode(struct knote *kn, long hint);

static struct filterops hammerread_filtops =
	{ 1, NULL, filt_hammerdetach, filt_hammerread };
static struct filterops hammerwrite_filtops =
	{ 1, NULL, filt_hammerdetach, filt_hammerwrite };
static struct filterops hammervnode_filtops =
	{ 1, NULL, filt_hammerdetach, filt_hammervnode };
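
/*
 * Note: the positional initializers above correspond to the filterops
 * fields of this kernel era, { f_isfd, f_attach, f_detach, f_event }.
 * f_isfd = 1 marks the filters as operating on file descriptors, no
 * attach callback is needed, and all three filters share one detach
 * routine.  (Field names are given for orientation only; consult
 * sys/event.h for the authoritative definition.)
 */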
static
int
hammer_vop_kqfilter(struct vop_kqfilter_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct knote *kn = ap->a_kn;
	lwkt_tokref vlock;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &hammerread_filtops;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &hammerwrite_filtops;
		break;
	case EVFILT_VNODE:
		kn->kn_fop = &hammervnode_filtops;
		break;
	default:
		return (1);
	}

	kn->kn_hook = (caddr_t)vp;

	lwkt_gettoken(&vlock, &vp->v_token);
	SLIST_INSERT_HEAD(&vp->v_pollinfo.vpi_selinfo.si_note, kn, kn_selnext);
	lwkt_reltoken(&vlock);

	return(0);
}

static void
filt_hammerdetach(struct knote *kn)
{
	struct vnode *vp = (void *)kn->kn_hook;
	lwkt_tokref vlock;

	lwkt_gettoken(&vlock, &vp->v_token);
	SLIST_REMOVE(&vp->v_pollinfo.vpi_selinfo.si_note,
		     kn, knote, kn_selnext);
	lwkt_reltoken(&vlock);
}

static int
filt_hammerread(struct knote *kn, long hint)
{
	struct vnode *vp = (void *)kn->kn_hook;
	hammer_inode_t ip = VTOI(vp);

	if (hint == NOTE_REVOKE) {
		kn->kn_flags |= (EV_EOF | EV_ONESHOT);
		return(1);
	}
	kn->kn_data = ip->ino_data.size - kn->kn_fp->f_offset;
	return (kn->kn_data != 0);
}

static int
filt_hammerwrite(struct knote *kn, long hint)
{
	if (hint == NOTE_REVOKE)
		kn->kn_flags |= (EV_EOF | EV_ONESHOT);
	kn->kn_data = 0;
	return (1);
}

static int
filt_hammervnode(struct knote *kn, long hint)
{
	if (kn->kn_sfflags & hint)
		kn->kn_fflags |= hint;
	if (hint == NOTE_REVOKE) {
		kn->kn_flags |= EV_EOF;
		return (1);
	}

	return (kn->kn_fflags != 0);
}
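
/*
 * Userland view of the filters above, as a sketch (standard kqueue(2)
 * API; the file path is hypothetical and error handling is omitted):
 *
 *	#include <sys/types.h>
 *	#include <sys/event.h>
 *	#include <sys/time.h>
 *	#include <fcntl.h>
 *
 *	int kq = kqueue();
 *	int fd = open("/hammer/some/file", O_RDONLY);
 *	struct kevent change, ev;
 *
 *	EV_SET(&change, fd, EVFILT_VNODE, EV_ADD | EV_CLEAR,
 *	       NOTE_DELETE | NOTE_WRITE, 0, NULL);
 *	kevent(kq, &change, 1, NULL, 0, NULL);	// register the filter
 *	kevent(kq, NULL, 0, &ev, 1, NULL);	// block for an event
 *
 * filt_hammervnode() accumulates the NOTE_* hints posted by the
 * hammer_knote() calls earlier in this file (NOTE_WRITE on create,
 * NOTE_DELETE on unlink, etc.) into kn_fflags and wakes the waiter.
 */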