/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_vnops.c,v 1.102 2008/10/16 17:24:16 dillon Exp $
 */
37 #include <sys/param.h>
38 #include <sys/systm.h>
39 #include <sys/kernel.h>
40 #include <sys/fcntl.h>
41 #include <sys/namecache.h>
42 #include <sys/vnode.h>
43 #include <sys/lockf.h>
44 #include <sys/event.h>
46 #include <sys/dirent.h>
48 #include <vm/vm_extern.h>
49 #include <vfs/fifofs/fifo.h>
56 /*static int hammer_vop_vnoperate(struct vop_generic_args *);*/
57 static int hammer_vop_fsync(struct vop_fsync_args *);
58 static int hammer_vop_read(struct vop_read_args *);
59 static int hammer_vop_write(struct vop_write_args *);
60 static int hammer_vop_access(struct vop_access_args *);
61 static int hammer_vop_advlock(struct vop_advlock_args *);
62 static int hammer_vop_close(struct vop_close_args *);
63 static int hammer_vop_ncreate(struct vop_ncreate_args *);
64 static int hammer_vop_getattr(struct vop_getattr_args *);
65 static int hammer_vop_nresolve(struct vop_nresolve_args *);
66 static int hammer_vop_nlookupdotdot(struct vop_nlookupdotdot_args *);
67 static int hammer_vop_nlink(struct vop_nlink_args *);
68 static int hammer_vop_nmkdir(struct vop_nmkdir_args *);
69 static int hammer_vop_nmknod(struct vop_nmknod_args *);
70 static int hammer_vop_open(struct vop_open_args *);
71 static int hammer_vop_print(struct vop_print_args *);
72 static int hammer_vop_readdir(struct vop_readdir_args *);
73 static int hammer_vop_readlink(struct vop_readlink_args *);
74 static int hammer_vop_nremove(struct vop_nremove_args *);
75 static int hammer_vop_nrename(struct vop_nrename_args *);
76 static int hammer_vop_nrmdir(struct vop_nrmdir_args *);
77 static int hammer_vop_markatime(struct vop_markatime_args *);
78 static int hammer_vop_setattr(struct vop_setattr_args *);
79 static int hammer_vop_strategy(struct vop_strategy_args *);
80 static int hammer_vop_bmap(struct vop_bmap_args *ap);
81 static int hammer_vop_nsymlink(struct vop_nsymlink_args *);
82 static int hammer_vop_nwhiteout(struct vop_nwhiteout_args *);
83 static int hammer_vop_ioctl(struct vop_ioctl_args *);
84 static int hammer_vop_mountctl(struct vop_mountctl_args *);
85 static int hammer_vop_kqfilter (struct vop_kqfilter_args *);
87 static int hammer_vop_fifoclose (struct vop_close_args *);
88 static int hammer_vop_fiforead (struct vop_read_args *);
89 static int hammer_vop_fifowrite (struct vop_write_args *);
90 static int hammer_vop_fifokqfilter (struct vop_kqfilter_args *);
92 struct vop_ops hammer_vnode_vops = {
93 .vop_default = vop_defaultop,
94 .vop_fsync = hammer_vop_fsync,
95 .vop_getpages = vop_stdgetpages,
96 .vop_putpages = vop_stdputpages,
97 .vop_read = hammer_vop_read,
98 .vop_write = hammer_vop_write,
99 .vop_access = hammer_vop_access,
100 .vop_advlock = hammer_vop_advlock,
101 .vop_close = hammer_vop_close,
102 .vop_ncreate = hammer_vop_ncreate,
103 .vop_getattr = hammer_vop_getattr,
104 .vop_inactive = hammer_vop_inactive,
105 .vop_reclaim = hammer_vop_reclaim,
106 .vop_nresolve = hammer_vop_nresolve,
107 .vop_nlookupdotdot = hammer_vop_nlookupdotdot,
108 .vop_nlink = hammer_vop_nlink,
109 .vop_nmkdir = hammer_vop_nmkdir,
110 .vop_nmknod = hammer_vop_nmknod,
111 .vop_open = hammer_vop_open,
112 .vop_pathconf = vop_stdpathconf,
113 .vop_print = hammer_vop_print,
114 .vop_readdir = hammer_vop_readdir,
115 .vop_readlink = hammer_vop_readlink,
116 .vop_nremove = hammer_vop_nremove,
117 .vop_nrename = hammer_vop_nrename,
118 .vop_nrmdir = hammer_vop_nrmdir,
119 .vop_markatime = hammer_vop_markatime,
120 .vop_setattr = hammer_vop_setattr,
121 .vop_bmap = hammer_vop_bmap,
122 .vop_strategy = hammer_vop_strategy,
123 .vop_nsymlink = hammer_vop_nsymlink,
124 .vop_nwhiteout = hammer_vop_nwhiteout,
125 .vop_ioctl = hammer_vop_ioctl,
126 .vop_mountctl = hammer_vop_mountctl,
127 .vop_kqfilter = hammer_vop_kqfilter
130 struct vop_ops hammer_spec_vops = {
131 .vop_default = vop_defaultop,
132 .vop_fsync = hammer_vop_fsync,
133 .vop_read = vop_stdnoread,
134 .vop_write = vop_stdnowrite,
135 .vop_access = hammer_vop_access,
136 .vop_close = hammer_vop_close,
137 .vop_markatime = hammer_vop_markatime,
138 .vop_getattr = hammer_vop_getattr,
139 .vop_inactive = hammer_vop_inactive,
140 .vop_reclaim = hammer_vop_reclaim,
141 .vop_setattr = hammer_vop_setattr
144 struct vop_ops hammer_fifo_vops = {
145 .vop_default = fifo_vnoperate,
146 .vop_fsync = hammer_vop_fsync,
147 .vop_read = hammer_vop_fiforead,
148 .vop_write = hammer_vop_fifowrite,
149 .vop_access = hammer_vop_access,
150 .vop_close = hammer_vop_fifoclose,
151 .vop_markatime = hammer_vop_markatime,
152 .vop_getattr = hammer_vop_getattr,
153 .vop_inactive = hammer_vop_inactive,
154 .vop_reclaim = hammer_vop_reclaim,
155 .vop_setattr = hammer_vop_setattr,
156 .vop_kqfilter = hammer_vop_fifokqfilter
161 hammer_knote(struct vnode *vp, int flags)
164 KNOTE(&vp->v_pollinfo.vpi_kqinfo.ki_note, flags);
167 #ifdef DEBUG_TRUNCATE
168 struct hammer_inode *HammerTruncIp;
171 static int hammer_dounlink(hammer_transaction_t trans, struct nchandle *nch,
172 struct vnode *dvp, struct ucred *cred,
173 int flags, int isdir);
174 static int hammer_vop_strategy_read(struct vop_strategy_args *ap);
175 static int hammer_vop_strategy_write(struct vop_strategy_args *ap);
180 hammer_vop_vnoperate(struct vop_generic_args *)
182 return (VOCALL(&hammer_vnode_vops, ap));
/*
 * hammer_vop_fsync { vp, waitfor }
 *
 * fsync() an inode to disk and wait for it to be completely committed
 * such that the information would not be undone if a crash occurred
 * after return.
 *
 * NOTE: HAMMER's fsync()s are going to remain expensive until we implement
 *	 a REDO log.  A sysctl is provided to relax HAMMER's fsync()
 *	 operation.
 *
 *	 Ultimately the combination of a REDO log and use of fast storage
 *	 to front-end cluster caches will make fsync fast, but it ain't
 *	 here yet.  And, in any case, we need real transactional
 *	 all-or-nothing features which are not restricted to a single file.
 */
204 hammer_vop_fsync(struct vop_fsync_args *ap)
206 hammer_inode_t ip = VTOI(ap->a_vp);
207 hammer_mount_t hmp = ip->hmp;
208 int waitfor = ap->a_waitfor;
211 lwkt_gettoken(&hmp->fs_token);
214 * Fsync rule relaxation (default is either full synchronous flush
215 * or REDO semantics with synchronous flush).
217 if (ap->a_flags & VOP_FSYNC_SYSCALL) {
218 switch(hammer_fsync_mode) {
221 /* no REDO, full synchronous flush */
225 /* no REDO, full asynchronous flush */
226 if (waitfor == MNT_WAIT)
227 waitfor = MNT_NOWAIT;
230 /* REDO semantics, synchronous flush */
231 if (hmp->version < HAMMER_VOL_VERSION_FOUR)
233 mode = HAMMER_FLUSH_UNDOS_AUTO;
236 /* REDO semantics, relaxed asynchronous flush */
237 if (hmp->version < HAMMER_VOL_VERSION_FOUR)
239 mode = HAMMER_FLUSH_UNDOS_RELAXED;
240 if (waitfor == MNT_WAIT)
241 waitfor = MNT_NOWAIT;
244 /* ignore the fsync() system call */
245 lwkt_reltoken(&hmp->fs_token);
248 /* we have to do something */
249 mode = HAMMER_FLUSH_UNDOS_RELAXED;
250 if (waitfor == MNT_WAIT)
251 waitfor = MNT_NOWAIT;
256 * Fast fsync only needs to flush the UNDO/REDO fifo if
257 * HAMMER_INODE_REDO is non-zero and the only modifications
258 * made to the file are write or write-extends.
260 if ((ip->flags & HAMMER_INODE_REDO) &&
261 (ip->flags & HAMMER_INODE_MODMASK_NOREDO) == 0
263 ++hammer_count_fsyncs;
264 hammer_flusher_flush_undos(hmp, mode);
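	/*
	 * (Presumably the win in this fast path: only the UNDO/REDO FIFO
	 * is forced out to the media, while the inode's meta-data sync is
	 * left to the normal flusher cycle, which is what makes the
	 * relaxed fsync modes cheap.)
	 */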
266 lwkt_reltoken(&hmp->fs_token);
271 * REDO is enabled by fsync(), the idea being we really only
272 * want to lay down REDO records when programs are using
273 * fsync() heavily. The first fsync() on the file starts
274 * the gravy train going and later fsync()s keep it hot by
275 * resetting the redo_count.
277 * We weren't running REDOs before now so we have to fall
278 * through and do a full fsync of what we have.
280 if (hmp->version >= HAMMER_VOL_VERSION_FOUR &&
281 (hmp->flags & HAMMER_MOUNT_REDO_RECOVERY_RUN) == 0) {
282 ip->flags |= HAMMER_INODE_REDO;
289 * Do a full flush sequence.
291 ++hammer_count_fsyncs;
292 vfsync(ap->a_vp, waitfor, 1, NULL, NULL);
293 hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
294 if (waitfor == MNT_WAIT) {
296 hammer_wait_inode(ip);
297 vn_lock(ap->a_vp, LK_EXCLUSIVE | LK_RETRY);
299 lwkt_reltoken(&hmp->fs_token);
/*
 * hammer_vop_read { vp, uio, ioflag, cred }
 *
 * MPSAFE (the cache-safe path does not require fs_token)
 */
310 hammer_vop_read(struct vop_read_args *ap)
312 struct hammer_transaction trans;
326 if (ap->a_vp->v_type != VREG)
334 * Allow the UIO's size to override the sequential heuristic.
336 blksize = hammer_blocksize(uio->uio_offset);
337 seqcount = (uio->uio_resid + (BKVASIZE - 1)) / BKVASIZE;
338 ioseqcount = (ap->a_ioflag >> 16);
339 if (seqcount < ioseqcount)
340 seqcount = ioseqcount;
	/*
	 * If reading or writing a huge amount of data we have to break
	 * atomicity and allow the operation to be interrupted by a signal
	 * or it can DOS the machine.
	 */
347 bigread = (uio->uio_resid > 100 * 1024 * 1024);
351 * Access the data typically in HAMMER_BUFSIZE blocks via the
352 * buffer cache, but HAMMER may use a variable block size based
355 * XXX Temporary hack, delay the start transaction while we remain
356 * MPSAFE. NOTE: ino_data.size cannot change while vnode is
359 while (uio->uio_resid > 0 && uio->uio_offset < ip->ino_data.size) {
363 blksize = hammer_blocksize(uio->uio_offset);
364 offset = (int)uio->uio_offset & (blksize - 1);
365 base_offset = uio->uio_offset - offset;
367 if (bigread && (error = hammer_signal_check(ip->hmp)) != 0)
373 bp = getcacheblk(ap->a_vp, base_offset, blksize);
382 if (got_fstoken == 0) {
383 lwkt_gettoken(&hmp->fs_token);
385 hammer_start_transaction(&trans, ip->hmp);
388 if (hammer_cluster_enable) {
390 * Use file_limit to prevent cluster_read() from
391 * creating buffers of the wrong block size past
394 file_limit = ip->ino_data.size;
395 if (base_offset < HAMMER_XDEMARC &&
396 file_limit > HAMMER_XDEMARC) {
397 file_limit = HAMMER_XDEMARC;
399 error = cluster_read(ap->a_vp,
400 file_limit, base_offset,
401 blksize, uio->uio_resid,
402 seqcount * BKVASIZE, &bp);
404 error = bread(ap->a_vp, base_offset, blksize, &bp);
411 if ((hammer_debug_io & 0x0001) && (bp->b_flags & B_IODEBUG)) {
412 kprintf("doff %016jx read file %016jx@%016jx\n",
413 (intmax_t)bp->b_bio2.bio_offset,
414 (intmax_t)ip->obj_id,
415 (intmax_t)bp->b_loffset);
417 bp->b_flags &= ~B_IODEBUG;
419 /* bp->b_flags |= B_CLUSTEROK; temporarily disabled */
420 n = blksize - offset;
421 if (n > uio->uio_resid)
423 if (n > ip->ino_data.size - uio->uio_offset)
424 n = (int)(ip->ino_data.size - uio->uio_offset);
426 lwkt_reltoken(&hmp->fs_token);
429 * Set B_AGE, data has a lower priority than meta-data.
431 * Use a hold/unlock/drop sequence to run the uiomove
432 * with the buffer unlocked, avoiding deadlocks against
433 * read()s on mmap()'d spaces.
435 bp->b_flags |= B_AGE;
438 error = uiomove((char *)bp->b_data + offset, n, uio);
442 lwkt_gettoken(&hmp->fs_token);
446 hammer_stats_file_read += n;
450 * XXX only update the atime if we had to get the MP lock.
451 * XXX hack hack hack, fixme.
454 if ((ip->flags & HAMMER_INODE_RO) == 0 &&
455 (ip->hmp->mp->mnt_flag & MNT_NOATIME) == 0) {
456 ip->ino_data.atime = trans.time;
457 hammer_modify_inode(&trans, ip, HAMMER_INODE_ATIME);
459 hammer_done_transaction(&trans);
460 lwkt_reltoken(&hmp->fs_token);
466 * hammer_vop_write { vp, uio, ioflag, cred }
470 hammer_vop_write(struct vop_write_args *ap)
472 struct hammer_transaction trans;
473 struct hammer_inode *ip;
486 if (ap->a_vp->v_type != VREG)
492 seqcount = ap->a_ioflag >> 16;
494 if (ip->flags & HAMMER_INODE_RO)
498 * Create a transaction to cover the operations we perform.
500 lwkt_gettoken(&hmp->fs_token);
501 hammer_start_transaction(&trans, hmp);
507 if (ap->a_ioflag & IO_APPEND)
508 uio->uio_offset = ip->ino_data.size;
511 * Check for illegal write offsets. Valid range is 0...2^63-1.
513 * NOTE: the base_off assignment is required to work around what
514 * I consider to be a GCC-4 optimization bug.
516 if (uio->uio_offset < 0) {
517 hammer_done_transaction(&trans);
518 lwkt_reltoken(&hmp->fs_token);
521 base_offset = uio->uio_offset + uio->uio_resid; /* work around gcc-4 */
522 if (uio->uio_resid > 0 && base_offset <= uio->uio_offset) {
523 hammer_done_transaction(&trans);
524 lwkt_reltoken(&hmp->fs_token);
	/*
	 * If reading or writing a huge amount of data we have to break
	 * atomicity and allow the operation to be interrupted by a signal
	 * or it can DOS the machine.
	 *
	 * Preset redo_count so we stop generating REDOs earlier if the
	 * limit is exceeded.
	 */
536 bigwrite = (uio->uio_resid > 100 * 1024 * 1024);
537 if ((ip->flags & HAMMER_INODE_REDO) &&
538 ip->redo_count < hammer_limit_redo) {
539 ip->redo_count += uio->uio_resid;
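	/*
	 * (Illustrative consequence: a very large write can preset
	 * redo_count past hammer_limit_redo in one shot, so the loop
	 * below stops laying down REDO records for it rather than
	 * logging the entire write.)
	 */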
543 * Access the data typically in HAMMER_BUFSIZE blocks via the
544 * buffer cache, but HAMMER may use a variable block size based
547 while (uio->uio_resid > 0) {
555 if ((error = hammer_checkspace(hmp, HAMMER_CHKSPC_WRITE)) != 0)
557 if (bigwrite && (error = hammer_signal_check(hmp)) != 0)
560 blksize = hammer_blocksize(uio->uio_offset);
563 * Do not allow HAMMER to blow out the buffer cache. Very
	 * large UIOs can lock out other processes due to bwillwrite()
567 * The hammer inode is not locked during these operations.
568 * The vnode is locked which can interfere with the pageout
569 * daemon for non-UIO_NOCOPY writes but should not interfere
570 * with the buffer cache. Even so, we cannot afford to
571 * allow the pageout daemon to build up too many dirty buffer
574 * Only call this if we aren't being recursively called from
575 * a virtual disk device (vn), else we may deadlock.
577 if ((ap->a_ioflag & IO_RECURSE) == 0)
581 * Control the number of pending records associated with
582 * this inode. If too many have accumulated start a
583 * flush. Try to maintain a pipeline with the flusher.
585 if (ip->rsv_recs >= hammer_limit_inode_recs) {
586 hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
588 if (ip->rsv_recs >= hammer_limit_inode_recs * 2) {
589 while (ip->rsv_recs >= hammer_limit_inode_recs) {
590 tsleep(&ip->rsv_recs, 0, "hmrwww", hz);
592 hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
597 * Do not allow HAMMER to blow out system memory by
598 * accumulating too many records. Records are so well
599 * decoupled from the buffer cache that it is possible
600 * for userland to push data out to the media via
601 * direct-write, but build up the records queued to the
	 * backend faster than the backend can flush them out.
603 * HAMMER has hit its write limit but the frontend has
604 * no pushback to slow it down.
606 if (hmp->rsv_recs > hammer_limit_recs / 2) {
608 * Get the inode on the flush list
610 if (ip->rsv_recs >= 64)
611 hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
612 else if (ip->rsv_recs >= 16)
613 hammer_flush_inode(ip, 0);
616 * Keep the flusher going if the system keeps
619 delta = hmp->count_newrecords -
620 hmp->last_newrecords;
621 if (delta < 0 || delta > hammer_limit_recs / 2) {
622 hmp->last_newrecords = hmp->count_newrecords;
623 hammer_sync_hmp(hmp, MNT_NOWAIT);
627 * If we have gotten behind start slowing
630 delta = (hmp->rsv_recs - hammer_limit_recs) *
631 hz / hammer_limit_recs;
633 tsleep(&trans, 0, "hmrslo", delta);
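			/*
			 * (Illustrative arithmetic: the sleep scales with
			 * the overshoot, e.g. running 25% past
			 * hammer_limit_recs gives a delta of roughly hz/4
			 * ticks, about a quarter of a second per pass
			 * through the write loop.)
			 */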
638 * Calculate the blocksize at the current offset and figure
639 * out how much we can actually write.
641 blkmask = blksize - 1;
642 offset = (int)uio->uio_offset & blkmask;
643 base_offset = uio->uio_offset & ~(int64_t)blkmask;
644 n = blksize - offset;
645 if (n > uio->uio_resid) {
651 nsize = uio->uio_offset + n;
652 if (nsize > ip->ino_data.size) {
653 if (uio->uio_offset > ip->ino_data.size)
657 nvextendbuf(ap->a_vp,
660 hammer_blocksize(ip->ino_data.size),
661 hammer_blocksize(nsize),
662 hammer_blockoff(ip->ino_data.size),
663 hammer_blockoff(nsize),
666 kflags |= NOTE_EXTEND;
669 if (uio->uio_segflg == UIO_NOCOPY) {
671 * Issuing a write with the same data backing the
672 * buffer. Instantiate the buffer to collect the
673 * backing vm pages, then read-in any missing bits.
675 * This case is used by vop_stdputpages().
677 bp = getblk(ap->a_vp, base_offset,
678 blksize, GETBLK_BHEAVY, 0);
679 if ((bp->b_flags & B_CACHE) == 0) {
681 error = bread(ap->a_vp, base_offset,
684 } else if (offset == 0 && uio->uio_resid >= blksize) {
686 * Even though we are entirely overwriting the buffer
687 * we may still have to zero it out to avoid a
688 * mmap/write visibility issue.
690 bp = getblk(ap->a_vp, base_offset, blksize, GETBLK_BHEAVY, 0);
691 if ((bp->b_flags & B_CACHE) == 0)
693 } else if (base_offset >= ip->ino_data.size) {
695 * If the base offset of the buffer is beyond the
696 * file EOF, we don't have to issue a read.
698 bp = getblk(ap->a_vp, base_offset,
699 blksize, GETBLK_BHEAVY, 0);
703 * Partial overwrite, read in any missing bits then
704 * replace the portion being written.
706 error = bread(ap->a_vp, base_offset, blksize, &bp);
711 lwkt_reltoken(&hmp->fs_token);
712 error = uiomove(bp->b_data + offset, n, uio);
713 lwkt_gettoken(&hmp->fs_token);
717 * Generate REDO records if enabled and redo_count will not
		 * exceed the limit.
720 * If redo_count exceeds the limit we stop generating records
721 * and clear HAMMER_INODE_REDO. This will cause the next
722 * fsync() to do a full meta-data sync instead of just an
723 * UNDO/REDO fifo update.
725 * When clearing HAMMER_INODE_REDO any pre-existing REDOs
726 * will still be tracked. The tracks will be terminated
727 * when the related meta-data (including possible data
728 * modifications which are not tracked via REDO) is
731 if ((ip->flags & HAMMER_INODE_REDO) && error == 0) {
732 if (ip->redo_count < hammer_limit_redo) {
733 bp->b_flags |= B_VFSFLAG1;
734 error = hammer_generate_redo(&trans, ip,
735 base_offset + offset,
740 ip->flags &= ~HAMMER_INODE_REDO;
745 * If we screwed up we have to undo any VM size changes we
751 nvtruncbuf(ap->a_vp, ip->ino_data.size,
752 hammer_blocksize(ip->ino_data.size),
753 hammer_blockoff(ip->ino_data.size));
757 kflags |= NOTE_WRITE;
758 hammer_stats_file_write += n;
759 /* bp->b_flags |= B_CLUSTEROK; temporarily disabled */
760 if (ip->ino_data.size < uio->uio_offset) {
761 ip->ino_data.size = uio->uio_offset;
762 flags = HAMMER_INODE_SDIRTY;
766 ip->ino_data.mtime = trans.time;
767 flags |= HAMMER_INODE_MTIME | HAMMER_INODE_BUFS;
768 hammer_modify_inode(&trans, ip, flags);
771 * Once we dirty the buffer any cached zone-X offset
772 * becomes invalid. HAMMER NOTE: no-history mode cannot
773 * allow overwriting over the same data sector unless
774 * we provide UNDOs for the old data, which we don't.
776 bp->b_bio2.bio_offset = NOOFFSET;
779 * Final buffer disposition.
781 * Because meta-data updates are deferred, HAMMER is
782 * especially sensitive to excessive bdwrite()s because
783 * the I/O stream is not broken up by disk reads. So the
784 * buffer cache simply cannot keep up.
786 * WARNING! blksize is variable. cluster_write() is
787 * expected to not blow up if it encounters
788 * buffers that do not match the passed blksize.
790 * NOTE! Hammer shouldn't need to bawrite()/cluster_write().
791 * The ip->rsv_recs check should burst-flush the data.
792 * If we queue it immediately the buf could be left
793 * locked on the device queue for a very long time.
795 * NOTE! To avoid degenerate stalls due to mismatched block
796 * sizes we only honor IO_DIRECT on the write which
797 * abuts the end of the buffer. However, we must
798 * honor IO_SYNC in case someone is silly enough to
799 * configure a HAMMER file as swap, or when HAMMER
800 * is serving NFS (for commits). Ick ick.
802 bp->b_flags |= B_AGE;
803 if (ap->a_ioflag & IO_SYNC) {
805 } else if ((ap->a_ioflag & IO_DIRECT) && endofblk) {
809 if (offset + n == blksize) {
810 if (hammer_cluster_enable == 0 ||
811 (ap->a_vp->v_mount->mnt_flag & MNT_NOCLUSTERW)) {
814 cluster_write(bp, ip->ino_data.size,
822 hammer_done_transaction(&trans);
823 hammer_knote(ap->a_vp, kflags);
824 lwkt_reltoken(&hmp->fs_token);
829 * hammer_vop_access { vp, mode, cred }
831 * MPSAFE - does not require fs_token
835 hammer_vop_access(struct vop_access_args *ap)
837 struct hammer_inode *ip = VTOI(ap->a_vp);
842 ++hammer_stats_file_iopsr;
843 uid = hammer_to_unix_xid(&ip->ino_data.uid);
844 gid = hammer_to_unix_xid(&ip->ino_data.gid);
846 error = vop_helper_access(ap, uid, gid, ip->ino_data.mode,
847 ip->ino_data.uflags);
852 * hammer_vop_advlock { vp, id, op, fl, flags }
854 * MPSAFE - does not require fs_token
858 hammer_vop_advlock(struct vop_advlock_args *ap)
860 hammer_inode_t ip = VTOI(ap->a_vp);
862 return (lf_advlock(ap, &ip->advlock, ip->ino_data.size));
866 * hammer_vop_close { vp, fflag }
868 * We can only sync-on-close for normal closes. XXX disabled for now.
872 hammer_vop_close(struct vop_close_args *ap)
875 struct vnode *vp = ap->a_vp;
876 hammer_inode_t ip = VTOI(vp);
878 if (ip->flags & (HAMMER_INODE_CLOSESYNC|HAMMER_INODE_CLOSEASYNC)) {
879 if (vn_islocked(vp) == LK_EXCLUSIVE &&
880 (vp->v_flag & (VINACTIVE|VRECLAIMED)) == 0) {
881 if (ip->flags & HAMMER_INODE_CLOSESYNC)
884 waitfor = MNT_NOWAIT;
885 ip->flags &= ~(HAMMER_INODE_CLOSESYNC |
886 HAMMER_INODE_CLOSEASYNC);
887 VOP_FSYNC(vp, MNT_NOWAIT, waitfor);
891 return (vop_stdclose(ap));
895 * hammer_vop_ncreate { nch, dvp, vpp, cred, vap }
897 * The operating system has already ensured that the directory entry
898 * does not exist and done all appropriate namespace locking.
902 hammer_vop_ncreate(struct vop_ncreate_args *ap)
904 struct hammer_transaction trans;
905 struct hammer_inode *dip;
906 struct hammer_inode *nip;
907 struct nchandle *nch;
912 dip = VTOI(ap->a_dvp);
915 if (dip->flags & HAMMER_INODE_RO)
917 if ((error = hammer_checkspace(hmp, HAMMER_CHKSPC_CREATE)) != 0)
921 * Create a transaction to cover the operations we perform.
923 lwkt_gettoken(&hmp->fs_token);
924 hammer_start_transaction(&trans, hmp);
925 ++hammer_stats_file_iopsw;
928 * Create a new filesystem object of the requested type. The
929 * returned inode will be referenced and shared-locked to prevent
930 * it from being moved to the flusher.
932 error = hammer_create_inode(&trans, ap->a_vap, ap->a_cred,
933 dip, nch->ncp->nc_name, nch->ncp->nc_nlen,
936 hkprintf("hammer_create_inode error %d\n", error);
937 hammer_done_transaction(&trans);
939 lwkt_reltoken(&hmp->fs_token);
944 * Add the new filesystem object to the directory. This will also
945 * bump the inode's link count.
947 error = hammer_ip_add_directory(&trans, dip,
948 nch->ncp->nc_name, nch->ncp->nc_nlen,
951 hkprintf("hammer_ip_add_directory error %d\n", error);
957 hammer_rel_inode(nip, 0);
958 hammer_done_transaction(&trans);
961 error = hammer_get_vnode(nip, ap->a_vpp);
962 hammer_done_transaction(&trans);
963 hammer_rel_inode(nip, 0);
965 cache_setunresolved(ap->a_nch);
966 cache_setvp(ap->a_nch, *ap->a_vpp);
968 hammer_knote(ap->a_dvp, NOTE_WRITE);
970 lwkt_reltoken(&hmp->fs_token);
975 * hammer_vop_getattr { vp, vap }
977 * Retrieve an inode's attribute information. When accessing inodes
978 * historically we fake the atime field to ensure consistent results.
979 * The atime field is stored in the B-Tree element and allowed to be
980 * updated without cycling the element.
982 * MPSAFE - does not require fs_token
986 hammer_vop_getattr(struct vop_getattr_args *ap)
988 struct hammer_inode *ip = VTOI(ap->a_vp);
989 struct vattr *vap = ap->a_vap;
992 * We want the fsid to be different when accessing a filesystem
993 * with different as-of's so programs like diff don't think
994 * the files are the same.
996 * We also want the fsid to be the same when comparing snapshots,
997 * or when comparing mirrors (which might be backed by different
998 * physical devices). HAMMER fsids are based on the PFS's
1001 * XXX there is a chance of collision here. The va_fsid reported
1002 * by stat is different from the more involved fsid used in the
1005 ++hammer_stats_file_iopsr;
1006 hammer_lock_sh(&ip->lock);
1007 vap->va_fsid = ip->pfsm->fsid_udev ^ (u_int32_t)ip->obj_asof ^
1008 (u_int32_t)(ip->obj_asof >> 32);
1010 vap->va_fileid = ip->ino_leaf.base.obj_id;
1011 vap->va_mode = ip->ino_data.mode;
1012 vap->va_nlink = ip->ino_data.nlinks;
1013 vap->va_uid = hammer_to_unix_xid(&ip->ino_data.uid);
1014 vap->va_gid = hammer_to_unix_xid(&ip->ino_data.gid);
1017 vap->va_size = ip->ino_data.size;
1020 * Special case for @@PFS softlinks. The actual size of the
	 * expanded softlink is "@@0x%016llx:%05d" == 26 bytes,
	 * or for MAX_TID is "@@-1:%05d" == 10 bytes.
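	 *
	 * (Worked out for illustration: "@@0x" is 4 bytes, the 16 hex
	 * digits of the TID add 16, the ':' adds 1, and the 5 digit PFS
	 * id adds 5, giving 26; "@@-1:" is 5 bytes plus the 5 digit PFS
	 * id, giving 10.)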
1024 if (ip->ino_data.obj_type == HAMMER_OBJTYPE_SOFTLINK &&
1025 ip->ino_data.size == 10 &&
1026 ip->obj_asof == HAMMER_MAX_TID &&
1027 ip->obj_localization == 0 &&
1028 strncmp(ip->ino_data.ext.symlink, "@@PFS", 5) == 0) {
1029 if (ip->pfsm->pfsd.mirror_flags & HAMMER_PFSD_SLAVE)
1036 * We must provide a consistent atime and mtime for snapshots
1037 * so people can do a 'tar cf - ... | md5' on them and get
1038 * consistent results.
1040 if (ip->flags & HAMMER_INODE_RO) {
1041 hammer_time_to_timespec(ip->ino_data.ctime, &vap->va_atime);
1042 hammer_time_to_timespec(ip->ino_data.ctime, &vap->va_mtime);
1044 hammer_time_to_timespec(ip->ino_data.atime, &vap->va_atime);
1045 hammer_time_to_timespec(ip->ino_data.mtime, &vap->va_mtime);
1047 hammer_time_to_timespec(ip->ino_data.ctime, &vap->va_ctime);
1048 vap->va_flags = ip->ino_data.uflags;
1049 vap->va_gen = 1; /* hammer inums are unique for all time */
1050 vap->va_blocksize = HAMMER_BUFSIZE;
1051 if (ip->ino_data.size >= HAMMER_XDEMARC) {
1052 vap->va_bytes = (ip->ino_data.size + HAMMER_XBUFMASK64) &
1054 } else if (ip->ino_data.size > HAMMER_BUFSIZE / 2) {
1055 vap->va_bytes = (ip->ino_data.size + HAMMER_BUFMASK64) &
1058 vap->va_bytes = (ip->ino_data.size + 15) & ~15;
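	/*
	 * (Illustration, assuming the usual 16KB HAMMER buffer and 64KB
	 * extended buffer sizes: a 5 byte file reports va_bytes of 16, a
	 * 20000 byte file rounds up to 32768, and files at or beyond the
	 * HAMMER_XDEMARC cutoff round up to a 64KB boundary instead.)
	 */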
1061 vap->va_type = hammer_get_vnode_type(ip->ino_data.obj_type);
1062 vap->va_filerev = 0; /* XXX */
1063 vap->va_uid_uuid = ip->ino_data.uid;
1064 vap->va_gid_uuid = ip->ino_data.gid;
1065 vap->va_fsid_uuid = ip->hmp->fsid;
1066 vap->va_vaflags = VA_UID_UUID_VALID | VA_GID_UUID_VALID |
1069 switch (ip->ino_data.obj_type) {
1070 case HAMMER_OBJTYPE_CDEV:
1071 case HAMMER_OBJTYPE_BDEV:
1072 vap->va_rmajor = ip->ino_data.rmajor;
1073 vap->va_rminor = ip->ino_data.rminor;
1078 hammer_unlock(&ip->lock);
1083 * hammer_vop_nresolve { nch, dvp, cred }
1085 * Locate the requested directory entry.
1089 hammer_vop_nresolve(struct vop_nresolve_args *ap)
1091 struct hammer_transaction trans;
1092 struct namecache *ncp;
1097 struct hammer_cursor cursor;
1106 u_int32_t localization;
1107 u_int32_t max_iterations;
1110 * Misc initialization, plus handle as-of name extensions. Look for
1111 * the '@@' extension. Note that as-of files and directories cannot
1114 dip = VTOI(ap->a_dvp);
1115 ncp = ap->a_nch->ncp;
1116 asof = dip->obj_asof;
1117 localization = dip->obj_localization; /* for code consistency */
1118 nlen = ncp->nc_nlen;
1119 flags = dip->flags & HAMMER_INODE_RO;
1123 lwkt_gettoken(&hmp->fs_token);
1124 hammer_simple_transaction(&trans, hmp);
1125 ++hammer_stats_file_iopsr;
1127 for (i = 0; i < nlen; ++i) {
1128 if (ncp->nc_name[i] == '@' && ncp->nc_name[i+1] == '@') {
1129 error = hammer_str_to_tid(ncp->nc_name + i + 2,
1130 &ispfs, &asof, &localization);
1135 if (asof != HAMMER_MAX_TID)
1136 flags |= HAMMER_INODE_RO;
1143 * If this is a PFS softlink we dive into the PFS
1145 if (ispfs && nlen == 0) {
1146 ip = hammer_get_inode(&trans, dip, HAMMER_OBJID_ROOT,
1150 error = hammer_get_vnode(ip, &vp);
1151 hammer_rel_inode(ip, 0);
1157 cache_setvp(ap->a_nch, vp);
1164 * If there is no path component the time extension is relative to dip.
1165 * e.g. "fubar/@@<snapshot>"
1167 * "." is handled by the kernel, but ".@@<snapshot>" is not.
1168 * e.g. "fubar/.@@<snapshot>"
1170 * ".." is handled by the kernel. We do not currently handle
1173 if (nlen == 0 || (nlen == 1 && ncp->nc_name[0] == '.')) {
1174 ip = hammer_get_inode(&trans, dip, dip->obj_id,
1175 asof, dip->obj_localization,
1178 error = hammer_get_vnode(ip, &vp);
1179 hammer_rel_inode(ip, 0);
1185 cache_setvp(ap->a_nch, vp);
1192 * Calculate the namekey and setup the key range for the scan. This
1193 * works kinda like a chained hash table where the lower 32 bits
1194 * of the namekey synthesize the chain.
1196 * The key range is inclusive of both key_beg and key_end.
1198 namekey = hammer_directory_namekey(dip, ncp->nc_name, nlen,
1201 error = hammer_init_cursor(&trans, &cursor, &dip->cache[1], dip);
1202 cursor.key_beg.localization = dip->obj_localization +
1203 hammer_dir_localization(dip);
1204 cursor.key_beg.obj_id = dip->obj_id;
1205 cursor.key_beg.key = namekey;
1206 cursor.key_beg.create_tid = 0;
1207 cursor.key_beg.delete_tid = 0;
1208 cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
1209 cursor.key_beg.obj_type = 0;
1211 cursor.key_end = cursor.key_beg;
1212 cursor.key_end.key += max_iterations;
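	/*
	 * (Illustration: every directory entry whose name hashes to the
	 * same upper key bits falls somewhere in
	 * [namekey, namekey + max_iterations], so scanning that inclusive
	 * window covers the whole collision chain.)
	 */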
1214 cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;
1217 * Scan all matching records (the chain), locate the one matching
1218 * the requested path component.
1220 * The hammer_ip_*() functions merge in-memory records with on-disk
1221 * records for the purposes of the search.
1224 localization = HAMMER_DEF_LOCALIZATION;
1227 error = hammer_ip_first(&cursor);
1228 while (error == 0) {
1229 error = hammer_ip_resolve_data(&cursor);
1232 if (nlen == cursor.leaf->data_len - HAMMER_ENTRY_NAME_OFF &&
1233 bcmp(ncp->nc_name, cursor.data->entry.name, nlen) == 0) {
1234 obj_id = cursor.data->entry.obj_id;
1235 localization = cursor.data->entry.localization;
1238 error = hammer_ip_next(&cursor);
1241 hammer_done_cursor(&cursor);
1244 * Lookup the obj_id. This should always succeed. If it does not
1245 * the filesystem may be damaged and we return a dummy inode.
1248 ip = hammer_get_inode(&trans, dip, obj_id,
1251 if (error == ENOENT) {
1252 kprintf("HAMMER: WARNING: Missing "
1253 "inode for dirent \"%s\"\n"
1254 "\tobj_id = %016llx, asof=%016llx, lo=%08x\n",
1256 (long long)obj_id, (long long)asof,
1259 ip = hammer_get_dummy_inode(&trans, dip, obj_id,
1264 error = hammer_get_vnode(ip, &vp);
1265 hammer_rel_inode(ip, 0);
1271 cache_setvp(ap->a_nch, vp);
1274 } else if (error == ENOENT) {
1275 cache_setvp(ap->a_nch, NULL);
1278 hammer_done_transaction(&trans);
1279 lwkt_reltoken(&hmp->fs_token);
1284 * hammer_vop_nlookupdotdot { dvp, vpp, cred }
1286 * Locate the parent directory of a directory vnode.
1288 * dvp is referenced but not locked. *vpp must be returned referenced and
1289 * locked. A parent_obj_id of 0 does not necessarily indicate that we are
1290 * at the root, instead it could indicate that the directory we were in was
1293 * NOTE: as-of sequences are not linked into the directory structure. If
 * we are at the root with a different asof than the mount point, reload
1295 * the same directory with the mount point's asof. I'm not sure what this
1296 * will do to NFS. We encode ASOF stamps in NFS file handles so it might not
1297 * get confused, but it hasn't been tested.
1301 hammer_vop_nlookupdotdot(struct vop_nlookupdotdot_args *ap)
1303 struct hammer_transaction trans;
1304 struct hammer_inode *dip;
1305 struct hammer_inode *ip;
1307 int64_t parent_obj_id;
1308 u_int32_t parent_obj_localization;
1312 dip = VTOI(ap->a_dvp);
1313 asof = dip->obj_asof;
	/*
	 * Who is our parent?  This could be the root of a pseudo-filesystem
	 * whose parent is in another localization domain.
	 */
1320 lwkt_gettoken(&hmp->fs_token);
1321 parent_obj_id = dip->ino_data.parent_obj_id;
1322 if (dip->obj_id == HAMMER_OBJID_ROOT)
1323 parent_obj_localization = dip->ino_data.ext.obj.parent_obj_localization;
1325 parent_obj_localization = dip->obj_localization;
1327 if (parent_obj_id == 0) {
1328 if (dip->obj_id == HAMMER_OBJID_ROOT &&
1329 asof != hmp->asof) {
1330 parent_obj_id = dip->obj_id;
1332 *ap->a_fakename = kmalloc(19, M_TEMP, M_WAITOK);
1333 ksnprintf(*ap->a_fakename, 19, "0x%016llx",
1334 (long long)dip->obj_asof);
1337 lwkt_reltoken(&hmp->fs_token);
1342 hammer_simple_transaction(&trans, hmp);
1343 ++hammer_stats_file_iopsr;
1345 ip = hammer_get_inode(&trans, dip, parent_obj_id,
1346 asof, parent_obj_localization,
1347 dip->flags, &error);
1349 error = hammer_get_vnode(ip, ap->a_vpp);
1350 hammer_rel_inode(ip, 0);
1354 hammer_done_transaction(&trans);
1355 lwkt_reltoken(&hmp->fs_token);
1360 * hammer_vop_nlink { nch, dvp, vp, cred }
1364 hammer_vop_nlink(struct vop_nlink_args *ap)
1366 struct hammer_transaction trans;
1367 struct hammer_inode *dip;
1368 struct hammer_inode *ip;
1369 struct nchandle *nch;
1373 if (ap->a_dvp->v_mount != ap->a_vp->v_mount)
1377 dip = VTOI(ap->a_dvp);
1378 ip = VTOI(ap->a_vp);
1381 if (dip->obj_localization != ip->obj_localization)
1384 if (dip->flags & HAMMER_INODE_RO)
1386 if (ip->flags & HAMMER_INODE_RO)
1388 if ((error = hammer_checkspace(hmp, HAMMER_CHKSPC_CREATE)) != 0)
1392 * Create a transaction to cover the operations we perform.
1394 lwkt_gettoken(&hmp->fs_token);
1395 hammer_start_transaction(&trans, hmp);
1396 ++hammer_stats_file_iopsw;
1399 * Add the filesystem object to the directory. Note that neither
1400 * dip nor ip are referenced or locked, but their vnodes are
1401 * referenced. This function will bump the inode's link count.
1403 error = hammer_ip_add_directory(&trans, dip,
1404 nch->ncp->nc_name, nch->ncp->nc_nlen,
1411 cache_setunresolved(nch);
1412 cache_setvp(nch, ap->a_vp);
1414 hammer_done_transaction(&trans);
1415 hammer_knote(ap->a_vp, NOTE_LINK);
1416 hammer_knote(ap->a_dvp, NOTE_WRITE);
1417 lwkt_reltoken(&hmp->fs_token);
1422 * hammer_vop_nmkdir { nch, dvp, vpp, cred, vap }
1424 * The operating system has already ensured that the directory entry
1425 * does not exist and done all appropriate namespace locking.
1429 hammer_vop_nmkdir(struct vop_nmkdir_args *ap)
1431 struct hammer_transaction trans;
1432 struct hammer_inode *dip;
1433 struct hammer_inode *nip;
1434 struct nchandle *nch;
1439 dip = VTOI(ap->a_dvp);
1442 if (dip->flags & HAMMER_INODE_RO)
1444 if ((error = hammer_checkspace(hmp, HAMMER_CHKSPC_CREATE)) != 0)
1448 * Create a transaction to cover the operations we perform.
1450 lwkt_gettoken(&hmp->fs_token);
1451 hammer_start_transaction(&trans, hmp);
1452 ++hammer_stats_file_iopsw;
1455 * Create a new filesystem object of the requested type. The
1456 * returned inode will be referenced but not locked.
1458 error = hammer_create_inode(&trans, ap->a_vap, ap->a_cred,
1459 dip, nch->ncp->nc_name, nch->ncp->nc_nlen,
1462 hkprintf("hammer_mkdir error %d\n", error);
1463 hammer_done_transaction(&trans);
1465 lwkt_reltoken(&hmp->fs_token);
1469 * Add the new filesystem object to the directory. This will also
1470 * bump the inode's link count.
1472 error = hammer_ip_add_directory(&trans, dip,
1473 nch->ncp->nc_name, nch->ncp->nc_nlen,
1476 hkprintf("hammer_mkdir (add) error %d\n", error);
1482 hammer_rel_inode(nip, 0);
1485 error = hammer_get_vnode(nip, ap->a_vpp);
1486 hammer_rel_inode(nip, 0);
1488 cache_setunresolved(ap->a_nch);
1489 cache_setvp(ap->a_nch, *ap->a_vpp);
1492 hammer_done_transaction(&trans);
1494 hammer_knote(ap->a_dvp, NOTE_WRITE | NOTE_LINK);
1495 lwkt_reltoken(&hmp->fs_token);
1500 * hammer_vop_nmknod { nch, dvp, vpp, cred, vap }
1502 * The operating system has already ensured that the directory entry
1503 * does not exist and done all appropriate namespace locking.
1507 hammer_vop_nmknod(struct vop_nmknod_args *ap)
1509 struct hammer_transaction trans;
1510 struct hammer_inode *dip;
1511 struct hammer_inode *nip;
1512 struct nchandle *nch;
1517 dip = VTOI(ap->a_dvp);
1520 if (dip->flags & HAMMER_INODE_RO)
1522 if ((error = hammer_checkspace(hmp, HAMMER_CHKSPC_CREATE)) != 0)
1526 * Create a transaction to cover the operations we perform.
1528 lwkt_gettoken(&hmp->fs_token);
1529 hammer_start_transaction(&trans, hmp);
1530 ++hammer_stats_file_iopsw;
1533 * Create a new filesystem object of the requested type. The
1534 * returned inode will be referenced but not locked.
1536 * If mknod specifies a directory a pseudo-fs is created.
1538 error = hammer_create_inode(&trans, ap->a_vap, ap->a_cred,
1539 dip, nch->ncp->nc_name, nch->ncp->nc_nlen,
1542 hammer_done_transaction(&trans);
1544 lwkt_reltoken(&hmp->fs_token);
1549 * Add the new filesystem object to the directory. This will also
1550 * bump the inode's link count.
1552 error = hammer_ip_add_directory(&trans, dip,
1553 nch->ncp->nc_name, nch->ncp->nc_nlen,
1560 hammer_rel_inode(nip, 0);
1563 error = hammer_get_vnode(nip, ap->a_vpp);
1564 hammer_rel_inode(nip, 0);
1566 cache_setunresolved(ap->a_nch);
1567 cache_setvp(ap->a_nch, *ap->a_vpp);
1570 hammer_done_transaction(&trans);
1572 hammer_knote(ap->a_dvp, NOTE_WRITE);
1573 lwkt_reltoken(&hmp->fs_token);
1578 * hammer_vop_open { vp, mode, cred, fp }
1580 * MPSAFE (does not require fs_token)
1584 hammer_vop_open(struct vop_open_args *ap)
1588 ++hammer_stats_file_iopsr;
1589 ip = VTOI(ap->a_vp);
1591 if ((ap->a_mode & FWRITE) && (ip->flags & HAMMER_INODE_RO))
1593 return(vop_stdopen(ap));
1597 * hammer_vop_print { vp }
1601 hammer_vop_print(struct vop_print_args *ap)
1607 * hammer_vop_readdir { vp, uio, cred, *eofflag, *ncookies, off_t **cookies }
1611 hammer_vop_readdir(struct vop_readdir_args *ap)
1613 struct hammer_transaction trans;
1614 struct hammer_cursor cursor;
1615 struct hammer_inode *ip;
1618 hammer_base_elm_t base;
1627 ++hammer_stats_file_iopsr;
1628 ip = VTOI(ap->a_vp);
1630 saveoff = uio->uio_offset;
1633 if (ap->a_ncookies) {
1634 ncookies = uio->uio_resid / 16 + 1;
1635 if (ncookies > 1024)
1637 cookies = kmalloc(ncookies * sizeof(off_t), M_TEMP, M_WAITOK);
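		/*
		 * (Sizing illustration: a 4096 byte uio leaves room for
		 * 4096 / 16 + 1 == 257 cookies, and the check above keeps
		 * a huge uio from forcing an oversized temporary
		 * allocation.)
		 */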
1645 lwkt_gettoken(&hmp->fs_token);
1646 hammer_simple_transaction(&trans, hmp);
1649 * Handle artificial entries
1651 * It should be noted that the minimum value for a directory
1652 * hash key on-media is 0x0000000100000000, so we can use anything
	 * less than that to represent our 'special' key space.
1657 r = vop_write_dirent(&error, uio, ip->obj_id, DT_DIR, 1, ".");
1661 cookies[cookie_index] = saveoff;
1664 if (cookie_index == ncookies)
1668 if (ip->ino_data.parent_obj_id) {
1669 r = vop_write_dirent(&error, uio,
1670 ip->ino_data.parent_obj_id,
1673 r = vop_write_dirent(&error, uio,
1674 ip->obj_id, DT_DIR, 2, "..");
1679 cookies[cookie_index] = saveoff;
1682 if (cookie_index == ncookies)
1687 * Key range (begin and end inclusive) to scan. Directory keys
1688 * directly translate to a 64 bit 'seek' position.
1690 hammer_init_cursor(&trans, &cursor, &ip->cache[1], ip);
1691 cursor.key_beg.localization = ip->obj_localization +
1692 hammer_dir_localization(ip);
1693 cursor.key_beg.obj_id = ip->obj_id;
1694 cursor.key_beg.create_tid = 0;
1695 cursor.key_beg.delete_tid = 0;
1696 cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
1697 cursor.key_beg.obj_type = 0;
1698 cursor.key_beg.key = saveoff;
1700 cursor.key_end = cursor.key_beg;
1701 cursor.key_end.key = HAMMER_MAX_KEY;
1702 cursor.asof = ip->obj_asof;
1703 cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;
1705 error = hammer_ip_first(&cursor);
1707 while (error == 0) {
1708 error = hammer_ip_resolve_data(&cursor);
1711 base = &cursor.leaf->base;
1712 saveoff = base->key;
1713 KKASSERT(cursor.leaf->data_len > HAMMER_ENTRY_NAME_OFF);
1715 if (base->obj_id != ip->obj_id)
1716 panic("readdir: bad record at %p", cursor.node);
1719 * Convert pseudo-filesystems into softlinks
1721 dtype = hammer_get_dtype(cursor.leaf->base.obj_type);
1722 r = vop_write_dirent(
1723 &error, uio, cursor.data->entry.obj_id,
1725 cursor.leaf->data_len - HAMMER_ENTRY_NAME_OFF ,
1726 (void *)cursor.data->entry.name);
1731 cookies[cookie_index] = base->key;
1733 if (cookie_index == ncookies)
1735 error = hammer_ip_next(&cursor);
1737 hammer_done_cursor(&cursor);
1740 hammer_done_transaction(&trans);
1743 *ap->a_eofflag = (error == ENOENT);
1744 uio->uio_offset = saveoff;
1745 if (error && cookie_index == 0) {
1746 if (error == ENOENT)
1749 kfree(cookies, M_TEMP);
1750 *ap->a_ncookies = 0;
1751 *ap->a_cookies = NULL;
1754 if (error == ENOENT)
1757 *ap->a_ncookies = cookie_index;
1758 *ap->a_cookies = cookies;
1761 lwkt_reltoken(&hmp->fs_token);
1766 * hammer_vop_readlink { vp, uio, cred }
1770 hammer_vop_readlink(struct vop_readlink_args *ap)
1772 struct hammer_transaction trans;
1773 struct hammer_cursor cursor;
1774 struct hammer_inode *ip;
1777 u_int32_t localization;
1778 hammer_pseudofs_inmem_t pfsm;
1781 ip = VTOI(ap->a_vp);
1784 lwkt_gettoken(&hmp->fs_token);
1787 * Shortcut if the symlink data was stuffed into ino_data.
1789 * Also expand special "@@PFS%05d" softlinks (expansion only
1790 * occurs for non-historical (current) accesses made from the
1791 * primary filesystem).
1793 if (ip->ino_data.size <= HAMMER_INODE_BASESYMLEN) {
1797 ptr = ip->ino_data.ext.symlink;
1798 bytes = (int)ip->ino_data.size;
1800 ip->obj_asof == HAMMER_MAX_TID &&
1801 ip->obj_localization == 0 &&
1802 strncmp(ptr, "@@PFS", 5) == 0) {
1803 hammer_simple_transaction(&trans, hmp);
1804 bcopy(ptr + 5, buf, 5);
1806 localization = strtoul(buf, NULL, 10) << 16;
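		/*
		 * (Example for illustration: a softlink body of
		 * "@@PFS00005" copies "00005" into buf, strtoul() yields 5,
		 * and the localization becomes 5 << 16 == 0x00050000,
		 * selecting PFS #5.)
		 */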
1807 pfsm = hammer_load_pseudofs(&trans, localization,
1810 if (pfsm->pfsd.mirror_flags &
1811 HAMMER_PFSD_SLAVE) {
1812 /* vap->va_size == 26 */
1813 ksnprintf(buf, sizeof(buf),
1815 (long long)pfsm->pfsd.sync_end_tid,
1816 localization >> 16);
1818 /* vap->va_size == 10 */
1819 ksnprintf(buf, sizeof(buf),
1821 localization >> 16);
1823 ksnprintf(buf, sizeof(buf),
1825 (long long)HAMMER_MAX_TID,
1826 localization >> 16);
1830 bytes = strlen(buf);
1833 hammer_rel_pseudofs(hmp, pfsm);
1834 hammer_done_transaction(&trans);
1836 error = uiomove(ptr, bytes, ap->a_uio);
1837 lwkt_reltoken(&hmp->fs_token);
1844 hammer_simple_transaction(&trans, hmp);
1845 ++hammer_stats_file_iopsr;
1846 hammer_init_cursor(&trans, &cursor, &ip->cache[1], ip);
1849 * Key range (begin and end inclusive) to scan. Directory keys
1850 * directly translate to a 64 bit 'seek' position.
1852 cursor.key_beg.localization = ip->obj_localization +
1853 HAMMER_LOCALIZE_MISC;
1854 cursor.key_beg.obj_id = ip->obj_id;
1855 cursor.key_beg.create_tid = 0;
1856 cursor.key_beg.delete_tid = 0;
1857 cursor.key_beg.rec_type = HAMMER_RECTYPE_FIX;
1858 cursor.key_beg.obj_type = 0;
1859 cursor.key_beg.key = HAMMER_FIXKEY_SYMLINK;
1860 cursor.asof = ip->obj_asof;
1861 cursor.flags |= HAMMER_CURSOR_ASOF;
1863 error = hammer_ip_lookup(&cursor);
1865 error = hammer_ip_resolve_data(&cursor);
1867 KKASSERT(cursor.leaf->data_len >=
1868 HAMMER_SYMLINK_NAME_OFF);
1869 error = uiomove(cursor.data->symlink.name,
1870 cursor.leaf->data_len -
1871 HAMMER_SYMLINK_NAME_OFF,
1875 hammer_done_cursor(&cursor);
1876 hammer_done_transaction(&trans);
1877 lwkt_reltoken(&hmp->fs_token);
1882 * hammer_vop_nremove { nch, dvp, cred }
1886 hammer_vop_nremove(struct vop_nremove_args *ap)
1888 struct hammer_transaction trans;
1889 struct hammer_inode *dip;
1893 dip = VTOI(ap->a_dvp);
1896 if (hammer_nohistory(dip) == 0 &&
1897 (error = hammer_checkspace(hmp, HAMMER_CHKSPC_REMOVE)) != 0) {
1901 lwkt_gettoken(&hmp->fs_token);
1902 hammer_start_transaction(&trans, hmp);
1903 ++hammer_stats_file_iopsw;
1904 error = hammer_dounlink(&trans, ap->a_nch, ap->a_dvp, ap->a_cred, 0, 0);
1905 hammer_done_transaction(&trans);
1907 hammer_knote(ap->a_dvp, NOTE_WRITE);
1908 lwkt_reltoken(&hmp->fs_token);
1913 * hammer_vop_nrename { fnch, tnch, fdvp, tdvp, cred }
1917 hammer_vop_nrename(struct vop_nrename_args *ap)
1919 struct hammer_transaction trans;
1920 struct namecache *fncp;
1921 struct namecache *tncp;
1922 struct hammer_inode *fdip;
1923 struct hammer_inode *tdip;
1924 struct hammer_inode *ip;
1926 struct hammer_cursor cursor;
1928 u_int32_t max_iterations;
1931 if (ap->a_fdvp->v_mount != ap->a_tdvp->v_mount)
1933 if (ap->a_fdvp->v_mount != ap->a_fnch->ncp->nc_vp->v_mount)
1936 fdip = VTOI(ap->a_fdvp);
1937 tdip = VTOI(ap->a_tdvp);
1938 fncp = ap->a_fnch->ncp;
1939 tncp = ap->a_tnch->ncp;
1940 ip = VTOI(fncp->nc_vp);
1941 KKASSERT(ip != NULL);
1945 if (fdip->obj_localization != tdip->obj_localization)
1947 if (fdip->obj_localization != ip->obj_localization)
1950 if (fdip->flags & HAMMER_INODE_RO)
1952 if (tdip->flags & HAMMER_INODE_RO)
1954 if (ip->flags & HAMMER_INODE_RO)
1956 if ((error = hammer_checkspace(hmp, HAMMER_CHKSPC_CREATE)) != 0)
1959 lwkt_gettoken(&hmp->fs_token);
1960 hammer_start_transaction(&trans, hmp);
1961 ++hammer_stats_file_iopsw;
1964 * Remove tncp from the target directory and then link ip as
1965 * tncp. XXX pass trans to dounlink
1967 * Force the inode sync-time to match the transaction so it is
1968 * in-sync with the creation of the target directory entry.
1970 error = hammer_dounlink(&trans, ap->a_tnch, ap->a_tdvp,
1972 if (error == 0 || error == ENOENT) {
1973 error = hammer_ip_add_directory(&trans, tdip,
1974 tncp->nc_name, tncp->nc_nlen,
1977 ip->ino_data.parent_obj_id = tdip->obj_id;
1978 ip->ino_data.ctime = trans.time;
1979 hammer_modify_inode(&trans, ip, HAMMER_INODE_DDIRTY);
1983 goto failed; /* XXX */
1986 * Locate the record in the originating directory and remove it.
1988 * Calculate the namekey and setup the key range for the scan. This
1989 * works kinda like a chained hash table where the lower 32 bits
1990 * of the namekey synthesize the chain.
1992 * The key range is inclusive of both key_beg and key_end.
1994 namekey = hammer_directory_namekey(fdip, fncp->nc_name, fncp->nc_nlen,
1997 hammer_init_cursor(&trans, &cursor, &fdip->cache[1], fdip);
1998 cursor.key_beg.localization = fdip->obj_localization +
1999 hammer_dir_localization(fdip);
2000 cursor.key_beg.obj_id = fdip->obj_id;
2001 cursor.key_beg.key = namekey;
2002 cursor.key_beg.create_tid = 0;
2003 cursor.key_beg.delete_tid = 0;
2004 cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
2005 cursor.key_beg.obj_type = 0;
2007 cursor.key_end = cursor.key_beg;
2008 cursor.key_end.key += max_iterations;
2009 cursor.asof = fdip->obj_asof;
2010 cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;
2013 * Scan all matching records (the chain), locate the one matching
2014 * the requested path component.
2016 * The hammer_ip_*() functions merge in-memory records with on-disk
2017 * records for the purposes of the search.
2019 error = hammer_ip_first(&cursor);
2020 while (error == 0) {
2021 if (hammer_ip_resolve_data(&cursor) != 0)
2023 nlen = cursor.leaf->data_len - HAMMER_ENTRY_NAME_OFF;
2025 if (fncp->nc_nlen == nlen &&
2026 bcmp(fncp->nc_name, cursor.data->entry.name, nlen) == 0) {
2029 error = hammer_ip_next(&cursor);
2033 * If all is ok we have to get the inode so we can adjust nlinks.
2035 * WARNING: hammer_ip_del_directory() may have to terminate the
2036 * cursor to avoid a recursion. It's ok to call hammer_done_cursor()
2040 error = hammer_ip_del_directory(&trans, &cursor, fdip, ip);
	 * XXX A deadlock here will break rename's atomicity for the purposes
2044 * of crash recovery.
2046 if (error == EDEADLK) {
2047 hammer_done_cursor(&cursor);
2052 * Cleanup and tell the kernel that the rename succeeded.
2054 * NOTE: ip->vp, if non-NULL, cannot be directly referenced
2055 * without formally acquiring the vp since the vp might
2056 * have zero refs on it, or in the middle of a reclaim,
2059 hammer_done_cursor(&cursor);
2061 cache_rename(ap->a_fnch, ap->a_tnch);
2062 hammer_knote(ap->a_fdvp, NOTE_WRITE);
2063 hammer_knote(ap->a_tdvp, NOTE_WRITE);
2067 error = hammer_get_vnode(ip, &vp);
2068 if (error == 0 && vp) {
2070 hammer_knote(ip->vp, NOTE_RENAME);
2074 kprintf("Debug: HAMMER ip/vp race2 avoided\n");
2079 hammer_done_transaction(&trans);
2080 lwkt_reltoken(&hmp->fs_token);
2085 * hammer_vop_nrmdir { nch, dvp, cred }
2089 hammer_vop_nrmdir(struct vop_nrmdir_args *ap)
2091 struct hammer_transaction trans;
2092 struct hammer_inode *dip;
2096 dip = VTOI(ap->a_dvp);
2099 if (hammer_nohistory(dip) == 0 &&
2100 (error = hammer_checkspace(hmp, HAMMER_CHKSPC_REMOVE)) != 0) {
2104 lwkt_gettoken(&hmp->fs_token);
2105 hammer_start_transaction(&trans, hmp);
2106 ++hammer_stats_file_iopsw;
2107 error = hammer_dounlink(&trans, ap->a_nch, ap->a_dvp, ap->a_cred, 0, 1);
2108 hammer_done_transaction(&trans);
2110 hammer_knote(ap->a_dvp, NOTE_WRITE | NOTE_LINK);
2111 lwkt_reltoken(&hmp->fs_token);
2116 * hammer_vop_markatime { vp, cred }
2120 hammer_vop_markatime(struct vop_markatime_args *ap)
2122 struct hammer_transaction trans;
2123 struct hammer_inode *ip;
2126 ip = VTOI(ap->a_vp);
2127 if (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY)
2129 if (ip->flags & HAMMER_INODE_RO)
2132 if (hmp->mp->mnt_flag & MNT_NOATIME)
2134 lwkt_gettoken(&hmp->fs_token);
2135 hammer_start_transaction(&trans, hmp);
2136 ++hammer_stats_file_iopsw;
2138 ip->ino_data.atime = trans.time;
2139 hammer_modify_inode(&trans, ip, HAMMER_INODE_ATIME);
2140 hammer_done_transaction(&trans);
2141 hammer_knote(ap->a_vp, NOTE_ATTRIB);
2142 lwkt_reltoken(&hmp->fs_token);
2147 * hammer_vop_setattr { vp, vap, cred }
2151 hammer_vop_setattr(struct vop_setattr_args *ap)
2153 struct hammer_transaction trans;
2154 struct hammer_inode *ip;
2163 int64_t aligned_size;
2168 ip = ap->a_vp->v_data;
2173 if (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY)
2175 if (ip->flags & HAMMER_INODE_RO)
2177 if (hammer_nohistory(ip) == 0 &&
2178 (error = hammer_checkspace(hmp, HAMMER_CHKSPC_REMOVE)) != 0) {
2182 lwkt_gettoken(&hmp->fs_token);
2183 hammer_start_transaction(&trans, hmp);
2184 ++hammer_stats_file_iopsw;
2187 if (vap->va_flags != VNOVAL) {
2188 flags = ip->ino_data.uflags;
2189 error = vop_helper_setattr_flags(&flags, vap->va_flags,
2190 hammer_to_unix_xid(&ip->ino_data.uid),
2193 if (ip->ino_data.uflags != flags) {
2194 ip->ino_data.uflags = flags;
2195 ip->ino_data.ctime = trans.time;
2196 modflags |= HAMMER_INODE_DDIRTY;
2197 kflags |= NOTE_ATTRIB;
2199 if (ip->ino_data.uflags & (IMMUTABLE | APPEND)) {
2206 if (ip->ino_data.uflags & (IMMUTABLE | APPEND)) {
2210 if (vap->va_uid != (uid_t)VNOVAL || vap->va_gid != (gid_t)VNOVAL) {
2211 mode_t cur_mode = ip->ino_data.mode;
2212 uid_t cur_uid = hammer_to_unix_xid(&ip->ino_data.uid);
2213 gid_t cur_gid = hammer_to_unix_xid(&ip->ino_data.gid);
2217 error = vop_helper_chown(ap->a_vp, vap->va_uid, vap->va_gid,
2219 &cur_uid, &cur_gid, &cur_mode);
2221 hammer_guid_to_uuid(&uuid_uid, cur_uid);
2222 hammer_guid_to_uuid(&uuid_gid, cur_gid);
2223 if (bcmp(&uuid_uid, &ip->ino_data.uid,
2224 sizeof(uuid_uid)) ||
2225 bcmp(&uuid_gid, &ip->ino_data.gid,
2226 sizeof(uuid_gid)) ||
2227 ip->ino_data.mode != cur_mode
2229 ip->ino_data.uid = uuid_uid;
2230 ip->ino_data.gid = uuid_gid;
2231 ip->ino_data.mode = cur_mode;
2232 ip->ino_data.ctime = trans.time;
2233 modflags |= HAMMER_INODE_DDIRTY;
2235 kflags |= NOTE_ATTRIB;
2238 while (vap->va_size != VNOVAL && ip->ino_data.size != vap->va_size) {
2239 switch(ap->a_vp->v_type) {
2241 if (vap->va_size == ip->ino_data.size)
2245 * Log the operation if in fast-fsync mode or if
2246 * there are unterminated redo write records present.
2248 * The second check is needed so the recovery code
2249 * properly truncates write redos even if nominal
			 * REDO operations are turned off due to excessive
2251 * writes, because the related records might be
2252 * destroyed and never lay down a TERM_WRITE.
2254 if ((ip->flags & HAMMER_INODE_REDO) ||
2255 (ip->flags & HAMMER_INODE_RDIRTY)) {
2256 error = hammer_generate_redo(&trans, ip,
2261 blksize = hammer_blocksize(vap->va_size);
2264 * XXX break atomicy, we can deadlock the backend
2265 * if we do not release the lock. Probably not a
2268 if (vap->va_size < ip->ino_data.size) {
2269 nvtruncbuf(ap->a_vp, vap->va_size,
2271 hammer_blockoff(vap->va_size));
2273 kflags |= NOTE_WRITE;
2275 nvextendbuf(ap->a_vp,
2278 hammer_blocksize(ip->ino_data.size),
2279 hammer_blocksize(vap->va_size),
2280 hammer_blockoff(ip->ino_data.size),
2281 hammer_blockoff(vap->va_size),
2284 kflags |= NOTE_WRITE | NOTE_EXTEND;
2286 ip->ino_data.size = vap->va_size;
2287 ip->ino_data.mtime = trans.time;
2288 /* XXX safe to use SDIRTY instead of DDIRTY here? */
2289 modflags |= HAMMER_INODE_MTIME | HAMMER_INODE_DDIRTY;
2292 * On-media truncation is cached in the inode until
2293 * the inode is synchronized. We must immediately
2294 * handle any frontend records.
2297 hammer_ip_frontend_trunc(ip, vap->va_size);
2298 #ifdef DEBUG_TRUNCATE
2299 if (HammerTruncIp == NULL)
2302 if ((ip->flags & HAMMER_INODE_TRUNCATED) == 0) {
2303 ip->flags |= HAMMER_INODE_TRUNCATED;
2304 ip->trunc_off = vap->va_size;
2305 #ifdef DEBUG_TRUNCATE
2306 if (ip == HammerTruncIp)
2307 kprintf("truncate1 %016llx\n",
2308 (long long)ip->trunc_off);
2310 } else if (ip->trunc_off > vap->va_size) {
2311 ip->trunc_off = vap->va_size;
2312 #ifdef DEBUG_TRUNCATE
2313 if (ip == HammerTruncIp)
2314 kprintf("truncate2 %016llx\n",
2315 (long long)ip->trunc_off);
2318 #ifdef DEBUG_TRUNCATE
2319 if (ip == HammerTruncIp)
2320 kprintf("truncate3 %016llx (ignored)\n",
2321 (long long)vap->va_size);
2328 * When truncating, nvtruncbuf() may have cleaned out
2329 * a portion of the last block on-disk in the buffer
2330 * cache. We must clean out any frontend records
2331 * for blocks beyond the new last block.
2333 aligned_size = (vap->va_size + (blksize - 1)) &
2334 ~(int64_t)(blksize - 1);
2335 if (truncating && vap->va_size < aligned_size) {
2336 aligned_size -= blksize;
2337 hammer_ip_frontend_trunc(ip, aligned_size);
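		/*
		 * (Worked example, assuming a 16384 byte block size:
		 * truncating to 100000 bytes gives an aligned_size of
		 * 114688; because 100000 is not block aligned, aligned_size
		 * backs up one block to 98304 and frontend records past
		 * that offset are destroyed, leaving the partially valid
		 * final block to the buffer cache.)
		 */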
2342 if ((ip->flags & HAMMER_INODE_TRUNCATED) == 0) {
2343 ip->flags |= HAMMER_INODE_TRUNCATED;
2344 ip->trunc_off = vap->va_size;
2345 } else if (ip->trunc_off > vap->va_size) {
2346 ip->trunc_off = vap->va_size;
2348 hammer_ip_frontend_trunc(ip, vap->va_size);
2349 ip->ino_data.size = vap->va_size;
2350 ip->ino_data.mtime = trans.time;
2351 modflags |= HAMMER_INODE_MTIME | HAMMER_INODE_DDIRTY;
2352 kflags |= NOTE_ATTRIB;
2360 if (vap->va_atime.tv_sec != VNOVAL) {
2361 ip->ino_data.atime = hammer_timespec_to_time(&vap->va_atime);
2362 modflags |= HAMMER_INODE_ATIME;
2363 kflags |= NOTE_ATTRIB;
2365 if (vap->va_mtime.tv_sec != VNOVAL) {
2366 ip->ino_data.mtime = hammer_timespec_to_time(&vap->va_mtime);
2367 modflags |= HAMMER_INODE_MTIME;
2368 kflags |= NOTE_ATTRIB;
2370 if (vap->va_mode != (mode_t)VNOVAL) {
2371 mode_t cur_mode = ip->ino_data.mode;
2372 uid_t cur_uid = hammer_to_unix_xid(&ip->ino_data.uid);
2373 gid_t cur_gid = hammer_to_unix_xid(&ip->ino_data.gid);
2375 error = vop_helper_chmod(ap->a_vp, vap->va_mode, ap->a_cred,
2376 cur_uid, cur_gid, &cur_mode);
2377 if (error == 0 && ip->ino_data.mode != cur_mode) {
2378 ip->ino_data.mode = cur_mode;
2379 ip->ino_data.ctime = trans.time;
2380 modflags |= HAMMER_INODE_DDIRTY;
2381 kflags |= NOTE_ATTRIB;
2386 hammer_modify_inode(&trans, ip, modflags);
2387 hammer_done_transaction(&trans);
2388 hammer_knote(ap->a_vp, kflags);
2389 lwkt_reltoken(&hmp->fs_token);
2394 * hammer_vop_nsymlink { nch, dvp, vpp, cred, vap, target }
2398 hammer_vop_nsymlink(struct vop_nsymlink_args *ap)
2400 struct hammer_transaction trans;
2401 struct hammer_inode *dip;
2402 struct hammer_inode *nip;
2403 hammer_record_t record;
2404 struct nchandle *nch;
2409 ap->a_vap->va_type = VLNK;
2412 dip = VTOI(ap->a_dvp);
2415 if (dip->flags & HAMMER_INODE_RO)
2417 if ((error = hammer_checkspace(hmp, HAMMER_CHKSPC_CREATE)) != 0)
2421 * Create a transaction to cover the operations we perform.
2423 lwkt_gettoken(&hmp->fs_token);
2424 hammer_start_transaction(&trans, hmp);
2425 ++hammer_stats_file_iopsw;
2428 * Create a new filesystem object of the requested type. The
2429 * returned inode will be referenced but not locked.
2432 error = hammer_create_inode(&trans, ap->a_vap, ap->a_cred,
2433 dip, nch->ncp->nc_name, nch->ncp->nc_nlen,
2436 hammer_done_transaction(&trans);
2438 lwkt_reltoken(&hmp->fs_token);
2443 * Add a record representing the symlink. symlink stores the link
2444 * as pure data, not a string, and is not \0 terminated.
2447 bytes = strlen(ap->a_target);
2449 if (bytes <= HAMMER_INODE_BASESYMLEN) {
2450 bcopy(ap->a_target, nip->ino_data.ext.symlink, bytes);
2452 record = hammer_alloc_mem_record(nip, bytes);
2453 record->type = HAMMER_MEM_RECORD_GENERAL;
2455 record->leaf.base.localization = nip->obj_localization +
2456 HAMMER_LOCALIZE_MISC;
2457 record->leaf.base.key = HAMMER_FIXKEY_SYMLINK;
2458 record->leaf.base.rec_type = HAMMER_RECTYPE_FIX;
2459 record->leaf.data_len = bytes;
2460 KKASSERT(HAMMER_SYMLINK_NAME_OFF == 0);
2461 bcopy(ap->a_target, record->data->symlink.name, bytes);
2462 error = hammer_ip_add_record(&trans, record);
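/*
 * Editor's note (illustrative): a target no longer than
 * HAMMER_INODE_BASESYMLEN bytes is copied straight into
 * ino_data.ext.symlink above and needs no extra record, while a longer
 * target is carried by the HAMMER_RECTYPE_FIX record built here, keyed
 * by HAMMER_FIXKEY_SYMLINK under the inode's MISC localization.
 */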
2466 * Set the file size to the length of the link.
2469 nip->ino_data.size = bytes;
2470 hammer_modify_inode(&trans, nip, HAMMER_INODE_DDIRTY);
2474 error = hammer_ip_add_directory(&trans, dip, nch->ncp->nc_name,
2475 nch->ncp->nc_nlen, nip);
2481 hammer_rel_inode(nip, 0);
2484 error = hammer_get_vnode(nip, ap->a_vpp);
2485 hammer_rel_inode(nip, 0);
2487 cache_setunresolved(ap->a_nch);
2488 cache_setvp(ap->a_nch, *ap->a_vpp);
2489 hammer_knote(ap->a_dvp, NOTE_WRITE);
2492 hammer_done_transaction(&trans);
2493 lwkt_reltoken(&hmp->fs_token);
2498 * hammer_vop_nwhiteout { nch, dvp, cred, flags }
2502 hammer_vop_nwhiteout(struct vop_nwhiteout_args *ap)
2504 struct hammer_transaction trans;
2505 struct hammer_inode *dip;
2509 dip = VTOI(ap->a_dvp);
2512 if (hammer_nohistory(dip) == 0 &&
2513 (error = hammer_checkspace(hmp, HAMMER_CHKSPC_CREATE)) != 0) {
2517 lwkt_gettoken(&hmp->fs_token);
2518 hammer_start_transaction(&trans, hmp);
2519 ++hammer_stats_file_iopsw;
2520 error = hammer_dounlink(&trans, ap->a_nch, ap->a_dvp,
2521 ap->a_cred, ap->a_flags, -1);
2522 hammer_done_transaction(&trans);
2523 lwkt_reltoken(&hmp->fs_token);
2529 * hammer_vop_ioctl { vp, command, data, fflag, cred }
2533 hammer_vop_ioctl(struct vop_ioctl_args *ap)
2535 struct hammer_inode *ip = ap->a_vp->v_data;
2536 hammer_mount_t hmp = ip->hmp;
2539 ++hammer_stats_file_iopsr;
2540 lwkt_gettoken(&hmp->fs_token);
2541 error = hammer_ioctl(ip, ap->a_command, ap->a_data,
2542 ap->a_fflag, ap->a_cred);
2543 lwkt_reltoken(&hmp->fs_token);
2549 hammer_vop_mountctl(struct vop_mountctl_args *ap)
2551 static const struct mountctl_opt extraopt[] = {
2552 { HMNT_NOHISTORY, "nohistory" },
2553 { HMNT_MASTERID, "master" },
2557 struct hammer_mount *hmp;
2564 mp = ap->a_head.a_ops->head.vv_mount;
2565 KKASSERT(mp->mnt_data != NULL);
2566 hmp = (struct hammer_mount *)mp->mnt_data;
2568 lwkt_gettoken(&hmp->fs_token);
2571 case MOUNTCTL_SET_EXPORT:
2572 if (ap->a_ctllen != sizeof(struct export_args))
2575 error = hammer_vfs_export(mp, ap->a_op,
2576 (const struct export_args *)ap->a_ctl);
2578 case MOUNTCTL_MOUNTFLAGS:
2581 * Call standard mountctl VOP function
2582 * so we get user mount flags.
2584 error = vop_stdmountctl(ap);
2588 usedbytes = *ap->a_res;
2590 if (usedbytes > 0 && usedbytes < ap->a_buflen) {
2591 usedbytes += vfs_flagstostr(hmp->hflags, extraopt,
2593 ap->a_buflen - usedbytes,
2597 *ap->a_res += usedbytes;
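/*
 * Editor's note (illustrative): for a mount with HMNT_NOHISTORY set,
 * vfs_flagstostr() appends "nohistory" from extraopt[] after the
 * standard flag string produced by vop_stdmountctl(), and the extra
 * length is accounted back into *ap->a_res.
 */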
2601 error = vop_stdmountctl(ap);
2604 lwkt_reltoken(&hmp->fs_token);
2609 * hammer_vop_strategy { vp, bio }
2611 * Strategy call, used for regular file read & write only. Note that the
2612 * bp may represent a cluster.
2614 * To simplify operation and allow better optimizations in the future,
2615 * this code does not make any assumptions with regard to buffer alignment or size.
2620 hammer_vop_strategy(struct vop_strategy_args *ap)
2625 bp = ap->a_bio->bio_buf;
2629 error = hammer_vop_strategy_read(ap);
2632 error = hammer_vop_strategy_write(ap);
2635 bp->b_error = error = EINVAL;
2636 bp->b_flags |= B_ERROR;
2641 /* hammer_dump_dedup_cache(((hammer_inode_t)ap->a_vp->v_data)->hmp); */
2647 * Read from a regular file. Iterate the related records and fill in the
2648 * BIO/BUF. Gaps are zero-filled.
2650 * The support code in hammer_object.c should be used to deal with mixed
2651 * in-memory and on-disk records.
2653 * NOTE: Can be called from the cluster code with an oversized buf.
2659 hammer_vop_strategy_read(struct vop_strategy_args *ap)
2661 struct hammer_transaction trans;
2662 struct hammer_inode *ip;
2663 struct hammer_inode *dip;
2665 struct hammer_cursor cursor;
2666 hammer_base_elm_t base;
2667 hammer_off_t disk_offset;
2682 ip = ap->a_vp->v_data;
2686 * The zone-2 disk offset may have been set by the cluster code via
2687 * a BMAP operation, or else should be NOOFFSET.
2689 * Checking the high bits for a match against zone-2 should suffice.
2691 * In cases where a lot of data duplication is present it may be
2692 * more beneficial to drop through and double-buffer through the buffer cache.
2695 nbio = push_bio(bio);
2696 if (hammer_double_buffer == 0 &&
2697 (nbio->bio_offset & HAMMER_OFF_ZONE_MASK) ==
2698 HAMMER_ZONE_LARGE_DATA) {
2699 lwkt_gettoken(&hmp->fs_token);
2700 error = hammer_io_direct_read(hmp, nbio, NULL);
2701 lwkt_reltoken(&hmp->fs_token);
2706 * Well, that sucked. Do it the hard way. If all the stars are
2707 * aligned we may still be able to issue a direct-read.
2709 lwkt_gettoken(&hmp->fs_token);
2710 hammer_simple_transaction(&trans, hmp);
2711 hammer_init_cursor(&trans, &cursor, &ip->cache[1], ip);
2714 * Key range (begin and end inclusive) to scan. Note that the keys
2715 * stored in the actual records represent BASE+LEN, not BASE. The
2716 * first record containing bio_offset will have a key > bio_offset.
2718 cursor.key_beg.localization = ip->obj_localization +
2719 HAMMER_LOCALIZE_MISC;
2720 cursor.key_beg.obj_id = ip->obj_id;
2721 cursor.key_beg.create_tid = 0;
2722 cursor.key_beg.delete_tid = 0;
2723 cursor.key_beg.obj_type = 0;
2724 cursor.key_beg.key = bio->bio_offset + 1;
2725 cursor.asof = ip->obj_asof;
2726 cursor.flags |= HAMMER_CURSOR_ASOF;
2728 cursor.key_end = cursor.key_beg;
2729 KKASSERT(ip->ino_data.obj_type == HAMMER_OBJTYPE_REGFILE);
2731 if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DBFILE) {
2732 cursor.key_beg.rec_type = HAMMER_RECTYPE_DB;
2733 cursor.key_end.rec_type = HAMMER_RECTYPE_DB;
2734 cursor.key_end.key = 0x7FFFFFFFFFFFFFFFLL;
2738 ran_end = bio->bio_offset + bp->b_bufsize;
2739 cursor.key_beg.rec_type = HAMMER_RECTYPE_DATA;
2740 cursor.key_end.rec_type = HAMMER_RECTYPE_DATA;
2741 tmp64 = ran_end + MAXPHYS + 1; /* work-around GCC-4 bug */
2742 if (tmp64 < ran_end)
2743 cursor.key_end.key = 0x7FFFFFFFFFFFFFFFLL;
2745 cursor.key_end.key = ran_end + MAXPHYS + 1;
2747 cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE;
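/*
 * Editor's example (illustrative, values assumed): a data record
 * covering file offsets [32768, 49152) is stored with key 49152
 * (BASE+LEN).  A BIO starting at bio_offset 32768 therefore sets
 * key_beg.key = 32769 and the record is picked up because its key,
 * 49152, is greater than 32769 and no greater than key_end.
 */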
2749 error = hammer_ip_first(&cursor);
2752 while (error == 0) {
2754 * Get the base file offset of the record. The key for
2755 * data records is (base + bytes) rather than (base).
2757 base = &cursor.leaf->base;
2758 rec_offset = base->key - cursor.leaf->data_len;
2761 * Calculate the gap, if any, and zero-fill it.
2763 * n is the offset of the start of the record versus our
2764 * current seek offset in the bio.
2766 n = (int)(rec_offset - (bio->bio_offset + boff));
2768 if (n > bp->b_bufsize - boff)
2769 n = bp->b_bufsize - boff;
2770 bzero((char *)bp->b_data + boff, n);
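/*
 * Editor's example (illustrative, values assumed): if the first record
 * found is based at rec_offset = 40960 while the BIO starts at 32768
 * with boff = 0, then n = 8192 and that hole is zero-filled before any
 * record data is copied; n is clamped so the fill never runs past
 * bp->b_bufsize.
 */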
2776 * Calculate the data offset in the record and the number
2777 * of bytes we can copy.
2779 * There are two degenerate cases. First, boff may already
2780 * be at bp->b_bufsize. Secondly, the data offset within
2781 * the record may exceed the record's size.
2785 n = cursor.leaf->data_len - roff;
2787 kprintf("strategy_read: bad n=%d roff=%d\n", n, roff);
2789 } else if (n > bp->b_bufsize - boff) {
2790 n = bp->b_bufsize - boff;
2794 * Deal with cached truncations. This cool bit of code
2795 * allows truncate()/ftruncate() to avoid having to sync
2798 * If the frontend is truncated then all backend records are
2799 * subject to the frontend's truncation.
2801 * If the backend is truncated then backend records on-disk
2802 * (but not in-memory) are subject to the backend's
2803 * truncation. In-memory records owned by the backend
2804 * represent data written after the truncation point on the
2805 * backend and must not be truncated.
2807 * Truncate operations deal with frontend buffer cache
2808 * buffers and frontend-owned in-memory records synchronously.
2810 if (ip->flags & HAMMER_INODE_TRUNCATED) {
2811 if (hammer_cursor_ondisk(&cursor)/* ||
2812 cursor.iprec->flush_state == HAMMER_FST_FLUSH*/) {
2813 if (ip->trunc_off <= rec_offset)
2815 else if (ip->trunc_off < rec_offset + n)
2816 n = (int)(ip->trunc_off - rec_offset);
2819 if (ip->sync_flags & HAMMER_INODE_TRUNCATED) {
2820 if (hammer_cursor_ondisk(&cursor)) {
2821 if (ip->sync_trunc_off <= rec_offset)
2823 else if (ip->sync_trunc_off < rec_offset + n)
2824 n = (int)(ip->sync_trunc_off - rec_offset);
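/*
 * Editor's example (illustrative, values assumed): with a cached
 * truncation offset of 5000 and an on-disk record covering
 * [4096, 8192), the copy length is clamped to n = 5000 - 4096 = 904
 * bytes; a record based at or beyond 5000 contributes nothing (n = 0).
 */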
2829 * Try to issue a direct read into our bio if possible,
2830 * otherwise resolve the element data into a hammer_buffer
2833 * The buffer on-disk should be zeroed past any real
2834 * truncation point, but may not be for any synthesized
2835 * truncation point from above.
2837 disk_offset = cursor.leaf->data_offset + roff;
2838 isdedupable = (boff == 0 && n == bp->b_bufsize &&
2839 hammer_cursor_ondisk(&cursor) &&
2840 ((int)disk_offset & HAMMER_BUFMASK) == 0);
2842 if (isdedupable && hammer_double_buffer == 0) {
2843 KKASSERT((disk_offset & HAMMER_OFF_ZONE_MASK) ==
2844 HAMMER_ZONE_LARGE_DATA);
2845 nbio->bio_offset = disk_offset;
2846 error = hammer_io_direct_read(hmp, nbio, cursor.leaf);
2847 if (hammer_live_dedup && error == 0)
2848 hammer_dedup_cache_add(ip, cursor.leaf);
2851 error = hammer_ip_resolve_data(&cursor);
2853 if (hammer_live_dedup && isdedupable)
2854 hammer_dedup_cache_add(ip, cursor.leaf);
2855 bcopy((char *)cursor.data + roff,
2856 (char *)bp->b_data + boff, n);
2863 * We have to be sure that the only elements added to the
2864 * dedup cache are those which are already on-media.
2866 if (hammer_live_dedup && hammer_cursor_ondisk(&cursor))
2867 hammer_dedup_cache_add(ip, cursor.leaf);
2870 * Iterate until we have filled the request.
2873 if (boff == bp->b_bufsize)
2875 error = hammer_ip_next(&cursor);
2879 * There may have been a gap after the last record
2881 if (error == ENOENT)
2883 if (error == 0 && boff != bp->b_bufsize) {
2884 KKASSERT(boff < bp->b_bufsize);
2885 bzero((char *)bp->b_data + boff, bp->b_bufsize - boff);
2886 /* boff = bp->b_bufsize; */
2889 bp->b_error = error;
2891 bp->b_flags |= B_ERROR;
2896 * Cache the b-tree node for the last data read in cache[1].
2898 * If we hit the file EOF then also cache the node in the
2899 * governing directory's cache[3]; it will be used to initialize
2900 * the inode's cache[1] for any inodes looked up via the directory.
2902 * This doesn't reduce disk accesses since the B-Tree chain is
2903 * likely cached, but it does reduce cpu overhead when looking
2904 * up file offsets for cpdup/tar/cpio style iterations.
2907 hammer_cache_node(&ip->cache[1], cursor.node);
2908 if (ran_end >= ip->ino_data.size) {
2909 dip = hammer_find_inode(&trans, ip->ino_data.parent_obj_id,
2910 ip->obj_asof, ip->obj_localization);
2912 hammer_cache_node(&dip->cache[3], cursor.node);
2913 hammer_rel_inode(dip, 0);
2916 hammer_done_cursor(&cursor);
2917 hammer_done_transaction(&trans);
2918 lwkt_reltoken(&hmp->fs_token);
2923 * BMAP operation - used to support cluster_read() only.
2925 * (struct vnode *vp, off_t loffset, off_t *doffsetp, int *runp, int *runb)
2927 * This routine may return EOPNOTSUPP if the operation is not supported for
2928 * the specified offset. The contents of the pointer arguments do not
2929 * need to be initialized in that case.
2931 * If a disk address is available and properly aligned return 0 with
2932 * *doffsetp set to the zone-2 address, and *runp / *runb set appropriately
2933 * to the run-length relative to that offset. Callers may assume that
2934 * *doffsetp is valid if 0 is returned, even if *runp is not sufficiently
2935 * large, so EOPNOTSUPP is returned instead of 0 when the run is too small to be useful.
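 * Editor's example (illustrative, values assumed): for an a_loffset of
 * 0x5000 falling inside a contiguous on-media extent spanning file
 * offsets [0, 0x20000) at zone-2 offset Z, a successful call returns 0
 * with *doffsetp = Z + 0x5000, *runb = 0x5000 (contiguous bytes behind
 * the offset) and *runp = 0x1b000 (contiguous bytes beyond it), subject
 * to the blocksize clamping and direct-IO checks performed below.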
2939 hammer_vop_bmap(struct vop_bmap_args *ap)
2941 struct hammer_transaction trans;
2942 struct hammer_inode *ip;
2944 struct hammer_cursor cursor;
2945 hammer_base_elm_t base;
2949 int64_t base_offset;
2950 int64_t base_disk_offset;
2951 int64_t last_offset;
2952 hammer_off_t last_disk_offset;
2953 hammer_off_t disk_offset;
2958 ++hammer_stats_file_iopsr;
2959 ip = ap->a_vp->v_data;
2963 * We can only BMAP regular files. We can't BMAP database files,
2966 if (ip->ino_data.obj_type != HAMMER_OBJTYPE_REGFILE)
2970 * bmap is typically called with runp/runb both NULL when used
2971 * for writing. We do not support BMAP for writing atm.
2973 if (ap->a_cmd != BUF_CMD_READ)
2977 * Scan the B-Tree to acquire blockmap addresses, then translate
2980 lwkt_gettoken(&hmp->fs_token);
2981 hammer_simple_transaction(&trans, hmp);
2983 kprintf("bmap_beg %016llx ip->cache %p\n",
2984 (long long)ap->a_loffset, ip->cache[1]);
2986 hammer_init_cursor(&trans, &cursor, &ip->cache[1], ip);
2989 * Key range (begin and end inclusive) to scan. Note that the keys
2990 * stored in the actual records represent BASE+LEN, not BASE. The
2991 * first record containing bio_offset will have a key > bio_offset.
2993 cursor.key_beg.localization = ip->obj_localization +
2994 HAMMER_LOCALIZE_MISC;
2995 cursor.key_beg.obj_id = ip->obj_id;
2996 cursor.key_beg.create_tid = 0;
2997 cursor.key_beg.delete_tid = 0;
2998 cursor.key_beg.obj_type = 0;
3000 cursor.key_beg.key = ap->a_loffset - MAXPHYS + 1;
3002 cursor.key_beg.key = ap->a_loffset + 1;
3003 if (cursor.key_beg.key < 0)
3004 cursor.key_beg.key = 0;
3005 cursor.asof = ip->obj_asof;
3006 cursor.flags |= HAMMER_CURSOR_ASOF;
3008 cursor.key_end = cursor.key_beg;
3009 KKASSERT(ip->ino_data.obj_type == HAMMER_OBJTYPE_REGFILE);
3011 ran_end = ap->a_loffset + MAXPHYS;
3012 cursor.key_beg.rec_type = HAMMER_RECTYPE_DATA;
3013 cursor.key_end.rec_type = HAMMER_RECTYPE_DATA;
3014 tmp64 = ran_end + MAXPHYS + 1; /* work-around GCC-4 bug */
3015 if (tmp64 < ran_end)
3016 cursor.key_end.key = 0x7FFFFFFFFFFFFFFFLL;
3018 cursor.key_end.key = ran_end + MAXPHYS + 1;
3020 cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE;
3022 error = hammer_ip_first(&cursor);
3023 base_offset = last_offset = 0;
3024 base_disk_offset = last_disk_offset = 0;
3026 while (error == 0) {
3028 * Get the base file offset of the record. The key for
3029 * data records is (base + bytes) rather than (base).
3031 * NOTE: rec_offset + rec_len may exceed the end-of-file.
3032 * The extra bytes should be zero on-disk and the BMAP op
3033 * should still be ok.
3035 base = &cursor.leaf->base;
3036 rec_offset = base->key - cursor.leaf->data_len;
3037 rec_len = cursor.leaf->data_len;
3040 * Incorporate any cached truncation.
3042 * NOTE: Modifications to rec_len based on synthesized
3043 * truncation points remove the guarantee that any extended
3044 * data on disk is zero (since the truncations may not have
3045 * taken place on-media yet).
3047 if (ip->flags & HAMMER_INODE_TRUNCATED) {
3048 if (hammer_cursor_ondisk(&cursor) ||
3049 cursor.iprec->flush_state == HAMMER_FST_FLUSH) {
3050 if (ip->trunc_off <= rec_offset)
3052 else if (ip->trunc_off < rec_offset + rec_len)
3053 rec_len = (int)(ip->trunc_off - rec_offset);
3056 if (ip->sync_flags & HAMMER_INODE_TRUNCATED) {
3057 if (hammer_cursor_ondisk(&cursor)) {
3058 if (ip->sync_trunc_off <= rec_offset)
3060 else if (ip->sync_trunc_off < rec_offset + rec_len)
3061 rec_len = (int)(ip->sync_trunc_off - rec_offset);
3066 * Accumulate information. If we have hit a discontiguous
3067 * block reset base_offset unless we are already beyond the
3068 * requested offset. If we are, that's it, we stop.
3072 if (hammer_cursor_ondisk(&cursor)) {
3073 disk_offset = cursor.leaf->data_offset;
3074 if (rec_offset != last_offset ||
3075 disk_offset != last_disk_offset) {
3076 if (rec_offset > ap->a_loffset)
3078 base_offset = rec_offset;
3079 base_disk_offset = disk_offset;
3081 last_offset = rec_offset + rec_len;
3082 last_disk_offset = disk_offset + rec_len;
3084 if (hammer_live_dedup)
3085 hammer_dedup_cache_add(ip, cursor.leaf);
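/*
 * Editor's example (illustrative, values assumed): two on-disk records
 * covering file offsets [0, 16384) and [16384, 32768) whose zone-2
 * data offsets are likewise back-to-back extend the run, advancing
 * last_offset and last_disk_offset.  A record whose file or disk
 * offset does not line up with the previous end resets base_offset /
 * base_disk_offset, or ends the scan if we are already past
 * ap->a_loffset.
 */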
3088 error = hammer_ip_next(&cursor);
3092 kprintf("BMAP %016llx: %016llx - %016llx\n",
3093 (long long)ap->a_loffset,
3094 (long long)base_offset,
3095 (long long)last_offset);
3096 kprintf("BMAP %16s: %016llx - %016llx\n", "",
3097 (long long)base_disk_offset,
3098 (long long)last_disk_offset);
3102 hammer_cache_node(&ip->cache[1], cursor.node);
3104 kprintf("bmap_end2 %016llx ip->cache %p\n",
3105 (long long)ap->a_loffset, ip->cache[1]);
3108 hammer_done_cursor(&cursor);
3109 hammer_done_transaction(&trans);
3110 lwkt_reltoken(&hmp->fs_token);
3113 * If we couldn't find any records or the records we did find were
3114 * all behind the requested offset, return failure. A forward
3115 * truncation can leave a hole w/ no on-disk records.
3117 if (last_offset == 0 || last_offset < ap->a_loffset)
3118 return (EOPNOTSUPP);
3121 * Figure out the block size at the requested offset and adjust
3122 * our limits so the cluster_read() does not create inappropriately
3123 * sized buffer cache buffers.
3125 blksize = hammer_blocksize(ap->a_loffset);
3126 if (hammer_blocksize(base_offset) != blksize) {
3127 base_offset = hammer_blockdemarc(base_offset, ap->a_loffset);
3129 if (last_offset != ap->a_loffset &&
3130 hammer_blocksize(last_offset - 1) != blksize) {
3131 last_offset = hammer_blockdemarc(ap->a_loffset,
3136 * Returning EOPNOTSUPP simply prevents the direct-IO optimization from being used.
3139 disk_offset = base_disk_offset + (ap->a_loffset - base_offset);
3141 if ((disk_offset & HAMMER_OFF_ZONE_MASK) != HAMMER_ZONE_LARGE_DATA) {
3143 * Only large-data zones can be direct-IOd
3146 } else if ((disk_offset & HAMMER_BUFMASK) ||
3147 (last_offset - ap->a_loffset) < blksize) {
3149 * doffsetp is not aligned or the forward run size does
3150 * not cover a whole buffer, disallow the direct I/O.
3157 *ap->a_doffsetp = disk_offset;
3159 *ap->a_runb = ap->a_loffset - base_offset;
3160 KKASSERT(*ap->a_runb >= 0);
3163 *ap->a_runp = last_offset - ap->a_loffset;
3164 KKASSERT(*ap->a_runp >= 0);
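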
3172 * Write to a regular file. Because this is a strategy call the OS is
3173 * trying to actually get data onto the media.
3177 hammer_vop_strategy_write(struct vop_strategy_args *ap)
3179 hammer_record_t record;
3190 ip = ap->a_vp->v_data;
3193 blksize = hammer_blocksize(bio->bio_offset);
3194 KKASSERT(bp->b_bufsize == blksize);
3196 if (ip->flags & HAMMER_INODE_RO) {
3197 bp->b_error = EROFS;
3198 bp->b_flags |= B_ERROR;
3203 lwkt_gettoken(&hmp->fs_token);
3206 * Interlock with inode destruction (no in-kernel or directory
3207 * topology visibility). If we queue new IO while trying to
3208 * destroy the inode we can deadlock the vtrunc call in
3209 * hammer_inode_unloadable_check().
3211 * Besides, there's no point flushing a bp associated with an
3212 * inode that is being destroyed on-media and has no kernel references.
3215 if ((ip->flags | ip->sync_flags) &
3216 (HAMMER_INODE_DELETING|HAMMER_INODE_DELETED)) {
3219 lwkt_reltoken(&hmp->fs_token);
3224 * Reserve space and issue a direct-write from the front-end.
3225 * NOTE: The direct_io code will hammer_bread/bcopy smaller
3228 * An in-memory record will be installed to reference the storage
3229 * until the flusher can get to it.
3231 * Since we own the high level bio the front-end will not try to
3232 * do a direct-read until the write completes.
3234 * NOTE: The only time we do not reserve a full-sized buffer's
3235 * worth of data is if the file is small. We do not try to
3236 * allocate a fragment (from the small-data zone) at the end of
3237 * an otherwise large file as this can lead to wildly separated data.
3240 KKASSERT((bio->bio_offset & HAMMER_BUFMASK) == 0);
3241 KKASSERT(bio->bio_offset < ip->ino_data.size);
3242 if (bio->bio_offset || ip->ino_data.size > HAMMER_BUFSIZE / 2)
3243 bytes = bp->b_bufsize;
3245 bytes = ((int)ip->ino_data.size + 15) & ~15;
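/*
 * Editor's example (illustrative): a 100 byte file being written at
 * offset 0 reserves ((100 + 15) & ~15) = 112 bytes rather than a full
 * buffer, whereas any write at a non-zero offset, or to a file larger
 * than HAMMER_BUFSIZE / 2, reserves the full bp->b_bufsize.
 */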
3247 record = hammer_ip_add_bulk(ip, bio->bio_offset, bp->b_data,
3251 * B_VFSFLAG1 indicates that a REDO_WRITE entry was generated
3252 * in hammer_vop_write(). We must flag the record so the proper
3253 * REDO_TERM_WRITE entry is generated during the flush.
3256 if (bp->b_flags & B_VFSFLAG1) {
3257 record->flags |= HAMMER_RECF_REDO;
3258 bp->b_flags &= ~B_VFSFLAG1;
3260 if (record->flags & HAMMER_RECF_DEDUPED) {
3262 hammer_ip_replace_bulk(hmp, record);
3265 hammer_io_direct_write(hmp, bio, record);
3267 if (ip->rsv_recs > 1 && hmp->rsv_recs > hammer_limit_recs)
3268 hammer_flush_inode(ip, 0);
3270 bp->b_bio2.bio_offset = NOOFFSET;
3271 bp->b_error = error;
3272 bp->b_flags |= B_ERROR;
3275 lwkt_reltoken(&hmp->fs_token);
3280 * dounlink - disconnect a directory entry
3282 * XXX whiteout support not really in yet
3285 hammer_dounlink(hammer_transaction_t trans, struct nchandle *nch,
3286 struct vnode *dvp, struct ucred *cred,
3287 int flags, int isdir)
3289 struct namecache *ncp;
3293 struct hammer_cursor cursor;
3295 u_int32_t max_iterations;
3299 * Calculate the namekey and setup the key range for the scan. This
3300 * works kinda like a chained hash table where the lower 32 bits
3301 * of the namekey synthesize the chain.
3303 * The key range is inclusive of both key_beg and key_end.
3309 if (dip->flags & HAMMER_INODE_RO)
3312 namekey = hammer_directory_namekey(dip, ncp->nc_name, ncp->nc_nlen,
3315 hammer_init_cursor(trans, &cursor, &dip->cache[1], dip);
3316 cursor.key_beg.localization = dip->obj_localization +
3317 hammer_dir_localization(dip);
3318 cursor.key_beg.obj_id = dip->obj_id;
3319 cursor.key_beg.key = namekey;
3320 cursor.key_beg.create_tid = 0;
3321 cursor.key_beg.delete_tid = 0;
3322 cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
3323 cursor.key_beg.obj_type = 0;
3325 cursor.key_end = cursor.key_beg;
3326 cursor.key_end.key += max_iterations;
3327 cursor.asof = dip->obj_asof;
3328 cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;
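/*
 * Editor's note (illustrative): two names hashing to the same namekey
 * land on the same chain; the scan below walks keys in
 * [namekey, namekey + max_iterations] and confirms each candidate by
 * comparing nc_nlen and the actual name bytes, so a hash collision
 * only costs extra iterations, never a wrong match.
 */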
3331 * Scan all matching records (the chain), locate the one matching
3332 * the requested path component. info->last_error contains the
3333 * error code on search termination and could be 0, ENOENT, or something else.
3336 * The hammer_ip_*() functions merge in-memory records with on-disk
3337 * records for the purposes of the search.
3339 error = hammer_ip_first(&cursor);
3341 while (error == 0) {
3342 error = hammer_ip_resolve_data(&cursor);
3345 nlen = cursor.leaf->data_len - HAMMER_ENTRY_NAME_OFF;
3347 if (ncp->nc_nlen == nlen &&
3348 bcmp(ncp->nc_name, cursor.data->entry.name, nlen) == 0) {
3351 error = hammer_ip_next(&cursor);
3355 * If all is ok we have to get the inode so we can adjust nlinks.
3356 * To avoid a deadlock with the flusher we must release the inode
3357 * lock on the directory when acquiring the inode for the entry.
3359 * If the target is a directory, it must be empty.
3362 hammer_unlock(&cursor.ip->lock);
3363 ip = hammer_get_inode(trans, dip, cursor.data->entry.obj_id,
3365 cursor.data->entry.localization,
3367 hammer_lock_sh(&cursor.ip->lock);
3368 if (error == ENOENT) {
3369 kprintf("HAMMER: WARNING: Removing "
3370 "dirent w/missing inode \"%s\"\n"
3371 "\tobj_id = %016llx\n",
3373 (long long)cursor.data->entry.obj_id);
3378 * If isdir >= 0 we validate that the entry is or is not a
3379 * directory. If isdir < 0 we don't care.
3381 if (error == 0 && isdir >= 0 && ip) {
3383 ip->ino_data.obj_type != HAMMER_OBJTYPE_DIRECTORY) {
3385 } else if (isdir == 0 &&
3386 ip->ino_data.obj_type == HAMMER_OBJTYPE_DIRECTORY) {
3392 * If we are trying to remove a directory the directory must be empty.
3395 * The check directory code can loop and deadlock/retry. Our
3396 * own cursor's node locks must be released to avoid a 3-way
3397 * deadlock with the flusher if the check directory code blocks.
3400 * If any changes whatsoever have been made to the cursor
3401 * set EDEADLK and retry.
3403 * WARNING: See warnings in hammer_unlock_cursor()
3406 if (error == 0 && ip && ip->ino_data.obj_type ==
3407 HAMMER_OBJTYPE_DIRECTORY) {
3408 hammer_unlock_cursor(&cursor);
3409 error = hammer_ip_check_directory_empty(trans, ip);
3410 hammer_lock_cursor(&cursor);
3411 if (cursor.flags & HAMMER_CURSOR_RETEST) {
3412 kprintf("HAMMER: Warning: avoided deadlock "
3420 * Delete the directory entry.
3422 * WARNING: hammer_ip_del_directory() may have to terminate
3423 * the cursor to avoid a deadlock. It is ok to call
3424 * hammer_done_cursor() twice.
3427 error = hammer_ip_del_directory(trans, &cursor,
3430 hammer_done_cursor(&cursor);
3432 cache_setunresolved(nch);
3433 cache_setvp(nch, NULL);
3436 * NOTE: ip->vp, if non-NULL, cannot be directly
3437 * referenced without formally acquiring the
3438 * vp since the vp might have zero refs on it,
3439 * or in the middle of a reclaim, etc.
3441 * NOTE: The cache_setunresolved() can rip the vp
3442 * out from under us since the vp may not have
3443 * any refs, in which case ip->vp will be NULL
3446 while (ip && ip->vp) {
3449 error = hammer_get_vnode(ip, &vp);
3450 if (error == 0 && vp) {
3452 hammer_knote(ip->vp, NOTE_DELETE);
3453 cache_inval_vp(ip->vp, CINV_DESTROY);
3457 kprintf("Debug: HAMMER ip/vp race1 avoided\n");
3461 hammer_rel_inode(ip, 0);
3463 hammer_done_cursor(&cursor);
3465 if (error == EDEADLK)
3471 /************************************************************************
3472 * FIFO AND SPECFS OPS *
3473 ************************************************************************
3477 hammer_vop_fifoclose (struct vop_close_args *ap)
3479 /* XXX update itimes */
3480 return (VOCALL(&fifo_vnode_vops, &ap->a_head));
3484 hammer_vop_fiforead (struct vop_read_args *ap)
3488 error = VOCALL(&fifo_vnode_vops, &ap->a_head);
3489 /* XXX update access time */
3494 hammer_vop_fifowrite (struct vop_write_args *ap)
3498 error = VOCALL(&fifo_vnode_vops, &ap->a_head);
3499 /* XXX update access time */
3505 hammer_vop_fifokqfilter(struct vop_kqfilter_args *ap)
3509 error = VOCALL(&fifo_vnode_vops, &ap->a_head);
3511 error = hammer_vop_kqfilter(ap);
3515 /************************************************************************
3516 * KQFILTER OPS *
3517 ************************************************************************
3520 static void filt_hammerdetach(struct knote *kn);
3521 static int filt_hammerread(struct knote *kn, long hint);
3522 static int filt_hammerwrite(struct knote *kn, long hint);
3523 static int filt_hammervnode(struct knote *kn, long hint);
3525 static struct filterops hammerread_filtops =
3526 { FILTEROP_ISFD, NULL, filt_hammerdetach, filt_hammerread };
3527 static struct filterops hammerwrite_filtops =
3528 { FILTEROP_ISFD, NULL, filt_hammerdetach, filt_hammerwrite };
3529 static struct filterops hammervnode_filtops =
3530 { FILTEROP_ISFD, NULL, filt_hammerdetach, filt_hammervnode };
3534 hammer_vop_kqfilter(struct vop_kqfilter_args *ap)
3536 struct vnode *vp = ap->a_vp;
3537 struct knote *kn = ap->a_kn;
3539 switch (kn->kn_filter) {
3541 kn->kn_fop = &hammerread_filtops;
3544 kn->kn_fop = &hammerwrite_filtops;
3547 kn->kn_fop = &hammervnode_filtops;
3550 return (EOPNOTSUPP);
3553 kn->kn_hook = (caddr_t)vp;
3555 knote_insert(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn);
3561 filt_hammerdetach(struct knote *kn)
3563 struct vnode *vp = (void *)kn->kn_hook;
3565 knote_remove(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn);
3569 filt_hammerread(struct knote *kn, long hint)
3571 struct vnode *vp = (void *)kn->kn_hook;
3572 hammer_inode_t ip = VTOI(vp);
3573 hammer_mount_t hmp = ip->hmp;
3576 if (hint == NOTE_REVOKE) {
3577 kn->kn_flags |= (EV_EOF | EV_ONESHOT);
3580 lwkt_gettoken(&hmp->fs_token); /* XXX use per-ip-token */
3581 off = ip->ino_data.size - kn->kn_fp->f_offset;
3582 kn->kn_data = (off < INTPTR_MAX) ? off : INTPTR_MAX;
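/*
 * Editor's example (illustrative, values assumed): with ino_data.size =
 * 1000 and the descriptor's f_offset at 400 there are 600 readable
 * bytes, so kn_data = 600 and the filter reports the vnode readable;
 * at EOF kn_data is 0 and the filter returns false unless NOTE_OLDAPI
 * semantics are requested below.
 */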
3583 lwkt_reltoken(&hmp->fs_token);
3584 if (kn->kn_sfflags & NOTE_OLDAPI)
3586 return (kn->kn_data != 0);
3590 filt_hammerwrite(struct knote *kn, long hint)
3592 if (hint == NOTE_REVOKE)
3593 kn->kn_flags |= (EV_EOF | EV_ONESHOT);
3599 filt_hammervnode(struct knote *kn, long hint)
3601 if (kn->kn_sfflags & hint)
3602 kn->kn_fflags |= hint;
3603 if (hint == NOTE_REVOKE) {
3604 kn->kn_flags |= EV_EOF;
3607 return (kn->kn_fflags != 0);