/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_vnops.c,v 1.79 2008/07/01 02:08:58 dillon Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/fcntl.h>
#include <sys/namecache.h>
#include <sys/vnode.h>
#include <sys/lockf.h>
#include <sys/event.h>
#include <sys/dirent.h>
#include <vm/vm_extern.h>
#include <vfs/fifofs/fifo.h>

#include "hammer.h"
/*static int hammer_vop_vnoperate(struct vop_generic_args *);*/
static int hammer_vop_fsync(struct vop_fsync_args *);
static int hammer_vop_read(struct vop_read_args *);
static int hammer_vop_write(struct vop_write_args *);
static int hammer_vop_access(struct vop_access_args *);
static int hammer_vop_advlock(struct vop_advlock_args *);
static int hammer_vop_close(struct vop_close_args *);
static int hammer_vop_ncreate(struct vop_ncreate_args *);
static int hammer_vop_getattr(struct vop_getattr_args *);
static int hammer_vop_nresolve(struct vop_nresolve_args *);
static int hammer_vop_nlookupdotdot(struct vop_nlookupdotdot_args *);
static int hammer_vop_nlink(struct vop_nlink_args *);
static int hammer_vop_nmkdir(struct vop_nmkdir_args *);
static int hammer_vop_nmknod(struct vop_nmknod_args *);
static int hammer_vop_open(struct vop_open_args *);
static int hammer_vop_pathconf(struct vop_pathconf_args *);
static int hammer_vop_print(struct vop_print_args *);
static int hammer_vop_readdir(struct vop_readdir_args *);
static int hammer_vop_readlink(struct vop_readlink_args *);
static int hammer_vop_nremove(struct vop_nremove_args *);
static int hammer_vop_nrename(struct vop_nrename_args *);
static int hammer_vop_nrmdir(struct vop_nrmdir_args *);
static int hammer_vop_setattr(struct vop_setattr_args *);
static int hammer_vop_strategy(struct vop_strategy_args *);
static int hammer_vop_bmap(struct vop_bmap_args *ap);
static int hammer_vop_nsymlink(struct vop_nsymlink_args *);
static int hammer_vop_nwhiteout(struct vop_nwhiteout_args *);
static int hammer_vop_ioctl(struct vop_ioctl_args *);
static int hammer_vop_mountctl(struct vop_mountctl_args *);

static int hammer_vop_fifoclose (struct vop_close_args *);
static int hammer_vop_fiforead (struct vop_read_args *);
static int hammer_vop_fifowrite (struct vop_write_args *);

static int hammer_vop_specclose (struct vop_close_args *);
static int hammer_vop_specread (struct vop_read_args *);
static int hammer_vop_specwrite (struct vop_write_args *);
struct vop_ops hammer_vnode_vops = {
        .vop_default =          vop_defaultop,
        .vop_fsync =            hammer_vop_fsync,
        .vop_getpages =         vop_stdgetpages,
        .vop_putpages =         vop_stdputpages,
        .vop_read =             hammer_vop_read,
        .vop_write =            hammer_vop_write,
        .vop_access =           hammer_vop_access,
        .vop_advlock =          hammer_vop_advlock,
        .vop_close =            hammer_vop_close,
        .vop_ncreate =          hammer_vop_ncreate,
        .vop_getattr =          hammer_vop_getattr,
        .vop_inactive =         hammer_vop_inactive,
        .vop_reclaim =          hammer_vop_reclaim,
        .vop_nresolve =         hammer_vop_nresolve,
        .vop_nlookupdotdot =    hammer_vop_nlookupdotdot,
        .vop_nlink =            hammer_vop_nlink,
        .vop_nmkdir =           hammer_vop_nmkdir,
        .vop_nmknod =           hammer_vop_nmknod,
        .vop_open =             hammer_vop_open,
        .vop_pathconf =         hammer_vop_pathconf,
        .vop_print =            hammer_vop_print,
        .vop_readdir =          hammer_vop_readdir,
        .vop_readlink =         hammer_vop_readlink,
        .vop_nremove =          hammer_vop_nremove,
        .vop_nrename =          hammer_vop_nrename,
        .vop_nrmdir =           hammer_vop_nrmdir,
        .vop_setattr =          hammer_vop_setattr,
        .vop_bmap =             hammer_vop_bmap,
        .vop_strategy =         hammer_vop_strategy,
        .vop_nsymlink =         hammer_vop_nsymlink,
        .vop_nwhiteout =        hammer_vop_nwhiteout,
        .vop_ioctl =            hammer_vop_ioctl,
        .vop_mountctl =         hammer_vop_mountctl
};
struct vop_ops hammer_spec_vops = {
        .vop_default =          spec_vnoperate,
        .vop_fsync =            hammer_vop_fsync,
        .vop_read =             hammer_vop_specread,
        .vop_write =            hammer_vop_specwrite,
        .vop_access =           hammer_vop_access,
        .vop_close =            hammer_vop_specclose,
        .vop_getattr =          hammer_vop_getattr,
        .vop_inactive =         hammer_vop_inactive,
        .vop_reclaim =          hammer_vop_reclaim,
        .vop_setattr =          hammer_vop_setattr
};
struct vop_ops hammer_fifo_vops = {
        .vop_default =          fifo_vnoperate,
        .vop_fsync =            hammer_vop_fsync,
        .vop_read =             hammer_vop_fiforead,
        .vop_write =            hammer_vop_fifowrite,
        .vop_access =           hammer_vop_access,
        .vop_close =            hammer_vop_fifoclose,
        .vop_getattr =          hammer_vop_getattr,
        .vop_inactive =         hammer_vop_inactive,
        .vop_reclaim =          hammer_vop_reclaim,
        .vop_setattr =          hammer_vop_setattr
};
#ifdef DEBUG_TRUNCATE
struct hammer_inode *HammerTruncIp;
#endif

static int hammer_dounlink(hammer_transaction_t trans, struct nchandle *nch,
                        struct vnode *dvp, struct ucred *cred, int flags);
static int hammer_vop_strategy_read(struct vop_strategy_args *ap);
static int hammer_vop_strategy_write(struct vop_strategy_args *ap);
static void hammer_cleanup_write_io(hammer_inode_t ip);
static void hammer_update_rsv_databufs(hammer_inode_t ip);
#if 0
static
int
hammer_vop_vnoperate(struct vop_generic_args *ap)
{
        return (VOCALL(&hammer_vnode_vops, ap));
}
#endif
/*
 * hammer_vop_fsync { vp, waitfor }
 *
 * fsync() an inode to disk and wait for it to be completely committed
 * such that the information would not be undone if a crash occurred after
 * return.
 */
static
int
hammer_vop_fsync(struct vop_fsync_args *ap)
{
        hammer_inode_t ip = VTOI(ap->a_vp);

        vfsync(ap->a_vp, ap->a_waitfor, 1, NULL, NULL);
        hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
        if (ap->a_waitfor == MNT_WAIT)
                hammer_wait_inode(ip);
        return (ip->error);
}
/*
 * hammer_vop_read { vp, uio, ioflag, cred }
 */
static
int
hammer_vop_read(struct vop_read_args *ap)
{
        struct hammer_transaction trans;
        hammer_inode_t ip;
        struct buf *bp;
        struct uio *uio;
        int64_t base_offset;
        int64_t file_limit;
        int offset;
        int error;
        int n;
        int seqcount;
        int ioseqcount;
        int blksize;

        if (ap->a_vp->v_type != VREG)
                return (EINVAL);
        ip = VTOI(ap->a_vp);
        uio = ap->a_uio;
        error = 0;

        /*
         * Allow the UIO's size to override the sequential heuristic.
         */
        blksize = hammer_blocksize(uio->uio_offset);
        seqcount = (uio->uio_resid + (blksize - 1)) / blksize;
        ioseqcount = ap->a_ioflag >> 16;
        if (seqcount < ioseqcount)
                seqcount = ioseqcount;
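
        /*
         * Illustration (hypothetical figures): a 64KB uio_resid over
         * 16KB blocks computes seqcount 4, i.e. four buffers of
         * read-ahead.  A larger hint passed by the caller in the high
         * 16 bits of a_ioflag overrides the computed value.
         */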
        hammer_start_transaction(&trans, ip->hmp);

        /*
         * Access the data typically in HAMMER_BUFSIZE blocks via the
         * buffer cache, but HAMMER may use a variable block size based
         * on the offset.
         */
        while (uio->uio_resid > 0 && uio->uio_offset < ip->ino_data.size) {
                blksize = hammer_blocksize(uio->uio_offset);
                offset = (int)uio->uio_offset & (blksize - 1);
                base_offset = uio->uio_offset - offset;
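
                /*
                 * Illustration (hypothetical figures): with a 16KB
                 * block, uio_offset 0x4234 yields offset 0x0234 and
                 * base_offset 0x4000, the aligned buffer containing
                 * the requested byte range.
                 */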
                if (hammer_debug_cluster_enable) {
                        /*
                         * Use file_limit to prevent cluster_read() from
                         * creating buffers of the wrong block size past
                         * the demarc.
                         */
                        file_limit = ip->ino_data.size;
                        if (base_offset < HAMMER_XDEMARC &&
                            file_limit > HAMMER_XDEMARC) {
                                file_limit = HAMMER_XDEMARC;
                        }
                        error = cluster_read(ap->a_vp,
                                             file_limit, base_offset,
                                             blksize, MAXPHYS,
                                             seqcount * BKVASIZE, &bp);
                } else {
                        error = bread(ap->a_vp, base_offset, blksize, &bp);
                }
                if (error) {
                        kprintf("error %d\n", error);
                        brelse(bp);
                        break;
                }

                /* bp->b_flags |= B_CLUSTEROK; temporarily disabled */
                n = blksize - offset;
                if (n > uio->uio_resid)
                        n = uio->uio_resid;
                if (n > ip->ino_data.size - uio->uio_offset)
                        n = (int)(ip->ino_data.size - uio->uio_offset);
                error = uiomove((char *)bp->b_data + offset, n, uio);

                /* data has a lower priority than meta-data */
                bp->b_flags |= B_AGE;
                bqrelse(bp);
                if (error)
                        break;
        }
        if ((ip->flags & HAMMER_INODE_RO) == 0 &&
            (ip->hmp->mp->mnt_flag & MNT_NOATIME) == 0) {
                ip->ino_data.atime = trans.time;
                hammer_modify_inode(ip, HAMMER_INODE_ATIME);
        }
        hammer_done_transaction(&trans);
        return (error);
}
/*
 * hammer_vop_write { vp, uio, ioflag, cred }
 */
static
int
hammer_vop_write(struct vop_write_args *ap)
{
        struct hammer_transaction trans;
        struct hammer_inode *ip;
        hammer_mount_t hmp;
        struct uio *uio;
        struct buf *bp;
        int64_t base_offset;
        int offset;
        int error;
        int n;
        int flags;
        int delta;
        int seqcount;
        int blksize;
        int blkmask;

        if (ap->a_vp->v_type != VREG)
                return (EINVAL);
        ip = VTOI(ap->a_vp);
        hmp = ip->hmp;
        uio = ap->a_uio;
        error = 0;
        seqcount = ap->a_ioflag >> 16;

        if (ip->flags & HAMMER_INODE_RO)
                return (EROFS);

        /*
         * Create a transaction to cover the operations we perform.
         */
        hammer_start_transaction(&trans, hmp);

        if (ap->a_ioflag & IO_APPEND)
                uio->uio_offset = ip->ino_data.size;
        /*
         * Check for illegal write offsets.  Valid range is 0...2^63-1.
         *
         * NOTE: the base_off assignment is required to work around what
         * I consider to be a GCC-4 optimization bug.
         */
        if (uio->uio_offset < 0) {
                hammer_done_transaction(&trans);
                return (EINVAL);
        }
        base_offset = uio->uio_offset + uio->uio_resid; /* work around gcc-4 */
        if (uio->uio_resid > 0 && base_offset <= 0) {
                hammer_done_transaction(&trans);
                return (EFBIG);
        }
        /*
         * Access the data typically in HAMMER_BUFSIZE blocks via the
         * buffer cache, but HAMMER may use a variable block size based
         * on the offset.
         */
        while (uio->uio_resid > 0) {
                if ((error = hammer_checkspace(hmp)) != 0)
                        break;

                blksize = hammer_blocksize(uio->uio_offset);

                /*
                 * Do not allow HAMMER to blow out the buffer cache.  Very
                 * large UIOs can lockout other processes due to bwillwrite()
                 * mechanics.
                 *
                 * Do not allow HAMMER to blow out system memory by
                 * accumulating too many records.   Records are so well
                 * decoupled from the buffer cache that it is possible
                 * for userland to push data out to the media via
                 * direct-write, but build up the records queued to the
                 * backend faster than the backend can flush them out.
                 * HAMMER has hit its write limit but the frontend has
                 * no pushback to slow it down.
                 *
                 * The hammer inode is not locked during these operations.
                 * The vnode is locked which can interfere with the pageout
                 * daemon for non-UIO_NOCOPY writes but should not interfere
                 * with the buffer cache.  Even so, we cannot afford to
                 * allow the pageout daemon to build up too many dirty buffer
                 * cache buffers.
                 */
                /*
                 * Pending record flush check.
                 */
                if (hmp->rsv_recs > hammer_limit_recs / 2) {
                        /*
                         * Get the inode on the flush list
                         */
                        if (ip->rsv_recs >= 64)
                                hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
                        else if (ip->rsv_recs >= 16)
                                hammer_flush_inode(ip, 0);

                        /*
                         * Keep the flusher going if the system keeps
                         * queueing records.
                         */
                        delta = hmp->count_newrecords -
                                hmp->last_newrecords;
                        if (delta < 0 || delta > hammer_limit_recs / 2) {
                                hmp->last_newrecords = hmp->count_newrecords;
                                hammer_sync_hmp(hmp, MNT_NOWAIT);
                        }

                        /*
                         * If we have gotten behind start slowing
                         * down the writer.
                         */
                        delta = (hmp->rsv_recs - hammer_limit_recs) *
                                hz / hammer_limit_recs;
                        if (delta > 0)
                                tsleep(&trans, 0, "hmrslo", delta);
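                        /*
                         * Illustration (hypothetical figures): the delay
                         * scales with the overshoot.  If rsv_recs exceeds
                         * hammer_limit_recs by 10%, delta works out to
                         * roughly hz / 10, about a 100ms pause per
                         * iteration to let the flusher catch up.
                         */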
                }

                /*
                 * Calculate the blocksize at the current offset and figure
                 * out how much we can actually write.
                 */
                blkmask = blksize - 1;
                offset = (int)uio->uio_offset & blkmask;
                base_offset = uio->uio_offset & ~(int64_t)blkmask;
                n = blksize - offset;
                if (n > uio->uio_resid)
                        n = uio->uio_resid;
                if (uio->uio_offset + n > ip->ino_data.size) {
                        vnode_pager_setsize(ap->a_vp, uio->uio_offset + n);
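                        /*
                         * Note: the VM object is grown before the data is
                         * actually copied in; if the uiomove() later fails
                         * the size change is undone via vtruncbuf() in the
                         * error path below.
                         */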
                }

                if (uio->uio_segflg == UIO_NOCOPY) {
                        /*
                         * Issuing a write with the same data backing the
                         * buffer.  Instantiate the buffer to collect the
                         * backing vm pages, then read-in any missing bits.
                         *
                         * This case is used by vop_stdputpages().
                         */
                        bp = getblk(ap->a_vp, base_offset,
                                    blksize, GETBLK_BHEAVY, 0);
                        if ((bp->b_flags & B_CACHE) == 0) {
                                bqrelse(bp);
                                error = bread(ap->a_vp, base_offset,
                                              blksize, &bp);
                        }
                } else if (offset == 0 && uio->uio_resid >= blksize) {
                        /*
                         * Even though we are entirely overwriting the buffer
                         * we may still have to zero it out to avoid a
                         * mmap/write visibility issue.
                         */
                        bp = getblk(ap->a_vp, base_offset, blksize, GETBLK_BHEAVY, 0);
                        if ((bp->b_flags & B_CACHE) == 0)
                                vfs_bio_clrbuf(bp);
                } else if (base_offset >= ip->ino_data.size) {
                        /*
                         * If the base offset of the buffer is beyond the
                         * file EOF, we don't have to issue a read.
                         */
                        bp = getblk(ap->a_vp, base_offset,
                                    blksize, GETBLK_BHEAVY, 0);
                        vfs_bio_clrbuf(bp);
                } else {
                        /*
                         * Partial overwrite, read in any missing bits then
                         * replace the portion being written.
                         */
                        error = bread(ap->a_vp, base_offset, blksize, &bp);
                }
                if (error == 0)
                        error = uiomove((char *)bp->b_data + offset,
                                        n, uio);

                /*
                 * If we screwed up we have to undo any VM size changes we
                 * made.
                 */
                if (error) {
                        brelse(bp);
                        vtruncbuf(ap->a_vp, ip->ino_data.size,
                                  hammer_blocksize(ip->ino_data.size));
                        break;
                }

                /* bp->b_flags |= B_CLUSTEROK; temporarily disabled */
                if (ip->ino_data.size < uio->uio_offset) {
                        ip->ino_data.size = uio->uio_offset;
                        flags = HAMMER_INODE_DDIRTY;
                        vnode_pager_setsize(ap->a_vp, ip->ino_data.size);
                } else {
                        flags = 0;
                }
                ip->ino_data.mtime = trans.time;
                flags |= HAMMER_INODE_MTIME | HAMMER_INODE_BUFS;
                hammer_modify_inode(ip, flags);
                /*
                 * Try to keep track of cached dirty data.
                 */
                if ((bp->b_flags & B_DIRTY) == 0) {
                        ++ip->rsv_databufs;
                        ++hmp->rsv_databufs;
                }

                /*
                 * Final buffer disposition.
                 */
                bp->b_flags |= B_AGE;
                if (ap->a_ioflag & IO_SYNC) {
                        bwrite(bp);
                } else if (ap->a_ioflag & IO_DIRECT) {
                        bawrite(bp);
                } else {
                        bdwrite(bp);
                }
        }
        hammer_done_transaction(&trans);
        return (error);
}
/*
 * hammer_vop_access { vp, mode, cred }
 */
static
int
hammer_vop_access(struct vop_access_args *ap)
{
        struct hammer_inode *ip = VTOI(ap->a_vp);
        uid_t uid;
        gid_t gid;
        int error;

        uid = hammer_to_unix_xid(&ip->ino_data.uid);
        gid = hammer_to_unix_xid(&ip->ino_data.gid);

        error = vop_helper_access(ap, uid, gid, ip->ino_data.mode,
                                  ip->ino_data.uflags);
        return (error);
}
/*
 * hammer_vop_advlock { vp, id, op, fl, flags }
 */
static
int
hammer_vop_advlock(struct vop_advlock_args *ap)
{
        hammer_inode_t ip = VTOI(ap->a_vp);

        return (lf_advlock(ap, &ip->advlock, ip->ino_data.size));
}
/*
 * hammer_vop_close { vp, fflag }
 */
static
int
hammer_vop_close(struct vop_close_args *ap)
{
        hammer_inode_t ip = VTOI(ap->a_vp);

        if ((ip->flags | ip->sync_flags) & HAMMER_INODE_MODMASK)
                hammer_inode_waitreclaims(ip->hmp);
        return (vop_stdclose(ap));
}
/*
 * hammer_vop_ncreate { nch, dvp, vpp, cred, vap }
 *
 * The operating system has already ensured that the directory entry
 * does not exist and done all appropriate namespace locking.
 */
static
int
hammer_vop_ncreate(struct vop_ncreate_args *ap)
{
        struct hammer_transaction trans;
        struct hammer_inode *dip;
        struct hammer_inode *nip;
        struct nchandle *nch;
        int error;

        nch = ap->a_nch;
        dip = VTOI(ap->a_dvp);

        if (dip->flags & HAMMER_INODE_RO)
                return (EROFS);
        if ((error = hammer_checkspace(dip->hmp)) != 0)
                return (error);

        /*
         * Create a transaction to cover the operations we perform.
         */
        hammer_start_transaction(&trans, dip->hmp);

        /*
         * Create a new filesystem object of the requested type.  The
         * returned inode will be referenced and shared-locked to prevent
         * it from being moved to the flusher.
         */
        error = hammer_create_inode(&trans, ap->a_vap, ap->a_cred,
                                    dip, 0, &nip);
        if (error) {
                hkprintf("hammer_create_inode error %d\n", error);
                hammer_done_transaction(&trans);
                *ap->a_vpp = NULL;
                return (error);
        }

        /*
         * Add the new filesystem object to the directory.  This will also
         * bump the inode's link count.
         */
        error = hammer_ip_add_directory(&trans, dip,
                                        nch->ncp->nc_name, nch->ncp->nc_nlen,
                                        nip);
        if (error)
                hkprintf("hammer_ip_add_directory error %d\n", error);

        /*
         * Finish up.
         */
        if (error) {
                hammer_rel_inode(nip, 0);
                hammer_done_transaction(&trans);
                *ap->a_vpp = NULL;
        } else {
                error = hammer_get_vnode(nip, ap->a_vpp);
                hammer_done_transaction(&trans);
                hammer_rel_inode(nip, 0);
                if (error == 0) {
                        cache_setunresolved(ap->a_nch);
                        cache_setvp(ap->a_nch, *ap->a_vpp);
                }
        }
        return (error);
}
/*
 * hammer_vop_getattr { vp, vap }
 *
 * Retrieve an inode's attribute information.  When accessing inodes
 * historically we fake the atime field to ensure consistent results.
 * The atime field is stored in the B-Tree element and allowed to be
 * updated without cycling the element.
 */
static
int
hammer_vop_getattr(struct vop_getattr_args *ap)
{
        struct hammer_inode *ip = VTOI(ap->a_vp);
        struct vattr *vap = ap->a_vap;

        vap->va_fsid = ip->hmp->fsid_udev;
        /*
         * XXX munge the device if we are in a pseudo-fs, so user utilities
         * do not think it's the same 'filesystem'.
         */
        if (ip->obj_localization)
                vap->va_fsid += ip->obj_localization;
        vap->va_fileid = ip->ino_leaf.base.obj_id;
        vap->va_mode = ip->ino_data.mode;
        vap->va_nlink = ip->ino_data.nlinks;
        vap->va_uid = hammer_to_unix_xid(&ip->ino_data.uid);
        vap->va_gid = hammer_to_unix_xid(&ip->ino_data.gid);
        vap->va_rmajor = 0;
        vap->va_rminor = 0;
        vap->va_size = ip->ino_data.size;

        /*
         * We must provide a consistent atime and mtime for snapshots
         * so people can do a 'tar cf - ... | md5' on them and get
         * consistent results.
         */
        if (ip->flags & HAMMER_INODE_RO) {
                hammer_time_to_timespec(ip->ino_data.ctime, &vap->va_atime);
                hammer_time_to_timespec(ip->ino_data.ctime, &vap->va_mtime);
        } else {
                hammer_time_to_timespec(ip->ino_data.atime, &vap->va_atime);
                hammer_time_to_timespec(ip->ino_data.mtime, &vap->va_mtime);
        }
        hammer_time_to_timespec(ip->ino_data.ctime, &vap->va_ctime);
        vap->va_flags = ip->ino_data.uflags;
        vap->va_gen = 1;        /* hammer inums are unique for all time */
        vap->va_blocksize = HAMMER_BUFSIZE;
        if (ip->ino_data.size >= HAMMER_XDEMARC) {
                vap->va_bytes = (ip->ino_data.size + HAMMER_XBUFMASK64) &
                                ~HAMMER_XBUFMASK64;
        } else if (ip->ino_data.size > HAMMER_BUFSIZE / 2) {
                vap->va_bytes = (ip->ino_data.size + HAMMER_BUFMASK64) &
                                ~HAMMER_BUFMASK64;
        } else {
                vap->va_bytes = (ip->ino_data.size + 15) & ~15;
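                /*
                 * Small files are presumably stored as a single fragment
                 * in the small-data zone, so space is reported with 16
                 * byte granularity, e.g. a 100 byte file reports
                 * va_bytes = 112.
                 */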
        }
        vap->va_type = hammer_get_vnode_type(ip->ino_data.obj_type);
        vap->va_filerev = 0;    /* XXX */
        /* mtime uniquely identifies any adjustments made to the file XXX */
        vap->va_fsmid = ip->ino_data.mtime;
        vap->va_uid_uuid = ip->ino_data.uid;
        vap->va_gid_uuid = ip->ino_data.gid;
        vap->va_fsid_uuid = ip->hmp->fsid;
        vap->va_vaflags = VA_UID_UUID_VALID | VA_GID_UUID_VALID |
                          VA_FSID_UUID_VALID;

        switch (ip->ino_data.obj_type) {
        case HAMMER_OBJTYPE_CDEV:
        case HAMMER_OBJTYPE_BDEV:
                vap->va_rmajor = ip->ino_data.rmajor;
                vap->va_rminor = ip->ino_data.rminor;
                break;
        default:
                break;
        }
        return (0);
}
/*
 * hammer_vop_nresolve { nch, dvp, cred }
 *
 * Locate the requested directory entry.
 */
static
int
hammer_vop_nresolve(struct vop_nresolve_args *ap)
{
        struct hammer_transaction trans;
        struct namecache *ncp;
        hammer_inode_t dip;
        hammer_inode_t ip;
        hammer_tid_t asof;
        struct hammer_cursor cursor;
        struct vnode *vp;
        int64_t namekey;
        int64_t obj_id;
        int error;
        int i;
        int nlen;
        int flags;
        u_int32_t localization;

        /*
         * Misc initialization, plus handle as-of name extensions.  Look for
         * the '@@' extension.  Note that as-of files and directories cannot
         * be modified.
         */
        dip = VTOI(ap->a_dvp);
        ncp = ap->a_nch->ncp;
        asof = dip->obj_asof;
        nlen = ncp->nc_nlen;
        flags = dip->flags;

        hammer_simple_transaction(&trans, dip->hmp);

        for (i = 0; i < nlen; ++i) {
                if (ncp->nc_name[i] == '@' && ncp->nc_name[i+1] == '@') {
                        asof = hammer_str_to_tid(ncp->nc_name + i + 2);
                        flags |= HAMMER_INODE_RO;
                        break;
                }
        }
        nlen = i;

        /*
         * If there is no path component the time extension is relative to
         * dip.
         */
        if (nlen == 0) {
                ip = hammer_get_inode(&trans, dip, dip->obj_id,
                                      asof, dip->obj_localization,
                                      flags, &error);
                if (error == 0) {
                        error = hammer_get_vnode(ip, &vp);
                        hammer_rel_inode(ip, 0);
                } else {
                        vp = NULL;
                }
                if (error == 0) {
                        vn_unlock(vp);
                        cache_setvp(ap->a_nch, vp);
                        vrele(vp);
                }
                goto done;
        }
        /*
         * Calculate the namekey and setup the key range for the scan.  This
         * works kinda like a chained hash table where the lower 32 bits
         * of the namekey synthesize the chain.
         *
         * The key range is inclusive of both key_beg and key_end.
         */
        namekey = hammer_directory_namekey(ncp->nc_name, nlen);

        error = hammer_init_cursor(&trans, &cursor, &dip->cache[1], dip);
        cursor.key_beg.localization = dip->obj_localization +
                                      HAMMER_LOCALIZE_MISC;
        cursor.key_beg.obj_id = dip->obj_id;
        cursor.key_beg.key = namekey;
        cursor.key_beg.create_tid = 0;
        cursor.key_beg.delete_tid = 0;
        cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
        cursor.key_beg.obj_type = 0;

        cursor.key_end = cursor.key_beg;
        cursor.key_end.key |= 0xFFFFFFFFULL;
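        /*
         * The name hash presumably occupies the high bits of the namekey
         * with the low 32 bits starting at zero, so ORing 0xFFFFFFFF
         * into key_end makes [key_beg, key_end] span every possible
         * chain position for that hash; colliding names differ only in
         * the low 32 bits.
         */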
        cursor.asof = asof;
        cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;

        /*
         * Scan all matching records (the chain), locate the one matching
         * the requested path component.
         *
         * The hammer_ip_*() functions merge in-memory records with on-disk
         * records for the purposes of the search.
         */
        obj_id = 0;
        localization = HAMMER_DEF_LOCALIZATION;

        if (error == 0) {
                error = hammer_ip_first(&cursor);
                while (error == 0) {
                        error = hammer_ip_resolve_data(&cursor);
                        if (error)
                                break;
                        if (nlen == cursor.leaf->data_len - HAMMER_ENTRY_NAME_OFF &&
                            bcmp(ncp->nc_name, cursor.data->entry.name, nlen) == 0) {
                                obj_id = cursor.data->entry.obj_id;
                                localization = cursor.data->entry.localization;
                                break;
                        }
                        error = hammer_ip_next(&cursor);
                }
        }
        hammer_done_cursor(&cursor);
        if (error == 0) {
                ip = hammer_get_inode(&trans, dip, obj_id,
                                      asof, localization,
                                      flags, &error);
                if (error == 0) {
                        error = hammer_get_vnode(ip, &vp);
                        hammer_rel_inode(ip, 0);
                } else {
                        vp = NULL;
                }
                if (error == 0) {
                        vn_unlock(vp);
                        cache_setvp(ap->a_nch, vp);
                        vrele(vp);
                }
        } else if (error == ENOENT) {
                cache_setvp(ap->a_nch, NULL);
        }
done:
        hammer_done_transaction(&trans);
        return (error);
}
/*
 * hammer_vop_nlookupdotdot { dvp, vpp, cred }
 *
 * Locate the parent directory of a directory vnode.
 *
 * dvp is referenced but not locked.  *vpp must be returned referenced and
 * locked.  A parent_obj_id of 0 does not necessarily indicate that we are
 * at the root, instead it could indicate that the directory we were in was
 * removed.
 *
 * NOTE: as-of sequences are not linked into the directory structure.  If
 * we are at the root with a different asof than the mount point, reload
 * the same directory with the mount point's asof.  I'm not sure what this
 * will do to NFS.  We encode ASOF stamps in NFS file handles so it might not
 * get confused, but it hasn't been tested.
 */
static
int
hammer_vop_nlookupdotdot(struct vop_nlookupdotdot_args *ap)
{
        struct hammer_transaction trans;
        struct hammer_inode *dip;
        struct hammer_inode *ip;
        int64_t parent_obj_id;
        u_int32_t parent_obj_localization;
        hammer_tid_t asof;
        int error;

        dip = VTOI(ap->a_dvp);
        asof = dip->obj_asof;

        /*
         * Who is our parent?  This could be the root of a pseudo-filesystem
         * whose parent is in another localization domain.
         */
        parent_obj_id = dip->ino_data.parent_obj_id;
        if (dip->obj_id == HAMMER_OBJID_ROOT)
                parent_obj_localization = dip->ino_data.ext.obj.parent_obj_localization;
        else
                parent_obj_localization = dip->obj_localization;

        if (parent_obj_id == 0) {
                if (dip->obj_id == HAMMER_OBJID_ROOT &&
                    asof != dip->hmp->asof) {
                        parent_obj_id = dip->obj_id;
                        asof = dip->hmp->asof;
                        *ap->a_fakename = kmalloc(19, M_TEMP, M_WAITOK);
                        ksnprintf(*ap->a_fakename, 19, "0x%016llx",
                                  asof);
                } else {
                        *ap->a_vpp = NULL;
                        return (ENOENT);
                }
        }

        hammer_simple_transaction(&trans, dip->hmp);

        ip = hammer_get_inode(&trans, dip, parent_obj_id,
                              asof, parent_obj_localization,
                              dip->flags, &error);
        if (ip) {
                error = hammer_get_vnode(ip, ap->a_vpp);
                hammer_rel_inode(ip, 0);
        } else {
                *ap->a_vpp = NULL;
        }
        hammer_done_transaction(&trans);
        return (error);
}
/*
 * hammer_vop_nlink { nch, dvp, vp, cred }
 */
static
int
hammer_vop_nlink(struct vop_nlink_args *ap)
{
        struct hammer_transaction trans;
        struct hammer_inode *dip;
        struct hammer_inode *ip;
        struct nchandle *nch;
        int error;

        nch = ap->a_nch;
        dip = VTOI(ap->a_dvp);
        ip = VTOI(ap->a_vp);

        if (dip->flags & HAMMER_INODE_RO)
                return (EROFS);
        if (ip->flags & HAMMER_INODE_RO)
                return (EROFS);
        if ((error = hammer_checkspace(dip->hmp)) != 0)
                return (error);

        /*
         * Create a transaction to cover the operations we perform.
         */
        hammer_start_transaction(&trans, dip->hmp);

        /*
         * Add the filesystem object to the directory.  Note that neither
         * dip nor ip are referenced or locked, but their vnodes are
         * referenced.  This function will bump the inode's link count.
         */
        error = hammer_ip_add_directory(&trans, dip,
                                        nch->ncp->nc_name, nch->ncp->nc_nlen,
                                        ip);

        /*
         * Finish up.
         */
        if (error == 0) {
                cache_setunresolved(nch);
                cache_setvp(nch, ap->a_vp);
        }
        hammer_done_transaction(&trans);
        return (error);
}
/*
 * hammer_vop_nmkdir { nch, dvp, vpp, cred, vap }
 *
 * The operating system has already ensured that the directory entry
 * does not exist and done all appropriate namespace locking.
 */
static
int
hammer_vop_nmkdir(struct vop_nmkdir_args *ap)
{
        struct hammer_transaction trans;
        struct hammer_inode *dip;
        struct hammer_inode *nip;
        struct nchandle *nch;
        int error;

        nch = ap->a_nch;
        dip = VTOI(ap->a_dvp);

        if (dip->flags & HAMMER_INODE_RO)
                return (EROFS);
        if ((error = hammer_checkspace(dip->hmp)) != 0)
                return (error);

        /*
         * Create a transaction to cover the operations we perform.
         */
        hammer_start_transaction(&trans, dip->hmp);

        /*
         * Create a new filesystem object of the requested type.  The
         * returned inode will be referenced but not locked.
         */
        error = hammer_create_inode(&trans, ap->a_vap, ap->a_cred,
                                    dip, 0, &nip);
        if (error) {
                hkprintf("hammer_mkdir error %d\n", error);
                hammer_done_transaction(&trans);
                *ap->a_vpp = NULL;
                return (error);
        }

        /*
         * Add the new filesystem object to the directory.  This will also
         * bump the inode's link count.
         */
        error = hammer_ip_add_directory(&trans, dip,
                                        nch->ncp->nc_name, nch->ncp->nc_nlen,
                                        nip);
        if (error)
                hkprintf("hammer_mkdir (add) error %d\n", error);

        /*
         * Finish up.
         */
        if (error) {
                hammer_rel_inode(nip, 0);
                *ap->a_vpp = NULL;
        } else {
                error = hammer_get_vnode(nip, ap->a_vpp);
                hammer_rel_inode(nip, 0);
                if (error == 0) {
                        cache_setunresolved(ap->a_nch);
                        cache_setvp(ap->a_nch, *ap->a_vpp);
                }
        }
        hammer_done_transaction(&trans);
        return (error);
}
/*
 * hammer_vop_nmknod { nch, dvp, vpp, cred, vap }
 *
 * The operating system has already ensured that the directory entry
 * does not exist and done all appropriate namespace locking.
 */
static
int
hammer_vop_nmknod(struct vop_nmknod_args *ap)
{
        struct hammer_transaction trans;
        struct hammer_inode *dip;
        struct hammer_inode *nip;
        struct nchandle *nch;
        int pseudofs;
        int error;

        nch = ap->a_nch;
        dip = VTOI(ap->a_dvp);

        if (dip->flags & HAMMER_INODE_RO)
                return (EROFS);
        if ((error = hammer_checkspace(dip->hmp)) != 0)
                return (error);

        /*
         * Create a transaction to cover the operations we perform.
         */
        hammer_start_transaction(&trans, dip->hmp);

        /*
         * Create a new filesystem object of the requested type.  The
         * returned inode will be referenced but not locked.
         *
         * If mknod specifies a directory a pseudo-fs is created.
         */
        pseudofs = (ap->a_vap->va_type == VDIR);
        error = hammer_create_inode(&trans, ap->a_vap, ap->a_cred,
                                    dip, pseudofs, &nip);
        if (error) {
                hammer_done_transaction(&trans);
                *ap->a_vpp = NULL;
                return (error);
        }

        /*
         * Add the new filesystem object to the directory.  This will also
         * bump the inode's link count.
         */
        error = hammer_ip_add_directory(&trans, dip,
                                        nch->ncp->nc_name, nch->ncp->nc_nlen,
                                        nip);

        /*
         * Finish up.
         */
        if (error) {
                hammer_rel_inode(nip, 0);
                *ap->a_vpp = NULL;
        } else {
                error = hammer_get_vnode(nip, ap->a_vpp);
                hammer_rel_inode(nip, 0);
                if (error == 0) {
                        cache_setunresolved(ap->a_nch);
                        cache_setvp(ap->a_nch, *ap->a_vpp);
                }
        }
        hammer_done_transaction(&trans);
        return (error);
}
/*
 * hammer_vop_open { vp, mode, cred, fp }
 */
static
int
hammer_vop_open(struct vop_open_args *ap)
{
        hammer_inode_t ip;

        ip = VTOI(ap->a_vp);

        if ((ap->a_mode & FWRITE) && (ip->flags & HAMMER_INODE_RO))
                return (EROFS);
        return(vop_stdopen(ap));
}
/*
 * hammer_vop_pathconf { vp, name, retval }
 */
static
int
hammer_vop_pathconf(struct vop_pathconf_args *ap)
{
        return (EOPNOTSUPP);
}

/*
 * hammer_vop_print { vp }
 */
static
int
hammer_vop_print(struct vop_print_args *ap)
{
        return (0);
}
/*
 * hammer_vop_readdir { vp, uio, cred, *eofflag, *ncookies, off_t **cookies }
 */
static
int
hammer_vop_readdir(struct vop_readdir_args *ap)
{
        struct hammer_transaction trans;
        struct hammer_cursor cursor;
        struct hammer_inode *ip;
        struct uio *uio;
        hammer_base_elm_t base;
        int error;
        int cookie_index;
        int ncookies;
        off_t *cookies;
        off_t saveoff;
        int r;

        ip = VTOI(ap->a_vp);
        uio = ap->a_uio;
        saveoff = uio->uio_offset;

        if (ap->a_ncookies) {
                ncookies = uio->uio_resid / 16 + 1;
                if (ncookies > 1024)
                        ncookies = 1024;
                cookies = kmalloc(ncookies * sizeof(off_t), M_TEMP, M_WAITOK);
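                /*
                 * The estimate assumes a directory entry consumes at
                 * least 16 bytes of uio space, so at most
                 * uio_resid / 16 + 1 entries can be returned; the cookie
                 * array is capped at 1024 entries (8KB of off_t) per call.
                 */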
        } else {
                ncookies = -1;
                cookies = NULL;
        }
        cookie_index = 0;

        hammer_simple_transaction(&trans, ip->hmp);

        /*
         * Handle artificial entries
         */
        error = 0;
        if (saveoff == 0) {
                r = vop_write_dirent(&error, uio, ip->obj_id, DT_DIR, 1, ".");
                if (r)
                        goto done;
                if (cookies)
                        cookies[cookie_index] = saveoff;
                ++saveoff;
                ++cookie_index;
                if (cookie_index == ncookies)
                        goto done;
        }
        if (saveoff == 1) {
                if (ip->ino_data.parent_obj_id) {
                        r = vop_write_dirent(&error, uio,
                                             ip->ino_data.parent_obj_id,
                                             DT_DIR, 2, "..");
                } else {
                        r = vop_write_dirent(&error, uio,
                                             ip->obj_id, DT_DIR, 2, "..");
                }
                if (r)
                        goto done;
                if (cookies)
                        cookies[cookie_index] = saveoff;
                ++saveoff;
                ++cookie_index;
                if (cookie_index == ncookies)
                        goto done;
        }
        /*
         * Key range (begin and end inclusive) to scan.  Directory keys
         * directly translate to a 64 bit 'seek' position.
         */
        hammer_init_cursor(&trans, &cursor, &ip->cache[1], ip);
        cursor.key_beg.localization = ip->obj_localization +
                                      HAMMER_LOCALIZE_MISC;
        cursor.key_beg.obj_id = ip->obj_id;
        cursor.key_beg.create_tid = 0;
        cursor.key_beg.delete_tid = 0;
        cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
        cursor.key_beg.obj_type = 0;
        cursor.key_beg.key = saveoff;

        cursor.key_end = cursor.key_beg;
        cursor.key_end.key = HAMMER_MAX_KEY;
        cursor.asof = ip->obj_asof;
        cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;
        error = hammer_ip_first(&cursor);

        while (error == 0) {
                error = hammer_ip_resolve_data(&cursor);
                if (error)
                        break;
                base = &cursor.leaf->base;
                saveoff = base->key;
                KKASSERT(cursor.leaf->data_len > HAMMER_ENTRY_NAME_OFF);

                if (base->obj_id != ip->obj_id)
                        panic("readdir: bad record at %p", cursor.node);

                r = vop_write_dirent(
                        &error, uio, cursor.data->entry.obj_id,
                        hammer_get_dtype(cursor.leaf->base.obj_type),
                        cursor.leaf->data_len - HAMMER_ENTRY_NAME_OFF,
                        (void *)cursor.data->entry.name);
                if (r)
                        break;
                ++saveoff;
                if (cookies)
                        cookies[cookie_index] = base->key;
                ++cookie_index;
                if (cookie_index == ncookies)
                        break;
                error = hammer_ip_next(&cursor);
        }
        hammer_done_cursor(&cursor);
done:
        hammer_done_transaction(&trans);

        if (ap->a_eofflag)
                *ap->a_eofflag = (error == ENOENT);
        uio->uio_offset = saveoff;
        if (error && cookie_index == 0) {
                if (error == ENOENT)
                        error = 0;
                if (cookies) {
                        kfree(cookies, M_TEMP);
                        *ap->a_ncookies = 0;
                        *ap->a_cookies = NULL;
                }
        } else {
                if (error == ENOENT)
                        error = 0;
                if (cookies) {
                        *ap->a_ncookies = cookie_index;
                        *ap->a_cookies = cookies;
                }
        }
        return (error);
}
/*
 * hammer_vop_readlink { vp, uio, cred }
 */
static
int
hammer_vop_readlink(struct vop_readlink_args *ap)
{
        struct hammer_transaction trans;
        struct hammer_cursor cursor;
        struct hammer_inode *ip;
        int error;

        ip = VTOI(ap->a_vp);

        /*
         * Shortcut if the symlink data was stuffed into ino_data.
         */
        if (ip->ino_data.size <= HAMMER_INODE_BASESYMLEN) {
                error = uiomove(ip->ino_data.ext.symlink,
                                ip->ino_data.size, ap->a_uio);
                return (error);
        }

        hammer_simple_transaction(&trans, ip->hmp);
        hammer_init_cursor(&trans, &cursor, &ip->cache[1], ip);

        /*
         * Key range (begin and end inclusive) to scan.  Directory keys
         * directly translate to a 64 bit 'seek' position.
         */
        cursor.key_beg.localization = ip->obj_localization +
                                      HAMMER_LOCALIZE_MISC;
        cursor.key_beg.obj_id = ip->obj_id;
        cursor.key_beg.create_tid = 0;
        cursor.key_beg.delete_tid = 0;
        cursor.key_beg.rec_type = HAMMER_RECTYPE_FIX;
        cursor.key_beg.obj_type = 0;
        cursor.key_beg.key = HAMMER_FIXKEY_SYMLINK;
        cursor.asof = ip->obj_asof;
        cursor.flags |= HAMMER_CURSOR_ASOF;

        error = hammer_ip_lookup(&cursor);
        if (error == 0) {
                error = hammer_ip_resolve_data(&cursor);
                if (error == 0) {
                        KKASSERT(cursor.leaf->data_len >=
                                 HAMMER_SYMLINK_NAME_OFF);
                        error = uiomove(cursor.data->symlink.name,
                                        cursor.leaf->data_len -
                                                HAMMER_SYMLINK_NAME_OFF,
                                        ap->a_uio);
                }
        }
        hammer_done_cursor(&cursor);
        hammer_done_transaction(&trans);
        return (error);
}
/*
 * hammer_vop_nremove { nch, dvp, cred }
 */
static
int
hammer_vop_nremove(struct vop_nremove_args *ap)
{
        struct hammer_transaction trans;
        struct hammer_inode *dip;
        int error;

        dip = VTOI(ap->a_dvp);

        if (hammer_nohistory(dip) == 0 &&
            (error = hammer_checkspace(dip->hmp)) != 0) {
                return (error);
        }

        hammer_start_transaction(&trans, dip->hmp);
        error = hammer_dounlink(&trans, ap->a_nch, ap->a_dvp, ap->a_cred, 0);
        hammer_done_transaction(&trans);
        return (error);
}
/*
 * hammer_vop_nrename { fnch, tnch, fdvp, tdvp, cred }
 */
static
int
hammer_vop_nrename(struct vop_nrename_args *ap)
{
        struct hammer_transaction trans;
        struct namecache *fncp;
        struct namecache *tncp;
        struct hammer_inode *fdip;
        struct hammer_inode *tdip;
        struct hammer_inode *ip;
        struct hammer_cursor cursor;
        int64_t namekey;
        int nlen;
        int error;

        fdip = VTOI(ap->a_fdvp);
        tdip = VTOI(ap->a_tdvp);
        fncp = ap->a_fnch->ncp;
        tncp = ap->a_tnch->ncp;
        ip = VTOI(fncp->nc_vp);
        KKASSERT(ip != NULL);

        if (fdip->flags & HAMMER_INODE_RO)
                return (EROFS);
        if (tdip->flags & HAMMER_INODE_RO)
                return (EROFS);
        if (ip->flags & HAMMER_INODE_RO)
                return (EROFS);
        if ((error = hammer_checkspace(fdip->hmp)) != 0)
                return (error);

        hammer_start_transaction(&trans, fdip->hmp);

        /*
         * Remove tncp from the target directory and then link ip as
         * tncp. XXX pass trans to dounlink
         *
         * Force the inode sync-time to match the transaction so it is
         * in-sync with the creation of the target directory entry.
         */
        error = hammer_dounlink(&trans, ap->a_tnch, ap->a_tdvp, ap->a_cred, 0);
        if (error == 0 || error == ENOENT) {
                error = hammer_ip_add_directory(&trans, tdip,
                                                tncp->nc_name, tncp->nc_nlen,
                                                ip);
                if (error == 0) {
                        ip->ino_data.parent_obj_id = tdip->obj_id;
                        hammer_modify_inode(ip, HAMMER_INODE_DDIRTY);
                }
        }
        if (error)
                goto failed; /* XXX */
        /*
         * Locate the record in the originating directory and remove it.
         *
         * Calculate the namekey and setup the key range for the scan.  This
         * works kinda like a chained hash table where the lower 32 bits
         * of the namekey synthesize the chain.
         *
         * The key range is inclusive of both key_beg and key_end.
         */
        namekey = hammer_directory_namekey(fncp->nc_name, fncp->nc_nlen);
retry:
        hammer_init_cursor(&trans, &cursor, &fdip->cache[1], fdip);
        cursor.key_beg.localization = fdip->obj_localization +
                                      HAMMER_LOCALIZE_MISC;
        cursor.key_beg.obj_id = fdip->obj_id;
        cursor.key_beg.key = namekey;
        cursor.key_beg.create_tid = 0;
        cursor.key_beg.delete_tid = 0;
        cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
        cursor.key_beg.obj_type = 0;

        cursor.key_end = cursor.key_beg;
        cursor.key_end.key |= 0xFFFFFFFFULL;
        cursor.asof = fdip->obj_asof;
        cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;
        /*
         * Scan all matching records (the chain), locate the one matching
         * the requested path component.
         *
         * The hammer_ip_*() functions merge in-memory records with on-disk
         * records for the purposes of the search.
         */
        error = hammer_ip_first(&cursor);
        while (error == 0) {
                if (hammer_ip_resolve_data(&cursor) != 0)
                        break;
                nlen = cursor.leaf->data_len - HAMMER_ENTRY_NAME_OFF;
                if (fncp->nc_nlen == nlen &&
                    bcmp(fncp->nc_name, cursor.data->entry.name, nlen) == 0) {
                        break;
                }
                error = hammer_ip_next(&cursor);
        }

        /*
         * If all is ok we have to get the inode so we can adjust nlinks.
         *
         * WARNING: hammer_ip_del_directory() may have to terminate the
         * cursor to avoid a recursion.  It's ok to call hammer_done_cursor()
         * twice.
         */
        if (error == 0)
                error = hammer_ip_del_directory(&trans, &cursor, fdip, ip);

        /*
         * XXX A deadlock here will break rename's atomicity for the
         * purposes of crash recovery.
         */
        if (error == EDEADLK) {
                hammer_done_cursor(&cursor);
                goto retry;
        }

        /*
         * Cleanup and tell the kernel that the rename succeeded.
         */
        hammer_done_cursor(&cursor);
        if (error == 0)
                cache_rename(ap->a_fnch, ap->a_tnch);

failed:
        hammer_done_transaction(&trans);
        return (error);
}
/*
 * hammer_vop_nrmdir { nch, dvp, cred }
 */
static
int
hammer_vop_nrmdir(struct vop_nrmdir_args *ap)
{
        struct hammer_transaction trans;
        struct hammer_inode *dip;
        int error;

        dip = VTOI(ap->a_dvp);

        if (hammer_nohistory(dip) == 0 &&
            (error = hammer_checkspace(dip->hmp)) != 0) {
                return (error);
        }

        hammer_start_transaction(&trans, dip->hmp);
        error = hammer_dounlink(&trans, ap->a_nch, ap->a_dvp, ap->a_cred, 0);
        hammer_done_transaction(&trans);
        return (error);
}
/*
 * hammer_vop_setattr { vp, vap, cred }
 */
static
int
hammer_vop_setattr(struct vop_setattr_args *ap)
{
        struct hammer_transaction trans;
        struct vattr *vap;
        struct hammer_inode *ip;
        int modflags;
        int error;
        int truncating;
        int blksize;
        int64_t aligned_size;
        u_int32_t flags;

        vap = ap->a_vap;
        ip = ap->a_vp->v_data;
        modflags = 0;

        if (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY)
                return (EROFS);
        if (ip->flags & HAMMER_INODE_RO)
                return (EROFS);
        if (hammer_nohistory(ip) == 0 &&
            (error = hammer_checkspace(ip->hmp)) != 0) {
                return (error);
        }

        hammer_start_transaction(&trans, ip->hmp);
        error = 0;

        if (vap->va_flags != VNOVAL) {
                flags = ip->ino_data.uflags;
                error = vop_helper_setattr_flags(&flags, vap->va_flags,
                                                 hammer_to_unix_xid(&ip->ino_data.uid),
                                                 ap->a_cred);
                if (error == 0) {
                        if (ip->ino_data.uflags != flags) {
                                ip->ino_data.uflags = flags;
                                modflags |= HAMMER_INODE_DDIRTY;
                        }
                        if (ip->ino_data.uflags & (IMMUTABLE | APPEND)) {
                                error = 0;
                                goto done;
                        }
                }
                goto done;
        }
        if (ip->ino_data.uflags & (IMMUTABLE | APPEND)) {
                error = EPERM;
                goto done;
        }
        if (vap->va_uid != (uid_t)VNOVAL || vap->va_gid != (gid_t)VNOVAL) {
                mode_t cur_mode = ip->ino_data.mode;
                uid_t cur_uid = hammer_to_unix_xid(&ip->ino_data.uid);
                gid_t cur_gid = hammer_to_unix_xid(&ip->ino_data.gid);
                uuid_t uuid_uid;
                uuid_t uuid_gid;

                error = vop_helper_chown(ap->a_vp, vap->va_uid, vap->va_gid,
                                         ap->a_cred,
                                         &cur_uid, &cur_gid, &cur_mode);
                if (error == 0) {
                        hammer_guid_to_uuid(&uuid_uid, cur_uid);
                        hammer_guid_to_uuid(&uuid_gid, cur_gid);
                        if (bcmp(&uuid_uid, &ip->ino_data.uid,
                                 sizeof(uuid_uid)) ||
                            bcmp(&uuid_gid, &ip->ino_data.gid,
                                 sizeof(uuid_gid)) ||
                            ip->ino_data.mode != cur_mode
                        ) {
                                ip->ino_data.uid = uuid_uid;
                                ip->ino_data.gid = uuid_gid;
                                ip->ino_data.mode = cur_mode;
                        }
                        modflags |= HAMMER_INODE_DDIRTY;
                }
        }
        while (vap->va_size != VNOVAL && ip->ino_data.size != vap->va_size) {
                switch(ap->a_vp->v_type) {
                case VREG:
                        if (vap->va_size == ip->ino_data.size)
                                break;
                        /*
                         * XXX break atomicity, we can deadlock the backend
                         * if we do not release the lock.  Probably not a
                         * big deal here.
                         */
                        blksize = hammer_blocksize(vap->va_size);
                        if (vap->va_size < ip->ino_data.size) {
                                vtruncbuf(ap->a_vp, vap->va_size, blksize);
                                truncating = 1;
                        } else {
                                vnode_pager_setsize(ap->a_vp, vap->va_size);
                                truncating = 0;
                        }
                        ip->ino_data.size = vap->va_size;
                        modflags |= HAMMER_INODE_DDIRTY;

                        /*
                         * on-media truncation is cached in the inode until
                         * the inode is synchronized.
                         */
                        if (truncating) {
                                hammer_ip_frontend_trunc(ip, vap->va_size);
                                hammer_update_rsv_databufs(ip);
#ifdef DEBUG_TRUNCATE
                                if (HammerTruncIp == NULL)
                                        HammerTruncIp = ip;
#endif
                                if ((ip->flags & HAMMER_INODE_TRUNCATED) == 0) {
                                        ip->flags |= HAMMER_INODE_TRUNCATED;
                                        ip->trunc_off = vap->va_size;
#ifdef DEBUG_TRUNCATE
                                        if (ip == HammerTruncIp)
                                                kprintf("truncate1 %016llx\n", ip->trunc_off);
#endif
                                } else if (ip->trunc_off > vap->va_size) {
                                        ip->trunc_off = vap->va_size;
#ifdef DEBUG_TRUNCATE
                                        if (ip == HammerTruncIp)
                                                kprintf("truncate2 %016llx\n", ip->trunc_off);
#endif
                                } else {
#ifdef DEBUG_TRUNCATE
                                        if (ip == HammerTruncIp)
                                                kprintf("truncate3 %016llx (ignored)\n", vap->va_size);
#endif
                                }
                        }

                        /*
                         * If truncating we have to clean out a portion of
                         * the last block on-disk.  We do this in the
                         * front-end buffer cache.
                         */
                        aligned_size = (vap->va_size + (blksize - 1)) &
                                       ~(int64_t)(blksize - 1);
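                        /*
                         * Illustration (hypothetical figures): truncating
                         * to va_size 100000 with a 16384 byte block first
                         * rounds up to aligned_size 114688; since 100000
                         * falls inside a partial block, aligned_size drops
                         * back to 98304 (the base of that block) below and
                         * bytes 1696..16383 within it are zeroed via the
                         * buffer cache.
                         */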
                        if (truncating && vap->va_size < aligned_size) {
                                struct buf *bp;
                                int offset;

                                aligned_size -= blksize;

                                offset = (int)vap->va_size & (blksize - 1);
                                error = bread(ap->a_vp, aligned_size,
                                              blksize, &bp);
                                hammer_ip_frontend_trunc(ip, aligned_size);
                                if (error == 0) {
                                        bzero(bp->b_data + offset,
                                              blksize - offset);
                                        bdwrite(bp);
                                } else {
                                        kprintf("ERROR %d\n", error);
                                        brelse(bp);
                                }
                        }
                        break;
                case VDATABASE:
                        if ((ip->flags & HAMMER_INODE_TRUNCATED) == 0) {
                                ip->flags |= HAMMER_INODE_TRUNCATED;
                                ip->trunc_off = vap->va_size;
                        } else if (ip->trunc_off > vap->va_size) {
                                ip->trunc_off = vap->va_size;
                        }
                        hammer_ip_frontend_trunc(ip, vap->va_size);
                        ip->ino_data.size = vap->va_size;
                        modflags |= HAMMER_INODE_DDIRTY;
                        break;
                default:
                        error = EINVAL;
                        break;
                }
                break;
        }
        if (vap->va_atime.tv_sec != VNOVAL) {
                ip->ino_data.atime =
                        hammer_timespec_to_time(&vap->va_atime);
                modflags |= HAMMER_INODE_ATIME;
        }
        if (vap->va_mtime.tv_sec != VNOVAL) {
                ip->ino_data.mtime =
                        hammer_timespec_to_time(&vap->va_mtime);
                modflags |= HAMMER_INODE_MTIME;
        }
        if (vap->va_mode != (mode_t)VNOVAL) {
                mode_t cur_mode = ip->ino_data.mode;
                uid_t cur_uid = hammer_to_unix_xid(&ip->ino_data.uid);
                gid_t cur_gid = hammer_to_unix_xid(&ip->ino_data.gid);

                error = vop_helper_chmod(ap->a_vp, vap->va_mode, ap->a_cred,
                                         cur_uid, cur_gid, &cur_mode);
                if (error == 0 && ip->ino_data.mode != cur_mode) {
                        ip->ino_data.mode = cur_mode;
                        modflags |= HAMMER_INODE_DDIRTY;
                }
        }
done:
        if (error == 0)
                hammer_modify_inode(ip, modflags);
        hammer_done_transaction(&trans);
        return (error);
}
/*
 * hammer_vop_nsymlink { nch, dvp, vpp, cred, vap, target }
 */
static
int
hammer_vop_nsymlink(struct vop_nsymlink_args *ap)
{
        struct hammer_transaction trans;
        struct hammer_inode *dip;
        struct hammer_inode *nip;
        struct nchandle *nch;
        hammer_record_t record;
        int error;
        int bytes;

        ap->a_vap->va_type = VLNK;

        nch = ap->a_nch;
        dip = VTOI(ap->a_dvp);

        if (dip->flags & HAMMER_INODE_RO)
                return (EROFS);
        if ((error = hammer_checkspace(dip->hmp)) != 0)
                return (error);

        /*
         * Create a transaction to cover the operations we perform.
         */
        hammer_start_transaction(&trans, dip->hmp);

        /*
         * Create a new filesystem object of the requested type.  The
         * returned inode will be referenced but not locked.
         */
        error = hammer_create_inode(&trans, ap->a_vap, ap->a_cred,
                                    dip, 0, &nip);
        if (error) {
                hammer_done_transaction(&trans);
                *ap->a_vpp = NULL;
                return (error);
        }

        /*
         * Add a record representing the symlink.  symlink stores the link
         * as pure data, not a string, and is not \0-terminated.
         */
        if (error == 0) {
                bytes = strlen(ap->a_target);

                if (bytes <= HAMMER_INODE_BASESYMLEN) {
                        bcopy(ap->a_target, nip->ino_data.ext.symlink, bytes);
                } else {
                        record = hammer_alloc_mem_record(nip, bytes);
                        record->type = HAMMER_MEM_RECORD_GENERAL;

                        record->leaf.base.localization = nip->obj_localization +
                                                         HAMMER_LOCALIZE_MISC;
                        record->leaf.base.key = HAMMER_FIXKEY_SYMLINK;
                        record->leaf.base.rec_type = HAMMER_RECTYPE_FIX;
                        record->leaf.data_len = bytes;
                        KKASSERT(HAMMER_SYMLINK_NAME_OFF == 0);
                        bcopy(ap->a_target, record->data->symlink.name, bytes);
                        error = hammer_ip_add_record(&trans, record);
                }

                /*
                 * Set the file size to the length of the link.
                 */
                if (error == 0) {
                        nip->ino_data.size = bytes;
                        hammer_modify_inode(nip, HAMMER_INODE_DDIRTY);
                }
        }
        if (error == 0)
                error = hammer_ip_add_directory(&trans, dip, nch->ncp->nc_name,
                                                nch->ncp->nc_nlen, nip);

        /*
         * Finish up.
         */
        if (error) {
                hammer_rel_inode(nip, 0);
                *ap->a_vpp = NULL;
        } else {
                error = hammer_get_vnode(nip, ap->a_vpp);
                hammer_rel_inode(nip, 0);
                if (error == 0) {
                        cache_setunresolved(ap->a_nch);
                        cache_setvp(ap->a_nch, *ap->a_vpp);
                }
        }
        hammer_done_transaction(&trans);
        return (error);
}
/*
 * hammer_vop_nwhiteout { nch, dvp, cred, flags }
 */
static
int
hammer_vop_nwhiteout(struct vop_nwhiteout_args *ap)
{
        struct hammer_transaction trans;
        struct hammer_inode *dip;
        int error;

        dip = VTOI(ap->a_dvp);

        if (hammer_nohistory(dip) == 0 &&
            (error = hammer_checkspace(dip->hmp)) != 0) {
                return (error);
        }

        hammer_start_transaction(&trans, dip->hmp);
        error = hammer_dounlink(&trans, ap->a_nch, ap->a_dvp,
                                ap->a_cred, ap->a_flags);
        hammer_done_transaction(&trans);
        return (error);
}
/*
 * hammer_vop_ioctl { vp, command, data, fflag, cred }
 */
static
int
hammer_vop_ioctl(struct vop_ioctl_args *ap)
{
        struct hammer_inode *ip = ap->a_vp->v_data;

        return(hammer_ioctl(ip, ap->a_command, ap->a_data,
                            ap->a_fflag, ap->a_cred));
}
static
int
hammer_vop_mountctl(struct vop_mountctl_args *ap)
{
        struct mount *mp;
        int error;

        mp = ap->a_head.a_ops->head.vv_mount;

        switch (ap->a_op) {
        case MOUNTCTL_SET_EXPORT:
                if (ap->a_ctllen != sizeof(struct export_args))
                        error = EINVAL;
                else
                        error = hammer_vfs_export(mp, ap->a_op,
                                    (const struct export_args *)ap->a_ctl);
                break;
        default:
                error = journal_mountctl(ap);
                break;
        }
        return (error);
}
/*
 * hammer_vop_strategy { vp, bio }
 *
 * Strategy call, used for regular file read & write only.  Note that the
 * bp may represent a cluster.
 *
 * To simplify operation and allow better optimizations in the future,
 * this code does not make any assumptions with regards to buffer alignment
 * or size.
 */
static
int
hammer_vop_strategy(struct vop_strategy_args *ap)
{
        struct buf *bp;
        int error;

        bp = ap->a_bio->bio_buf;

        switch (bp->b_cmd) {
        case BUF_CMD_READ:
                error = hammer_vop_strategy_read(ap);
                break;
        case BUF_CMD_WRITE:
                error = hammer_vop_strategy_write(ap);
                break;
        default:
                bp->b_error = error = EINVAL;
                bp->b_flags |= B_ERROR;
                biodone(ap->a_bio);
                break;
        }
        return (error);
}
/*
 * Read from a regular file.  Iterate the related records and fill in the
 * BIO/BUF.  Gaps are zero-filled.
 *
 * The support code in hammer_object.c should be used to deal with mixed
 * in-memory and on-disk records.
 *
 * NOTE: Can be called from the cluster code with an oversized buf.
 */
static
int
hammer_vop_strategy_read(struct vop_strategy_args *ap)
{
        struct hammer_transaction trans;
        struct hammer_inode *ip;
        struct hammer_cursor cursor;
        hammer_base_elm_t base;
        hammer_off_t disk_offset;
        struct bio *bio;
        struct bio *nbio;
        struct buf *bp;
        int64_t rec_offset;
        int64_t ran_end;
        int64_t tmp64;
        int error;
        int boff;
        int roff;
        int n;

        bio = ap->a_bio;
        bp = bio->bio_buf;
        ip = ap->a_vp->v_data;

        /*
         * The zone-2 disk offset may have been set by the cluster code via
         * a BMAP operation, or else should be NOOFFSET.
         *
         * Checking the high bits for a match against zone-2 should suffice.
         */
        nbio = push_bio(bio);
        if ((nbio->bio_offset & HAMMER_OFF_ZONE_MASK) ==
            HAMMER_ZONE_RAW_BUFFER) {
                error = hammer_io_direct_read(ip->hmp, nbio);
                return (error);
        }

        /*
         * Well, that sucked.  Do it the hard way.  If all the stars are
         * aligned we may still be able to issue a direct-read.
         */
        hammer_simple_transaction(&trans, ip->hmp);
        hammer_init_cursor(&trans, &cursor, &ip->cache[1], ip);

        /*
         * Key range (begin and end inclusive) to scan.  Note that the keys
         * stored in the actual records represent BASE+LEN, not BASE.  The
         * first record containing bio_offset will have a key > bio_offset.
         */
        cursor.key_beg.localization = ip->obj_localization +
                                      HAMMER_LOCALIZE_MISC;
        cursor.key_beg.obj_id = ip->obj_id;
        cursor.key_beg.create_tid = 0;
        cursor.key_beg.delete_tid = 0;
        cursor.key_beg.obj_type = 0;
        cursor.key_beg.key = bio->bio_offset + 1;
        cursor.asof = ip->obj_asof;
        cursor.flags |= HAMMER_CURSOR_ASOF;

        cursor.key_end = cursor.key_beg;
        KKASSERT(ip->ino_data.obj_type == HAMMER_OBJTYPE_REGFILE);
#if 0
        if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DBFILE) {
                cursor.key_beg.rec_type = HAMMER_RECTYPE_DB;
                cursor.key_end.rec_type = HAMMER_RECTYPE_DB;
                cursor.key_end.key = 0x7FFFFFFFFFFFFFFFLL;
        } else
#endif
        {
                ran_end = bio->bio_offset + bp->b_bufsize;
                cursor.key_beg.rec_type = HAMMER_RECTYPE_DATA;
                cursor.key_end.rec_type = HAMMER_RECTYPE_DATA;
                tmp64 = ran_end + MAXPHYS + 1;  /* work-around GCC-4 bug */
                if (tmp64 < ran_end)
                        cursor.key_end.key = 0x7FFFFFFFFFFFFFFFLL;
                else
                        cursor.key_end.key = ran_end + MAXPHYS + 1;
        }
        cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE;
        error = hammer_ip_first(&cursor);
        boff = 0;

        while (error == 0) {
                /*
                 * Get the base file offset of the record.  The key for
                 * data records is (base + bytes) rather than (base).
                 */
                base = &cursor.leaf->base;
                rec_offset = base->key - cursor.leaf->data_len;

                /*
                 * Calculate the gap, if any, and zero-fill it.
                 *
                 * n is the offset of the start of the record versus our
                 * current seek offset in the bio.
                 */
                n = (int)(rec_offset - (bio->bio_offset + boff));
                if (n > 0) {
                        if (n > bp->b_bufsize - boff)
                                n = bp->b_bufsize - boff;
                        bzero((char *)bp->b_data + boff, n);
                        boff += n;
                        n = 0;
                }
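
                /*
                 * Illustration (hypothetical figures): reading a sparse
                 * file at bio_offset 0x10000 with the next record based
                 * at 0x12000 yields n = 0x2000, a hole zero-filled above.
                 * If n is negative the record overlaps the seek position
                 * and roff below indexes into the record instead.
                 */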
                /*
                 * Calculate the data offset in the record and the number
                 * of bytes we can copy.
                 *
                 * There are two degenerate cases.  First, boff may already
                 * be at bp->b_bufsize.  Secondly, the data offset within
                 * the record may exceed the record's size.
                 */
                roff = -n;
                rec_offset += roff;
                n = cursor.leaf->data_len - roff;
                if (n <= 0) {
                        kprintf("strategy_read: bad n=%d roff=%d\n", n, roff);
                        n = 0;
                } else if (n > bp->b_bufsize - boff) {
                        n = bp->b_bufsize - boff;
                }

                /*
                 * Deal with cached truncations.  This cool bit of code
                 * allows truncate()/ftruncate() to avoid having to sync
                 * dirty data.
                 *
                 * If the frontend is truncated then all backend records are
                 * subject to the frontend's truncation.
                 *
                 * If the backend is truncated then backend records on-disk
                 * (but not in-memory) are subject to the backend's
                 * truncation.  In-memory records owned by the backend
                 * represent data written after the truncation point on the
                 * backend and must not be truncated.
                 *
                 * Truncate operations deal with frontend buffer cache
                 * buffers and frontend-owned in-memory records synchronously.
                 */
                if (ip->flags & HAMMER_INODE_TRUNCATED) {
                        if (hammer_cursor_ondisk(&cursor) ||
                            cursor.iprec->flush_state == HAMMER_FST_FLUSH) {
                                if (ip->trunc_off <= rec_offset)
                                        n = 0;
                                else if (ip->trunc_off < rec_offset + n)
                                        n = (int)(ip->trunc_off - rec_offset);
                        }
                }
                if (ip->sync_flags & HAMMER_INODE_TRUNCATED) {
                        if (hammer_cursor_ondisk(&cursor)) {
                                if (ip->sync_trunc_off <= rec_offset)
                                        n = 0;
                                else if (ip->sync_trunc_off < rec_offset + n)
                                        n = (int)(ip->sync_trunc_off - rec_offset);
                        }
                }
                /*
                 * Try to issue a direct read into our bio if possible,
                 * otherwise resolve the element data into a hammer_buffer
                 * and copy.
                 *
                 * The buffer on-disk should be zeroed past any real
                 * truncation point, but may not be for any synthesized
                 * truncation point from above.
                 */
                if (boff == 0 && n == bp->b_bufsize &&
                    ((cursor.leaf->data_offset + roff) & HAMMER_BUFMASK) == 0) {
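                        /*
                         * All the stars aligned: the request starts at the
                         * beginning of the buffer (boff == 0), the record
                         * covers the entire buffer (n == b_bufsize), and
                         * the media offset is aligned to a HAMMER buffer
                         * boundary, so the zone-2 lookup below lets the
                         * read go straight to disk.
                         */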
                        disk_offset = hammer_blockmap_lookup(
                                                trans.hmp,
                                                cursor.leaf->data_offset + roff,
                                                &error);
                        if (error)
                                break;
                        nbio->bio_offset = disk_offset;
                        error = hammer_io_direct_read(trans.hmp, nbio);
                        goto done;
                } else if (n) {
                        error = hammer_ip_resolve_data(&cursor);
                        if (error == 0) {
                                bcopy((char *)cursor.data + roff,
                                      (char *)bp->b_data + boff, n);
                        }
                }
                if (error)
                        break;
                /*
                 * Iterate until we have filled the request.
                 */
                boff += n;
                if (boff == bp->b_bufsize)
                        break;
                error = hammer_ip_next(&cursor);
        }

        /*
         * There may have been a gap after the last record
         */
        if (error == ENOENT)
                error = 0;
        if (error == 0 && boff != bp->b_bufsize) {
                KKASSERT(boff < bp->b_bufsize);
                bzero((char *)bp->b_data + boff, bp->b_bufsize - boff);
                /* boff = bp->b_bufsize; */
        }
        bp->b_resid = 0;
        bp->b_error = error;
        if (error)
                bp->b_flags |= B_ERROR;
        biodone(ap->a_bio);

done:
        if (cursor.node)
                hammer_cache_node(&ip->cache[1], cursor.node);
        hammer_done_cursor(&cursor);
        hammer_done_transaction(&trans);
        return (error);
}
/*
 * BMAP operation - used to support cluster_read() only.
 *
 * (struct vnode *vp, off_t loffset, off_t *doffsetp, int *runp, int *runb)
 *
 * This routine may return EOPNOTSUPP if the operation is not supported for
 * the specified offset.  The contents of the pointer arguments do not
 * need to be initialized in that case.
 *
 * If a disk address is available and properly aligned return 0 with
 * *doffsetp set to the zone-2 address, and *runp / *runb set appropriately
 * to the run-length relative to that offset.  Callers may assume that
 * *doffsetp is valid if 0 is returned, even if *runp is not sufficiently
 * large, so return EOPNOTSUPP if it is not sufficiently large.
 */
static
int
hammer_vop_bmap(struct vop_bmap_args *ap)
{
        struct hammer_transaction trans;
        struct hammer_inode *ip;
        struct hammer_cursor cursor;
        hammer_base_elm_t base;
        int64_t rec_offset;
        int64_t ran_end;
        int64_t tmp64;
        int64_t base_offset;
        int64_t base_disk_offset;
        int64_t last_offset;
        hammer_off_t last_disk_offset;
        hammer_off_t disk_offset;
        int rec_len;
        int error;
        int blksize;

        ip = ap->a_vp->v_data;

        /*
         * We can only BMAP regular files.  We can't BMAP database files,
         * which are small-block files.
         */
        if (ip->ino_data.obj_type != HAMMER_OBJTYPE_REGFILE)
                return (EOPNOTSUPP);

        /*
         * bmap is typically called with runp/runb both NULL when used
         * for writing.  We do not support BMAP for writing atm.
         */
        if (ap->a_cmd != BUF_CMD_READ)
                return (EOPNOTSUPP);
        /*
         * Scan the B-Tree to acquire blockmap addresses, then translate
         * to raw addresses.
         */
        hammer_simple_transaction(&trans, ip->hmp);
#if 0
        kprintf("bmap_beg %016llx ip->cache %p\n", ap->a_loffset, ip->cache[1]);
#endif
        hammer_init_cursor(&trans, &cursor, &ip->cache[1], ip);

        /*
         * Key range (begin and end inclusive) to scan.  Note that the keys
         * stored in the actual records represent BASE+LEN, not BASE.  The
         * first record containing bio_offset will have a key > bio_offset.
         */
        cursor.key_beg.localization = ip->obj_localization +
                                      HAMMER_LOCALIZE_MISC;
        cursor.key_beg.obj_id = ip->obj_id;
        cursor.key_beg.create_tid = 0;
        cursor.key_beg.delete_tid = 0;
        cursor.key_beg.obj_type = 0;
        if (ap->a_runb)
                cursor.key_beg.key = ap->a_loffset - MAXPHYS + 1;
        else
                cursor.key_beg.key = ap->a_loffset + 1;
        if (cursor.key_beg.key < 0)
                cursor.key_beg.key = 0;
        cursor.asof = ip->obj_asof;
        cursor.flags |= HAMMER_CURSOR_ASOF;
        cursor.key_end = cursor.key_beg;
        KKASSERT(ip->ino_data.obj_type == HAMMER_OBJTYPE_REGFILE);

        ran_end = ap->a_loffset + MAXPHYS;
        cursor.key_beg.rec_type = HAMMER_RECTYPE_DATA;
        cursor.key_end.rec_type = HAMMER_RECTYPE_DATA;
        tmp64 = ran_end + MAXPHYS + 1;  /* work-around GCC-4 bug */
        if (tmp64 < ran_end)
                cursor.key_end.key = 0x7FFFFFFFFFFFFFFFLL;
        else
                cursor.key_end.key = ran_end + MAXPHYS + 1;

        cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE;
        error = hammer_ip_first(&cursor);
        base_offset = last_offset = 0;
        base_disk_offset = last_disk_offset = 0;

        while (error == 0) {
                /*
                 * Get the base file offset of the record.  The key for
                 * data records is (base + bytes) rather than (base).
                 *
                 * NOTE: rec_offset + rec_len may exceed the end-of-file.
                 * The extra bytes should be zero on-disk and the BMAP op
                 * should still be ok.
                 */
                base = &cursor.leaf->base;
                rec_offset = base->key - cursor.leaf->data_len;
                rec_len = cursor.leaf->data_len;

                /*
                 * Incorporate any cached truncation.
                 *
                 * NOTE: Modifications to rec_len based on synthesized
                 * truncation points remove the guarantee that any extended
                 * data on disk is zero (since the truncations may not have
                 * taken place on-media yet).
                 */
                if (ip->flags & HAMMER_INODE_TRUNCATED) {
                        if (hammer_cursor_ondisk(&cursor) ||
                            cursor.iprec->flush_state == HAMMER_FST_FLUSH) {
                                if (ip->trunc_off <= rec_offset)
                                        rec_len = 0;
                                else if (ip->trunc_off < rec_offset + rec_len)
                                        rec_len = (int)(ip->trunc_off - rec_offset);
                        }
                }
                if (ip->sync_flags & HAMMER_INODE_TRUNCATED) {
                        if (hammer_cursor_ondisk(&cursor)) {
                                if (ip->sync_trunc_off <= rec_offset)
                                        rec_len = 0;
                                else if (ip->sync_trunc_off < rec_offset + rec_len)
                                        rec_len = (int)(ip->sync_trunc_off - rec_offset);
                        }
                }
                /*
                 * Accumulate information.  If we have hit a discontiguous
                 * block reset base_offset unless we are already beyond the
                 * requested offset.  If we are, that's it, we stop.
                 */
                disk_offset = hammer_blockmap_lookup(trans.hmp,
                                                     cursor.leaf->data_offset,
                                                     &error);
                if (error)
                        break;
                if (rec_offset != last_offset ||
                    disk_offset != last_disk_offset) {
                        if (rec_offset > ap->a_loffset)
                                break;
                        base_offset = rec_offset;
                        base_disk_offset = disk_offset;
                }
                last_offset = rec_offset + rec_len;
                last_disk_offset = disk_offset + rec_len;

                error = hammer_ip_next(&cursor);
        }
2344 kprintf("BMAP %016llx: %016llx - %016llx\n",
2345 ap->a_loffset, base_offset, last_offset);
2346 kprintf("BMAP %16s: %016llx - %016llx\n",
2347 "", base_disk_offset, last_disk_offset);
2351 hammer_cache_node(&ip->cache[1], cursor.node);
2353 kprintf("bmap_end2 %016llx ip->cache %p\n", ap->a_loffset, ip->cache[1]);
2356 hammer_done_cursor(&cursor);
2357 hammer_done_transaction(&trans);
2360 * If we couldn't find any records or the records we did find were
2361 * all behind the requested offset, return failure. A forward
2362 * truncation can leave a hole w/ no on-disk records.
2364 if (last_offset == 0 || last_offset < ap->a_loffset)
2365 return (EOPNOTSUPP);
        /*
         * Figure out the block size at the requested offset and adjust
         * our limits so the cluster_read() does not create inappropriately
         * sized buffer cache buffers.
         */
        blksize = hammer_blocksize(ap->a_loffset);
        if (hammer_blocksize(base_offset) != blksize) {
                base_offset = hammer_blockdemarc(base_offset, ap->a_loffset);
        }
        if (last_offset != ap->a_loffset &&
            hammer_blocksize(last_offset - 1) != blksize) {
                last_offset = hammer_blockdemarc(ap->a_loffset,
                                                 last_offset - 1);
        }

        /*
         * Returning EOPNOTSUPP simply prevents the direct-IO optimization
         * from being made.
         */
        disk_offset = base_disk_offset + (ap->a_loffset - base_offset);
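        /*
         * Illustration (hypothetical figures): if base_offset 0x20000 was
         * found to map to some zone-2 offset X, a request at a_loffset
         * 0x24000 resolves to X + 0x4000, the displacement within the
         * single contiguous run accumulated above.
         */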
        /*
         * If doffsetp is not aligned or the forward run size does
         * not cover a whole buffer, disallow the direct I/O.
         */
        if ((disk_offset & HAMMER_BUFMASK) ||
            (last_offset - ap->a_loffset) < blksize) {
                error = EOPNOTSUPP;
        } else {
                *ap->a_doffsetp = disk_offset;
                if (ap->a_runb) {
                        *ap->a_runb = ap->a_loffset - base_offset;
                        KKASSERT(*ap->a_runb >= 0);
                }
                if (ap->a_runp) {
                        *ap->a_runp = last_offset - ap->a_loffset;
                        KKASSERT(*ap->a_runp >= 0);
                }
                error = 0;
        }
        return (error);
}
/*
 * Write to a regular file.  Because this is a strategy call the OS is
 * trying to actually get data onto the media.
 */
static
int
hammer_vop_strategy_write(struct vop_strategy_args *ap)
{
        hammer_record_t record;
        hammer_mount_t hmp;
        hammer_inode_t ip;
        struct bio *bio;
        struct buf *bp;
        int bytes;
        int error;

        bio = ap->a_bio;
        bp = bio->bio_buf;
        ip = ap->a_vp->v_data;
        hmp = ip->hmp;

        KKASSERT(bp->b_bufsize == hammer_blocksize(bio->bio_offset));

        if (ip->flags & HAMMER_INODE_RO) {
                bp->b_error = EROFS;
                bp->b_flags |= B_ERROR;
                biodone(ap->a_bio);
                hammer_cleanup_write_io(ip);
                return (EROFS);
        }
        /*
         * Interlock with inode destruction (no in-kernel or directory
         * topology visibility).  If we queue new IO while trying to
         * destroy the inode we can deadlock the vtrunc call in
         * hammer_inode_unloadable_check().
         */
        if (ip->flags & (HAMMER_INODE_DELETING|HAMMER_INODE_DELETED)) {
                bp->b_resid = 0;
                biodone(ap->a_bio);
                hammer_cleanup_write_io(ip);
                return (0);
        }
        /*
         * Reserve space and issue a direct-write from the front-end.
         * NOTE: The direct_io code will hammer_bread/bcopy smaller
         * fragments.
         *
         * An in-memory record will be installed to reference the storage
         * until the flusher can get to it.
         *
         * Since we own the high level bio the front-end will not try to
         * do a direct-read until the write completes.
         *
         * NOTE: The only time we do not reserve a full-sized buffer's
         * worth of data is if the file is small.  We do not try to
         * allocate a fragment (from the small-data zone) at the end of
         * an otherwise large file as this can lead to wildly separated
         * data.
         */
        KKASSERT((bio->bio_offset & HAMMER_BUFMASK) == 0);
        KKASSERT(bio->bio_offset < ip->ino_data.size);
        if (bio->bio_offset || ip->ino_data.size > HAMMER_BUFSIZE / 2)
                bytes = bp->b_bufsize;
        else
                bytes = ((int)ip->ino_data.size + 15) & ~15;
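
        /*
         * The rounding mirrors hammer_vop_getattr(): a small file (size
         * at or below HAMMER_BUFSIZE / 2, written at offset 0) reserves
         * only its length rounded up to a 16 byte boundary, e.g. a 100
         * byte file reserves 112 bytes rather than a full buffer.
         */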
        record = hammer_ip_add_bulk(ip, bio->bio_offset, bp->b_data,
                                    bytes, &error);
        if (record) {
                hammer_io_direct_write(hmp, &record->leaf, bio);
                hammer_rel_mem_record(record);
                if (ip->rsv_recs > 1 && hmp->rsv_recs > hammer_limit_recs)
                        hammer_flush_inode(ip, 0);
        } else {
                bp->b_bio2.bio_offset = NOOFFSET;
                bp->b_error = error;
                bp->b_flags |= B_ERROR;
                biodone(ap->a_bio);
        }
        hammer_cleanup_write_io(ip);
        return (error);
}
/*
 * Clean-up after disposing of a dirty frontend buffer's data.
 * This is somewhat heuristic so try to be robust.
 */
static void
hammer_cleanup_write_io(hammer_inode_t ip)
{
        if (ip->rsv_databufs) {
                --ip->rsv_databufs;
                --ip->hmp->rsv_databufs;
        }
}
/*
 * We can lose track of dirty buffer cache buffers if we truncate; this
 * routine will resynchronize the count.
 */
static void
hammer_update_rsv_databufs(hammer_inode_t ip)
{
        struct buf *bp;
        int delta;
        int n;

        if (ip->vp) {
                n = 0;
                RB_FOREACH(bp, buf_rb_tree, &ip->vp->v_rbdirty_tree) {
                        ++n;
                }
        } else {
                n = 0;
        }
        delta = n - ip->rsv_databufs;
        ip->rsv_databufs += delta;
        ip->hmp->rsv_databufs += delta;
}
/*
 * dounlink - disconnect a directory entry
 *
 * XXX whiteout support not really in yet
 */
static int
hammer_dounlink(hammer_transaction_t trans, struct nchandle *nch,
                struct vnode *dvp, struct ucred *cred, int flags)
{
        struct namecache *ncp;
        hammer_inode_t dip;
        hammer_inode_t ip;
        struct hammer_cursor cursor;
        int64_t namekey;
        int nlen, error;

        /*
         * Calculate the namekey and setup the key range for the scan.  This
         * works kinda like a chained hash table where the lower 32 bits
         * of the namekey synthesize the chain.
         *
         * The key range is inclusive of both key_beg and key_end.
         */
        dip = VTOI(dvp);
        ncp = nch->ncp;

        if (dip->flags & HAMMER_INODE_RO)
                return (EROFS);

        namekey = hammer_directory_namekey(ncp->nc_name, ncp->nc_nlen);
retry:
        hammer_init_cursor(trans, &cursor, &dip->cache[1], dip);
        cursor.key_beg.localization = dip->obj_localization +
                                      HAMMER_LOCALIZE_MISC;
        cursor.key_beg.obj_id = dip->obj_id;
        cursor.key_beg.key = namekey;
        cursor.key_beg.create_tid = 0;
        cursor.key_beg.delete_tid = 0;
        cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
        cursor.key_beg.obj_type = 0;

        cursor.key_end = cursor.key_beg;
        cursor.key_end.key |= 0xFFFFFFFFULL;
        cursor.asof = dip->obj_asof;
        cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;
        /*
         * Scan all matching records (the chain), locate the one matching
         * the requested path component.  info->last_error contains the
         * error code on search termination and could be 0, ENOENT, or
         * something else.
         *
         * The hammer_ip_*() functions merge in-memory records with on-disk
         * records for the purposes of the search.
         */
        error = hammer_ip_first(&cursor);

        while (error == 0) {
                error = hammer_ip_resolve_data(&cursor);
                if (error)
                        break;
                nlen = cursor.leaf->data_len - HAMMER_ENTRY_NAME_OFF;
                if (ncp->nc_nlen == nlen &&
                    bcmp(ncp->nc_name, cursor.data->entry.name, nlen) == 0) {
                        break;
                }
                error = hammer_ip_next(&cursor);
        }
        /*
         * If all is ok we have to get the inode so we can adjust nlinks.
         * To avoid a deadlock with the flusher we must release the inode
         * lock on the directory when acquiring the inode for the entry.
         *
         * If the target is a directory, it must be empty.
         */
        if (error == 0) {
                hammer_unlock(&cursor.ip->lock);
                ip = hammer_get_inode(trans, dip, cursor.data->entry.obj_id,
                                      dip->hmp->asof,
                                      cursor.data->entry.localization,
                                      0, &error);
                hammer_lock_sh(&cursor.ip->lock);
                if (error == ENOENT) {
                        kprintf("obj_id %016llx\n", cursor.data->entry.obj_id);
                        Debugger("ENOENT unlinking object that should exist");
                }

                /*
                 * If we are trying to remove a directory the directory must
                 * be empty.
                 *
                 * WARNING: hammer_ip_check_directory_empty() may have to
                 * terminate the cursor to avoid a deadlock.  It is ok to
                 * call hammer_done_cursor() twice.
                 */
                if (error == 0 && ip->ino_data.obj_type ==
                                  HAMMER_OBJTYPE_DIRECTORY) {
                        error = hammer_ip_check_directory_empty(trans, ip);
                }

                /*
                 * Delete the directory entry.
                 *
                 * WARNING: hammer_ip_del_directory() may have to terminate
                 * the cursor to avoid a deadlock.  It is ok to call
                 * hammer_done_cursor() twice.
                 */
                if (error == 0) {
                        error = hammer_ip_del_directory(trans, &cursor,
                                                        dip, ip);
                }
                hammer_done_cursor(&cursor);
                if (error == 0) {
                        cache_setunresolved(nch);
                        cache_setvp(nch, NULL);
                        /* XXX locking */
                        if (ip->vp)
                                cache_inval_vp(ip->vp, CINV_DESTROY);
                }
                if (ip)
                        hammer_rel_inode(ip, 0);
        } else {
                hammer_done_cursor(&cursor);
        }
        if (error == EDEADLK)
                goto retry;
        if (error == ENOENT)
                error = 0;
        return (error);
}
/************************************************************************
 *                          FIFO AND SPECFS OPS                         *
 ************************************************************************
 *
 */

static int
hammer_vop_fifoclose (struct vop_close_args *ap)
{
        /* XXX update itimes */
        return (VOCALL(&fifo_vnode_vops, &ap->a_head));
}

static int
hammer_vop_fiforead (struct vop_read_args *ap)
{
        int error;

        error = VOCALL(&fifo_vnode_vops, &ap->a_head);
        /* XXX update access time */
        return (error);
}

static int
hammer_vop_fifowrite (struct vop_write_args *ap)
{
        int error;

        error = VOCALL(&fifo_vnode_vops, &ap->a_head);
        /* XXX update access time */
        return (error);
}

static int
hammer_vop_specclose (struct vop_close_args *ap)
{
        /* XXX update itimes */
        return (VOCALL(&spec_vnode_vops, &ap->a_head));
}

static int
hammer_vop_specread (struct vop_read_args *ap)
{
        /* XXX update access time */
        return (VOCALL(&spec_vnode_vops, &ap->a_head));
}

static int
hammer_vop_specwrite (struct vop_write_args *ap)
{
        /* XXX update last change time */
        return (VOCALL(&spec_vnode_vops, &ap->a_head));
}