/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_vnops.c,v 1.76 2008/06/23 07:31:14 dillon Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/fcntl.h>
#include <sys/namecache.h>
#include <sys/vnode.h>
#include <sys/lockf.h>
#include <sys/event.h>
#include <sys/stat.h>
#include <sys/dirent.h>
#include <vm/vm_extern.h>
#include <vfs/fifofs/fifo.h>

#include "hammer.h"

/*static int hammer_vop_vnoperate(struct vop_generic_args *);*/
static int hammer_vop_fsync(struct vop_fsync_args *);
static int hammer_vop_read(struct vop_read_args *);
static int hammer_vop_write(struct vop_write_args *);
static int hammer_vop_access(struct vop_access_args *);
static int hammer_vop_advlock(struct vop_advlock_args *);
static int hammer_vop_close(struct vop_close_args *);
static int hammer_vop_ncreate(struct vop_ncreate_args *);
static int hammer_vop_getattr(struct vop_getattr_args *);
static int hammer_vop_nresolve(struct vop_nresolve_args *);
static int hammer_vop_nlookupdotdot(struct vop_nlookupdotdot_args *);
static int hammer_vop_nlink(struct vop_nlink_args *);
static int hammer_vop_nmkdir(struct vop_nmkdir_args *);
static int hammer_vop_nmknod(struct vop_nmknod_args *);
static int hammer_vop_open(struct vop_open_args *);
static int hammer_vop_pathconf(struct vop_pathconf_args *);
static int hammer_vop_print(struct vop_print_args *);
static int hammer_vop_readdir(struct vop_readdir_args *);
static int hammer_vop_readlink(struct vop_readlink_args *);
static int hammer_vop_nremove(struct vop_nremove_args *);
static int hammer_vop_nrename(struct vop_nrename_args *);
static int hammer_vop_nrmdir(struct vop_nrmdir_args *);
static int hammer_vop_setattr(struct vop_setattr_args *);
static int hammer_vop_strategy(struct vop_strategy_args *);
static int hammer_vop_bmap(struct vop_bmap_args *ap);
static int hammer_vop_nsymlink(struct vop_nsymlink_args *);
static int hammer_vop_nwhiteout(struct vop_nwhiteout_args *);
static int hammer_vop_ioctl(struct vop_ioctl_args *);
static int hammer_vop_mountctl(struct vop_mountctl_args *);

static int hammer_vop_fifoclose (struct vop_close_args *);
static int hammer_vop_fiforead (struct vop_read_args *);
static int hammer_vop_fifowrite (struct vop_write_args *);

static int hammer_vop_specclose (struct vop_close_args *);
static int hammer_vop_specread (struct vop_read_args *);
static int hammer_vop_specwrite (struct vop_write_args *);
struct vop_ops hammer_vnode_vops = {
	.vop_default =		vop_defaultop,
	.vop_fsync =		hammer_vop_fsync,
	.vop_getpages =		vop_stdgetpages,
	.vop_putpages =		vop_stdputpages,
	.vop_read =		hammer_vop_read,
	.vop_write =		hammer_vop_write,
	.vop_access =		hammer_vop_access,
	.vop_advlock =		hammer_vop_advlock,
	.vop_close =		hammer_vop_close,
	.vop_ncreate =		hammer_vop_ncreate,
	.vop_getattr =		hammer_vop_getattr,
	.vop_inactive =		hammer_vop_inactive,
	.vop_reclaim =		hammer_vop_reclaim,
	.vop_nresolve =		hammer_vop_nresolve,
	.vop_nlookupdotdot =	hammer_vop_nlookupdotdot,
	.vop_nlink =		hammer_vop_nlink,
	.vop_nmkdir =		hammer_vop_nmkdir,
	.vop_nmknod =		hammer_vop_nmknod,
	.vop_open =		hammer_vop_open,
	.vop_pathconf =		hammer_vop_pathconf,
	.vop_print =		hammer_vop_print,
	.vop_readdir =		hammer_vop_readdir,
	.vop_readlink =		hammer_vop_readlink,
	.vop_nremove =		hammer_vop_nremove,
	.vop_nrename =		hammer_vop_nrename,
	.vop_nrmdir =		hammer_vop_nrmdir,
	.vop_setattr =		hammer_vop_setattr,
	.vop_bmap =		hammer_vop_bmap,
	.vop_strategy =		hammer_vop_strategy,
	.vop_nsymlink =		hammer_vop_nsymlink,
	.vop_nwhiteout =	hammer_vop_nwhiteout,
	.vop_ioctl =		hammer_vop_ioctl,
	.vop_mountctl =		hammer_vop_mountctl
};

struct vop_ops hammer_spec_vops = {
	.vop_default =		spec_vnoperate,
	.vop_fsync =		hammer_vop_fsync,
	.vop_read =		hammer_vop_specread,
	.vop_write =		hammer_vop_specwrite,
	.vop_access =		hammer_vop_access,
	.vop_close =		hammer_vop_specclose,
	.vop_getattr =		hammer_vop_getattr,
	.vop_inactive =		hammer_vop_inactive,
	.vop_reclaim =		hammer_vop_reclaim,
	.vop_setattr =		hammer_vop_setattr
};

struct vop_ops hammer_fifo_vops = {
	.vop_default =		fifo_vnoperate,
	.vop_fsync =		hammer_vop_fsync,
	.vop_read =		hammer_vop_fiforead,
	.vop_write =		hammer_vop_fifowrite,
	.vop_access =		hammer_vop_access,
	.vop_close =		hammer_vop_fifoclose,
	.vop_getattr =		hammer_vop_getattr,
	.vop_inactive =		hammer_vop_inactive,
	.vop_reclaim =		hammer_vop_reclaim,
	.vop_setattr =		hammer_vop_setattr
};

#ifdef DEBUG_TRUNCATE
struct hammer_inode *HammerTruncIp;
#endif

static int hammer_dounlink(hammer_transaction_t trans, struct nchandle *nch,
			   struct vnode *dvp, struct ucred *cred, int flags);
static int hammer_vop_strategy_read(struct vop_strategy_args *ap);
static int hammer_vop_strategy_write(struct vop_strategy_args *ap);
static void hammer_cleanup_write_io(hammer_inode_t ip);
static void hammer_update_rsv_databufs(hammer_inode_t ip);

static
int
hammer_vop_vnoperate(struct vop_generic_args *ap)
{
	return (VOCALL(&hammer_vnode_vops, ap));
}

/*
 * hammer_vop_fsync { vp, waitfor }
 *
 * fsync() an inode to disk and wait for it to be completely committed
 * such that the information would not be undone if a crash occurred
 * afterwards.
 */
static
int
hammer_vop_fsync(struct vop_fsync_args *ap)
{
	hammer_inode_t ip = VTOI(ap->a_vp);

	vfsync(ap->a_vp, ap->a_waitfor, 1, NULL, NULL);
	hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
	if (ap->a_waitfor == MNT_WAIT)
		hammer_wait_inode(ip);
	return (ip->error);
}
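
/*
 * Editor's sketch (not in the original source): the fsync path above is
 * two-stage.  vfsync() pushes dirty buffer cache buffers, then
 * hammer_flush_inode(HAMMER_FLUSH_SIGNAL) queues the inode to the backend
 * flusher.  Only an MNT_WAIT caller (e.g. fsync(2)) blocks in
 * hammer_wait_inode() until the flush group commits; an MNT_NOWAIT caller
 * returns immediately with the flush still pending.
 */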

/*
 * hammer_vop_read { vp, uio, ioflag, cred }
 */
static
int
hammer_vop_read(struct vop_read_args *ap)
{
	struct hammer_transaction trans;
	struct hammer_inode *ip;
	struct buf *bp;
	struct uio *uio;
	int offset;
	int error;
	int n;
	int seqcount;
	int ioseqcount;
	int blksize;

	if (ap->a_vp->v_type != VREG)
		return (EINVAL);
	ip = VTOI(ap->a_vp);
	error = 0;
	uio = ap->a_uio;

	/*
	 * Allow the UIO's size to override the sequential heuristic.
	 */
	blksize = hammer_blocksize(uio->uio_offset);
	seqcount = (uio->uio_resid + (blksize - 1)) / blksize;
	ioseqcount = ap->a_ioflag >> 16;
	if (seqcount < ioseqcount)
		seqcount = ioseqcount;
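
	/*
	 * Worked example of the heuristic above (editor's note, not in the
	 * original): with a 16KB block size a 64KB read computes seqcount =
	 * (65536 + 16383) / 16384 = 4, while the upper 16 bits of a_ioflag
	 * carry the kernel's own sequential-access estimate for the file.
	 * The larger of the two wins, so a single large read is treated as
	 * sequential even on the very first access.
	 */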

	hammer_start_transaction(&trans, ip->hmp);

	/*
	 * Access the data typically in HAMMER_BUFSIZE blocks via the
	 * buffer cache, but HAMMER may use a variable block size based
	 * on the offset.
	 */
	while (uio->uio_resid > 0 && uio->uio_offset < ip->ino_data.size) {
		int64_t base_offset;
		int64_t file_limit;

		blksize = hammer_blocksize(uio->uio_offset);
		offset = (int)uio->uio_offset & (blksize - 1);
		base_offset = uio->uio_offset - offset;

		if (hammer_debug_cluster_enable) {
			/*
			 * Use file_limit to prevent cluster_read() from
			 * creating buffers of the wrong block size past
			 * the demarc.
			 */
			file_limit = ip->ino_data.size;
			if (base_offset < HAMMER_XDEMARC &&
			    file_limit > HAMMER_XDEMARC) {
				file_limit = HAMMER_XDEMARC;
			}
			error = cluster_read(ap->a_vp,
					     file_limit, base_offset,
					     blksize, MAXPHYS,
					     seqcount, &bp);
		} else {
			error = bread(ap->a_vp, base_offset, blksize, &bp);
		}
		if (error) {
			kprintf("error %d\n", error);
			brelse(bp);
			break;
		}

		/* bp->b_flags |= B_CLUSTEROK; temporarily disabled */
		n = blksize - offset;
		if (n > uio->uio_resid)
			n = uio->uio_resid;
		if (n > ip->ino_data.size - uio->uio_offset)
			n = (int)(ip->ino_data.size - uio->uio_offset);
		error = uiomove((char *)bp->b_data + offset, n, uio);

		/* data has a lower priority than meta-data */
		bp->b_flags |= B_AGE;
		bqrelse(bp);
		if (error)
			break;
	}
	if ((ip->flags & HAMMER_INODE_RO) == 0 &&
	    (ip->hmp->mp->mnt_flag & MNT_NOATIME) == 0) {
		ip->ino_data.atime = trans.time;
		hammer_modify_inode(ip, HAMMER_INODE_ATIME);
	}
	hammer_done_transaction(&trans);
	return (error);
}

/*
 * hammer_vop_write { vp, uio, ioflag, cred }
 */
static
int
hammer_vop_write(struct vop_write_args *ap)
{
	struct hammer_transaction trans;
	struct hammer_inode *ip;
	hammer_mount_t hmp;
	struct uio *uio;
	struct buf *bp;
	int64_t base_offset;
	int offset;
	int error;
	int n;
	int flags;
	int count;
	int seqcount;
	int blksize;
	int blkmask;

	if (ap->a_vp->v_type != VREG)
		return (EINVAL);
	ip = VTOI(ap->a_vp);
	hmp = ip->hmp;
	uio = ap->a_uio;
	error = 0;
	count = 0;
	seqcount = ap->a_ioflag >> 16;

	if (ip->flags & HAMMER_INODE_RO)
		return (EROFS);

	/*
	 * Create a transaction to cover the operations we perform.
	 */
	hammer_start_transaction(&trans, hmp);

	/*
	 * Check append mode.
	 */
	if (ap->a_ioflag & IO_APPEND)
		uio->uio_offset = ip->ino_data.size;

	/*
	 * Check for illegal write offsets.  Valid range is 0...2^63-1.
	 *
	 * NOTE: the base_off assignment is required to work around what
	 * I consider to be a GCC-4 optimization bug.
	 */
	if (uio->uio_offset < 0) {
		hammer_done_transaction(&trans);
		return (EFBIG);
	}
	base_offset = uio->uio_offset + uio->uio_resid; /* work around gcc-4 */
	if (uio->uio_resid > 0 && base_offset <= 0) {
		hammer_done_transaction(&trans);
		return (EFBIG);
	}

	/*
	 * Access the data typically in HAMMER_BUFSIZE blocks via the
	 * buffer cache, but HAMMER may use a variable block size based
	 * on the offset.
	 */
	while (uio->uio_resid > 0) {
		int fixsize = 0;

		if ((error = hammer_checkspace(hmp)) != 0)
			break;

		/*
		 * Do not allow HAMMER to blow out the buffer cache.  Very
		 * large UIOs can lock out other processes due to
		 * bwillwrite() mechanics.
		 *
		 * Do not allow HAMMER to blow out system memory by
		 * accumulating too many records.  Records are so well
		 * decoupled from the buffer cache that it is possible
		 * for userland to push data out to the media via
		 * direct-write, but build up the records queued to the
		 * backend faster than the backend can flush them out.
		 * HAMMER has hit its write limit but the frontend has
		 * no pushback to slow it down.
		 *
		 * Always check at the beginning so separate writes are
		 * not able to bypass this code (count++).
		 *
		 * WARNING: Cannot unlock vp when doing a NOCOPY write as
		 * part of a putpages operation.  Doing so could cause us
		 * to deadlock against the VM system when we try to re-lock.
		 */
		if ((count++ & 15) == 0 || count > 64) {
			int delta;

			if (uio->uio_segflg != UIO_NOCOPY) {
				vn_unlock(ap->a_vp);
				if ((ap->a_ioflag & IO_NOBWILL) == 0)
					bwillwrite();
			}

			/*
			 * Pending record flush check.
			 */
			if (hmp->rsv_recs > hammer_limit_recs / 2) {
				/*
				 * Get the inode on the flush list
				 */
				if (ip->rsv_recs >= 64)
					hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
				else if (ip->rsv_recs >= 16)
					hammer_flush_inode(ip, 0);

				/*
				 * Keep the flusher going if the system keeps
				 * queueing records.
				 */
				delta = hmp->count_newrecords -
					hmp->last_newrecords;
				if (delta < 0 || delta > hammer_limit_recs / 2) {
					hmp->last_newrecords = hmp->count_newrecords;
					hammer_sync_hmp(hmp, MNT_NOWAIT);
				}

				/*
				 * If we have gotten behind start slowing
				 * down the writers.
				 */
				delta = (hmp->rsv_recs - hammer_limit_recs) *
					hz / hammer_limit_recs;
				if (delta > 0)
					tsleep(&trans, 0, "hmrslo", delta);
			}

			if (uio->uio_segflg != UIO_NOCOPY)
				vn_lock(ap->a_vp, LK_EXCLUSIVE|LK_RETRY);
		}
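
		/*
		 * Illustrative numbers for the slow-down above (editor's
		 * note, not in the original): with hammer_limit_recs =
		 * 100000, hz = 100 and 150000 reserved records, delta =
		 * (150000 - 100000) * 100 / 100000 = 50 ticks, i.e. the
		 * writer naps for half a second per check - a penalty
		 * proportional to how far the frontend has outrun the
		 * backend flusher.
		 */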

		/*
		 * Calculate the blocksize at the current offset and figure
		 * out how much we can actually write.
		 */
		blksize = hammer_blocksize(uio->uio_offset);
		blkmask = blksize - 1;
		offset = (int)uio->uio_offset & blkmask;
		base_offset = uio->uio_offset & ~(int64_t)blkmask;
		n = blksize - offset;
		if (n > uio->uio_resid)
			n = uio->uio_resid;
		if (uio->uio_offset + n > ip->ino_data.size) {
			vnode_pager_setsize(ap->a_vp, uio->uio_offset + n);
			fixsize = 1;
		}

		if (uio->uio_segflg == UIO_NOCOPY) {
			/*
			 * Issuing a write with the same data backing the
			 * buffer.  Instantiate the buffer to collect the
			 * backing vm pages, then read-in any missing bits.
			 *
			 * This case is used by vop_stdputpages().
			 */
			bp = getblk(ap->a_vp, base_offset,
				    blksize, GETBLK_BHEAVY, 0);
			if ((bp->b_flags & B_CACHE) == 0) {
				bqrelse(bp);
				error = bread(ap->a_vp, base_offset,
					      blksize, &bp);
			}
		} else if (offset == 0 && uio->uio_resid >= blksize) {
			/*
			 * Even though we are entirely overwriting the buffer
			 * we may still have to zero it out to avoid a
			 * mmap/write visibility issue.
			 */
			bp = getblk(ap->a_vp, base_offset, blksize, GETBLK_BHEAVY, 0);
			if ((bp->b_flags & B_CACHE) == 0)
				vfs_bio_clrbuf(bp);
		} else if (base_offset >= ip->ino_data.size) {
			/*
			 * If the base offset of the buffer is beyond the
			 * file EOF, we don't have to issue a read.
			 */
			bp = getblk(ap->a_vp, base_offset,
				    blksize, GETBLK_BHEAVY, 0);
			vfs_bio_clrbuf(bp);
		} else {
			/*
			 * Partial overwrite, read in any missing bits then
			 * replace the portion being written.
			 */
			error = bread(ap->a_vp, base_offset, blksize, &bp);
			if (error == 0)
				bheavy(bp);
		}
		if (error == 0) {
			error = uiomove((char *)bp->b_data + offset,
					n, uio);
		}

		/*
		 * If we screwed up we have to undo any VM size changes we
		 * made.
		 */
		if (error) {
			brelse(bp);
			if (fixsize) {
				vtruncbuf(ap->a_vp, ip->ino_data.size,
					  hammer_blocksize(ip->ino_data.size));
			}
			break;
		}

		/* bp->b_flags |= B_CLUSTEROK; temporarily disabled */
		if (ip->ino_data.size < uio->uio_offset) {
			ip->ino_data.size = uio->uio_offset;
			flags = HAMMER_INODE_DDIRTY;
			vnode_pager_setsize(ap->a_vp, ip->ino_data.size);
		} else {
			flags = 0;
		}
		ip->ino_data.mtime = trans.time;
		flags |= HAMMER_INODE_MTIME | HAMMER_INODE_BUFS;
		hammer_modify_inode(ip, flags);

		/*
		 * Try to keep track of cached dirty data.
		 */
		if ((bp->b_flags & B_DIRTY) == 0) {
			++ip->rsv_databufs;
			++hmp->rsv_databufs;
		}

		/*
		 * Final buffer disposition.
		 *
		 * If write_mode is non-zero we call bawrite()
		 * unconditionally.  Otherwise we only use bawrite()
		 * if the writes are clearly sequential.
		 */
		bp->b_flags |= B_AGE;
		if (ap->a_ioflag & IO_SYNC) {
			bwrite(bp);
		} else if (ap->a_ioflag & IO_DIRECT) {
			bawrite(bp);
#if 0
		else if (hammer_write_mode &&
			 ((int)uio->uio_offset & blkmask) == 0) {
			bp->b_flags |= B_CLUSTEROK;
			cluster_write(bp, ip->ino_data.size, XXX seqcount);
		}
#endif
		} else if ((ap->a_ioflag >> 16) == IO_SEQMAX &&
			   ((int)uio->uio_offset & blkmask) == 0) {
			/*
			 * If seqcount indicates sequential operation and
			 * we just finished filling a buffer, push it out
			 * now to prevent the buffer cache from becoming
			 * too full, which would trigger non-optimal
			 * flushes.
			 */
			bawrite(bp);
		} else {
			bdwrite(bp);
		}
	}
	hammer_done_transaction(&trans);
	return (error);
}
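
/*
 * Summary of the buffer disposition policy above (editor's note, not in
 * the original source):
 *
 *	IO_SYNC				bwrite()  - synchronous write
 *	IO_DIRECT			bawrite() - async, started immediately
 *	IO_SEQMAX hint + block filled	bawrite() - keep the cache drained
 *	everything else			bdwrite() - delayed write
 */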

/*
 * hammer_vop_access { vp, mode, cred }
 */
static
int
hammer_vop_access(struct vop_access_args *ap)
{
	struct hammer_inode *ip = VTOI(ap->a_vp);
	uid_t uid;
	gid_t gid;
	int error;

	uid = hammer_to_unix_xid(&ip->ino_data.uid);
	gid = hammer_to_unix_xid(&ip->ino_data.gid);

	error = vop_helper_access(ap, uid, gid, ip->ino_data.mode,
				  ip->ino_data.uflags);
	return (error);
}

/*
 * hammer_vop_advlock { vp, id, op, fl, flags }
 */
static
int
hammer_vop_advlock(struct vop_advlock_args *ap)
{
	hammer_inode_t ip = VTOI(ap->a_vp);

	return (lf_advlock(ap, &ip->advlock, ip->ino_data.size));
}

/*
 * hammer_vop_close { vp, fflag }
 */
static
int
hammer_vop_close(struct vop_close_args *ap)
{
	hammer_inode_t ip = VTOI(ap->a_vp);

	if ((ip->flags | ip->sync_flags) & HAMMER_INODE_MODMASK)
		hammer_inode_waitreclaims(ip->hmp);
	return (vop_stdclose(ap));
}

/*
 * hammer_vop_ncreate { nch, dvp, vpp, cred, vap }
 *
 * The operating system has already ensured that the directory entry
 * does not exist and done all appropriate namespace locking.
 */
static
int
hammer_vop_ncreate(struct vop_ncreate_args *ap)
{
	struct hammer_transaction trans;
	struct hammer_inode *dip;
	struct hammer_inode *nip;
	struct nchandle *nch;
	int error;

	nch = ap->a_nch;
	dip = VTOI(ap->a_dvp);

	if (dip->flags & HAMMER_INODE_RO)
		return (EROFS);
	if ((error = hammer_checkspace(dip->hmp)) != 0)
		return (error);

	/*
	 * Create a transaction to cover the operations we perform.
	 */
	hammer_start_transaction(&trans, dip->hmp);

	/*
	 * Create a new filesystem object of the requested type.  The
	 * returned inode will be referenced and shared-locked to prevent
	 * it from being moved to the flusher.
	 */
	error = hammer_create_inode(&trans, ap->a_vap, ap->a_cred,
				    dip, 0, &nip);
	if (error) {
		hkprintf("hammer_create_inode error %d\n", error);
		hammer_done_transaction(&trans);
		*ap->a_vpp = NULL;
		return (error);
	}

	/*
	 * Add the new filesystem object to the directory.  This will also
	 * bump the inode's link count.
	 */
	error = hammer_ip_add_directory(&trans, dip,
					nch->ncp->nc_name, nch->ncp->nc_nlen,
					nip);
	if (error)
		hkprintf("hammer_ip_add_directory error %d\n", error);

	/*
	 * Finish up.
	 */
	if (error) {
		hammer_rel_inode(nip, 0);
		hammer_done_transaction(&trans);
		*ap->a_vpp = NULL;
	} else {
		error = hammer_get_vnode(nip, ap->a_vpp);
		hammer_done_transaction(&trans);
		hammer_rel_inode(nip, 0);
		if (error == 0) {
			cache_setunresolved(ap->a_nch);
			cache_setvp(ap->a_nch, *ap->a_vpp);
		}
	}
	return (error);
}
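
/*
 * Editor's note (not in the original): ncreate, nmkdir, nmknod and
 * nsymlink below all follow the same pattern - create the inode, add the
 * directory entry (which bumps nlinks), then resolve a vnode and prime
 * the namecache.  The inode reference is dropped in every path; only the
 * vnode reference is handed back to the caller via *ap->a_vpp.
 */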

/*
 * hammer_vop_getattr { vp, vap }
 *
 * Retrieve an inode's attribute information.  When accessing inodes
 * historically we fake the atime field to ensure consistent results.
 * The atime field is stored in the B-Tree element and allowed to be
 * updated without cycling the element.
 */
static
int
hammer_vop_getattr(struct vop_getattr_args *ap)
{
	struct hammer_inode *ip = VTOI(ap->a_vp);
	struct vattr *vap = ap->a_vap;

	vap->va_fsid = ip->hmp->fsid_udev;
	vap->va_fileid = ip->ino_leaf.base.obj_id;
	vap->va_mode = ip->ino_data.mode;
	vap->va_nlink = ip->ino_data.nlinks;
	vap->va_uid = hammer_to_unix_xid(&ip->ino_data.uid);
	vap->va_gid = hammer_to_unix_xid(&ip->ino_data.gid);
	vap->va_rmajor = 0;
	vap->va_rminor = 0;
	vap->va_size = ip->ino_data.size;

	/*
	 * We must provide a consistent atime and mtime for snapshots
	 * so people can do a 'tar cf - ... | md5' on them and get
	 * consistent results.
	 */
	if (ip->flags & HAMMER_INODE_RO) {
		hammer_time_to_timespec(ip->ino_data.ctime, &vap->va_atime);
		hammer_time_to_timespec(ip->ino_data.ctime, &vap->va_mtime);
	} else {
		hammer_time_to_timespec(ip->ino_data.atime, &vap->va_atime);
		hammer_time_to_timespec(ip->ino_data.mtime, &vap->va_mtime);
	}
	hammer_time_to_timespec(ip->ino_data.ctime, &vap->va_ctime);
	vap->va_flags = ip->ino_data.uflags;
	vap->va_gen = 1;	/* hammer inums are unique for all time */
	vap->va_blocksize = HAMMER_BUFSIZE;
	if (ip->ino_data.size >= HAMMER_XDEMARC) {
		vap->va_bytes = (ip->ino_data.size + HAMMER_XBUFMASK64) &
				~HAMMER_XBUFMASK64;
	} else if (ip->ino_data.size > HAMMER_BUFSIZE / 2) {
		vap->va_bytes = (ip->ino_data.size + HAMMER_BUFMASK64) &
				~HAMMER_BUFMASK64;
	} else {
		vap->va_bytes = (ip->ino_data.size + 15) & ~15;
	}
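
	/*
	 * Worked example of the rounding above (editor's note; assumes the
	 * usual 16K HAMMER_BUFSIZE and 64K extended buffers): a 100 byte
	 * file reports va_bytes = (100 + 15) & ~15 = 112; a 20000 byte file
	 * rounds up to 32768 (the next 16K boundary); a file at or past
	 * HAMMER_XDEMARC rounds up to the next 64K boundary.
	 */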

	vap->va_type = hammer_get_vnode_type(ip->ino_data.obj_type);
	vap->va_filerev = 0;	/* XXX */
	/* mtime uniquely identifies any adjustments made to the file XXX */
	vap->va_fsmid = ip->ino_data.mtime;
	vap->va_uid_uuid = ip->ino_data.uid;
	vap->va_gid_uuid = ip->ino_data.gid;
	vap->va_fsid_uuid = ip->hmp->fsid;
	vap->va_vaflags = VA_UID_UUID_VALID | VA_GID_UUID_VALID |
			  VA_FSID_UUID_VALID;

	switch (ip->ino_data.obj_type) {
	case HAMMER_OBJTYPE_CDEV:
	case HAMMER_OBJTYPE_BDEV:
		vap->va_rmajor = ip->ino_data.rmajor;
		vap->va_rminor = ip->ino_data.rminor;
		break;
	default:
		break;
	}
	return (0);
}

/*
 * hammer_vop_nresolve { nch, dvp, cred }
 *
 * Locate the requested directory entry.
 */
static
int
hammer_vop_nresolve(struct vop_nresolve_args *ap)
{
	struct hammer_transaction trans;
	struct namecache *ncp;
	struct hammer_inode *dip;
	struct hammer_inode *ip;
	struct hammer_cursor cursor;
	struct vnode *vp;
	hammer_tid_t asof;
	int64_t namekey;
	int64_t obj_id;
	int nlen;
	int flags;
	int error;
	int i;
	u_int32_t localization;

	/*
	 * Misc initialization, plus handle as-of name extensions.  Look for
	 * the '@@' extension.  Note that as-of files and directories cannot
	 * be modified.
	 */
	dip = VTOI(ap->a_dvp);
	ncp = ap->a_nch->ncp;
	asof = dip->obj_asof;
	nlen = ncp->nc_nlen;
	flags = dip->flags;

	hammer_simple_transaction(&trans, dip->hmp);

	for (i = 0; i < nlen; ++i) {
		if (ncp->nc_name[i] == '@' && ncp->nc_name[i+1] == '@') {
			asof = hammer_str_to_tid(ncp->nc_name + i + 2);
			flags |= HAMMER_INODE_RO;
			break;
		}
	}
	nlen = i;

	/*
	 * If there is no path component the time extension is relative to
	 * the parent directory.
	 */
	if (nlen == 0) {
		ip = hammer_get_inode(&trans, dip, dip->obj_id,
				      asof, dip->obj_localization,
				      flags, &error);
		if (error == 0) {
			error = hammer_get_vnode(ip, &vp);
			hammer_rel_inode(ip, 0);
		} else {
			vp = NULL;
		}
		if (error == 0) {
			vn_unlock(vp);
			cache_setvp(ap->a_nch, vp);
			vrele(vp);
		}
		goto done;
	}

	/*
	 * Calculate the namekey and setup the key range for the scan.  This
	 * works kinda like a chained hash table where the lower 32 bits
	 * of the namekey synthesize the chain.
	 *
	 * The key range is inclusive of both key_beg and key_end.
	 */
	namekey = hammer_directory_namekey(ncp->nc_name, nlen);

	error = hammer_init_cursor(&trans, &cursor, &dip->cache[1], dip);
	cursor.key_beg.localization = dip->obj_localization +
				      HAMMER_LOCALIZE_MISC;
	cursor.key_beg.obj_id = dip->obj_id;
	cursor.key_beg.key = namekey;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
	cursor.key_beg.obj_type = 0;

	cursor.key_end = cursor.key_beg;
	cursor.key_end.key |= 0xFFFFFFFFULL;
	cursor.asof = asof;
	cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;
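
	/*
	 * Sketch of the hash-chain layout (editor's note, not in the
	 * original): the namekey places a hash of the name in the upper
	 * bits of the 64 bit key, so every entry whose name hashes
	 * identically falls somewhere in [namekey, namekey | 0xFFFFFFFF].
	 * The scan below walks that chain and disambiguates collisions
	 * with an explicit bcmp() of the stored name.
	 */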

	/*
	 * Scan all matching records (the chain), locate the one matching
	 * the requested path component.
	 *
	 * The hammer_ip_*() functions merge in-memory records with on-disk
	 * records for the purposes of the search.
	 */
	obj_id = 0;
	localization = HAMMER_DEF_LOCALIZATION;

	if (error == 0) {
		error = hammer_ip_first(&cursor);
		while (error == 0) {
			error = hammer_ip_resolve_data(&cursor);
			if (error)
				break;
			if (nlen == cursor.leaf->data_len - HAMMER_ENTRY_NAME_OFF &&
			    bcmp(ncp->nc_name, cursor.data->entry.name, nlen) == 0) {
				obj_id = cursor.data->entry.obj_id;
				localization = cursor.data->entry.localization;
				break;
			}
			error = hammer_ip_next(&cursor);
		}
	}
	hammer_done_cursor(&cursor);
	if (error == 0) {
		ip = hammer_get_inode(&trans, dip, obj_id,
				      asof, localization,
				      flags, &error);
		if (error == 0) {
			error = hammer_get_vnode(ip, &vp);
			hammer_rel_inode(ip, 0);
		} else {
			vp = NULL;
		}
		if (error == 0) {
			vn_unlock(vp);
			cache_setvp(ap->a_nch, vp);
			vrele(vp);
		}
	} else if (error == ENOENT) {
		cache_setvp(ap->a_nch, NULL);
	}
done:
	hammer_done_transaction(&trans);
	return (error);
}

/*
 * hammer_vop_nlookupdotdot { dvp, vpp, cred }
 *
 * Locate the parent directory of a directory vnode.
 *
 * dvp is referenced but not locked.  *vpp must be returned referenced and
 * locked.  A parent_obj_id of 0 does not necessarily indicate that we are
 * at the root, instead it could indicate that the directory we were in was
 * removed.
 *
 * NOTE: as-of sequences are not linked into the directory structure.  If
 * we are at the root with a different asof than the mount point, reload
 * the same directory with the mount point's asof.  I'm not sure what this
 * will do to NFS.  We encode ASOF stamps in NFS file handles so it might not
 * get confused, but it hasn't been tested.
 */
static
int
hammer_vop_nlookupdotdot(struct vop_nlookupdotdot_args *ap)
{
	struct hammer_transaction trans;
	struct hammer_inode *dip;
	struct hammer_inode *ip;
	int64_t parent_obj_id;
	u_int32_t parent_obj_localization;
	hammer_tid_t asof;
	int error;

	dip = VTOI(ap->a_dvp);
	asof = dip->obj_asof;

	/*
	 * Who is our parent?  This could be the root of a pseudo-filesystem
	 * whose parent is in another localization domain.
	 */
	parent_obj_id = dip->ino_data.parent_obj_id;
	if (dip->obj_id == HAMMER_OBJID_ROOT)
		parent_obj_localization = dip->ino_data.ext.obj.parent_obj_localization;
	else
		parent_obj_localization = dip->obj_localization;

	if (parent_obj_id == 0) {
		if (dip->obj_id == HAMMER_OBJID_ROOT &&
		    asof != dip->hmp->asof) {
			parent_obj_id = dip->obj_id;
			asof = dip->hmp->asof;
			*ap->a_fakename = kmalloc(19, M_TEMP, M_WAITOK);
			ksnprintf(*ap->a_fakename, 19, "0x%016llx",
				  dip->obj_asof);
		} else {
			*ap->a_vpp = NULL;
			return ENOENT;
		}
	}

	hammer_simple_transaction(&trans, dip->hmp);

	ip = hammer_get_inode(&trans, dip, parent_obj_id,
			      asof, parent_obj_localization,
			      dip->flags, &error);
	if (ip) {
		error = hammer_get_vnode(ip, ap->a_vpp);
		hammer_rel_inode(ip, 0);
	} else {
		*ap->a_vpp = NULL;
	}
	hammer_done_transaction(&trans);
	return (error);
}

/*
 * hammer_vop_nlink { nch, dvp, vp, cred }
 */
static
int
hammer_vop_nlink(struct vop_nlink_args *ap)
{
	struct hammer_transaction trans;
	struct hammer_inode *dip;
	struct hammer_inode *ip;
	struct nchandle *nch;
	int error;

	nch = ap->a_nch;
	dip = VTOI(ap->a_dvp);
	ip = VTOI(ap->a_vp);

	if (dip->flags & HAMMER_INODE_RO)
		return (EROFS);
	if (ip->flags & HAMMER_INODE_RO)
		return (EROFS);
	if ((error = hammer_checkspace(dip->hmp)) != 0)
		return (error);

	/*
	 * Create a transaction to cover the operations we perform.
	 */
	hammer_start_transaction(&trans, dip->hmp);

	/*
	 * Add the filesystem object to the directory.  Note that neither
	 * dip nor ip are referenced or locked, but their vnodes are
	 * referenced.  This function will bump the inode's link count.
	 */
	error = hammer_ip_add_directory(&trans, dip,
					nch->ncp->nc_name, nch->ncp->nc_nlen,
					ip);

	/*
	 * Finish up.
	 */
	if (error == 0) {
		cache_setunresolved(nch);
		cache_setvp(nch, ap->a_vp);
	}
	hammer_done_transaction(&trans);
	return (error);
}

/*
 * hammer_vop_nmkdir { nch, dvp, vpp, cred, vap }
 *
 * The operating system has already ensured that the directory entry
 * does not exist and done all appropriate namespace locking.
 */
static
int
hammer_vop_nmkdir(struct vop_nmkdir_args *ap)
{
	struct hammer_transaction trans;
	struct hammer_inode *dip;
	struct hammer_inode *nip;
	struct nchandle *nch;
	int error;

	nch = ap->a_nch;
	dip = VTOI(ap->a_dvp);

	if (dip->flags & HAMMER_INODE_RO)
		return (EROFS);
	if ((error = hammer_checkspace(dip->hmp)) != 0)
		return (error);

	/*
	 * Create a transaction to cover the operations we perform.
	 */
	hammer_start_transaction(&trans, dip->hmp);

	/*
	 * Create a new filesystem object of the requested type.  The
	 * returned inode will be referenced but not locked.
	 */
	error = hammer_create_inode(&trans, ap->a_vap, ap->a_cred,
				    dip, 0, &nip);
	if (error) {
		hkprintf("hammer_mkdir error %d\n", error);
		hammer_done_transaction(&trans);
		*ap->a_vpp = NULL;
		return (error);
	}

	/*
	 * Add the new filesystem object to the directory.  This will also
	 * bump the inode's link count.
	 */
	error = hammer_ip_add_directory(&trans, dip,
					nch->ncp->nc_name, nch->ncp->nc_nlen,
					nip);
	if (error)
		hkprintf("hammer_mkdir (add) error %d\n", error);

	/*
	 * Finish up.
	 */
	if (error) {
		hammer_rel_inode(nip, 0);
		*ap->a_vpp = NULL;
	} else {
		error = hammer_get_vnode(nip, ap->a_vpp);
		hammer_rel_inode(nip, 0);
		if (error == 0) {
			cache_setunresolved(ap->a_nch);
			cache_setvp(ap->a_nch, *ap->a_vpp);
		}
	}
	hammer_done_transaction(&trans);
	return (error);
}

/*
 * hammer_vop_nmknod { nch, dvp, vpp, cred, vap }
 *
 * The operating system has already ensured that the directory entry
 * does not exist and done all appropriate namespace locking.
 */
static
int
hammer_vop_nmknod(struct vop_nmknod_args *ap)
{
	struct hammer_transaction trans;
	struct hammer_inode *dip;
	struct hammer_inode *nip;
	struct nchandle *nch;
	int pseudofs;
	int error;

	nch = ap->a_nch;
	dip = VTOI(ap->a_dvp);

	if (dip->flags & HAMMER_INODE_RO)
		return (EROFS);
	if ((error = hammer_checkspace(dip->hmp)) != 0)
		return (error);

	/*
	 * Create a transaction to cover the operations we perform.
	 */
	hammer_start_transaction(&trans, dip->hmp);

	/*
	 * Create a new filesystem object of the requested type.  The
	 * returned inode will be referenced but not locked.
	 *
	 * If mknod specifies a directory a pseudo-fs is created.
	 */
	pseudofs = (ap->a_vap->va_type == VDIR);
	error = hammer_create_inode(&trans, ap->a_vap, ap->a_cred,
				    dip, pseudofs, &nip);
	if (error) {
		hammer_done_transaction(&trans);
		*ap->a_vpp = NULL;
		return (error);
	}

	/*
	 * Add the new filesystem object to the directory.  This will also
	 * bump the inode's link count.
	 */
	error = hammer_ip_add_directory(&trans, dip,
					nch->ncp->nc_name, nch->ncp->nc_nlen,
					nip);

	/*
	 * Finish up.
	 */
	if (error) {
		hammer_rel_inode(nip, 0);
		*ap->a_vpp = NULL;
	} else {
		error = hammer_get_vnode(nip, ap->a_vpp);
		hammer_rel_inode(nip, 0);
		if (error == 0) {
			cache_setunresolved(ap->a_nch);
			cache_setvp(ap->a_nch, *ap->a_vpp);
		}
	}
	hammer_done_transaction(&trans);
	return (error);
}

/*
 * hammer_vop_open { vp, mode, cred, fp }
 */
static
int
hammer_vop_open(struct vop_open_args *ap)
{
	hammer_inode_t ip;

	ip = VTOI(ap->a_vp);

	if ((ap->a_mode & FWRITE) && (ip->flags & HAMMER_INODE_RO))
		return (EROFS);
	return(vop_stdopen(ap));
}

/*
 * hammer_vop_pathconf { vp, name, retval }
 */
static
int
hammer_vop_pathconf(struct vop_pathconf_args *ap)
{
	return EOPNOTSUPP;
}

/*
 * hammer_vop_print { vp }
 */
static
int
hammer_vop_print(struct vop_print_args *ap)
{
	return EOPNOTSUPP;
}

/*
 * hammer_vop_readdir { vp, uio, cred, *eofflag, *ncookies, off_t **cookies }
 */
static
int
hammer_vop_readdir(struct vop_readdir_args *ap)
{
	struct hammer_transaction trans;
	struct hammer_cursor cursor;
	struct hammer_inode *ip;
	struct uio *uio;
	hammer_base_elm_t base;
	int error;
	int cookie_index;
	int ncookies;
	off_t *cookies;
	off_t saveoff;
	int r;

	ip = VTOI(ap->a_vp);
	uio = ap->a_uio;
	saveoff = uio->uio_offset;

	if (ap->a_ncookies) {
		ncookies = uio->uio_resid / 16 + 1;
		if (ncookies > 1024)
			ncookies = 1024;
		cookies = kmalloc(ncookies * sizeof(off_t), M_TEMP, M_WAITOK);
	} else {
		ncookies = -1;
		cookies = NULL;
	}
	cookie_index = 0;
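
	/*
	 * Example of the cookie sizing above (editor's note, not in the
	 * original): a dirent occupies at least 16 bytes in the output
	 * buffer, so uio_resid / 16 + 1 over-estimates how many entries can
	 * fit; a 4096 byte uio reserves 257 cookies.  The 1024 cap merely
	 * bounds the kmalloc for very large requests.
	 */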

	hammer_simple_transaction(&trans, ip->hmp);

	/*
	 * Handle artificial entries
	 */
	error = 0;
	if (saveoff == 0) {
		r = vop_write_dirent(&error, uio, ip->obj_id, DT_DIR, 1, ".");
		if (r)
			goto done;
		if (cookies)
			cookies[cookie_index] = saveoff;
		++saveoff;
		++cookie_index;
		if (cookie_index == ncookies)
			goto done;
	}
	if (saveoff == 1) {
		if (ip->ino_data.parent_obj_id) {
			r = vop_write_dirent(&error, uio,
					     ip->ino_data.parent_obj_id,
					     DT_DIR, 2, "..");
		} else {
			r = vop_write_dirent(&error, uio,
					     ip->obj_id, DT_DIR, 2, "..");
		}
		if (r)
			goto done;
		if (cookies)
			cookies[cookie_index] = saveoff;
		++saveoff;
		++cookie_index;
		if (cookie_index == ncookies)
			goto done;
	}

	/*
	 * Key range (begin and end inclusive) to scan.  Directory keys
	 * directly translate to a 64 bit 'seek' position.
	 */
	hammer_init_cursor(&trans, &cursor, &ip->cache[1], ip);
	cursor.key_beg.localization = ip->obj_localization +
				      HAMMER_LOCALIZE_MISC;
	cursor.key_beg.obj_id = ip->obj_id;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
	cursor.key_beg.obj_type = 0;
	cursor.key_beg.key = saveoff;

	cursor.key_end = cursor.key_beg;
	cursor.key_end.key = HAMMER_MAX_KEY;
	cursor.asof = ip->obj_asof;
	cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;

	error = hammer_ip_first(&cursor);

	while (error == 0) {
		error = hammer_ip_resolve_data(&cursor);
		if (error)
			break;
		base = &cursor.leaf->base;
		saveoff = base->key;
		KKASSERT(cursor.leaf->data_len > HAMMER_ENTRY_NAME_OFF);

		if (base->obj_id != ip->obj_id)
			panic("readdir: bad record at %p", cursor.node);

		r = vop_write_dirent(
			     &error, uio, cursor.data->entry.obj_id,
			     hammer_get_dtype(cursor.leaf->base.obj_type),
			     cursor.leaf->data_len - HAMMER_ENTRY_NAME_OFF,
			     (void *)cursor.data->entry.name);
		if (r)
			break;
		++saveoff;
		if (cookies)
			cookies[cookie_index] = base->key;
		++cookie_index;
		if (cookie_index == ncookies)
			break;
		error = hammer_ip_next(&cursor);
	}
	hammer_done_cursor(&cursor);

done:
	hammer_done_transaction(&trans);

	if (ap->a_eofflag)
		*ap->a_eofflag = (error == ENOENT);
	uio->uio_offset = saveoff;
	if (error && cookie_index == 0) {
		if (error == ENOENT)
			error = 0;
		if (cookies) {
			kfree(cookies, M_TEMP);
			*ap->a_ncookies = 0;
			*ap->a_cookies = NULL;
		}
	} else {
		if (error == ENOENT)
			error = 0;
		if (cookies) {
			*ap->a_ncookies = cookie_index;
			*ap->a_cookies = cookies;
		}
	}
	return (error);
}

/*
 * hammer_vop_readlink { vp, uio, cred }
 */
static
int
hammer_vop_readlink(struct vop_readlink_args *ap)
{
	struct hammer_transaction trans;
	struct hammer_cursor cursor;
	struct hammer_inode *ip;
	int error;

	ip = VTOI(ap->a_vp);

	/*
	 * Shortcut if the symlink data was stuffed into ino_data.
	 */
	if (ip->ino_data.size <= HAMMER_INODE_BASESYMLEN) {
		error = uiomove(ip->ino_data.ext.symlink,
				ip->ino_data.size, ap->a_uio);
		return (error);
	}

	/*
	 * Long version
	 */
	hammer_simple_transaction(&trans, ip->hmp);
	hammer_init_cursor(&trans, &cursor, &ip->cache[1], ip);

	/*
	 * Key range (begin and end inclusive) to scan.  Directory keys
	 * directly translate to a 64 bit 'seek' position.
	 */
	cursor.key_beg.localization = ip->obj_localization +
				      HAMMER_LOCALIZE_MISC;
	cursor.key_beg.obj_id = ip->obj_id;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_FIX;
	cursor.key_beg.obj_type = 0;
	cursor.key_beg.key = HAMMER_FIXKEY_SYMLINK;
	cursor.asof = ip->obj_asof;
	cursor.flags |= HAMMER_CURSOR_ASOF;

	error = hammer_ip_lookup(&cursor);
	if (error == 0) {
		error = hammer_ip_resolve_data(&cursor);
		if (error == 0) {
			KKASSERT(cursor.leaf->data_len >=
				 HAMMER_SYMLINK_NAME_OFF);
			error = uiomove(cursor.data->symlink.name,
					cursor.leaf->data_len -
						HAMMER_SYMLINK_NAME_OFF,
					ap->a_uio);
		}
	}
	hammer_done_cursor(&cursor);
	hammer_done_transaction(&trans);
	return (error);
}

/*
 * hammer_vop_nremove { nch, dvp, cred }
 */
static
int
hammer_vop_nremove(struct vop_nremove_args *ap)
{
	struct hammer_transaction trans;
	struct hammer_inode *dip;
	int error;

	dip = VTOI(ap->a_dvp);

	if (hammer_nohistory(dip) == 0 &&
	    (error = hammer_checkspace(dip->hmp)) != 0) {
		return (error);
	}

	hammer_start_transaction(&trans, dip->hmp);
	error = hammer_dounlink(&trans, ap->a_nch, ap->a_dvp, ap->a_cred, 0);
	hammer_done_transaction(&trans);

	return (error);
}

/*
 * hammer_vop_nrename { fnch, tnch, fdvp, tdvp, cred }
 */
static
int
hammer_vop_nrename(struct vop_nrename_args *ap)
{
	struct hammer_transaction trans;
	struct namecache *fncp;
	struct namecache *tncp;
	struct hammer_inode *fdip;
	struct hammer_inode *tdip;
	struct hammer_inode *ip;
	struct hammer_cursor cursor;
	int64_t namekey;
	int nlen;
	int error;

	fdip = VTOI(ap->a_fdvp);
	tdip = VTOI(ap->a_tdvp);
	fncp = ap->a_fnch->ncp;
	tncp = ap->a_tnch->ncp;
	ip = VTOI(fncp->nc_vp);
	KKASSERT(ip != NULL);

	if (fdip->flags & HAMMER_INODE_RO)
		return (EROFS);
	if (tdip->flags & HAMMER_INODE_RO)
		return (EROFS);
	if (ip->flags & HAMMER_INODE_RO)
		return (EROFS);
	if ((error = hammer_checkspace(fdip->hmp)) != 0)
		return (error);

	hammer_start_transaction(&trans, fdip->hmp);

	/*
	 * Remove tncp from the target directory and then link ip as
	 * tncp. XXX pass trans to dounlink
	 *
	 * Force the inode sync-time to match the transaction so it is
	 * in-sync with the creation of the target directory entry.
	 */
	error = hammer_dounlink(&trans, ap->a_tnch, ap->a_tdvp, ap->a_cred, 0);
	if (error == 0 || error == ENOENT) {
		error = hammer_ip_add_directory(&trans, tdip,
						tncp->nc_name, tncp->nc_nlen,
						ip);
		if (error == 0) {
			ip->ino_data.parent_obj_id = tdip->obj_id;
			hammer_modify_inode(ip, HAMMER_INODE_DDIRTY);
		}
	}
	if (error)
		goto failed; /* XXX */

	/*
	 * Locate the record in the originating directory and remove it.
	 *
	 * Calculate the namekey and setup the key range for the scan.  This
	 * works kinda like a chained hash table where the lower 32 bits
	 * of the namekey synthesize the chain.
	 *
	 * The key range is inclusive of both key_beg and key_end.
	 */
	namekey = hammer_directory_namekey(fncp->nc_name, fncp->nc_nlen);
retry:
	hammer_init_cursor(&trans, &cursor, &fdip->cache[1], fdip);
	cursor.key_beg.localization = fdip->obj_localization +
				      HAMMER_LOCALIZE_MISC;
	cursor.key_beg.obj_id = fdip->obj_id;
	cursor.key_beg.key = namekey;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
	cursor.key_beg.obj_type = 0;

	cursor.key_end = cursor.key_beg;
	cursor.key_end.key |= 0xFFFFFFFFULL;
	cursor.asof = fdip->obj_asof;
	cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;

	/*
	 * Scan all matching records (the chain), locate the one matching
	 * the requested path component.
	 *
	 * The hammer_ip_*() functions merge in-memory records with on-disk
	 * records for the purposes of the search.
	 */
	error = hammer_ip_first(&cursor);
	while (error == 0) {
		if (hammer_ip_resolve_data(&cursor) != 0)
			break;
		nlen = cursor.leaf->data_len - HAMMER_ENTRY_NAME_OFF;
		KKASSERT(nlen > 0);
		if (fncp->nc_nlen == nlen &&
		    bcmp(fncp->nc_name, cursor.data->entry.name, nlen) == 0) {
			break;
		}
		error = hammer_ip_next(&cursor);
	}

	/*
	 * If all is ok we have to get the inode so we can adjust nlinks.
	 *
	 * WARNING: hammer_ip_del_directory() may have to terminate the
	 * cursor to avoid a recursion.  It's ok to call hammer_done_cursor()
	 * twice.
	 */
	if (error == 0)
		error = hammer_ip_del_directory(&trans, &cursor, fdip, ip);

	/*
	 * XXX A deadlock here will break rename's atomicity for the
	 * purposes of crash recovery.
	 */
	if (error == EDEADLK) {
		hammer_done_cursor(&cursor);
		goto retry;
	}

	/*
	 * Cleanup and tell the kernel that the rename succeeded.
	 */
	hammer_done_cursor(&cursor);
	if (error == 0)
		cache_rename(ap->a_fnch, ap->a_tnch);

failed:
	hammer_done_transaction(&trans);
	return (error);
}

/*
 * hammer_vop_nrmdir { nch, dvp, cred }
 */
static
int
hammer_vop_nrmdir(struct vop_nrmdir_args *ap)
{
	struct hammer_transaction trans;
	struct hammer_inode *dip;
	int error;

	dip = VTOI(ap->a_dvp);

	if (hammer_nohistory(dip) == 0 &&
	    (error = hammer_checkspace(dip->hmp)) != 0) {
		return (error);
	}

	hammer_start_transaction(&trans, dip->hmp);
	error = hammer_dounlink(&trans, ap->a_nch, ap->a_dvp, ap->a_cred, 0);
	hammer_done_transaction(&trans);

	return (error);
}

/*
 * hammer_vop_setattr { vp, vap, cred }
 */
static
int
hammer_vop_setattr(struct vop_setattr_args *ap)
{
	struct hammer_transaction trans;
	struct vattr *vap;
	struct hammer_inode *ip;
	int modflags;
	int error;
	int truncating;
	int blksize;
	int64_t aligned_size;
	u_int32_t flags;

	vap = ap->a_vap;
	ip = ap->a_vp->v_data;
	modflags = 0;

	if (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY)
		return (EROFS);
	if (ip->flags & HAMMER_INODE_RO)
		return (EROFS);
	if (hammer_nohistory(ip) == 0 &&
	    (error = hammer_checkspace(ip->hmp)) != 0) {
		return (error);
	}

	hammer_start_transaction(&trans, ip->hmp);
	error = 0;

	if (vap->va_flags != VNOVAL) {
		flags = ip->ino_data.uflags;
		error = vop_helper_setattr_flags(&flags, vap->va_flags,
					 hammer_to_unix_xid(&ip->ino_data.uid),
					 ap->a_cred);
		if (error == 0) {
			if (ip->ino_data.uflags != flags) {
				ip->ino_data.uflags = flags;
				modflags |= HAMMER_INODE_DDIRTY;
			}
			if (ip->ino_data.uflags & (IMMUTABLE | APPEND)) {
				error = 0;
				goto done;
			}
		}
		goto done;
	}
	if (ip->ino_data.uflags & (IMMUTABLE | APPEND)) {
		error = EPERM;
		goto done;
	}
	if (vap->va_uid != (uid_t)VNOVAL || vap->va_gid != (gid_t)VNOVAL) {
		mode_t cur_mode = ip->ino_data.mode;
		uid_t cur_uid = hammer_to_unix_xid(&ip->ino_data.uid);
		gid_t cur_gid = hammer_to_unix_xid(&ip->ino_data.gid);
		uuid_t uuid_uid;
		uuid_t uuid_gid;

		error = vop_helper_chown(ap->a_vp, vap->va_uid, vap->va_gid,
					 ap->a_cred,
					 &cur_uid, &cur_gid, &cur_mode);
		if (error == 0) {
			hammer_guid_to_uuid(&uuid_uid, cur_uid);
			hammer_guid_to_uuid(&uuid_gid, cur_gid);
			if (bcmp(&uuid_uid, &ip->ino_data.uid,
				 sizeof(uuid_uid)) ||
			    bcmp(&uuid_gid, &ip->ino_data.gid,
				 sizeof(uuid_gid)) ||
			    ip->ino_data.mode != cur_mode
			) {
				ip->ino_data.uid = uuid_uid;
				ip->ino_data.gid = uuid_gid;
				ip->ino_data.mode = cur_mode;
			}
			modflags |= HAMMER_INODE_DDIRTY;
		}
	}
	while (vap->va_size != VNOVAL && ip->ino_data.size != vap->va_size) {
		switch(ap->a_vp->v_type) {
		case VREG:
			if (vap->va_size == ip->ino_data.size)
				break;
			/*
			 * XXX break atomicity, we can deadlock the backend
			 * if we do not release the lock.  Probably not a
			 * big deal here.
			 */
			blksize = hammer_blocksize(vap->va_size);
			if (vap->va_size < ip->ino_data.size) {
				vtruncbuf(ap->a_vp, vap->va_size, blksize);
				truncating = 1;
			} else {
				vnode_pager_setsize(ap->a_vp, vap->va_size);
				truncating = 0;
			}
			ip->ino_data.size = vap->va_size;
			modflags |= HAMMER_INODE_DDIRTY;

			/*
			 * on-media truncation is cached in the inode until
			 * the inode is synchronized.
			 */
			if (truncating) {
				hammer_ip_frontend_trunc(ip, vap->va_size);
				hammer_update_rsv_databufs(ip);
#ifdef DEBUG_TRUNCATE
				if (HammerTruncIp == NULL)
					HammerTruncIp = ip;
#endif
				if ((ip->flags & HAMMER_INODE_TRUNCATED) == 0) {
					ip->flags |= HAMMER_INODE_TRUNCATED;
					ip->trunc_off = vap->va_size;
#ifdef DEBUG_TRUNCATE
					if (ip == HammerTruncIp)
						kprintf("truncate1 %016llx\n", ip->trunc_off);
#endif
				} else if (ip->trunc_off > vap->va_size) {
					ip->trunc_off = vap->va_size;
#ifdef DEBUG_TRUNCATE
					if (ip == HammerTruncIp)
						kprintf("truncate2 %016llx\n", ip->trunc_off);
#endif
				} else {
#ifdef DEBUG_TRUNCATE
					if (ip == HammerTruncIp)
						kprintf("truncate3 %016llx (ignored)\n", vap->va_size);
#endif
				}
			}

			/*
			 * If truncating we have to clean out a portion of
			 * the last block on-disk.  We do this in the
			 * front-end buffer cache.
			 */
			aligned_size = (vap->va_size + (blksize - 1)) &
				       ~(int64_t)(blksize - 1);
			if (truncating && vap->va_size < aligned_size) {
				struct buf *bp;
				int offset;

				aligned_size -= blksize;

				offset = (int)vap->va_size & (blksize - 1);
				error = bread(ap->a_vp, aligned_size,
					      blksize, &bp);
				hammer_ip_frontend_trunc(ip, aligned_size);
				if (error == 0) {
					bzero(bp->b_data + offset,
					      blksize - offset);
					bdwrite(bp);
				} else {
					kprintf("ERROR %d\n", error);
					brelse(bp);
				}
			}
			break;
		case VDATABASE:
			if ((ip->flags & HAMMER_INODE_TRUNCATED) == 0) {
				ip->flags |= HAMMER_INODE_TRUNCATED;
				ip->trunc_off = vap->va_size;
			} else if (ip->trunc_off > vap->va_size) {
				ip->trunc_off = vap->va_size;
			}
			hammer_ip_frontend_trunc(ip, vap->va_size);
			ip->ino_data.size = vap->va_size;
			modflags |= HAMMER_INODE_DDIRTY;
			break;
		default:
			error = EINVAL;
			goto done;
		}
		break;
	}
	if (vap->va_atime.tv_sec != VNOVAL) {
		ip->ino_data.atime =
			hammer_timespec_to_time(&vap->va_atime);
		modflags |= HAMMER_INODE_ATIME;
	}
	if (vap->va_mtime.tv_sec != VNOVAL) {
		ip->ino_data.mtime =
			hammer_timespec_to_time(&vap->va_mtime);
		modflags |= HAMMER_INODE_MTIME;
	}
	if (vap->va_mode != (mode_t)VNOVAL) {
		mode_t cur_mode = ip->ino_data.mode;
		uid_t cur_uid = hammer_to_unix_xid(&ip->ino_data.uid);
		gid_t cur_gid = hammer_to_unix_xid(&ip->ino_data.gid);

		error = vop_helper_chmod(ap->a_vp, vap->va_mode, ap->a_cred,
					 cur_uid, cur_gid, &cur_mode);
		if (error == 0 && ip->ino_data.mode != cur_mode) {
			ip->ino_data.mode = cur_mode;
			modflags |= HAMMER_INODE_DDIRTY;
		}
	}
done:
	if (error == 0)
		hammer_modify_inode(ip, modflags);
	hammer_done_transaction(&trans);
	return (error);
}

/*
 * hammer_vop_nsymlink { nch, dvp, vpp, cred, vap, target }
 */
static
int
hammer_vop_nsymlink(struct vop_nsymlink_args *ap)
{
	struct hammer_transaction trans;
	struct hammer_inode *dip;
	struct hammer_inode *nip;
	struct nchandle *nch;
	hammer_record_t record;
	int error;
	int bytes;

	ap->a_vap->va_type = VLNK;

	nch = ap->a_nch;
	dip = VTOI(ap->a_dvp);

	if (dip->flags & HAMMER_INODE_RO)
		return (EROFS);
	if ((error = hammer_checkspace(dip->hmp)) != 0)
		return (error);

	/*
	 * Create a transaction to cover the operations we perform.
	 */
	hammer_start_transaction(&trans, dip->hmp);

	/*
	 * Create a new filesystem object of the requested type.  The
	 * returned inode will be referenced but not locked.
	 */
	error = hammer_create_inode(&trans, ap->a_vap, ap->a_cred,
				    dip, 0, &nip);
	if (error) {
		hammer_done_transaction(&trans);
		*ap->a_vpp = NULL;
		return (error);
	}

	/*
	 * Add a record representing the symlink.  symlink stores the link
	 * as pure data, not a string, and is not \0-terminated.
	 */
	if (error == 0) {
		bytes = strlen(ap->a_target);

		if (bytes <= HAMMER_INODE_BASESYMLEN) {
			bcopy(ap->a_target, nip->ino_data.ext.symlink, bytes);
		} else {
			record = hammer_alloc_mem_record(nip, bytes);
			record->type = HAMMER_MEM_RECORD_GENERAL;

			record->leaf.base.localization = nip->obj_localization +
							 HAMMER_LOCALIZE_MISC;
			record->leaf.base.key = HAMMER_FIXKEY_SYMLINK;
			record->leaf.base.rec_type = HAMMER_RECTYPE_FIX;
			record->leaf.data_len = bytes;
			KKASSERT(HAMMER_SYMLINK_NAME_OFF == 0);
			bcopy(ap->a_target, record->data->symlink.name, bytes);
			error = hammer_ip_add_record(&trans, record);
		}

		/*
		 * Set the file size to the length of the link.
		 */
		if (error == 0) {
			nip->ino_data.size = bytes;
			hammer_modify_inode(nip, HAMMER_INODE_DDIRTY);
		}
	}
	if (error == 0)
		error = hammer_ip_add_directory(&trans, dip, nch->ncp->nc_name,
						nch->ncp->nc_nlen, nip);

	/*
	 * Finish up.
	 */
	if (error) {
		hammer_rel_inode(nip, 0);
		*ap->a_vpp = NULL;
	} else {
		error = hammer_get_vnode(nip, ap->a_vpp);
		hammer_rel_inode(nip, 0);
		if (error == 0) {
			cache_setunresolved(ap->a_nch);
			cache_setvp(ap->a_nch, *ap->a_vpp);
		}
	}
	hammer_done_transaction(&trans);
	return (error);
}

/*
 * hammer_vop_nwhiteout { nch, dvp, cred, flags }
 */
static
int
hammer_vop_nwhiteout(struct vop_nwhiteout_args *ap)
{
	struct hammer_transaction trans;
	struct hammer_inode *dip;
	int error;

	dip = VTOI(ap->a_dvp);

	if (hammer_nohistory(dip) == 0 &&
	    (error = hammer_checkspace(dip->hmp)) != 0) {
		return (error);
	}

	hammer_start_transaction(&trans, dip->hmp);
	error = hammer_dounlink(&trans, ap->a_nch, ap->a_dvp,
				ap->a_cred, ap->a_flags);
	hammer_done_transaction(&trans);

	return (error);
}

/*
 * hammer_vop_ioctl { vp, command, data, fflag, cred }
 */
static
int
hammer_vop_ioctl(struct vop_ioctl_args *ap)
{
	struct hammer_inode *ip = ap->a_vp->v_data;

	return(hammer_ioctl(ip, ap->a_command, ap->a_data,
			    ap->a_fflag, ap->a_cred));
}

static
int
hammer_vop_mountctl(struct vop_mountctl_args *ap)
{
	struct mount *mp;
	int error;

	mp = ap->a_head.a_ops->head.vv_mount;

	switch(ap->a_op) {
	case MOUNTCTL_SET_EXPORT:
		if (ap->a_ctllen != sizeof(struct export_args))
			error = EINVAL;
		else
			error = hammer_vfs_export(mp, ap->a_op,
				      (const struct export_args *)ap->a_ctl);
		break;
	default:
		error = journal_mountctl(ap);
		break;
	}
	return(error);
}

/*
 * hammer_vop_strategy { vp, bio }
 *
 * Strategy call, used for regular file read & write only.  Note that the
 * bp may represent a cluster.
 *
 * To simplify operation and allow better optimizations in the future,
 * this code does not make any assumptions with regards to buffer alignment
 * or size.
 */
static
int
hammer_vop_strategy(struct vop_strategy_args *ap)
{
	struct buf *bp;
	int error;

	bp = ap->a_bio->bio_buf;

	switch(bp->b_cmd) {
	case BUF_CMD_READ:
		error = hammer_vop_strategy_read(ap);
		break;
	case BUF_CMD_WRITE:
		error = hammer_vop_strategy_write(ap);
		break;
	default:
		bp->b_error = error = EINVAL;
		bp->b_flags |= B_ERROR;
		biodone(ap->a_bio);
		break;
	}
	return (error);
}

/*
 * Read from a regular file.  Iterate the related records and fill in the
 * BIO/BUF.  Gaps are zero-filled.
 *
 * The support code in hammer_object.c should be used to deal with mixed
 * in-memory and on-disk records.
 *
 * NOTE: Can be called from the cluster code with an oversized buf.
 *
 * XXX atime update
 */
static
int
hammer_vop_strategy_read(struct vop_strategy_args *ap)
{
	struct hammer_transaction trans;
	struct hammer_inode *ip;
	struct hammer_cursor cursor;
	hammer_base_elm_t base;
	hammer_off_t disk_offset;
	struct bio *bio;
	struct bio *nbio;
	struct buf *bp;
	int64_t rec_offset;
	int64_t ran_end;
	int64_t tmp64;
	int error;
	int boff;
	int roff;
	int n;

	bio = ap->a_bio;
	bp = bio->bio_buf;
	ip = ap->a_vp->v_data;

	/*
	 * The zone-2 disk offset may have been set by the cluster code via
	 * a BMAP operation, or else should be NOOFFSET.
	 *
	 * Checking the high bits for a match against zone-2 should suffice.
	 */
	nbio = push_bio(bio);
	if ((nbio->bio_offset & HAMMER_OFF_ZONE_MASK) ==
	    HAMMER_ZONE_RAW_BUFFER) {
		error = hammer_io_direct_read(ip->hmp, nbio);
		return (error);
	}

	/*
	 * Well, that sucked.  Do it the hard way.  If all the stars are
	 * aligned we may still be able to issue a direct-read.
	 */
	hammer_simple_transaction(&trans, ip->hmp);
	hammer_init_cursor(&trans, &cursor, &ip->cache[1], ip);

	/*
	 * Key range (begin and end inclusive) to scan.  Note that the key's
	 * stored in the actual records represent BASE+LEN, not BASE.  The
	 * first record containing bio_offset will have a key > bio_offset.
	 */
	cursor.key_beg.localization = ip->obj_localization +
				      HAMMER_LOCALIZE_MISC;
	cursor.key_beg.obj_id = ip->obj_id;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.obj_type = 0;
	cursor.key_beg.key = bio->bio_offset + 1;
	cursor.asof = ip->obj_asof;
	cursor.flags |= HAMMER_CURSOR_ASOF;

	cursor.key_end = cursor.key_beg;
	KKASSERT(ip->ino_data.obj_type == HAMMER_OBJTYPE_REGFILE);
#if 0
	if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DBFILE) {
		cursor.key_beg.rec_type = HAMMER_RECTYPE_DB;
		cursor.key_end.rec_type = HAMMER_RECTYPE_DB;
		cursor.key_end.key = 0x7FFFFFFFFFFFFFFFLL;
	} else
#endif
	{
		ran_end = bio->bio_offset + bp->b_bufsize;
		cursor.key_beg.rec_type = HAMMER_RECTYPE_DATA;
		cursor.key_end.rec_type = HAMMER_RECTYPE_DATA;
		tmp64 = ran_end + MAXPHYS + 1;	/* work-around GCC-4 bug */
		if (tmp64 < ran_end)
			cursor.key_end.key = 0x7FFFFFFFFFFFFFFFLL;
		else
			cursor.key_end.key = ran_end + MAXPHYS + 1;
	}
	cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE;
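
	/*
	 * Example of the key convention (editor's sketch, not in the
	 * original): a 16K data record covering file offsets [0, 16384) is
	 * stored under key 16384, i.e. the END of its range.  That is why
	 * key_beg is bio_offset + 1 - the first record whose key is greater
	 * than bio_offset is the one containing it - and why key_end pads
	 * by MAXPHYS to catch a maximal record ending past the request.
	 */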

	error = hammer_ip_first(&cursor);
	boff = 0;

	while (error == 0) {
		/*
		 * Get the base file offset of the record.  The key for
		 * data records is (base + bytes) rather than (base).
		 */
		base = &cursor.leaf->base;
		rec_offset = base->key - cursor.leaf->data_len;

		/*
		 * Calculate the gap, if any, and zero-fill it.
		 *
		 * n is the offset of the start of the record versus our
		 * current seek offset in the bio.
		 */
		n = (int)(rec_offset - (bio->bio_offset + boff));
		if (n > 0) {
			if (n > bp->b_bufsize - boff)
				n = bp->b_bufsize - boff;
			bzero((char *)bp->b_data + boff, n);
			boff += n;
			n = 0;
		}

		/*
		 * Calculate the data offset in the record and the number
		 * of bytes we can copy.
		 *
		 * There are two degenerate cases.  First, boff may already
		 * be at bp->b_bufsize.  Secondly, the data offset within
		 * the record may exceed the record's size.
		 */
		roff = -n;
		rec_offset += roff;
		n = cursor.leaf->data_len - roff;
		if (n <= 0) {
			kprintf("strategy_read: bad n=%d roff=%d\n", n, roff);
			n = 0;
		} else if (n > bp->b_bufsize - boff) {
			n = bp->b_bufsize - boff;
		}

		/*
		 * Deal with cached truncations.  This cool bit of code
		 * allows truncate()/ftruncate() to avoid having to sync
		 * the file.
		 *
		 * If the frontend is truncated then all backend records are
		 * subject to the frontend's truncation.
		 *
		 * If the backend is truncated then backend records on-disk
		 * (but not in-memory) are subject to the backend's
		 * truncation.  In-memory records owned by the backend
		 * represent data written after the truncation point on the
		 * backend and must not be truncated.
		 *
		 * Truncate operations deal with frontend buffer cache
		 * buffers and frontend-owned in-memory records synchronously.
		 */
		if (ip->flags & HAMMER_INODE_TRUNCATED) {
			if (hammer_cursor_ondisk(&cursor) ||
			    cursor.iprec->flush_state == HAMMER_FST_FLUSH) {
				if (ip->trunc_off <= rec_offset)
					n = 0;
				else if (ip->trunc_off < rec_offset + n)
					n = (int)(ip->trunc_off - rec_offset);
			}
		}
		if (ip->sync_flags & HAMMER_INODE_TRUNCATED) {
			if (hammer_cursor_ondisk(&cursor)) {
				if (ip->sync_trunc_off <= rec_offset)
					n = 0;
				else if (ip->sync_trunc_off < rec_offset + n)
					n = (int)(ip->sync_trunc_off - rec_offset);
			}
		}

		/*
		 * Try to issue a direct read into our bio if possible,
		 * otherwise resolve the element data into a hammer_buffer
		 * and copy.
		 *
		 * The buffer on-disk should be zeroed past any real
		 * truncation point, but may not be for any synthesized
		 * truncation point from above.
		 */
		if (boff == 0 && n == bp->b_bufsize &&
		    ((cursor.leaf->data_offset + roff) & HAMMER_BUFMASK) == 0) {
			disk_offset = hammer_blockmap_lookup(
						trans.hmp,
						cursor.leaf->data_offset + roff,
						&error);
			if (error)
				break;
			nbio->bio_offset = disk_offset;
			error = hammer_io_direct_read(trans.hmp, nbio);
			goto done;
		} else if (n) {
			error = hammer_ip_resolve_data(&cursor);
			if (error == 0) {
				bcopy((char *)cursor.data + roff,
				      (char *)bp->b_data + boff, n);
			}
		}
		if (error)
			break;

		/*
		 * Iterate until we have filled the request.
		 */
		boff += n;
		if (boff == bp->b_bufsize)
			break;
		error = hammer_ip_next(&cursor);
	}

	/*
	 * There may have been a gap after the last record
	 */
	if (error == ENOENT)
		error = 0;
	if (error == 0 && boff != bp->b_bufsize) {
		KKASSERT(boff < bp->b_bufsize);
		bzero((char *)bp->b_data + boff, bp->b_bufsize - boff);
		/* boff = bp->b_bufsize; */
	}
	bp->b_resid = 0;
	bp->b_error = error;
	if (error)
		bp->b_flags |= B_ERROR;
	biodone(ap->a_bio);

done:
	if (cursor.node)
		hammer_cache_node(&ip->cache[1], cursor.node);
	hammer_done_cursor(&cursor);
	hammer_done_transaction(&trans);
	return (error);
}

/*
 * BMAP operation - used to support cluster_read() only.
 *
 * (struct vnode *vp, off_t loffset, off_t *doffsetp, int *runp, int *runb)
 *
 * This routine may return EOPNOTSUPP if the operation is not supported for
 * the specified offset.  The contents of the pointer arguments do not
 * need to be initialized in that case.
 *
 * If a disk address is available and properly aligned return 0 with
 * *doffsetp set to the zone-2 address, and *runp / *runb set appropriately
 * to the run-length relative to that offset.  Callers may assume that
 * *doffsetp is valid if 0 is returned, even if *runp is not sufficiently
 * large, so return EOPNOTSUPP if it is not sufficiently large.
 */
static
int
hammer_vop_bmap(struct vop_bmap_args *ap)
{
	struct hammer_transaction trans;
	struct hammer_inode *ip;
	struct hammer_cursor cursor;
	hammer_base_elm_t base;
	int64_t rec_offset;
	int64_t ran_end;
	int64_t tmp64;
	int64_t base_offset;
	int64_t base_disk_offset;
	int64_t last_offset;
	hammer_off_t last_disk_offset;
	hammer_off_t disk_offset;
	int rec_len;
	int error;
	int blksize;

	ip = ap->a_vp->v_data;

	/*
	 * We can only BMAP regular files.  We can't BMAP database files.
	 */
	if (ip->ino_data.obj_type != HAMMER_OBJTYPE_REGFILE)
		return (EOPNOTSUPP);

	/*
	 * bmap is typically called with runp/runb both NULL when used
	 * for writing.  We do not support BMAP for writing atm.
	 */
	if (ap->a_cmd != BUF_CMD_READ)
		return (EOPNOTSUPP);

	/*
	 * Scan the B-Tree to acquire blockmap addresses, then translate
	 * to raw addresses.
	 */
	hammer_simple_transaction(&trans, ip->hmp);
#if 0
	kprintf("bmap_beg %016llx ip->cache %p\n", ap->a_loffset, ip->cache[1]);
#endif
	hammer_init_cursor(&trans, &cursor, &ip->cache[1], ip);

	/*
	 * Key range (begin and end inclusive) to scan.  Note that the key's
	 * stored in the actual records represent BASE+LEN, not BASE.  The
	 * first record containing bio_offset will have a key > bio_offset.
	 */
	cursor.key_beg.localization = ip->obj_localization +
				      HAMMER_LOCALIZE_MISC;
	cursor.key_beg.obj_id = ip->obj_id;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.obj_type = 0;
	if (ap->a_runb)
		cursor.key_beg.key = ap->a_loffset - MAXPHYS + 1;
	else
		cursor.key_beg.key = ap->a_loffset + 1;
	if (cursor.key_beg.key < 0)
		cursor.key_beg.key = 0;
	cursor.asof = ip->obj_asof;
	cursor.flags |= HAMMER_CURSOR_ASOF;

	cursor.key_end = cursor.key_beg;
	KKASSERT(ip->ino_data.obj_type == HAMMER_OBJTYPE_REGFILE);

	ran_end = ap->a_loffset + MAXPHYS;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_DATA;
	cursor.key_end.rec_type = HAMMER_RECTYPE_DATA;
	tmp64 = ran_end + MAXPHYS + 1;	/* work-around GCC-4 bug */
	if (tmp64 < ran_end)
		cursor.key_end.key = 0x7FFFFFFFFFFFFFFFLL;
	else
		cursor.key_end.key = ran_end + MAXPHYS + 1;

	cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE;

	error = hammer_ip_first(&cursor);
	base_offset = last_offset = 0;
	base_disk_offset = last_disk_offset = 0;

	while (error == 0) {
		/*
		 * Get the base file offset of the record.  The key for
		 * data records is (base + bytes) rather than (base).
		 *
		 * NOTE: rec_offset + rec_len may exceed the end-of-file.
		 * The extra bytes should be zero on-disk and the BMAP op
		 * should still be ok.
		 */
		base = &cursor.leaf->base;
		rec_offset = base->key - cursor.leaf->data_len;
		rec_len    = cursor.leaf->data_len;

		/*
		 * Incorporate any cached truncation.
		 *
		 * NOTE: Modifications to rec_len based on synthesized
		 * truncation points remove the guarantee that any extended
		 * data on disk is zero (since the truncations may not have
		 * taken place on-media yet).
		 */
		if (ip->flags & HAMMER_INODE_TRUNCATED) {
			if (hammer_cursor_ondisk(&cursor) ||
			    cursor.iprec->flush_state == HAMMER_FST_FLUSH) {
				if (ip->trunc_off <= rec_offset)
					rec_len = 0;
				else if (ip->trunc_off < rec_offset + rec_len)
					rec_len = (int)(ip->trunc_off - rec_offset);
			}
		}
		if (ip->sync_flags & HAMMER_INODE_TRUNCATED) {
			if (hammer_cursor_ondisk(&cursor)) {
				if (ip->sync_trunc_off <= rec_offset)
					rec_len = 0;
				else if (ip->sync_trunc_off < rec_offset + rec_len)
					rec_len = (int)(ip->sync_trunc_off - rec_offset);
			}
		}

		/*
		 * Accumulate information.  If we have hit a discontiguous
		 * block reset base_offset unless we are already beyond the
		 * requested offset.  If we are, that's it, we stop.
		 */
		disk_offset = hammer_blockmap_lookup(trans.hmp,
						     cursor.leaf->data_offset,
						     &error);
		if (error)
			break;
		if (rec_offset != last_offset ||
		    disk_offset != last_disk_offset) {
			if (rec_offset > ap->a_loffset)
				break;
			base_offset = rec_offset;
			base_disk_offset = disk_offset;
		}
		last_offset = rec_offset + rec_len;
		last_disk_offset = disk_offset + rec_len;

		error = hammer_ip_next(&cursor);
	}

#if 0
	kprintf("BMAP %016llx:  %016llx - %016llx\n",
		ap->a_loffset, base_offset, last_offset);
	kprintf("BMAP %16s:  %016llx - %016llx\n",
		"", base_disk_offset, last_disk_offset);
#endif

	if (cursor.node)
		hammer_cache_node(&ip->cache[1], cursor.node);
#if 0
	kprintf("bmap_end2 %016llx ip->cache %p\n", ap->a_loffset, ip->cache[1]);
#endif
	hammer_done_cursor(&cursor);
	hammer_done_transaction(&trans);

	/*
	 * If we couldn't find any records or the records we did find were
	 * all behind the requested offset, return failure.  A forward
	 * truncation can leave a hole w/ no on-disk records.
	 */
	if (last_offset == 0 || last_offset < ap->a_loffset)
		return (EOPNOTSUPP);

	/*
	 * Figure out the block size at the requested offset and adjust
	 * our limits so the cluster_read() does not create inappropriately
	 * sized buffer cache buffers.
	 */
	blksize = hammer_blocksize(ap->a_loffset);
	if (hammer_blocksize(base_offset) != blksize) {
		base_offset = hammer_blockdemarc(base_offset, ap->a_loffset);
	}
	if (last_offset != ap->a_loffset &&
	    hammer_blocksize(last_offset - 1) != blksize) {
		last_offset = hammer_blockdemarc(ap->a_loffset,
						 last_offset - 1);
	}

	/*
	 * Returning EOPNOTSUPP simply prevents the direct-IO optimization
	 * from occurring.
	 */
	disk_offset = base_disk_offset + (ap->a_loffset - base_offset);

	/*
	 * If doffsetp is not aligned or the forward run size does
	 * not cover a whole buffer, disallow the direct I/O.
	 */
	if ((disk_offset & HAMMER_BUFMASK) ||
	    (last_offset - ap->a_loffset) < blksize) {
		error = EOPNOTSUPP;
	} else {
		*ap->a_doffsetp = disk_offset;
		if (ap->a_runb) {
			*ap->a_runb = ap->a_loffset - base_offset;
			KKASSERT(*ap->a_runb >= 0);
		}
		if (ap->a_runp) {
			*ap->a_runp = last_offset - ap->a_loffset;
			KKASSERT(*ap->a_runp >= 0);
		}
		error = 0;
	}
	return (error);
}
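
/*
 * Worked example of the BMAP contract above (editor's note, not in the
 * original): when the scan finds a contiguous zone-2 extent covering file
 * offsets [base_offset, last_offset) around a_loffset, *a_doffsetp points
 * at a_loffset's disk block, *a_runb reports a_loffset - base_offset
 * contiguous bytes behind it and *a_runp reports last_offset - a_loffset
 * bytes ahead of it, which is exactly what cluster_read() needs to size
 * its read-ahead.
 */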

/*
 * Write to a regular file.  Because this is a strategy call the OS is
 * trying to actually get data onto the media.
 */
static
int
hammer_vop_strategy_write(struct vop_strategy_args *ap)
{
	hammer_record_t record;
	hammer_mount_t hmp;
	hammer_inode_t ip;
	struct bio *bio;
	struct buf *bp;
	int bytes;
	int error;

	bio = ap->a_bio;
	bp = bio->bio_buf;
	ip = ap->a_vp->v_data;
	hmp = ip->hmp;

	KKASSERT(bp->b_bufsize == hammer_blocksize(bio->bio_offset));

	if (ip->flags & HAMMER_INODE_RO) {
		bp->b_error = EROFS;
		bp->b_flags |= B_ERROR;
		biodone(ap->a_bio);
		hammer_cleanup_write_io(ip);
		return(EROFS);
	}

	/*
	 * Interlock with inode destruction (no in-kernel or directory
	 * topology visibility).  If we queue new IO while trying to
	 * destroy the inode we can deadlock the vtrunc call in
	 * hammer_inode_unloadable_check().
	 */
	if (ip->flags & (HAMMER_INODE_DELETING|HAMMER_INODE_DELETED)) {
		bp->b_resid = 0;
		biodone(ap->a_bio);
		hammer_cleanup_write_io(ip);
		return(0);
	}

	/*
	 * Reserve space and issue a direct-write from the front-end.
	 * NOTE: The direct_io code will hammer_bread/bcopy smaller
	 * missing pieces.
	 *
	 * An in-memory record will be installed to reference the storage
	 * until the flusher can get to it.
	 *
	 * Since we own the high level bio the front-end will not try to
	 * do a direct-read until the write completes.
	 *
	 * NOTE: The only time we do not reserve a full-sized buffers
	 * worth of data is if the file is small.  We do not try to
	 * allocate a fragment (from the small-data zone) at the end of
	 * an otherwise large file as this can lead to wildly separated
	 * data.
	 */
	KKASSERT((bio->bio_offset & HAMMER_BUFMASK) == 0);
	KKASSERT(bio->bio_offset < ip->ino_data.size);
	if (bio->bio_offset || ip->ino_data.size > HAMMER_BUFSIZE / 2)
		bytes = bp->b_bufsize;
	else
		bytes = ((int)ip->ino_data.size + 15) & ~15;
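
	/*
	 * Example (editor's note, not in the original): a 100 byte file
	 * writes a single 112 byte ((100 + 15) & ~15) fragment from the
	 * small-data zone instead of a full 16K buffer; any file larger
	 * than HAMMER_BUFSIZE / 2, or any write not at offset 0, reserves
	 * the full buffer so a large file's data is not split across
	 * wildly separated fragments.
	 */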

	record = hammer_ip_add_bulk(ip, bio->bio_offset, bp->b_data,
				    bytes, &error);
	if (record) {
		hammer_io_direct_write(hmp, &record->leaf, bio);
		hammer_rel_mem_record(record);
		if (ip->rsv_recs > 1 && hmp->rsv_recs > hammer_limit_recs)
			hammer_flush_inode(ip, 0);
	} else {
		bp->b_bio2.bio_offset = NOOFFSET;
		bp->b_error = error;
		bp->b_flags |= B_ERROR;
		biodone(ap->a_bio);
	}
	hammer_cleanup_write_io(ip);
	return(error);
}

/*
 * Clean-up after disposing of a dirty frontend buffer's data.
 * This is somewhat heuristic so try to be robust.
 */
static void
hammer_cleanup_write_io(hammer_inode_t ip)
{
	if (ip->rsv_databufs) {
		--ip->rsv_databufs;
		--ip->hmp->rsv_databufs;
	}
}

/*
 * We can lose track of dirty buffer cache buffers if we truncate; this
 * routine will resynchronize the count.
 */
static
void
hammer_update_rsv_databufs(hammer_inode_t ip)
{
	struct buf *bp;
	int delta;
	int n;

	if (ip->vp) {
		n = 0;
		RB_FOREACH(bp, buf_rb_tree, &ip->vp->v_rbdirty_tree) {
			++n;
		}
	} else {
		n = 0;
	}
	delta = n - ip->rsv_databufs;
	ip->rsv_databufs += delta;
	ip->hmp->rsv_databufs += delta;
}

/*
 * dounlink - disconnect a directory entry
 *
 * XXX whiteout support not really in yet
 */
static int
hammer_dounlink(hammer_transaction_t trans, struct nchandle *nch,
		struct vnode *dvp, struct ucred *cred, int flags)
{
	struct namecache *ncp;
	hammer_inode_t dip;
	hammer_inode_t ip;
	struct hammer_cursor cursor;
	int64_t namekey;
	int nlen, error;

	/*
	 * Calculate the namekey and setup the key range for the scan.  This
	 * works kinda like a chained hash table where the lower 32 bits
	 * of the namekey synthesize the chain.
	 *
	 * The key range is inclusive of both key_beg and key_end.
	 */
	dip = VTOI(dvp);
	ncp = nch->ncp;

	if (dip->flags & HAMMER_INODE_RO)
		return (EROFS);

	namekey = hammer_directory_namekey(ncp->nc_name, ncp->nc_nlen);
retry:
	hammer_init_cursor(trans, &cursor, &dip->cache[1], dip);
	cursor.key_beg.localization = dip->obj_localization +
				      HAMMER_LOCALIZE_MISC;
	cursor.key_beg.obj_id = dip->obj_id;
	cursor.key_beg.key = namekey;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
	cursor.key_beg.obj_type = 0;

	cursor.key_end = cursor.key_beg;
	cursor.key_end.key |= 0xFFFFFFFFULL;
	cursor.asof = dip->obj_asof;
	cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;

	/*
	 * Scan all matching records (the chain), locate the one matching
	 * the requested path component.  info->last_error contains the
	 * error code on search termination and could be 0, ENOENT, or
	 * something else.
	 *
	 * The hammer_ip_*() functions merge in-memory records with on-disk
	 * records for the purposes of the search.
	 */
	error = hammer_ip_first(&cursor);

	while (error == 0) {
		error = hammer_ip_resolve_data(&cursor);
		if (error)
			break;
		nlen = cursor.leaf->data_len - HAMMER_ENTRY_NAME_OFF;
		KKASSERT(nlen > 0);
		if (ncp->nc_nlen == nlen &&
		    bcmp(ncp->nc_name, cursor.data->entry.name, nlen) == 0) {
			break;
		}
		error = hammer_ip_next(&cursor);
	}

	/*
	 * If all is ok we have to get the inode so we can adjust nlinks.
	 * To avoid a deadlock with the flusher we must release the inode
	 * lock on the directory when acquiring the inode for the entry.
	 *
	 * If the target is a directory, it must be empty.
	 */
	if (error == 0) {
		hammer_unlock(&cursor.ip->lock);
		ip = hammer_get_inode(trans, dip, cursor.data->entry.obj_id,
				      dip->hmp->asof,
				      cursor.data->entry.localization,
				      0, &error);
		hammer_lock_sh(&cursor.ip->lock);
		if (error == ENOENT) {
			kprintf("obj_id %016llx\n", cursor.data->entry.obj_id);
			Debugger("ENOENT unlinking object that should exist");
		}

		/*
		 * If we are trying to remove a directory the directory must
		 * be empty.
		 *
		 * WARNING: hammer_ip_check_directory_empty() may have to
		 * terminate the cursor to avoid a deadlock.  It is ok to
		 * call hammer_done_cursor() twice.
		 */
		if (error == 0 && ip->ino_data.obj_type ==
				  HAMMER_OBJTYPE_DIRECTORY) {
			error = hammer_ip_check_directory_empty(trans, ip);
		}

		/*
		 * Delete the directory entry.
		 *
		 * WARNING: hammer_ip_del_directory() may have to terminate
		 * the cursor to avoid a deadlock.  It is ok to call
		 * hammer_done_cursor() twice.
		 */
		if (error == 0) {
			error = hammer_ip_del_directory(trans, &cursor,
							dip, ip);
		}
		hammer_done_cursor(&cursor);
		if (error == 0) {
			cache_setunresolved(nch);
			cache_setvp(nch, NULL);
			/* XXX locking */
			if (ip->vp)
				cache_inval_vp(ip->vp, CINV_DESTROY);
		}
		if (ip)
			hammer_rel_inode(ip, 0);
	} else {
		hammer_done_cursor(&cursor);
	}
	if (error == EDEADLK)
		goto retry;

	return (error);
}

/************************************************************************
 *			    FIFO AND SPECFS OPS				*
 ************************************************************************
 *
 */

static int
hammer_vop_fifoclose (struct vop_close_args *ap)
{
	/* XXX update itimes */
	return (VOCALL(&fifo_vnode_vops, &ap->a_head));
}

static int
hammer_vop_fiforead (struct vop_read_args *ap)
{
	int error;

	error = VOCALL(&fifo_vnode_vops, &ap->a_head);
	/* XXX update access time */
	return (error);
}

static int
hammer_vop_fifowrite (struct vop_write_args *ap)
{
	int error;

	error = VOCALL(&fifo_vnode_vops, &ap->a_head);
	/* XXX update access time */
	return (error);
}

static int
hammer_vop_specclose (struct vop_close_args *ap)
{
	/* XXX update itimes */
	return (VOCALL(&spec_vnode_vops, &ap->a_head));
}

static int
hammer_vop_specread (struct vop_read_args *ap)
{
	/* XXX update access time */
	return (VOCALL(&spec_vnode_vops, &ap->a_head));
}

static int
hammer_vop_specwrite (struct vop_write_args *ap)
{
	/* XXX update last change time */
	return (VOCALL(&spec_vnode_vops, &ap->a_head));
}