/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_vnops.c,v 1.71 2008/06/17 04:02:38 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/fcntl.h>
#include <sys/namecache.h>
#include <sys/vnode.h>
#include <sys/lockf.h>
#include <sys/event.h>
#include <sys/stat.h>
#include <sys/dirent.h>
#include <vm/vm_extern.h>
#include <vfs/fifofs/fifo.h>

#include "hammer.h"

/*static int hammer_vop_vnoperate(struct vop_generic_args *);*/
static int hammer_vop_fsync(struct vop_fsync_args *);
static int hammer_vop_read(struct vop_read_args *);
static int hammer_vop_write(struct vop_write_args *);
static int hammer_vop_access(struct vop_access_args *);
static int hammer_vop_advlock(struct vop_advlock_args *);
static int hammer_vop_close(struct vop_close_args *);
static int hammer_vop_ncreate(struct vop_ncreate_args *);
static int hammer_vop_getattr(struct vop_getattr_args *);
static int hammer_vop_nresolve(struct vop_nresolve_args *);
static int hammer_vop_nlookupdotdot(struct vop_nlookupdotdot_args *);
static int hammer_vop_nlink(struct vop_nlink_args *);
static int hammer_vop_nmkdir(struct vop_nmkdir_args *);
static int hammer_vop_nmknod(struct vop_nmknod_args *);
static int hammer_vop_open(struct vop_open_args *);
static int hammer_vop_pathconf(struct vop_pathconf_args *);
static int hammer_vop_print(struct vop_print_args *);
static int hammer_vop_readdir(struct vop_readdir_args *);
static int hammer_vop_readlink(struct vop_readlink_args *);
static int hammer_vop_nremove(struct vop_nremove_args *);
static int hammer_vop_nrename(struct vop_nrename_args *);
static int hammer_vop_nrmdir(struct vop_nrmdir_args *);
static int hammer_vop_setattr(struct vop_setattr_args *);
static int hammer_vop_strategy(struct vop_strategy_args *);
static int hammer_vop_bmap(struct vop_bmap_args *ap);
static int hammer_vop_nsymlink(struct vop_nsymlink_args *);
static int hammer_vop_nwhiteout(struct vop_nwhiteout_args *);
static int hammer_vop_ioctl(struct vop_ioctl_args *);
static int hammer_vop_mountctl(struct vop_mountctl_args *);

static int hammer_vop_fifoclose (struct vop_close_args *);
static int hammer_vop_fiforead (struct vop_read_args *);
static int hammer_vop_fifowrite (struct vop_write_args *);

static int hammer_vop_specclose (struct vop_close_args *);
static int hammer_vop_specread (struct vop_read_args *);
static int hammer_vop_specwrite (struct vop_write_args *);

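/*
 * Vnode operations vectors.  hammer_vnode_vops covers regular files and
 * directories.  The spec and fifo vectors below default through to
 * specfs/fifofs for the actual device/fifo I/O but retain HAMMER's
 * attribute, fsync and inode management operations.
 */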
struct vop_ops hammer_vnode_vops = {
	.vop_default = vop_defaultop,
	.vop_fsync = hammer_vop_fsync,
	.vop_getpages = vop_stdgetpages,
	.vop_putpages = vop_stdputpages,
	.vop_read = hammer_vop_read,
	.vop_write = hammer_vop_write,
	.vop_access = hammer_vop_access,
	.vop_advlock = hammer_vop_advlock,
	.vop_close = hammer_vop_close,
	.vop_ncreate = hammer_vop_ncreate,
	.vop_getattr = hammer_vop_getattr,
	.vop_inactive = hammer_vop_inactive,
	.vop_reclaim = hammer_vop_reclaim,
	.vop_nresolve = hammer_vop_nresolve,
	.vop_nlookupdotdot = hammer_vop_nlookupdotdot,
	.vop_nlink = hammer_vop_nlink,
	.vop_nmkdir = hammer_vop_nmkdir,
	.vop_nmknod = hammer_vop_nmknod,
	.vop_open = hammer_vop_open,
	.vop_pathconf = hammer_vop_pathconf,
	.vop_print = hammer_vop_print,
	.vop_readdir = hammer_vop_readdir,
	.vop_readlink = hammer_vop_readlink,
	.vop_nremove = hammer_vop_nremove,
	.vop_nrename = hammer_vop_nrename,
	.vop_nrmdir = hammer_vop_nrmdir,
	.vop_setattr = hammer_vop_setattr,
	.vop_bmap = hammer_vop_bmap,
	.vop_strategy = hammer_vop_strategy,
	.vop_nsymlink = hammer_vop_nsymlink,
	.vop_nwhiteout = hammer_vop_nwhiteout,
	.vop_ioctl = hammer_vop_ioctl,
	.vop_mountctl = hammer_vop_mountctl
};

struct vop_ops hammer_spec_vops = {
	.vop_default = spec_vnoperate,
	.vop_fsync = hammer_vop_fsync,
	.vop_read = hammer_vop_specread,
	.vop_write = hammer_vop_specwrite,
	.vop_access = hammer_vop_access,
	.vop_close = hammer_vop_specclose,
	.vop_getattr = hammer_vop_getattr,
	.vop_inactive = hammer_vop_inactive,
	.vop_reclaim = hammer_vop_reclaim,
	.vop_setattr = hammer_vop_setattr
};

struct vop_ops hammer_fifo_vops = {
	.vop_default = fifo_vnoperate,
	.vop_fsync = hammer_vop_fsync,
	.vop_read = hammer_vop_fiforead,
	.vop_write = hammer_vop_fifowrite,
	.vop_access = hammer_vop_access,
	.vop_close = hammer_vop_fifoclose,
	.vop_getattr = hammer_vop_getattr,
	.vop_inactive = hammer_vop_inactive,
	.vop_reclaim = hammer_vop_reclaim,
	.vop_setattr = hammer_vop_setattr
};

#ifdef DEBUG_TRUNCATE
struct hammer_inode *HammerTruncIp;
#endif

static int hammer_dounlink(hammer_transaction_t trans, struct nchandle *nch,
			struct vnode *dvp, struct ucred *cred, int flags);
static int hammer_vop_strategy_read(struct vop_strategy_args *ap);
static int hammer_vop_strategy_write(struct vop_strategy_args *ap);
static void hammer_cleanup_write_io(hammer_inode_t ip);
static void hammer_update_rsv_databufs(hammer_inode_t ip);

#if 0
static
int
hammer_vop_vnoperate(struct vop_generic_args *ap)
{
	return (VOCALL(&hammer_vnode_vops, ap));
}
#endif

/*
 * hammer_vop_fsync { vp, waitfor }
 */
static
int
hammer_vop_fsync(struct vop_fsync_args *ap)
{
	hammer_inode_t ip = VTOI(ap->a_vp);

	vfsync(ap->a_vp, ap->a_waitfor, 1, NULL, NULL);
	hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
	if (ap->a_waitfor == MNT_WAIT)
		hammer_wait_inode(ip);
	return (ip->error);
}

/*
 * hammer_vop_read { vp, uio, ioflag, cred }
 */
static
int
hammer_vop_read(struct vop_read_args *ap)
{
	struct hammer_transaction trans;
	hammer_inode_t ip;
	off_t offset;
	struct buf *bp;
	struct uio *uio;
	int error;
	int n;
	int seqcount;

	if (ap->a_vp->v_type != VREG)
		return (EINVAL);
	ip = VTOI(ap->a_vp);
	error = 0;
	uio = ap->a_uio;
	seqcount = ap->a_ioflag >> 16;

	hammer_start_transaction(&trans, ip->hmp);

	/*
	 * Access the data in HAMMER_BUFSIZE blocks via the buffer cache.
	 */
	while (uio->uio_resid > 0 && uio->uio_offset < ip->ino_data.size) {
		offset = uio->uio_offset & HAMMER_BUFMASK;
		if (hammer_debug_cluster_enable) {
			error = cluster_read(ap->a_vp, ip->ino_data.size,
					     uio->uio_offset - offset,
					     HAMMER_BUFSIZE,
					     MAXBSIZE, seqcount, &bp);
		} else {
			error = bread(ap->a_vp, uio->uio_offset - offset,
				      HAMMER_BUFSIZE, &bp);
		}
		if (error) {
			brelse(bp);
			break;
		}

		/* bp->b_flags |= B_CLUSTEROK; temporarily disabled */
		n = HAMMER_BUFSIZE - offset;
		if (n > uio->uio_resid)
			n = uio->uio_resid;
		if (n > ip->ino_data.size - uio->uio_offset)
			n = (int)(ip->ino_data.size - uio->uio_offset);
		error = uiomove((char *)bp->b_data + offset, n, uio);

		/* data has a lower priority than meta-data */
		bp->b_flags |= B_AGE;
		bqrelse(bp);
		if (error)
			break;
	}
	if ((ip->flags & HAMMER_INODE_RO) == 0 &&
	    (ip->hmp->mp->mnt_flag & MNT_NOATIME) == 0) {
		ip->ino_leaf.atime = trans.time;
		hammer_modify_inode(ip, HAMMER_INODE_ITIMES);
	}
	hammer_done_transaction(&trans);
	return (error);
}

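/*
 * Note on the (ap->a_ioflag >> 16) extraction used above and in
 * hammer_vop_write(): the upper 16 bits of the ioflag carry the kernel's
 * sequential-access heuristic (seqcount), which is fed to cluster_read()
 * to size read-ahead.  A sketch of the encoding as assumed here:
 *
 *	ioflag   = (seqcount << 16) | IO_xxx flags;
 *	seqcount = ioflag >> 16;
 */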
/*
 * hammer_vop_write { vp, uio, ioflag, cred }
 */
static
int
hammer_vop_write(struct vop_write_args *ap)
{
	struct hammer_transaction trans;
	struct hammer_inode *ip;
	struct uio *uio;
	int rel_offset;
	int64_t base_offset;
	struct buf *bp;
	int error;
	int flags;
	int n;
	int count;
	int seqcount;

	if (ap->a_vp->v_type != VREG)
		return (EINVAL);
	ip = VTOI(ap->a_vp);
	uio = ap->a_uio;
	error = 0;
	count = 0;
	seqcount = ap->a_ioflag >> 16;

	if (ip->flags & HAMMER_INODE_RO)
		return (EROFS);

	/*
	 * Create a transaction to cover the operations we perform.
	 */
	hammer_start_transaction(&trans, ip->hmp);

	/*
	 * Check append mode
	 */
	if (ap->a_ioflag & IO_APPEND)
		uio->uio_offset = ip->ino_data.size;

	/*
	 * Check for illegal write offsets.  Valid range is 0...2^63-1.
	 *
	 * NOTE: the base_off assignment is required to work around what
	 * I consider to be a GCC-4 optimization bug.
	 */
	if (uio->uio_offset < 0) {
		hammer_done_transaction(&trans);
		return (EFBIG);
	}
	base_offset = uio->uio_offset + uio->uio_resid; /* work around gcc-4 */
	if (uio->uio_resid > 0 && base_offset <= 0) {
		hammer_done_transaction(&trans);
		return (EFBIG);
	}

	/*
	 * Access the data in HAMMER_BUFSIZE blocks via the buffer cache.
	 */
	while (uio->uio_resid > 0) {
		int fixsize = 0;

		if ((error = hammer_checkspace(trans.hmp)) != 0)
			break;

		/*
		 * Do not allow HAMMER to blow out the buffer cache.
		 *
		 * Do not allow HAMMER to blow out system memory by
		 * accumulating too many records.  Records are decoupled
		 * from the buffer cache.
		 *
		 * Always check at the beginning so separate writes are
		 * not able to bypass this code.
		 *
		 * WARNING: Cannot unlock vp when doing a NOCOPY write as
		 * part of a putpages operation.  Doing so could cause us
		 * to deadlock against the VM system when we try to re-lock.
		 */
		if ((count++ & 15) == 0) {
			if (uio->uio_segflg != UIO_NOCOPY) {
				vn_unlock(ap->a_vp);
				if ((ap->a_ioflag & IO_NOBWILL) == 0)
					bwillwrite();
			}
			if (ip->rsv_recs > hammer_limit_irecs)
				hammer_wait_inode_recs(ip);
			if (uio->uio_segflg != UIO_NOCOPY)
				vn_lock(ap->a_vp, LK_EXCLUSIVE|LK_RETRY);
		}

		rel_offset = (int)(uio->uio_offset & HAMMER_BUFMASK);
		base_offset = uio->uio_offset & ~HAMMER_BUFMASK64;
		n = HAMMER_BUFSIZE - rel_offset;
		if (n > uio->uio_resid)
			n = uio->uio_resid;
		if (uio->uio_offset + n > ip->ino_data.size) {
			vnode_pager_setsize(ap->a_vp, uio->uio_offset + n);
			fixsize = 1;
		}

		if (uio->uio_segflg == UIO_NOCOPY) {
			/*
			 * Issuing a write with the same data backing the
			 * buffer.  Instantiate the buffer to collect the
			 * backing vm pages, then read-in any missing bits.
			 *
			 * This case is used by vop_stdputpages().
			 */
			bp = getblk(ap->a_vp, base_offset,
				    HAMMER_BUFSIZE, GETBLK_BHEAVY, 0);
			if ((bp->b_flags & B_CACHE) == 0) {
				bqrelse(bp);
				error = bread(ap->a_vp, base_offset,
					      HAMMER_BUFSIZE, &bp);
			}
		} else if (rel_offset == 0 && uio->uio_resid >= HAMMER_BUFSIZE) {
			/*
			 * Even though we are entirely overwriting the buffer
			 * we may still have to zero it out to avoid a
			 * mmap/write visibility issue.
			 */
			bp = getblk(ap->a_vp, base_offset,
				    HAMMER_BUFSIZE, GETBLK_BHEAVY, 0);
			if ((bp->b_flags & B_CACHE) == 0)
				vfs_bio_clrbuf(bp);
		} else if (base_offset >= ip->ino_data.size) {
			/*
			 * If the base offset of the buffer is beyond the
			 * file EOF, we don't have to issue a read.
			 */
			bp = getblk(ap->a_vp, base_offset,
				    HAMMER_BUFSIZE, GETBLK_BHEAVY, 0);
			vfs_bio_clrbuf(bp);
		} else {
			/*
			 * Partial overwrite, read in any missing bits then
			 * replace the portion being written.
			 */
			error = bread(ap->a_vp, base_offset,
				      HAMMER_BUFSIZE, &bp);
			if (error == 0)
				bheavy(bp);
		}
		if (error == 0) {
			error = uiomove((char *)bp->b_data + rel_offset,
					n, uio);
		}

		/*
		 * If we screwed up we have to undo any VM size changes we
		 * made.
		 */
		if (error) {
			brelse(bp);
			if (fixsize) {
				vtruncbuf(ap->a_vp, ip->ino_data.size,
					  HAMMER_BUFSIZE);
			}
			break;
		}
		/* bp->b_flags |= B_CLUSTEROK; temporarily disabled */
		if (ip->ino_data.size < uio->uio_offset) {
			ip->ino_data.size = uio->uio_offset;
			flags = HAMMER_INODE_DDIRTY;
			vnode_pager_setsize(ap->a_vp, ip->ino_data.size);
		} else {
			flags = 0;
		}
		ip->ino_data.mtime = trans.time;
		flags |= HAMMER_INODE_ITIMES | HAMMER_INODE_BUFS;
		flags |= HAMMER_INODE_DDIRTY;	/* XXX mtime */
		hammer_modify_inode(ip, flags);

		/*
		 * Try to keep track of cached dirty data.
		 */
		if ((bp->b_flags & B_DIRTY) == 0) {
			++ip->rsv_databufs;
			++ip->hmp->rsv_databufs;
		}

		/*
		 * Final buffer disposition.
		 *
		 * If write_mode is non-zero we call bawrite()
		 * unconditionally.  Otherwise we only use bawrite()
		 * if the writes are clearly sequential.
		 */
		bp->b_flags |= B_AGE;
		if (ap->a_ioflag & IO_SYNC) {
			bwrite(bp);
		} else if (ap->a_ioflag & IO_DIRECT) {
			bawrite(bp);
		} else if (hammer_write_mode &&
			   (uio->uio_offset & HAMMER_BUFMASK) == 0) {
#if 0
			/* strategy write cannot handle clustered writes */
			bp->b_flags |= B_CLUSTEROK;
			cluster_write(bp, ip->ino_data.size, seqcount);
#else
			bawrite(bp);
#endif
		} else if ((ap->a_ioflag >> 16) == IO_SEQMAX &&
			   (uio->uio_offset & HAMMER_BUFMASK) == 0) {
			/*
			 * If seqcount indicates sequential operation and
			 * we just finished filling a buffer, push it out
			 * now to prevent the buffer cache from becoming
			 * too full, which would trigger non-optimal
			 * flushes.
			 */
			bawrite(bp);
		} else {
			bdwrite(bp);
		}
	}
	hammer_done_transaction(&trans);
	return (error);
}

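/*
 * Worked example for the EFBIG checks above: a write with
 * uio_offset = 0x7FFFFFFFFFFFFFF0 and uio_resid = 0x20 yields
 * base_offset = 0x8000000000000010, which is negative as a signed 64 bit
 * quantity, so the write is rejected instead of being allowed to wrap
 * past the 2^63-1 file size limit.
 */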
/*
 * hammer_vop_access { vp, mode, cred }
 */
static
int
hammer_vop_access(struct vop_access_args *ap)
{
	struct hammer_inode *ip = VTOI(ap->a_vp);
	uid_t uid;
	gid_t gid;
	int error;

	uid = hammer_to_unix_xid(&ip->ino_data.uid);
	gid = hammer_to_unix_xid(&ip->ino_data.gid);

	error = vop_helper_access(ap, uid, gid, ip->ino_data.mode,
				  ip->ino_data.uflags);
	return (error);
}

/*
 * hammer_vop_advlock { vp, id, op, fl, flags }
 */
static
int
hammer_vop_advlock(struct vop_advlock_args *ap)
{
	struct hammer_inode *ip = VTOI(ap->a_vp);

	return (lf_advlock(ap, &ip->advlock, ip->ino_data.size));
}

/*
 * hammer_vop_close { vp, fflag }
 */
static
int
hammer_vop_close(struct vop_close_args *ap)
{
	return (vop_stdclose(ap));
}

/*
 * hammer_vop_ncreate { nch, dvp, vpp, cred, vap }
 *
 * The operating system has already ensured that the directory entry
 * does not exist and done all appropriate namespace locking.
 */
static
int
hammer_vop_ncreate(struct vop_ncreate_args *ap)
{
	struct hammer_transaction trans;
	struct hammer_inode *dip;
	struct hammer_inode *nip;
	struct nchandle *nch;
	int error;

	nch = ap->a_nch;
	dip = VTOI(ap->a_dvp);

	if (dip->flags & HAMMER_INODE_RO)
		return (EROFS);
	if ((error = hammer_checkspace(dip->hmp)) != 0)
		return (error);

	/*
	 * Create a transaction to cover the operations we perform.
	 */
	hammer_start_transaction(&trans, dip->hmp);

	/*
	 * Create a new filesystem object of the requested type.  The
	 * returned inode will be referenced and shared-locked to prevent
	 * it from being moved to the flusher.
	 */
	error = hammer_create_inode(&trans, ap->a_vap, ap->a_cred, dip, &nip);
	if (error) {
		hkprintf("hammer_create_inode error %d\n", error);
		hammer_done_transaction(&trans);
		*ap->a_vpp = NULL;
		return (error);
	}

	/*
	 * Add the new filesystem object to the directory.  This will also
	 * bump the inode's link count.
	 */
	error = hammer_ip_add_directory(&trans, dip, nch->ncp, nip);
	if (error)
		hkprintf("hammer_ip_add_directory error %d\n", error);

	/*
	 * Finish up.
	 */
	if (error) {
		hammer_rel_inode(nip, 0);
		hammer_done_transaction(&trans);
		*ap->a_vpp = NULL;
	} else {
		error = hammer_get_vnode(nip, ap->a_vpp);
		hammer_done_transaction(&trans);
		hammer_rel_inode(nip, 0);
		if (error == 0) {
			cache_setunresolved(ap->a_nch);
			cache_setvp(ap->a_nch, *ap->a_vpp);
		}
	}
	return (error);
}

/*
 * hammer_vop_getattr { vp, vap }
 *
 * Retrieve an inode's attribute information.  When accessing inodes
 * historically we fake the atime field to ensure consistent results.
 * The atime field is stored in the B-Tree element and allowed to be
 * updated without cycling the element.
 */
static
int
hammer_vop_getattr(struct vop_getattr_args *ap)
{
	struct hammer_inode *ip = VTOI(ap->a_vp);
	struct vattr *vap = ap->a_vap;

#if 0
	if (cache_check_fsmid_vp(ap->a_vp, &ip->fsmid) &&
	    (vp->v_mount->mnt_flag & MNT_RDONLY) == 0 &&
	    ip->obj_asof == XXX) {
		/* LAZYMOD */
	}
	hammer_itimes(ap->a_vp);
#endif

	vap->va_fsid = ip->hmp->fsid_udev;
	vap->va_fileid = ip->ino_leaf.base.obj_id;
	vap->va_mode = ip->ino_data.mode;
	vap->va_nlink = ip->ino_data.nlinks;
	vap->va_uid = hammer_to_unix_xid(&ip->ino_data.uid);
	vap->va_gid = hammer_to_unix_xid(&ip->ino_data.gid);
	vap->va_rmajor = 0;
	vap->va_rminor = 0;
	vap->va_size = ip->ino_data.size;
	if (ip->flags & HAMMER_INODE_RO)
		hammer_to_timespec(ip->ino_data.mtime, &vap->va_atime);
	else
		hammer_to_timespec(ip->ino_leaf.atime, &vap->va_atime);
	hammer_to_timespec(ip->ino_data.mtime, &vap->va_mtime);
	hammer_to_timespec(ip->ino_data.ctime, &vap->va_ctime);
	vap->va_flags = ip->ino_data.uflags;
	vap->va_gen = 1;	/* hammer inums are unique for all time */
	vap->va_blocksize = HAMMER_BUFSIZE;
	vap->va_bytes = (ip->ino_data.size + 63) & ~63;
	vap->va_type = hammer_get_vnode_type(ip->ino_data.obj_type);
	vap->va_filerev = 0;	/* XXX */
	/* mtime uniquely identifies any adjustments made to the file */
	vap->va_fsmid = ip->ino_data.mtime;
	vap->va_uid_uuid = ip->ino_data.uid;
	vap->va_gid_uuid = ip->ino_data.gid;
	vap->va_fsid_uuid = ip->hmp->fsid;
	vap->va_vaflags = VA_UID_UUID_VALID | VA_GID_UUID_VALID |
			  VA_FSID_UUID_VALID;

	switch (ip->ino_data.obj_type) {
	case HAMMER_OBJTYPE_CDEV:
	case HAMMER_OBJTYPE_BDEV:
		vap->va_rmajor = ip->ino_data.rmajor;
		vap->va_rminor = ip->ino_data.rminor;
		break;
	default:
		break;
	}

	return (0);
}

/*
 * hammer_vop_nresolve { nch, dvp, cred }
 *
 * Locate the requested directory entry.
 */
static
int
hammer_vop_nresolve(struct vop_nresolve_args *ap)
{
	struct hammer_transaction trans;
	struct namecache *ncp;
	hammer_inode_t dip;
	hammer_inode_t ip;
	hammer_tid_t asof;
	struct hammer_cursor cursor;
	struct vnode *vp;
	int64_t namekey;
	int64_t obj_id;
	int nlen;
	int flags;
	int error;
	int i;

	/*
	 * Misc initialization, plus handle as-of name extensions.  Look for
	 * the '@@' extension.  Note that as-of files and directories cannot
	 * be modified.
	 */
	dip = VTOI(ap->a_dvp);
	ncp = ap->a_nch->ncp;
	asof = dip->obj_asof;
	nlen = ncp->nc_nlen;
	flags = dip->flags;

	hammer_simple_transaction(&trans, dip->hmp);

	for (i = 0; i < nlen; ++i) {
		if (ncp->nc_name[i] == '@' && ncp->nc_name[i+1] == '@') {
			asof = hammer_str_to_tid(ncp->nc_name + i + 2);
			flags |= HAMMER_INODE_RO;
			break;
		}
	}
	nlen = i;

	/*
	 * If there is no path component the time extension is relative to
	 * the parent directory.
	 */
	if (nlen == 0) {
		ip = hammer_get_inode(&trans, &dip->cache[1], dip->obj_id,
				      asof, flags, &error);
		if (error == 0) {
			error = hammer_get_vnode(ip, &vp);
			hammer_rel_inode(ip, 0);
		} else {
			vp = NULL;
		}
		if (error == 0) {
			vn_unlock(vp);
			cache_setvp(ap->a_nch, vp);
			vrele(vp);
		}
		goto done;
	}

	/*
	 * Calculate the namekey and setup the key range for the scan.  This
	 * works kinda like a chained hash table where the lower 32 bits
	 * of the namekey synthesize the chain.
	 *
	 * The key range is inclusive of both key_beg and key_end.
	 */
	namekey = hammer_directory_namekey(ncp->nc_name, nlen);

	error = hammer_init_cursor(&trans, &cursor, &dip->cache[0], dip);
	cursor.key_beg.localization = HAMMER_LOCALIZE_MISC;
	cursor.key_beg.obj_id = dip->obj_id;
	cursor.key_beg.key = namekey;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
	cursor.key_beg.obj_type = 0;

	cursor.key_end = cursor.key_beg;
	cursor.key_end.key |= 0xFFFFFFFFULL;
	cursor.asof = asof;
	cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;

	/*
	 * Scan all matching records (the chain), locate the one matching
	 * the requested path component.
	 *
	 * The hammer_ip_*() functions merge in-memory records with on-disk
	 * records for the purposes of the search.
	 */
	obj_id = 0;

	if (error == 0) {
		error = hammer_ip_first(&cursor);
		while (error == 0) {
			error = hammer_ip_resolve_data(&cursor);
			if (error)
				break;
			if (nlen == cursor.leaf->data_len - HAMMER_ENTRY_NAME_OFF &&
			    bcmp(ncp->nc_name, cursor.data->entry.name, nlen) == 0) {
				obj_id = cursor.data->entry.obj_id;
				break;
			}
			error = hammer_ip_next(&cursor);
		}
	}
	hammer_done_cursor(&cursor);
	if (error == 0) {
		ip = hammer_get_inode(&trans, &dip->cache[1],
				      obj_id, asof, flags, &error);
		if (error == 0) {
			error = hammer_get_vnode(ip, &vp);
			hammer_rel_inode(ip, 0);
		} else {
			vp = NULL;
		}
		if (error == 0) {
			vn_unlock(vp);
			cache_setvp(ap->a_nch, vp);
			vrele(vp);
		}
	} else if (error == ENOENT) {
		cache_setvp(ap->a_nch, NULL);
	}
done:
	hammer_done_transaction(&trans);
	return (error);
}

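/*
 * Illustration of the directory key layout assumed by the chained-hash
 * comment above (see hammer_directory_namekey() for the authoritative
 * encoding):
 *
 *	63                            32 31                            0
 *	+-------------------------------+-------------------------------+
 *	|      hash of entry name       |   iterator (collision chain)  |
 *	+-------------------------------+-------------------------------+
 *
 * All entries whose names hash identically fall within the inclusive
 * range [namekey, namekey | 0xFFFFFFFF], which is why the scans set
 * key_end.key = key_beg.key | 0xFFFFFFFFULL and then bcmp() the stored
 * names to resolve collisions.
 */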
/*
 * hammer_vop_nlookupdotdot { dvp, vpp, cred }
 *
 * Locate the parent directory of a directory vnode.
 *
 * dvp is referenced but not locked.  *vpp must be returned referenced and
 * locked.  A parent_obj_id of 0 does not necessarily indicate that we are
 * at the root, instead it could indicate that the directory we were in was
 * removed.
 *
 * NOTE: as-of sequences are not linked into the directory structure.  If
 * we are at the root with a different asof than the mount point, reload
 * the same directory with the mount point's asof.   I'm not sure what this
 * will do to NFS.  We encode ASOF stamps in NFS file handles so it might not
 * get confused, but it hasn't been tested.
 */
static
int
hammer_vop_nlookupdotdot(struct vop_nlookupdotdot_args *ap)
{
	struct hammer_transaction trans;
	struct hammer_inode *dip;
	struct hammer_inode *ip;
	int64_t parent_obj_id;
	hammer_tid_t asof;
	int error;

	dip = VTOI(ap->a_dvp);
	asof = dip->obj_asof;
	parent_obj_id = dip->ino_data.parent_obj_id;

	if (parent_obj_id == 0) {
		if (dip->obj_id == HAMMER_OBJID_ROOT &&
		    asof != dip->hmp->asof) {
			parent_obj_id = dip->obj_id;
			asof = dip->hmp->asof;
			*ap->a_fakename = kmalloc(19, M_TEMP, M_WAITOK);
			ksnprintf(*ap->a_fakename, 19, "0x%016llx",
				  dip->obj_asof);
		} else {
			*ap->a_vpp = NULL;
			return ENOENT;
		}
	}

	hammer_simple_transaction(&trans, dip->hmp);

	ip = hammer_get_inode(&trans, &dip->cache[1], parent_obj_id,
			      asof, dip->flags, &error);
	if (ip) {
		error = hammer_get_vnode(ip, ap->a_vpp);
		hammer_rel_inode(ip, 0);
	} else {
		*ap->a_vpp = NULL;
	}
	hammer_done_transaction(&trans);
	return (error);
}

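/*
 * Illustrative use of the '@@' as-of extension handled by
 * hammer_vop_nresolve() above (the exact path shown is an example, not
 * a specification):
 *
 *	ls /mnt/somefile@@0x00000001061a8ba0
 *
 * resolves the object as of the given transaction id and marks the
 * resulting inode HAMMER_INODE_RO; such historical views cannot be
 * modified.
 */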
/*
 * hammer_vop_nlink { nch, dvp, vp, cred }
 */
static
int
hammer_vop_nlink(struct vop_nlink_args *ap)
{
	struct hammer_transaction trans;
	struct hammer_inode *dip;
	struct hammer_inode *ip;
	struct nchandle *nch;
	int error;

	nch = ap->a_nch;
	dip = VTOI(ap->a_dvp);
	ip = VTOI(ap->a_vp);

	if (dip->flags & HAMMER_INODE_RO)
		return (EROFS);
	if (ip->flags & HAMMER_INODE_RO)
		return (EROFS);
	if ((error = hammer_checkspace(dip->hmp)) != 0)
		return (error);

	/*
	 * Create a transaction to cover the operations we perform.
	 */
	hammer_start_transaction(&trans, dip->hmp);

	/*
	 * Add the filesystem object to the directory.  Note that neither
	 * dip nor ip are referenced or locked, but their vnodes are
	 * referenced.  This function will bump the inode's link count.
	 */
	error = hammer_ip_add_directory(&trans, dip, nch->ncp, ip);

	/*
	 * Finish up.
	 */
	if (error == 0) {
		cache_setunresolved(nch);
		cache_setvp(nch, ap->a_vp);
	}
	hammer_done_transaction(&trans);
	return (error);
}

/*
 * hammer_vop_nmkdir { nch, dvp, vpp, cred, vap }
 *
 * The operating system has already ensured that the directory entry
 * does not exist and done all appropriate namespace locking.
 */
static
int
hammer_vop_nmkdir(struct vop_nmkdir_args *ap)
{
	struct hammer_transaction trans;
	struct hammer_inode *dip;
	struct hammer_inode *nip;
	struct nchandle *nch;
	int error;

	nch = ap->a_nch;
	dip = VTOI(ap->a_dvp);

	if (dip->flags & HAMMER_INODE_RO)
		return (EROFS);
	if ((error = hammer_checkspace(dip->hmp)) != 0)
		return (error);

	/*
	 * Create a transaction to cover the operations we perform.
	 */
	hammer_start_transaction(&trans, dip->hmp);

	/*
	 * Create a new filesystem object of the requested type.  The
	 * returned inode will be referenced but not locked.
	 */
	error = hammer_create_inode(&trans, ap->a_vap, ap->a_cred, dip, &nip);
	if (error) {
		hkprintf("hammer_mkdir error %d\n", error);
		hammer_done_transaction(&trans);
		*ap->a_vpp = NULL;
		return (error);
	}
	/*
	 * Add the new filesystem object to the directory.  This will also
	 * bump the inode's link count.
	 */
	error = hammer_ip_add_directory(&trans, dip, nch->ncp, nip);
	if (error)
		hkprintf("hammer_mkdir (add) error %d\n", error);

	/*
	 * Finish up.
	 */
	if (error) {
		hammer_rel_inode(nip, 0);
		*ap->a_vpp = NULL;
	} else {
		error = hammer_get_vnode(nip, ap->a_vpp);
		hammer_rel_inode(nip, 0);
		if (error == 0) {
			cache_setunresolved(ap->a_nch);
			cache_setvp(ap->a_nch, *ap->a_vpp);
		}
	}
	hammer_done_transaction(&trans);
	return (error);
}

/*
 * hammer_vop_nmknod { nch, dvp, vpp, cred, vap }
 *
 * The operating system has already ensured that the directory entry
 * does not exist and done all appropriate namespace locking.
 */
static
int
hammer_vop_nmknod(struct vop_nmknod_args *ap)
{
	struct hammer_transaction trans;
	struct hammer_inode *dip;
	struct hammer_inode *nip;
	struct nchandle *nch;
	int error;

	nch = ap->a_nch;
	dip = VTOI(ap->a_dvp);

	if (dip->flags & HAMMER_INODE_RO)
		return (EROFS);
	if ((error = hammer_checkspace(dip->hmp)) != 0)
		return (error);

	/*
	 * Create a transaction to cover the operations we perform.
	 */
	hammer_start_transaction(&trans, dip->hmp);

	/*
	 * Create a new filesystem object of the requested type.  The
	 * returned inode will be referenced but not locked.
	 */
	error = hammer_create_inode(&trans, ap->a_vap, ap->a_cred, dip, &nip);
	if (error) {
		hammer_done_transaction(&trans);
		*ap->a_vpp = NULL;
		return (error);
	}

	/*
	 * Add the new filesystem object to the directory.  This will also
	 * bump the inode's link count.
	 */
	error = hammer_ip_add_directory(&trans, dip, nch->ncp, nip);

	/*
	 * Finish up.
	 */
	if (error) {
		hammer_rel_inode(nip, 0);
		*ap->a_vpp = NULL;
	} else {
		error = hammer_get_vnode(nip, ap->a_vpp);
		hammer_rel_inode(nip, 0);
		if (error == 0) {
			cache_setunresolved(ap->a_nch);
			cache_setvp(ap->a_nch, *ap->a_vpp);
		}
	}
	hammer_done_transaction(&trans);
	return (error);
}

/*
 * hammer_vop_open { vp, mode, cred, fp }
 */
static
int
hammer_vop_open(struct vop_open_args *ap)
{
	hammer_inode_t ip;

	ip = VTOI(ap->a_vp);

	if ((ap->a_mode & FWRITE) && (ip->flags & HAMMER_INODE_RO))
		return (EROFS);
	return(vop_stdopen(ap));
}

/*
 * hammer_vop_pathconf { vp, name, retval }
 */
static
int
hammer_vop_pathconf(struct vop_pathconf_args *ap)
{
	return (EOPNOTSUPP);
}

/*
 * hammer_vop_print { vp }
 */
static
int
hammer_vop_print(struct vop_print_args *ap)
{
	return (EOPNOTSUPP);
}

/*
 * hammer_vop_readdir { vp, uio, cred, *eofflag, *ncookies, off_t **cookies }
 */
static
int
hammer_vop_readdir(struct vop_readdir_args *ap)
{
	struct hammer_transaction trans;
	struct hammer_cursor cursor;
	struct hammer_inode *ip;
	struct uio *uio;
	hammer_base_elm_t base;
	int error;
	int cookie_index;
	int ncookies;
	off_t *cookies;
	off_t saveoff;
	int r;

	ip = VTOI(ap->a_vp);
	uio = ap->a_uio;
	saveoff = uio->uio_offset;

	if (ap->a_ncookies) {
		ncookies = uio->uio_resid / 16 + 1;
		if (ncookies > 1024)
			ncookies = 1024;
		cookies = kmalloc(ncookies * sizeof(off_t), M_TEMP, M_WAITOK);
	} else {
		ncookies = -1;
		cookies = NULL;
	}
	cookie_index = 0;

	hammer_simple_transaction(&trans, ip->hmp);

	/*
	 * Handle artificial entries
	 */
	error = 0;
	if (saveoff == 0) {
		r = vop_write_dirent(&error, uio, ip->obj_id, DT_DIR, 1, ".");
		if (r)
			goto done;
		if (cookies)
			cookies[cookie_index] = saveoff;
		++saveoff;
		++cookie_index;
		if (cookie_index == ncookies)
			goto done;
	}
	if (saveoff == 1) {
		if (ip->ino_data.parent_obj_id) {
			r = vop_write_dirent(&error, uio,
					     ip->ino_data.parent_obj_id,
					     DT_DIR, 2, "..");
		} else {
			r = vop_write_dirent(&error, uio,
					     ip->obj_id, DT_DIR, 2, "..");
		}
		if (r)
			goto done;
		if (cookies)
			cookies[cookie_index] = saveoff;
		++saveoff;
		++cookie_index;
		if (cookie_index == ncookies)
			goto done;
	}

	/*
	 * Key range (begin and end inclusive) to scan.  Directory keys
	 * directly translate to a 64 bit 'seek' position.
	 */
	hammer_init_cursor(&trans, &cursor, &ip->cache[0], ip);
	cursor.key_beg.localization = HAMMER_LOCALIZE_MISC;
	cursor.key_beg.obj_id = ip->obj_id;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
	cursor.key_beg.obj_type = 0;
	cursor.key_beg.key = saveoff;

	cursor.key_end = cursor.key_beg;
	cursor.key_end.key = HAMMER_MAX_KEY;
	cursor.asof = ip->obj_asof;
	cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;

	error = hammer_ip_first(&cursor);

	while (error == 0) {
		error = hammer_ip_resolve_data(&cursor);
		if (error)
			break;
		base = &cursor.leaf->base;
		saveoff = base->key;
		KKASSERT(cursor.leaf->data_len > HAMMER_ENTRY_NAME_OFF);

		if (base->obj_id != ip->obj_id)
			panic("readdir: bad record at %p", cursor.node);

		r = vop_write_dirent(
			     &error, uio, cursor.data->entry.obj_id,
			     hammer_get_dtype(cursor.leaf->base.obj_type),
			     cursor.leaf->data_len - HAMMER_ENTRY_NAME_OFF,
			     (void *)cursor.data->entry.name);
		if (r)
			break;
		++saveoff;
		if (cookies)
			cookies[cookie_index] = base->key;
		++cookie_index;
		if (cookie_index == ncookies)
			break;
		error = hammer_ip_next(&cursor);
	}
	hammer_done_cursor(&cursor);

done:
	hammer_done_transaction(&trans);

	if (ap->a_eofflag)
		*ap->a_eofflag = (error == ENOENT);
	uio->uio_offset = saveoff;
	if (error && cookie_index == 0) {
		if (error == ENOENT)
			error = 0;
		if (cookies) {
			kfree(cookies, M_TEMP);
			*ap->a_ncookies = 0;
			*ap->a_cookies = NULL;
		}
	} else {
		if (error == ENOENT)
			error = 0;
		if (cookies) {
			*ap->a_ncookies = cookie_index;
			*ap->a_cookies = cookies;
		}
	}
	return (error);
}

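/*
 * Note: because directory keys double as 64 bit seek positions, the
 * cookies returned above are simply the B-Tree keys of the entries.
 * A subsequent readdir restarts the scan by seeding cursor.key_beg.key
 * with the saved offset, exactly as done with saveoff above.
 */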
/*
 * hammer_vop_readlink { vp, uio, cred }
 */
static
int
hammer_vop_readlink(struct vop_readlink_args *ap)
{
	struct hammer_transaction trans;
	struct hammer_cursor cursor;
	struct hammer_inode *ip;
	int error;

	ip = VTOI(ap->a_vp);

	/*
	 * Shortcut if the symlink data was stuffed into ino_data.
	 */
	if (ip->ino_data.size <= HAMMER_INODE_BASESYMLEN) {
		error = uiomove(ip->ino_data.ext.symlink,
				ip->ino_data.size, ap->a_uio);
		return (error);
	}

	/*
	 * Long version
	 */
	hammer_simple_transaction(&trans, ip->hmp);
	hammer_init_cursor(&trans, &cursor, &ip->cache[0], ip);

	/*
	 * Key range (begin and end inclusive) to scan.  Directory keys
	 * directly translate to a 64 bit 'seek' position.
	 */
	cursor.key_beg.localization = HAMMER_LOCALIZE_MISC; /* XXX */
	cursor.key_beg.obj_id = ip->obj_id;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_FIX;
	cursor.key_beg.obj_type = 0;
	cursor.key_beg.key = HAMMER_FIXKEY_SYMLINK;
	cursor.asof = ip->obj_asof;
	cursor.flags |= HAMMER_CURSOR_ASOF;

	error = hammer_ip_lookup(&cursor);
	if (error == 0) {
		error = hammer_ip_resolve_data(&cursor);
		if (error == 0) {
			KKASSERT(cursor.leaf->data_len >=
				 HAMMER_SYMLINK_NAME_OFF);
			error = uiomove(cursor.data->symlink.name,
					cursor.leaf->data_len -
						HAMMER_SYMLINK_NAME_OFF,
					ap->a_uio);
		}
	}
	hammer_done_cursor(&cursor);
	hammer_done_transaction(&trans);
	return (error);
}

/*
 * hammer_vop_nremove { nch, dvp, cred }
 */
static
int
hammer_vop_nremove(struct vop_nremove_args *ap)
{
	struct hammer_transaction trans;
	struct hammer_inode *dip;
	int error;

	dip = VTOI(ap->a_dvp);

	if (hammer_nohistory(dip) == 0 &&
	    (error = hammer_checkspace(dip->hmp)) != 0) {
		return (error);
	}

	hammer_start_transaction(&trans, dip->hmp);
	error = hammer_dounlink(&trans, ap->a_nch, ap->a_dvp, ap->a_cred, 0);
	hammer_done_transaction(&trans);

	return (error);
}

/*
 * hammer_vop_nrename { fnch, tnch, fdvp, tdvp, cred }
 */
static
int
hammer_vop_nrename(struct vop_nrename_args *ap)
{
	struct hammer_transaction trans;
	struct namecache *fncp;
	struct namecache *tncp;
	struct hammer_inode *fdip;
	struct hammer_inode *tdip;
	struct hammer_inode *ip;
	struct hammer_cursor cursor;
	int64_t namekey;
	int nlen;
	int error;

	fdip = VTOI(ap->a_fdvp);
	tdip = VTOI(ap->a_tdvp);
	fncp = ap->a_fnch->ncp;
	tncp = ap->a_tnch->ncp;
	ip = VTOI(fncp->nc_vp);
	KKASSERT(ip != NULL);

	if (fdip->flags & HAMMER_INODE_RO)
		return (EROFS);
	if (tdip->flags & HAMMER_INODE_RO)
		return (EROFS);
	if (ip->flags & HAMMER_INODE_RO)
		return (EROFS);
	if ((error = hammer_checkspace(fdip->hmp)) != 0)
		return (error);

	hammer_start_transaction(&trans, fdip->hmp);

	/*
	 * Remove tncp from the target directory and then link ip as
	 * tncp. XXX pass trans to dounlink
	 *
	 * Force the inode sync-time to match the transaction so it is
	 * in-sync with the creation of the target directory entry.
	 */
	error = hammer_dounlink(&trans, ap->a_tnch, ap->a_tdvp, ap->a_cred, 0);
	if (error == 0 || error == ENOENT) {
		error = hammer_ip_add_directory(&trans, tdip, tncp, ip);
		if (error == 0) {
			ip->ino_data.parent_obj_id = tdip->obj_id;
			hammer_modify_inode(ip, HAMMER_INODE_DDIRTY);
		}
	}
	if (error)
		goto failed; /* XXX */

	/*
	 * Locate the record in the originating directory and remove it.
	 *
	 * Calculate the namekey and setup the key range for the scan.  This
	 * works kinda like a chained hash table where the lower 32 bits
	 * of the namekey synthesize the chain.
	 *
	 * The key range is inclusive of both key_beg and key_end.
	 */
	namekey = hammer_directory_namekey(fncp->nc_name, fncp->nc_nlen);
retry:
	hammer_init_cursor(&trans, &cursor, &fdip->cache[0], fdip);
	cursor.key_beg.localization = HAMMER_LOCALIZE_MISC;
	cursor.key_beg.obj_id = fdip->obj_id;
	cursor.key_beg.key = namekey;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
	cursor.key_beg.obj_type = 0;

	cursor.key_end = cursor.key_beg;
	cursor.key_end.key |= 0xFFFFFFFFULL;
	cursor.asof = fdip->obj_asof;
	cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;

	/*
	 * Scan all matching records (the chain), locate the one matching
	 * the requested path component.
	 *
	 * The hammer_ip_*() functions merge in-memory records with on-disk
	 * records for the purposes of the search.
	 */
	error = hammer_ip_first(&cursor);
	while (error == 0) {
		if (hammer_ip_resolve_data(&cursor) != 0)
			break;
		nlen = cursor.leaf->data_len - HAMMER_ENTRY_NAME_OFF;
		KKASSERT(nlen > 0);
		if (fncp->nc_nlen == nlen &&
		    bcmp(fncp->nc_name, cursor.data->entry.name, nlen) == 0) {
			break;
		}
		error = hammer_ip_next(&cursor);
	}

	/*
	 * If all is ok we have to get the inode so we can adjust nlinks.
	 *
	 * WARNING: hammer_ip_del_directory() may have to terminate the
	 * cursor to avoid a recursion.  It's ok to call hammer_done_cursor()
	 * twice.
	 */
	if (error == 0)
		error = hammer_ip_del_directory(&trans, &cursor, fdip, ip);

	/*
	 * XXX A deadlock here will break rename's atomicity for the purposes
	 * of crash recovery.
	 */
	if (error == EDEADLK) {
		hammer_done_cursor(&cursor);
		goto retry;
	}

	/*
	 * Cleanup and tell the kernel that the rename succeeded.
	 */
	hammer_done_cursor(&cursor);
	if (error == 0)
		cache_rename(ap->a_fnch, ap->a_tnch);

failed:
	hammer_done_transaction(&trans);
	return (error);
}

/*
 * hammer_vop_nrmdir { nch, dvp, cred }
 */
static
int
hammer_vop_nrmdir(struct vop_nrmdir_args *ap)
{
	struct hammer_transaction trans;
	struct hammer_inode *dip;
	int error;

	dip = VTOI(ap->a_dvp);

	if (hammer_nohistory(dip) == 0 &&
	    (error = hammer_checkspace(dip->hmp)) != 0) {
		return (error);
	}

	hammer_start_transaction(&trans, dip->hmp);
	error = hammer_dounlink(&trans, ap->a_nch, ap->a_dvp, ap->a_cred, 0);
	hammer_done_transaction(&trans);

	return (error);
}

/*
 * hammer_vop_setattr { vp, vap, cred }
 */
static
int
hammer_vop_setattr(struct vop_setattr_args *ap)
{
	struct hammer_transaction trans;
	struct vattr *vap;
	struct hammer_inode *ip;
	int modflags;
	int error;
	int truncating;
	off_t aligned_size;
	u_int32_t flags;

	vap = ap->a_vap;
	ip = ap->a_vp->v_data;
	modflags = 0;

	if (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY)
		return (EROFS);
	if (ip->flags & HAMMER_INODE_RO)
		return (EROFS);
	if (hammer_nohistory(ip) == 0 &&
	    (error = hammer_checkspace(ip->hmp)) != 0) {
		return (error);
	}

	hammer_start_transaction(&trans, ip->hmp);
	error = 0;

	if (vap->va_flags != VNOVAL) {
		flags = ip->ino_data.uflags;
		error = vop_helper_setattr_flags(&flags, vap->va_flags,
					 hammer_to_unix_xid(&ip->ino_data.uid),
					 ap->a_cred);
		if (error == 0) {
			if (ip->ino_data.uflags != flags) {
				ip->ino_data.uflags = flags;
				modflags |= HAMMER_INODE_DDIRTY;
			}
			if (ip->ino_data.uflags & (IMMUTABLE | APPEND)) {
				error = 0;
				goto done;
			}
		}
		goto done;
	}
	if (ip->ino_data.uflags & (IMMUTABLE | APPEND)) {
		error = EPERM;
		goto done;
	}
	if (vap->va_uid != (uid_t)VNOVAL || vap->va_gid != (gid_t)VNOVAL) {
		mode_t cur_mode = ip->ino_data.mode;
		uid_t cur_uid = hammer_to_unix_xid(&ip->ino_data.uid);
		gid_t cur_gid = hammer_to_unix_xid(&ip->ino_data.gid);
		uuid_t uuid_uid;
		uuid_t uuid_gid;

		error = vop_helper_chown(ap->a_vp, vap->va_uid, vap->va_gid,
					 ap->a_cred,
					 &cur_uid, &cur_gid, &cur_mode);
		if (error == 0) {
			hammer_guid_to_uuid(&uuid_uid, cur_uid);
			hammer_guid_to_uuid(&uuid_gid, cur_gid);
			if (bcmp(&uuid_uid, &ip->ino_data.uid,
				 sizeof(uuid_uid)) ||
			    bcmp(&uuid_gid, &ip->ino_data.gid,
				 sizeof(uuid_gid)) ||
			    ip->ino_data.mode != cur_mode
			) {
				ip->ino_data.uid = uuid_uid;
				ip->ino_data.gid = uuid_gid;
				ip->ino_data.mode = cur_mode;
			}
			modflags |= HAMMER_INODE_DDIRTY;
		}
	}
	while (vap->va_size != VNOVAL && ip->ino_data.size != vap->va_size) {
		switch(ap->a_vp->v_type) {
		case VREG:
			if (vap->va_size == ip->ino_data.size)
				break;
			/*
			 * XXX break atomicity, we can deadlock the backend
			 * if we do not release the lock.  Probably not a
			 * big deal here.
			 */
			if (vap->va_size < ip->ino_data.size) {
				vtruncbuf(ap->a_vp, vap->va_size,
					  HAMMER_BUFSIZE);
				truncating = 1;
			} else {
				vnode_pager_setsize(ap->a_vp, vap->va_size);
				truncating = 0;
			}
			ip->ino_data.size = vap->va_size;
			modflags |= HAMMER_INODE_DDIRTY;
			aligned_size = (vap->va_size + HAMMER_BUFMASK) &
				       ~HAMMER_BUFMASK64;

			/*
			 * on-media truncation is cached in the inode until
			 * the inode is synchronized.
			 */
			if (truncating) {
				hammer_ip_frontend_trunc(ip, vap->va_size);
				hammer_update_rsv_databufs(ip);
#ifdef DEBUG_TRUNCATE
				if (HammerTruncIp == NULL)
					HammerTruncIp = ip;
#endif
				if ((ip->flags & HAMMER_INODE_TRUNCATED) == 0) {
					ip->flags |= HAMMER_INODE_TRUNCATED;
					ip->trunc_off = vap->va_size;
#ifdef DEBUG_TRUNCATE
					if (ip == HammerTruncIp)
					kprintf("truncate1 %016llx\n", ip->trunc_off);
#endif
				} else if (ip->trunc_off > vap->va_size) {
					ip->trunc_off = vap->va_size;
#ifdef DEBUG_TRUNCATE
					if (ip == HammerTruncIp)
					kprintf("truncate2 %016llx\n", ip->trunc_off);
#endif
				} else {
#ifdef DEBUG_TRUNCATE
					if (ip == HammerTruncIp)
					kprintf("truncate3 %016llx (ignored)\n", vap->va_size);
#endif
				}
			}

			/*
			 * If truncating we have to clean out a portion of
			 * the last block on-disk.  We do this in the
			 * front-end buffer cache.
			 */
			if (truncating && vap->va_size < aligned_size) {
				struct buf *bp;
				int offset;

				aligned_size -= HAMMER_BUFSIZE;

				offset = vap->va_size & HAMMER_BUFMASK;
				error = bread(ap->a_vp, aligned_size,
					      HAMMER_BUFSIZE, &bp);
				hammer_ip_frontend_trunc(ip, aligned_size);
				if (error == 0) {
					bzero(bp->b_data + offset,
					      HAMMER_BUFSIZE - offset);
					bdwrite(bp);
				} else {
					kprintf("ERROR %d\n", error);
					brelse(bp);
				}
			}
			break;
		case VDATABASE:
			if ((ip->flags & HAMMER_INODE_TRUNCATED) == 0) {
				ip->flags |= HAMMER_INODE_TRUNCATED;
				ip->trunc_off = vap->va_size;
			} else if (ip->trunc_off > vap->va_size) {
				ip->trunc_off = vap->va_size;
			}
			hammer_ip_frontend_trunc(ip, vap->va_size);
			ip->ino_data.size = vap->va_size;
			modflags |= HAMMER_INODE_DDIRTY;
			break;
		default:
			error = EINVAL;
			goto done;
		}
		break;
	}
	if (vap->va_atime.tv_sec != VNOVAL) {
		ip->ino_leaf.atime =
			hammer_timespec_to_transid(&vap->va_atime);
		modflags |= HAMMER_INODE_ITIMES;
	}
	if (vap->va_mtime.tv_sec != VNOVAL) {
		ip->ino_data.mtime =
			hammer_timespec_to_transid(&vap->va_mtime);
		modflags |= HAMMER_INODE_ITIMES;
		modflags |= HAMMER_INODE_DDIRTY;	/* XXX mtime */
	}
	if (vap->va_mode != (mode_t)VNOVAL) {
		mode_t cur_mode = ip->ino_data.mode;
		uid_t cur_uid = hammer_to_unix_xid(&ip->ino_data.uid);
		gid_t cur_gid = hammer_to_unix_xid(&ip->ino_data.gid);

		error = vop_helper_chmod(ap->a_vp, vap->va_mode, ap->a_cred,
					 cur_uid, cur_gid, &cur_mode);
		if (error == 0 && ip->ino_data.mode != cur_mode) {
			ip->ino_data.mode = cur_mode;
			modflags |= HAMMER_INODE_DDIRTY;
		}
	}
done:
	if (error == 0)
		hammer_modify_inode(ip, modflags);
	hammer_done_transaction(&trans);
	return (error);
}

/*
 * hammer_vop_nsymlink { nch, dvp, vpp, cred, vap, target }
 */
static
int
hammer_vop_nsymlink(struct vop_nsymlink_args *ap)
{
	struct hammer_transaction trans;
	struct hammer_inode *dip;
	struct hammer_inode *nip;
	struct nchandle *nch;
	hammer_record_t record;
	int error;
	int bytes;

	ap->a_vap->va_type = VLNK;

	nch = ap->a_nch;
	dip = VTOI(ap->a_dvp);

	if (dip->flags & HAMMER_INODE_RO)
		return (EROFS);
	if ((error = hammer_checkspace(dip->hmp)) != 0)
		return (error);

	/*
	 * Create a transaction to cover the operations we perform.
	 */
	hammer_start_transaction(&trans, dip->hmp);

	/*
	 * Create a new filesystem object of the requested type.  The
	 * returned inode will be referenced but not locked.
	 */
	error = hammer_create_inode(&trans, ap->a_vap, ap->a_cred, dip, &nip);
	if (error) {
		hammer_done_transaction(&trans);
		*ap->a_vpp = NULL;
		return (error);
	}

	/*
	 * Add a record representing the symlink.  symlink stores the link
	 * as pure data, not a string, and is not \0-terminated.
	 */
	if (error == 0) {
		bytes = strlen(ap->a_target);

		if (bytes <= HAMMER_INODE_BASESYMLEN) {
			bcopy(ap->a_target, nip->ino_data.ext.symlink, bytes);
		} else {
			record = hammer_alloc_mem_record(nip, bytes);
			record->type = HAMMER_MEM_RECORD_GENERAL;

			record->leaf.base.localization = HAMMER_LOCALIZE_MISC;
			record->leaf.base.key = HAMMER_FIXKEY_SYMLINK;
			record->leaf.base.rec_type = HAMMER_RECTYPE_FIX;
			record->leaf.data_len = bytes;
			KKASSERT(HAMMER_SYMLINK_NAME_OFF == 0);
			bcopy(ap->a_target, record->data->symlink.name, bytes);
			error = hammer_ip_add_record(&trans, record);
		}

		/*
		 * Set the file size to the length of the link.
		 */
		if (error == 0) {
			nip->ino_data.size = bytes;
			hammer_modify_inode(nip, HAMMER_INODE_DDIRTY);
		}
	}
	if (error == 0)
		error = hammer_ip_add_directory(&trans, dip, nch->ncp, nip);

	/*
	 * Finish up.
	 */
	if (error) {
		hammer_rel_inode(nip, 0);
		*ap->a_vpp = NULL;
	} else {
		error = hammer_get_vnode(nip, ap->a_vpp);
		hammer_rel_inode(nip, 0);
		if (error == 0) {
			cache_setunresolved(ap->a_nch);
			cache_setvp(ap->a_nch, *ap->a_vpp);
		}
	}
	hammer_done_transaction(&trans);
	return (error);
}

/*
 * hammer_vop_nwhiteout { nch, dvp, cred, flags }
 */
static
int
hammer_vop_nwhiteout(struct vop_nwhiteout_args *ap)
{
	struct hammer_transaction trans;
	struct hammer_inode *dip;
	int error;

	dip = VTOI(ap->a_dvp);

	if (hammer_nohistory(dip) == 0 &&
	    (error = hammer_checkspace(dip->hmp)) != 0) {
		return (error);
	}

	hammer_start_transaction(&trans, dip->hmp);
	error = hammer_dounlink(&trans, ap->a_nch, ap->a_dvp,
				ap->a_cred, ap->a_flags);
	hammer_done_transaction(&trans);

	return (error);
}

/*
 * hammer_vop_ioctl { vp, command, data, fflag, cred }
 */
static
int
hammer_vop_ioctl(struct vop_ioctl_args *ap)
{
	struct hammer_inode *ip = ap->a_vp->v_data;

	return(hammer_ioctl(ip, ap->a_command, ap->a_data,
			    ap->a_fflag, ap->a_cred));
}

static
int
hammer_vop_mountctl(struct vop_mountctl_args *ap)
{
	struct mount *mp;
	int error;

	mp = ap->a_head.a_ops->head.vv_mount;

	switch(ap->a_op) {
	case MOUNTCTL_SET_EXPORT:
		if (ap->a_ctllen != sizeof(struct export_args))
			error = EINVAL;
		else
			error = hammer_vfs_export(mp, ap->a_op,
				      (const struct export_args *)ap->a_ctl);
		break;
	default:
		error = journal_mountctl(ap);
		break;
	}
	return(error);
}

/*
 * hammer_vop_strategy { vp, bio }
 *
 * Strategy call, used for regular file read & write only.  Note that the
 * bp may represent a cluster.
 *
 * To simplify operation and allow better optimizations in the future,
 * this code does not make any assumptions with regards to buffer alignment
 * or size.
 */
static
int
hammer_vop_strategy(struct vop_strategy_args *ap)
{
	struct buf *bp;
	int error;

	bp = ap->a_bio->bio_buf;

	switch(bp->b_cmd) {
	case BUF_CMD_READ:
		error = hammer_vop_strategy_read(ap);
		break;
	case BUF_CMD_WRITE:
		error = hammer_vop_strategy_write(ap);
		break;
	default:
		bp->b_error = error = EINVAL;
		bp->b_flags |= B_ERROR;
		biodone(ap->a_bio);
		break;
	}
	return (error);
}

/*
 * Read from a regular file.  Iterate the related records and fill in the
 * BIO/BUF.  Gaps are zero-filled.
 *
 * The support code in hammer_object.c should be used to deal with mixed
 * in-memory and on-disk records.
 */
static
int
hammer_vop_strategy_read(struct vop_strategy_args *ap)
{
	struct hammer_transaction trans;
	struct hammer_inode *ip;
	struct hammer_cursor cursor;
	hammer_base_elm_t base;
	struct bio *bio;
	struct bio *nbio;
	struct buf *bp;
	int64_t rec_offset;
	int64_t ran_end;
	int64_t tmp64;
	int error;
	int boff;
	int roff;
	int n;

	bio = ap->a_bio;
	bp = bio->bio_buf;
	ip = ap->a_vp->v_data;

	/*
	 * The zone-2 disk offset may have been set by the cluster code via
	 * a BMAP operation.  Take care not to confuse it with the bio_offset
	 * set by hammer_io_direct_write(), which is a device-relative offset.
	 *
	 * Checking the high bits should suffice.
	 */
	nbio = push_bio(bio);
	if ((nbio->bio_offset & HAMMER_OFF_ZONE_MASK) ==
	    HAMMER_ZONE_RAW_BUFFER) {
		error = hammer_io_direct_read(ip->hmp, nbio->bio_offset, bio);
		return (error);
	}

	/*
	 * Hard way
	 */
	hammer_simple_transaction(&trans, ip->hmp);
	hammer_init_cursor(&trans, &cursor, &ip->cache[1], ip);

	/*
	 * Key range (begin and end inclusive) to scan.  Note that the key's
	 * stored in the actual records represent BASE+LEN, not BASE.  The
	 * first record containing bio_offset will have a key > bio_offset.
	 */
	cursor.key_beg.localization = HAMMER_LOCALIZE_MISC;
	cursor.key_beg.obj_id = ip->obj_id;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.obj_type = 0;
	cursor.key_beg.key = bio->bio_offset + 1;
	cursor.asof = ip->obj_asof;
	cursor.flags |= HAMMER_CURSOR_ASOF;

	cursor.key_end = cursor.key_beg;
	KKASSERT(ip->ino_data.obj_type == HAMMER_OBJTYPE_REGFILE);
#if 0
	if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DBFILE) {
		cursor.key_beg.rec_type = HAMMER_RECTYPE_DB;
		cursor.key_end.rec_type = HAMMER_RECTYPE_DB;
		cursor.key_end.key = 0x7FFFFFFFFFFFFFFFLL;
	} else
#endif
	{
		ran_end = bio->bio_offset + bp->b_bufsize;
		cursor.key_beg.rec_type = HAMMER_RECTYPE_DATA;
		cursor.key_end.rec_type = HAMMER_RECTYPE_DATA;
		tmp64 = ran_end + MAXPHYS + 1;	/* work-around GCC-4 bug */
		if (tmp64 < ran_end)
			cursor.key_end.key = 0x7FFFFFFFFFFFFFFFLL;
		else
			cursor.key_end.key = ran_end + MAXPHYS + 1;
	}
	cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE;

	error = hammer_ip_first(&cursor);
	boff = 0;

	while (error == 0) {
		/*
		 * Get the base file offset of the record.  The key for
		 * data records is (base + bytes) rather than (base).
		 */
		base = &cursor.leaf->base;
		rec_offset = base->key - cursor.leaf->data_len;

		/*
		 * Calculate the gap, if any, and zero-fill it.
		 *
		 * n is the offset of the start of the record versus our
		 * current seek offset in the bio.
		 */
		n = (int)(rec_offset - (bio->bio_offset + boff));
		if (n > 0) {
			if (n > bp->b_bufsize - boff)
				n = bp->b_bufsize - boff;
			bzero((char *)bp->b_data + boff, n);
			boff += n;
			n = 0;
		}

		/*
		 * Calculate the data offset in the record and the number
		 * of bytes we can copy.
		 *
		 * There are two degenerate cases.  First, boff may already
		 * be at bp->b_bufsize.  Secondly, the data offset within
		 * the record may exceed the record's size.
		 */
		roff = -n;
		rec_offset += roff;
		n = cursor.leaf->data_len - roff;
		if (n <= 0) {
			kprintf("strategy_read: bad n=%d roff=%d\n", n, roff);
			n = 0;
		} else if (n > bp->b_bufsize - boff) {
			n = bp->b_bufsize - boff;
		}

		/*
		 * Deal with cached truncations.  This cool bit of code
		 * allows truncate()/ftruncate() to avoid having to sync
		 * the file.
		 *
		 * If the frontend is truncated then all backend records are
		 * subject to the frontend's truncation.
		 *
		 * If the backend is truncated then backend records on-disk
		 * (but not in-memory) are subject to the backend's
		 * truncation.  In-memory records owned by the backend
		 * represent data written after the truncation point on the
		 * backend and must not be truncated.
		 *
		 * Truncate operations deal with frontend buffer cache
		 * buffers and frontend-owned in-memory records synchronously.
		 */
		if (ip->flags & HAMMER_INODE_TRUNCATED) {
			if (hammer_cursor_ondisk(&cursor) ||
			    cursor.iprec->flush_state == HAMMER_FST_FLUSH) {
				if (ip->trunc_off <= rec_offset)
					n = 0;
				else if (ip->trunc_off < rec_offset + n)
					n = (int)(ip->trunc_off - rec_offset);
			}
		}
		if (ip->sync_flags & HAMMER_INODE_TRUNCATED) {
			if (hammer_cursor_ondisk(&cursor)) {
				if (ip->sync_trunc_off <= rec_offset)
					n = 0;
				else if (ip->sync_trunc_off < rec_offset + n)
					n = (int)(ip->sync_trunc_off - rec_offset);
			}
		}

		/*
		 * Try to issue a direct read into our bio if possible,
		 * otherwise resolve the element data into a hammer_buffer
		 * and copy.
		 */
		if (n && boff == 0 &&
		    ((cursor.leaf->data_offset + roff) & HAMMER_BUFMASK) == 0) {
			error = hammer_io_direct_read(
					trans.hmp,
					cursor.leaf->data_offset + roff,
					bio);
			goto done;
		} else if (n) {
			error = hammer_ip_resolve_data(&cursor);
			if (error == 0) {
				bcopy((char *)cursor.data + roff,
				      (char *)bp->b_data + boff, n);
			}
		}
		if (error)
			break;

		/*
		 * Iterate until we have filled the request.
		 */
		boff += n;
		if (boff == bp->b_bufsize)
			break;
		error = hammer_ip_next(&cursor);
	}

	/*
	 * There may have been a gap after the last record
	 */
	if (error == ENOENT)
		error = 0;
	if (error == 0 && boff != bp->b_bufsize) {
		KKASSERT(boff < bp->b_bufsize);
		bzero((char *)bp->b_data + boff, bp->b_bufsize - boff);
		/* boff = bp->b_bufsize; */
	}
	bp->b_resid = 0;
	bp->b_error = error;
	if (error)
		bp->b_flags |= B_ERROR;
	biodone(ap->a_bio);

done:
	if (cursor.node)
		hammer_cache_node(cursor.node, &ip->cache[1]);
	hammer_done_cursor(&cursor);
	hammer_done_transaction(&trans);
	return (error);
}

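/*
 * Note: the direct-read optimization above is only attempted when the
 * copy starts at buffer offset 0 and the media offset of the record data
 * is HAMMER_BUFMASK aligned; all other cases resolve the record into a
 * hammer_buffer and bcopy() the data, which is safe but slower.
 */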
/*
 * BMAP operation - used to support cluster_read() only.
 *
 * (struct vnode *vp, off_t loffset, off_t *doffsetp, int *runp, int *runb)
 *
 * This routine may return EOPNOTSUPP if the operation is not supported for
 * the specified offset.  The contents of the pointer arguments do not
 * need to be initialized in that case.
 *
 * If a disk address is available and properly aligned return 0 with
 * *doffsetp set to the zone-2 address, and *runp / *runb set appropriately
 * to the run-length relative to that offset.  Callers may assume that
 * *doffsetp is valid if 0 is returned, even if *runp is not sufficiently
 * large, so return EOPNOTSUPP if it is not sufficiently large.
 */
static
int
hammer_vop_bmap(struct vop_bmap_args *ap)
{
	struct hammer_transaction trans;
	struct hammer_inode *ip;
	struct hammer_cursor cursor;
	hammer_base_elm_t base;
	int64_t rec_offset;
	int64_t ran_end;
	int64_t tmp64;
	int64_t base_offset;
	int64_t base_disk_offset;
	int64_t last_offset;
	hammer_off_t last_disk_offset;
	hammer_off_t disk_offset;
	int rec_len;
	int error;

	ip = ap->a_vp->v_data;

	/*
	 * We can only BMAP regular files.  We can't BMAP database files,
	 * etc.
	 */
	if (ip->ino_data.obj_type != HAMMER_OBJTYPE_REGFILE)
		return (EOPNOTSUPP);

	/*
	 * bmap is typically called with runp/runb both NULL when used
	 * for writing.  We do not support BMAP for writing atm.
	 */
	if (ap->a_runp == NULL && ap->a_runb == NULL)
		return (EOPNOTSUPP);

	/*
	 * Scan the B-Tree to acquire blockmap addresses, then translate
	 * to raw addresses.
	 */
	hammer_simple_transaction(&trans, ip->hmp);
#if 0
	kprintf("bmap_beg %016llx ip->cache %p\n", ap->a_loffset, ip->cache[1]);
#endif
	hammer_init_cursor(&trans, &cursor, &ip->cache[1], ip);

	/*
	 * Key range (begin and end inclusive) to scan.  Note that the key's
	 * stored in the actual records represent BASE+LEN, not BASE.  The
	 * first record containing bio_offset will have a key > bio_offset.
	 */
	cursor.key_beg.localization = HAMMER_LOCALIZE_MISC;
	cursor.key_beg.obj_id = ip->obj_id;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.obj_type = 0;
	if (ap->a_runb)
		cursor.key_beg.key = ap->a_loffset - MAXPHYS + 1;
	else
		cursor.key_beg.key = ap->a_loffset + 1;
	if (cursor.key_beg.key < 0)
		cursor.key_beg.key = 0;
	cursor.asof = ip->obj_asof;
	cursor.flags |= HAMMER_CURSOR_ASOF;

	cursor.key_end = cursor.key_beg;
	KKASSERT(ip->ino_data.obj_type == HAMMER_OBJTYPE_REGFILE);

	ran_end = ap->a_loffset + MAXPHYS;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_DATA;
	cursor.key_end.rec_type = HAMMER_RECTYPE_DATA;
	tmp64 = ran_end + MAXPHYS + 1;	/* work-around GCC-4 bug */
	if (tmp64 < ran_end)
		cursor.key_end.key = 0x7FFFFFFFFFFFFFFFLL;
	else
		cursor.key_end.key = ran_end + MAXPHYS + 1;

	cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE;

	error = hammer_ip_first(&cursor);
	base_offset = last_offset = 0;
	base_disk_offset = last_disk_offset = 0;

	while (error == 0) {
		/*
		 * Get the base file offset of the record.  The key for
		 * data records is (base + bytes) rather than (base).
		 */
		base = &cursor.leaf->base;
		rec_offset = base->key - cursor.leaf->data_len;
		rec_len = cursor.leaf->data_len;

		/*
		 * Incorporate any cached truncation
		 */
		if (ip->flags & HAMMER_INODE_TRUNCATED) {
			if (hammer_cursor_ondisk(&cursor) ||
			    cursor.iprec->flush_state == HAMMER_FST_FLUSH) {
				if (ip->trunc_off <= rec_offset)
					rec_len = 0;
				else if (ip->trunc_off < rec_offset + rec_len)
					rec_len = (int)(ip->trunc_off - rec_offset);
			}
		}
		if (ip->sync_flags & HAMMER_INODE_TRUNCATED) {
			if (hammer_cursor_ondisk(&cursor)) {
				if (ip->sync_trunc_off <= rec_offset)
					rec_len = 0;
				else if (ip->sync_trunc_off < rec_offset + rec_len)
					rec_len = (int)(ip->sync_trunc_off - rec_offset);
			}
		}

		/*
		 * Accumulate information.  If we have hit a discontiguous
		 * block reset base_offset unless we are already beyond the
		 * requested offset.  If we are, that's it, we stop.
		 */
		disk_offset = hammer_blockmap_lookup(trans.hmp,
						     cursor.leaf->data_offset,
						     &error);
		if (error)
			break;
		if (rec_offset != last_offset ||
		    disk_offset != last_disk_offset) {
			if (rec_offset > ap->a_loffset)
				break;
			base_offset = rec_offset;
			base_disk_offset = disk_offset;
		}
		last_offset = rec_offset + rec_len;
		last_disk_offset = disk_offset + rec_len;

		error = hammer_ip_next(&cursor);
	}

#if 0
	kprintf("BMAP %016llx: %016llx - %016llx\n",
		ap->a_loffset, base_offset, last_offset);
	kprintf("BMAP %16s: %016llx - %016llx\n",
		"", base_disk_offset, last_disk_offset);
#endif

	if (cursor.node) {
		hammer_cache_node(cursor.node, &ip->cache[1]);
#if 0
		kprintf("bmap_end2 %016llx ip->cache %p\n", ap->a_loffset, ip->cache[1]);
#endif
	}
	hammer_done_cursor(&cursor);
	hammer_done_transaction(&trans);

	if (base_offset == 0 || base_offset > ap->a_loffset ||
	    last_offset < ap->a_loffset) {
		error = EOPNOTSUPP;
	} else {
		disk_offset = base_disk_offset + (ap->a_loffset - base_offset);

		/*
		 * If doffsetp is not aligned or the forward run size does
		 * not cover a whole buffer, disallow the direct I/O.
		 */
		if ((disk_offset & HAMMER_BUFMASK) ||
		    (last_offset - ap->a_loffset) < HAMMER_BUFSIZE) {
			error = EOPNOTSUPP;
		} else {
			*ap->a_doffsetp = disk_offset;
			if (ap->a_runb)
				*ap->a_runb = ap->a_loffset - base_offset;
			if (ap->a_runp)
				*ap->a_runp = last_offset - ap->a_loffset;
			error = 0;
		}
	}
	return (error);
}

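/*
 * Note: a successful BMAP above lets cluster_read() chain contiguous
 * zone-2 blocks into one large physical read.  Returning EOPNOTSUPP is
 * not an error from the caller's point of view; it simply causes the
 * cluster code to fall back to ordinary buffer-at-a-time reads.
 */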
/*
 * Write to a regular file.  Because this is a strategy call the OS is
 * trying to actually sync data to the media.  HAMMER can only flush
 * the entire inode (so the TID remains properly synchronized).
 *
 * Basically all we do here is place the bio on the inode's flush queue
 * and activate the flusher.
 */
static
int
hammer_vop_strategy_write(struct vop_strategy_args *ap)
{
	hammer_record_t record;
	hammer_mount_t hmp;
	hammer_inode_t ip;
	struct bio *bio;
	struct buf *bp;
	int bytes;
	int error;

	bio = ap->a_bio;
	bp = bio->bio_buf;
	ip = ap->a_vp->v_data;
	hmp = ip->hmp;

	if (ip->flags & HAMMER_INODE_RO) {
		bp->b_error = EROFS;
		bp->b_flags |= B_ERROR;
		biodone(ap->a_bio);
		hammer_cleanup_write_io(ip);
		return (EROFS);
	}

	/*
	 * Interlock with inode destruction (no in-kernel or directory
	 * topology visibility).  If we queue new IO while trying to
	 * destroy the inode we can deadlock the vtrunc call in
	 * hammer_inode_unloadable_check().
	 */
	if (ip->flags & (HAMMER_INODE_DELETING|HAMMER_INODE_DELETED)) {
		bp->b_resid = 0;
		biodone(ap->a_bio);
		hammer_cleanup_write_io(ip);
		return (0);
	}

	/*
	 * Reserve space and issue a direct-write from the front-end.
	 * NOTE: The direct_io code will hammer_bread/bcopy smaller
	 * fragments as needed.
	 *
	 * An in-memory record will be installed to reference the storage
	 * until the flusher can get to it.
	 *
	 * Since we own the high level bio the front-end will not try to
	 * do a direct-read until the write completes.
	 *
	 * NOTE: The only time we do not reserve a full-sized buffers
	 * worth of data is if the file is small.  We do not try to
	 * allocate a fragment (from the small-data zone) at the end of
	 * an otherwise large file as this can lead to wildly separated
	 * data.
	 */
	KKASSERT((bio->bio_offset & HAMMER_BUFMASK) == 0);
	KKASSERT(bio->bio_offset < ip->ino_data.size);
	if (bio->bio_offset || ip->ino_data.size > HAMMER_BUFSIZE / 2)
		bytes = (bp->b_bufsize + HAMMER_BUFMASK) & ~HAMMER_BUFMASK;
	else
		bytes = ((int)ip->ino_data.size + 15) & ~15;

	record = hammer_ip_add_bulk(ip, bio->bio_offset, bp->b_data,
				    bytes, &error);
	if (record) {
		hammer_io_direct_write(hmp, &record->leaf, bio);
		hammer_rel_mem_record(record);
		if (hmp->rsv_recs > hammer_limit_recs &&
		    ip->rsv_recs > hammer_limit_irecs / 10) {
			hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
		} else if (ip->rsv_recs > hammer_limit_irecs / 2) {
			hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
		}
	} else {
		bp->b_bio2.bio_offset = NOOFFSET;
		bp->b_error = error;
		bp->b_flags |= B_ERROR;
		biodone(ap->a_bio);
	}
	hammer_cleanup_write_io(ip);
	return (error);
}

/*
 * Clean-up after disposing of a dirty frontend buffer's data.
 * This is somewhat heuristical so try to be robust.
 */
static void
hammer_cleanup_write_io(hammer_inode_t ip)
{
	if (ip->rsv_databufs) {
		--ip->rsv_databufs;
		--ip->hmp->rsv_databufs;
	}
}

/*
 * We can lose track of dirty buffer cache buffers if we truncate, this
 * routine will resynchronize the count.
 */
static
void
hammer_update_rsv_databufs(hammer_inode_t ip)
{
	struct buf *bp;
	int delta;
	int n;

	if (ip->vp) {
		n = 0;
		RB_FOREACH(bp, buf_rb_tree, &ip->vp->v_rbdirty_tree) {
			++n;
		}
	} else {
		n = 0;
	}
	delta = n - ip->rsv_databufs;
	ip->rsv_databufs += delta;
	ip->hmp->rsv_databufs += delta;
}

/*
 * dounlink - disconnect a directory entry
 *
 * XXX whiteout support not really in yet
 */
static int
hammer_dounlink(hammer_transaction_t trans, struct nchandle *nch,
		struct vnode *dvp, struct ucred *cred, int flags)
{
	struct namecache *ncp;
	hammer_inode_t dip;
	hammer_inode_t ip;
	struct hammer_cursor cursor;
	int64_t namekey;
	int nlen, error;

	/*
	 * Calculate the namekey and setup the key range for the scan.  This
	 * works kinda like a chained hash table where the lower 32 bits
	 * of the namekey synthesize the chain.
	 *
	 * The key range is inclusive of both key_beg and key_end.
	 */
	dip = VTOI(dvp);
	ncp = nch->ncp;

	if (dip->flags & HAMMER_INODE_RO)
		return (EROFS);

	namekey = hammer_directory_namekey(ncp->nc_name, ncp->nc_nlen);
retry:
	hammer_init_cursor(trans, &cursor, &dip->cache[0], dip);
	cursor.key_beg.localization = HAMMER_LOCALIZE_MISC;
	cursor.key_beg.obj_id = dip->obj_id;
	cursor.key_beg.key = namekey;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
	cursor.key_beg.obj_type = 0;

	cursor.key_end = cursor.key_beg;
	cursor.key_end.key |= 0xFFFFFFFFULL;
	cursor.asof = dip->obj_asof;
	cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;

	/*
	 * Scan all matching records (the chain), locate the one matching
	 * the requested path component.  info->last_error contains the
	 * error code on search termination and could be 0, ENOENT, or
	 * something else.
	 *
	 * The hammer_ip_*() functions merge in-memory records with on-disk
	 * records for the purposes of the search.
	 */
	error = hammer_ip_first(&cursor);

	while (error == 0) {
		error = hammer_ip_resolve_data(&cursor);
		if (error)
			break;
		nlen = cursor.leaf->data_len - HAMMER_ENTRY_NAME_OFF;
		KKASSERT(nlen > 0);
		if (ncp->nc_nlen == nlen &&
		    bcmp(ncp->nc_name, cursor.data->entry.name, nlen) == 0) {
			break;
		}
		error = hammer_ip_next(&cursor);
	}

	/*
	 * If all is ok we have to get the inode so we can adjust nlinks.
	 * To avoid a deadlock with the flusher we must release the inode
	 * lock on the directory when acquiring the inode for the entry.
	 *
	 * If the target is a directory, it must be empty.
	 */
	if (error == 0) {
		hammer_unlock(&cursor.ip->lock);
		ip = hammer_get_inode(trans, &dip->cache[1],
				      cursor.data->entry.obj_id,
				      dip->hmp->asof, 0, &error);
		hammer_lock_sh(&cursor.ip->lock);
		if (error == ENOENT) {
			kprintf("obj_id %016llx\n", cursor.data->entry.obj_id);
			Debugger("ENOENT unlinking object that should exist");
		}

		/*
		 * If we are trying to remove a directory the directory must
		 * be empty.
		 *
		 * WARNING: hammer_ip_check_directory_empty() may have to
		 * terminate the cursor to avoid a deadlock.  It is ok to
		 * call hammer_done_cursor() twice.
		 */
		if (error == 0 && ip->ino_data.obj_type ==
				  HAMMER_OBJTYPE_DIRECTORY) {
			error = hammer_ip_check_directory_empty(trans, ip);
		}

		/*
		 * Delete the directory entry.
		 *
		 * WARNING: hammer_ip_del_directory() may have to terminate
		 * the cursor to avoid a deadlock.  It is ok to call
		 * hammer_done_cursor() twice.
		 */
		if (error == 0) {
			error = hammer_ip_del_directory(trans, &cursor,
							dip, ip);
		}
		hammer_done_cursor(&cursor);
		if (error == 0) {
			cache_setunresolved(nch);
			cache_setvp(nch, NULL);
			/* XXX locking */
			if (ip->vp)
				cache_inval_vp(ip->vp, CINV_DESTROY);
		}
		if (ip)
			hammer_rel_inode(ip, 0);
	} else {
		hammer_done_cursor(&cursor);
	}
	if (error == EDEADLK)
		goto retry;

	return (error);
}

/************************************************************************
 *			    FIFO AND SPECFS OPS				*
 ************************************************************************
 *
 */

static int
hammer_vop_fifoclose (struct vop_close_args *ap)
{
	/* XXX update itimes */
	return (VOCALL(&fifo_vnode_vops, &ap->a_head));
}

static int
hammer_vop_fiforead (struct vop_read_args *ap)
{
	int error;

	error = VOCALL(&fifo_vnode_vops, &ap->a_head);
	/* XXX update access time */
	return (error);
}

static int
hammer_vop_fifowrite (struct vop_write_args *ap)
{
	int error;

	error = VOCALL(&fifo_vnode_vops, &ap->a_head);
	/* XXX update access time */
	return (error);
}

static int
hammer_vop_specclose (struct vop_close_args *ap)
{
	/* XXX update itimes */
	return (VOCALL(&spec_vnode_vops, &ap->a_head));
}

static int
hammer_vop_specread (struct vop_read_args *ap)
{
	/* XXX update access time */
	return (VOCALL(&spec_vnode_vops, &ap->a_head));
}

static int
hammer_vop_specwrite (struct vop_write_args *ap)
{
	/* XXX update last change time */
	return (VOCALL(&spec_vnode_vops, &ap->a_head));
}