/*
 * Copyright (c) 2007 The DragonFly Project. All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_inode.c,v 1.3 2007/11/07 00:43:24 dillon Exp $
 */
int
hammer_vop_inactive(struct vop_inactive_args *ap)
{
        struct hammer_inode *ip = VTOI(ap->a_vp);
int
hammer_vop_reclaim(struct vop_reclaim_args *ap)
{
        struct hammer_inode *ip;
        struct vnode *vp;

        vp = ap->a_vp;
        if ((ip = vp->v_data) != NULL)
                hammer_unload_inode(ip, NULL);
        return(0);
}
/*
 * Obtain a vnode for the specified inode number.  An exclusively locked
 * vnode is returned.
 *
 * To avoid deadlocks we cannot hold the inode lock while we are creating
 * a new vnode.  We can prevent the inode from going away, however.  If
 * we race another vget we just throw away our newly created vnode.
 */
int
hammer_vfs_vget(struct mount *mp, ino_t ino, struct vnode **vpp)
{
        struct hammer_mount *hmp = (void *)mp->mnt_data;
        struct hammer_inode *ip;
        int error;

        /*
         * Get/allocate the hammer_inode structure.  The structure must be
         * unlocked while we manipulate the related vnode to avoid a
         * deadlock.
         */
        ip = hammer_get_inode(hmp, ino, &error);
        if (ip == NULL) {
                *vpp = NULL;
                return(error);
        }
        hammer_lock_to_ref(&ip->lock);
        error = hammer_get_vnode(ip, LK_EXCLUSIVE, vpp);
        hammer_put_inode_ref(ip);
        return(error);
}
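
/*
 * Illustrative sketch, not part of the original source: a caller that needs
 * the vnode for a given inode number goes through the mount's vget entry
 * point and receives it exclusively locked, e.g.
 *
 *	struct vnode *vp;
 *	int error;
 *
 *	error = hammer_vfs_vget(mp, obj_id, &vp);
 *	if (error == 0) {
 *		...			use the exclusively locked vnode
 *		vput(vp);		unlock and release it
 *	}
 */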
/*
 * Return a locked vnode for the specified inode.  The inode must be
 * referenced but NOT LOCKED on entry and will remain referenced on
 * return.
 */
int
hammer_get_vnode(struct hammer_inode *ip, int lktype, struct vnode **vpp)
{
        struct vnode *vp;
        int error;

        if ((vp = ip->vp) == NULL) {
                error = getnewvnode(VT_HAMMER, ip->hmp->mp, vpp, 0, 0);
                vp = *vpp;
                if (ip->vp == NULL) {
                        vp->v_type = hammer_get_vnode_type(
                                        ip->ino_rec.base.base.obj_type);
                        vp->v_data = (void *)ip;
                        /* vnode locked by getnewvnode() */
        /*
         * loop if the vget fails (aka races), or if the vp
         * no longer matches ip->vp.
         */
        if (vget(vp, LK_EXCLUSIVE) == 0) {
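
/*
 * Illustrative sketch, not part of the original source: per the contract
 * above, the caller holds only a reference on the hammer_inode and gets the
 * vnode back locked, with the inode reference still intact:
 *
 *	error = hammer_get_vnode(ip, LK_EXCLUSIVE, &vp);
 *	if (error == 0) {
 *		...			vp is locked and ip->vp == vp
 *	}
 *	...				ip remains referenced either way
 */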
/*
 * Get and lock a HAMMER inode.  These functions do not attach or detach
 * the related vnode.
 */
struct hammer_inode *
hammer_get_inode(struct hammer_mount *hmp, u_int64_t obj_id, int *errorp)
{
        struct hammer_btree_info binfo;
        struct hammer_inode_info iinfo;
        struct hammer_base_elm key;
        struct hammer_inode *ip;

        /*
         * Determine if we already have an inode cached.  If we do then
         * lock and return it.
         */
        iinfo.obj_id = obj_id;
        iinfo.obj_asof = HAMMER_MAX_TID;	/* XXX */
        ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo);
        if (ip) {
                hammer_lock(&ip->lock);
                *errorp = 0;
                return(ip);
        }

        ip = kmalloc(sizeof(*ip), M_HAMMER, M_WAITOK|M_ZERO);
        ip->obj_id = obj_id;
        ip->obj_asof = iinfo.obj_asof;
        ip->hmp = hmp;

        /*
         * If we do not have an inode cached search the HAMMER on-disk B-Tree
         * for it.
         */
        hammer_btree_info_init(&binfo, hmp->rootcl);
        key.obj_id = ip->obj_id;
        key.create_tid = iinfo.obj_asof;
        key.rec_type = HAMMER_RECTYPE_INODE;

        *errorp = hammer_btree_lookup(&binfo, &key, HAMMER_BTREE_GET_RECORD |
                                      HAMMER_BTREE_GET_DATA);

        /*
         * On success the B-Tree lookup will hold the appropriate
         * buffer cache buffers and provide a pointer to the requested
         * information.  Copy the information to the in-memory inode.
         */
        if (*errorp == 0) {
                ip->ino_rec = binfo.rec->inode;
                ip->ino_data = binfo.data->inode;
        }
        hammer_btree_info_done(&binfo);

        /*
         * On success load the inode's record and data and insert the
         * inode into the in-memory RB tree.  It is possible to race another
         * lookup insertion of the same inode so deal with that condition
         * too.
         */
        if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
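
/*
 * Illustrative sketch, not part of the original source: hammer_get_inode()
 * returns the in-memory inode locked (or NULL with *errorp set), so a caller
 * that only needs transient access pairs it with hammer_put_inode() to drop
 * the lock:
 *
 *	ip = hammer_get_inode(hmp, obj_id, &error);
 *	if (ip == NULL)
 *		return (error);
 *	...				inspect ip->ino_rec / ip->ino_data
 *	hammer_put_inode(ip);		release the inode lock
 */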
void
hammer_lock_inode(struct hammer_inode *ip)
{
        hammer_lock(&ip->lock);
}

void
hammer_put_inode(struct hammer_inode *ip)
{
        hammer_unlock(&ip->lock);
}

void
hammer_put_inode_ref(struct hammer_inode *ip)
{
        hammer_unref(&ip->lock);
}
/*
 * (called via RB_SCAN)
 */
int
hammer_unload_inode(struct hammer_inode *ip, void *data __unused)
{
        struct vnode *vp;

        KKASSERT(ip->lock.refs == 0);
        if ((vp = ip->vp) != NULL) {
                ip->vp = NULL;
                vp->v_data = NULL;
        }
        RB_REMOVE(hammer_ino_rb_tree, &ip->hmp->rb_inos_root, ip);
        kfree(ip, M_HAMMER);
        return(0);
}
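
/*
 * Illustrative sketch, not part of the original source: because
 * hammer_unload_inode() has the RB_SCAN callback signature, unmount-time
 * teardown can walk the whole in-memory inode tree with something like:
 *
 *	RB_SCAN(hammer_ino_rb_tree, &hmp->rb_inos_root, NULL,
 *		hammer_unload_inode, NULL);
 *
 * Every inode must already have dropped to zero references for the
 * KKASSERT above to hold.
 */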
/*
 * A transaction has modified an inode, requiring a new record and possibly
 * also data to be written out.
 */
void
hammer_modify_inode(struct hammer_transaction *trans,
                    struct hammer_inode *ip, int flags)
{
        ip->flags |= flags;
}
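
/*
 * Illustrative sketch, not part of the original source (the flag names below
 * are placeholders, not necessarily the real HAMMER flags): a write path
 * that dirties both the inode record and its data might do
 *
 *	hammer_modify_inode(&trans, ip,
 *			    HAMMER_INODE_RDIRTY | HAMMER_INODE_DDIRTY);
 *
 * so that the flush code later knows a new record and new data blocks must
 * be written out.
 */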
/*
 * Access the filesystem buffer containing the cluster-relative byte
 * offset, validate the buffer type, load *bufferp and return a
 * pointer to the requested data.
 *
 * If buf_type is 0 the buffer is assumed to be a pure-data buffer and
 * no type or crc check is performed.
 *
 * XXX add a flag for the buffer type and check the CRC here XXX
 */
void *
hammer_bread(struct hammer_cluster *cluster, int32_t cloff,
             u_int64_t buf_type,
             int *errorp, struct hammer_buffer **bufferp)
{
        struct hammer_buffer *buffer;
        int32_t buf_no;
        int32_t buf_off;

        /*
         * Load the correct filesystem buffer, replacing *bufferp.
         */
        buf_no = cloff / HAMMER_BUFSIZE;
        buffer = *bufferp;
        if (buffer == NULL || buffer->cluster != cluster ||
            buffer->buf_no != buf_no) {
                if (buffer)
                        hammer_put_buffer(buffer, 0);
                buffer = hammer_get_buffer(cluster, buf_no, 0, errorp);
                *bufferp = buffer;
                if (buffer == NULL)
                        return(NULL);
        }

        /*
         * Validate the buffer type and crc XXX
         */
        buf_off = cloff & HAMMER_BUFMASK;
        if (buf_type) {
                if (buf_type != buffer->ondisk->head.buf_type) {
                        kprintf("BUFFER HEAD TYPE MISMATCH %llx %llx\n",
                                buf_type, buffer->ondisk->head.buf_type);
                }
                if (buf_off < sizeof(buffer->ondisk->head)) {
                        kprintf("BUFFER OFFSET TOO LOW %d\n", buf_off);
                }
        }

        /*
         * Return a pointer to the buffer data.
         */
        *errorp = 0;
        return((char *)buffer->ondisk + buf_off);
}
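
/*
 * Illustrative sketch, not part of the original source: a typical caller
 * keeps *bufferp cached across consecutive hammer_bread() calls within a
 * cluster and releases it when done (the buffer type constant here is only
 * an example):
 *
 *	struct hammer_buffer *buffer = NULL;
 *	void *data;
 *	int error;
 *
 *	data = hammer_bread(cluster, cloff, HAMMER_FSBUF_DATA,
 *			    &error, &buffer);
 *	if (error == 0) {
 *		...			consume the data
 *	}
 *	if (buffer)
 *		hammer_put_buffer(buffer, 0);
 */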