FC path inquiry settings.
[dragonfly.git] / sys / vfs / hammer / hammer_inode.c
CommitLineData
427e5fc6
MD
1/*
2 * Copyright (c) 2007 The DragonFly Project. All rights reserved.
3 *
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@backplane.com>
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
16 * distribution.
17 * 3. Neither the name of The DragonFly Project nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific, prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
8cd0a023 34 * $DragonFly: src/sys/vfs/hammer/hammer_inode.c,v 1.4 2007/11/19 00:53:40 dillon Exp $
427e5fc6
MD
35 */
36
37#include "hammer.h"
38#include <sys/buf.h>
39#include <sys/buf2.h>
40
427e5fc6
MD
41int
42hammer_vop_inactive(struct vop_inactive_args *ap)
43{
66325755 44 struct hammer_inode *ip = VTOI(ap->a_vp);
27ea2398 45
66325755
MD
46 if (ip == NULL)
47 vrecycle(ap->a_vp);
427e5fc6
MD
48 return(0);
49}
50
51int
52hammer_vop_reclaim(struct vop_reclaim_args *ap)
53{
427e5fc6
MD
54 struct hammer_inode *ip;
55 struct vnode *vp;
56
57 vp = ap->a_vp;
66325755
MD
58 if ((ip = vp->v_data) != NULL)
59 hammer_unload_inode(ip, NULL);
427e5fc6
MD
60 return(0);
61}
62
63/*
66325755
MD
64 * Obtain a vnode for the specified inode number. An exclusively locked
65 * vnode is returned.
427e5fc6
MD
66 */
67int
68hammer_vfs_vget(struct mount *mp, ino_t ino, struct vnode **vpp)
69{
70 struct hammer_mount *hmp = (void *)mp->mnt_data;
66325755
MD
71 struct hammer_inode *ip;
72 int error;
73
74 /*
75 * Get/allocate the hammer_inode structure. The structure must be
76 * unlocked while we manipulate the related vnode to avoid a
77 * deadlock.
78 */
79 ip = hammer_get_inode(hmp, ino, &error);
80 if (ip == NULL) {
81 *vpp = NULL;
82 return(error);
83 }
66325755 84 error = hammer_get_vnode(ip, LK_EXCLUSIVE, vpp);
8cd0a023 85 hammer_rel_inode(ip);
66325755
MD
86 return (error);
87}
88
/*
 * Return a locked vnode for the specified inode.  The inode must be
 * referenced but NOT LOCKED on entry and will remain referenced on
 * return.
 *
 * If no vnode is attached yet a new one is allocated and associated;
 * otherwise the existing vnode is vget()'d.  Races against concurrent
 * attachment or reclamation are resolved by looping.
 *
 * NOTE(review): the lktype parameter is currently ignored -- both
 * getnewvnode() and the vget() below use LK_EXCLUSIVE; confirm intent.
 */
int
hammer_get_vnode(struct hammer_inode *ip, int lktype, struct vnode **vpp)
{
	struct vnode *vp;
	int error = 0;

	for (;;) {
		if ((vp = ip->vp) == NULL) {
			/* new vnode is returned locked by getnewvnode() */
			error = getnewvnode(VT_HAMMER, ip->hmp->mp, vpp, 0, 0);
			if (error)
				break;
			hammer_lock_ex(&ip->lock);
			if (ip->vp != NULL) {
				/*
				 * Lost a race: another thread attached a
				 * vnode while ours was being allocated.
				 * Discard the new vnode and retry.
				 */
				hammer_unlock(&ip->lock);
				vp->v_type = VBAD;
				vx_put(vp);
				continue;
			}
			/* extra inode ref represents the vnode association */
			hammer_ref(&ip->lock);
			vp = *vpp;
			ip->vp = vp;
			vp->v_type = hammer_get_vnode_type(
				    ip->ino_rec.base.base.obj_type);
			vp->v_data = (void *)ip;
			/* vnode locked by getnewvnode() */
			/* make related vnode dirty if inode dirty? */
			hammer_unlock(&ip->lock);
			break;
		}

		/*
		 * loop if the vget fails (aka races), or if the vp
		 * no longer matches ip->vp.
		 */
		if (vget(vp, LK_EXCLUSIVE) == 0) {
			if (vp == ip->vp)
				break;
			vput(vp);
		}
	}
	return(error);
}
136
/*
 * Acquire a HAMMER inode.  The returned inode is not locked.  These functions
 * do not attach or detach the related vnode (use hammer_get_vnode() for
 * that).
 *
 * On a cache miss the inode record and data are looked up in the on-disk
 * B-Tree and the new in-memory inode is inserted into the per-mount RB
 * tree.  Returns NULL and sets *errorp on failure.
 */
struct hammer_inode *
hammer_get_inode(struct hammer_mount *hmp, u_int64_t obj_id, int *errorp)
{
	struct hammer_inode_info iinfo;
	struct hammer_cursor cursor;
	struct hammer_inode *ip;

	/*
	 * Determine if we already have an inode cached.  If we do then
	 * we are golden.
	 */
	iinfo.obj_id = obj_id;
	iinfo.obj_asof = HAMMER_MAX_TID;	/* XXX as-of not supported yet */
loop:
	ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo);
	if (ip) {
		hammer_ref(&ip->lock);
		*errorp = 0;
		return(ip);
	}

	/* cache miss: build a new in-memory inode */
	ip = kmalloc(sizeof(*ip), M_HAMMER, M_WAITOK|M_ZERO);
	ip->obj_id = obj_id;
	ip->obj_asof = iinfo.obj_asof;
	ip->hmp = hmp;
	RB_INIT(&ip->rec_tree);

	/*
	 * Locate the on-disk inode.
	 * If we do not have an inode cached search the HAMMER on-disk B-Tree
	 * for it.
	 */

	hammer_init_cursor_hmp(&cursor, hmp);
	cursor.key_beg.obj_id = ip->obj_id;
	cursor.key_beg.key = 0;
	cursor.key_beg.create_tid = iinfo.obj_asof;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_INODE;
	cursor.key_beg.obj_type = 0;
	cursor.flags = HAMMER_BTREE_GET_RECORD | HAMMER_BTREE_GET_DATA;

	*errorp = hammer_btree_lookup(&cursor);

	/*
	 * On success the B-Tree lookup will hold the appropriate
	 * buffer cache buffers and provide a pointer to the requested
	 * information.  Copy the information to the in-memory inode.
	 */
	if (*errorp == 0) {
		ip->ino_rec = cursor.record->inode;
		ip->ino_data = cursor.data->inode;
	}
	/* remember the B-Tree node to speed up future lookups */
	hammer_cache_node(cursor.node, &ip->cache);
	hammer_done_cursor(&cursor);

	/*
	 * On success load the inode's record and data and insert the
	 * inode into the B-Tree.  It is possible to race another lookup
	 * insertion of the same inode so deal with that condition too.
	 */
	if (*errorp == 0) {
		hammer_ref(&ip->lock);
		if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
			/* lost an insertion race: back out and retry */
			hammer_uncache_node(&ip->cache);
			hammer_unref(&ip->lock);
			kfree(ip, M_HAMMER);
			goto loop;
		}
	} else {
		kfree(ip, M_HAMMER);
		ip = NULL;
	}
	return (ip);
}
217
8cd0a023
MD
/*
 * Create a new filesystem object, returning the inode in *ipp.  The
 * returned inode will be referenced but not locked.
 *
 * The inode is created in-memory and will be delay-synchronized to the
 * disk.
 *
 * NOTE(review): cred is currently unused -- presumably reserved for
 * permission/ownership handling; confirm against callers.
 */
int
hammer_create_inode(struct hammer_transaction *trans, struct vattr *vap,
		    struct ucred *cred, struct hammer_inode *dip,
		    struct hammer_inode **ipp)
{
	struct hammer_mount *hmp;
	struct hammer_inode *ip;

	hmp = trans->hmp;
	ip = kmalloc(sizeof(*ip), M_HAMMER, M_WAITOK|M_ZERO);
	/* allocate a fresh object id; 0 is reserved/invalid */
	ip->obj_id = ++hmp->last_ino;
	KKASSERT(ip->obj_id != 0);
	ip->obj_asof = HAMMER_MAX_TID;	/* XXX as-of not supported yet */
	ip->hmp = hmp;
	/* record, data, and inode times are all dirty until synced */
	ip->flags = HAMMER_INODE_DDIRTY | HAMMER_INODE_RDIRTY |
		    HAMMER_INODE_ITIMES;
	ip->last_tid = trans->tid;

	RB_INIT(&ip->rec_tree);

	ip->ino_rec.ino_atime = trans->tid;
	ip->ino_rec.ino_mtime = trans->tid;
	ip->ino_rec.ino_size = 0;
	ip->ino_rec.ino_nlinks = 0;
	/* XXX */
	/* allocate a record id from the root volume and mark it modified */
	ip->ino_rec.base.rec_id = ++hmp->rootvol->ondisk->vol0_recid;
	hammer_modify_volume(hmp->rootvol);
	KKASSERT(ip->ino_rec.base.rec_id != 0);
	ip->ino_rec.base.base.obj_id = ip->obj_id;
	ip->ino_rec.base.base.key = 0;
	ip->ino_rec.base.base.create_tid = trans->tid;
	ip->ino_rec.base.base.delete_tid = 0;
	ip->ino_rec.base.base.rec_type = HAMMER_RECTYPE_INODE;
	ip->ino_rec.base.base.obj_type = hammer_get_obj_type(vap->va_type);

	ip->ino_data.version = HAMMER_INODE_DATA_VERSION;
	ip->ino_data.mode = vap->va_mode;
	ip->ino_data.ctime = trans->tid;
	/* dip may be NULL, e.g. for an object with no parent directory */
	ip->ino_data.parent_obj_id = (dip) ? dip->ino_rec.base.base.obj_id : 0;
	if (vap->va_vaflags & VA_UID_UUID_VALID)
		ip->ino_data.uid = vap->va_uid_uuid;
	else
		hammer_guid_to_uuid(&ip->ino_data.uid, vap->va_uid);
	if (vap->va_vaflags & VA_GID_UUID_VALID)
		ip->ino_data.gid = vap->va_gid_uuid;
	else
		hammer_guid_to_uuid(&ip->ino_data.gid, vap->va_gid);

	/*
	 * Insert into the per-mount inode RB tree.  The obj_id was just
	 * allocated, so a collision indicates a serious bug.
	 */
	hammer_ref(&ip->lock);
	if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
		hammer_unref(&ip->lock);
		panic("hammer_create_inode: duplicate obj_id");
	}
	*ipp = ip;
	return(0);
}
281
/*
 * Release a reference on an inode previously obtained via
 * hammer_get_inode() or hammer_create_inode().
 */
void
hammer_rel_inode(struct hammer_inode *ip)
{
	/* XXX check last ref */
	hammer_unref(&ip->lock);
}
288
/*
 * Unload and destroy the specified inode.  The inode must be idle:
 * no remaining references and no attached vnode.
 *
 * (called via RB_SCAN)
 */
int
hammer_unload_inode(struct hammer_inode *ip, void *data __unused)
{
	KKASSERT(ip->lock.refs == 0);
	KKASSERT(ip->vp == NULL);
	/* transient ref held while tearing the inode down */
	hammer_ref(&ip->lock);
	RB_REMOVE(hammer_ino_rb_tree, &ip->hmp->rb_inos_root, ip);

	/* drop any cached B-Tree node association */
	hammer_uncache_node(&ip->cache);

	/* XXX flush */
	kfree(ip, M_HAMMER);
	return(0);
}
308
427e5fc6 309/*
66325755
MD
310 * A transaction has modified an inode, requiring a new record and possibly
311 * also data to be written out.
427e5fc6 312 */
66325755
MD
313void
314hammer_modify_inode(struct hammer_transaction *trans,
315 struct hammer_inode *ip, int flags)
427e5fc6 316{
66325755 317 ip->flags |= flags;
8cd0a023
MD
318 ip->last_tid = trans->tid;
319}
320
321/************************************************************************
322 * HAMMER INODE MERGED-RECORD FUNCTIONS *
323 ************************************************************************
324 *
325 * These functions augment the B-Tree scanning functions in hammer_btree.c
326 * by merging in-memory records with on-disk records.
327 */
328
/*
 * Position the cursor at the first merged (in-memory + on-disk) record
 * for the inode.
 *
 * Not yet implemented -- the KKASSERT fires if this is ever reached.
 */
hammer_record_ondisk_t
hammer_ip_first(hammer_cursor_t cursor, struct hammer_inode *ip)
{
	KKASSERT(0);
	return(NULL);
}
335
/*
 * Advance the cursor to the next merged (in-memory + on-disk) record.
 *
 * Not yet implemented -- the KKASSERT fires if this is ever reached.
 */
hammer_record_ondisk_t
hammer_ip_next(hammer_cursor_t cursor)
{
	KKASSERT(0);
	return(NULL);
}
342
343int
344hammer_ip_resolve_data(hammer_cursor_t cursor)
345{
66325755 346 KKASSERT(0);
8cd0a023 347 return(NULL);
427e5fc6
MD
348}
349
/*
 * Access the filesystem buffer containing the cluster-relative byte
 * offset, validate the buffer type, load *bufferp and return a
 * pointer to the requested data.  The buffer is referenced and locked on
 * return.
 *
 * If buf_type is 0 the buffer is assumed to be a pure-data buffer and
 * no type or crc check is performed.
 *
 * If *bufferp is not NULL on entry it is assumed to contain a locked
 * and referenced buffer which will then be replaced.
 *
 * If the caller is holding another unrelated buffer locked it must be
 * passed in reorderbuf so we can properly order buffer locks.
 * (NOTE(review): no reorderbuf parameter exists in the signature yet;
 * this appears to describe planned behavior -- confirm.)
 *
 * XXX add a flag for the buffer type and check the CRC here XXX
 */
void *
hammer_bread(hammer_cluster_t cluster, int32_t cloff,
	     u_int64_t buf_type, int *errorp,
	     struct hammer_buffer **bufferp)
{
	hammer_buffer_t buffer;
	int32_t buf_no;
	int32_t buf_off;

	/*
	 * Load the correct filesystem buffer, replacing *bufferp.
	 */
	buf_no = cloff / HAMMER_BUFSIZE;
	buffer = *bufferp;
	if (buffer == NULL || buffer->cluster != cluster ||
	    buffer->buf_no != buf_no) {
		/* unlock and release the previously cached buffer first */
		if (buffer) {
			hammer_unlock(&buffer->io.lock);
			hammer_rel_buffer(buffer, 0);
		}
		buffer = hammer_get_buffer(cluster, buf_no, 0, errorp);
		*bufferp = buffer;
		if (buffer == NULL)
			return(NULL);
		hammer_lock_ex(&buffer->io.lock);
	}

	/*
	 * Validate the buffer type
	 */
	buf_off = cloff & HAMMER_BUFMASK;
	if (buf_type) {
		if (buf_type != buffer->ondisk->head.buf_type) {
			kprintf("BUFFER HEAD TYPE MISMATCH %llx %llx\n",
				buf_type, buffer->ondisk->head.buf_type);
			*errorp = EIO;
			return(NULL);
		}
		/* offsets inside the on-disk buffer header are never data */
		if (buf_off < sizeof(buffer->ondisk->head)) {
			kprintf("BUFFER OFFSET TOO LOW %d\n", buf_off);
			*errorp = EIO;
			return(NULL);
		}
	}

	/*
	 * Return a pointer to the buffer data.
	 */
	*errorp = 0;
	return((char *)buffer->ondisk + buf_off);
}
418