Add quirk for SONY SMO drive. This (pre SCSI-2) drive returns a mystic
[dragonfly.git] / sys / vfs / hammer / hammer_inode.c
CommitLineData
427e5fc6
MD
1/*
2 * Copyright (c) 2007 The DragonFly Project. All rights reserved.
3 *
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@backplane.com>
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
16 * distribution.
17 * 3. Neither the name of The DragonFly Project nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific, prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
66325755 34 * $DragonFly: src/sys/vfs/hammer/hammer_inode.c,v 1.3 2007/11/07 00:43:24 dillon Exp $
427e5fc6
MD
35 */
36
37#include "hammer.h"
38#include <sys/buf.h>
39#include <sys/buf2.h>
40
427e5fc6
MD
41int
42hammer_vop_inactive(struct vop_inactive_args *ap)
43{
66325755 44 struct hammer_inode *ip = VTOI(ap->a_vp);
27ea2398 45
66325755
MD
46 if (ip == NULL)
47 vrecycle(ap->a_vp);
427e5fc6
MD
48 return(0);
49}
50
51int
52hammer_vop_reclaim(struct vop_reclaim_args *ap)
53{
427e5fc6
MD
54 struct hammer_inode *ip;
55 struct vnode *vp;
56
57 vp = ap->a_vp;
66325755
MD
58 if ((ip = vp->v_data) != NULL)
59 hammer_unload_inode(ip, NULL);
427e5fc6
MD
60 return(0);
61}
62
/*
 * Obtain a vnode for the specified inode number.  An exclusively locked
 * vnode is returned.
 *
 * To avoid deadlocks we cannot hold the inode lock while we are creating
 * a new vnode.  We can prevent the inode from going away, however.  If
 * we race another vget we just throw away our newly created vnode.
 */
int
hammer_vfs_vget(struct mount *mp, ino_t ino, struct vnode **vpp)
{
	struct hammer_mount *hmp = (void *)mp->mnt_data;
	struct hammer_inode *ip;
	int error;

	/*
	 * Get/allocate the hammer_inode structure.  The structure must be
	 * unlocked while we manipulate the related vnode to avoid a
	 * deadlock.
	 */
	ip = hammer_get_inode(hmp, ino, &error);
	if (ip == NULL) {
		*vpp = NULL;
		return(error);
	}
	/*
	 * Trade the inode lock for a reference so the inode cannot be
	 * destroyed while the vnode layer is entered, then drop the
	 * reference once the vnode has been obtained (or the attempt
	 * has failed).  On success *vpp holds the locked vnode.
	 */
	hammer_lock_to_ref(&ip->lock);
	error = hammer_get_vnode(ip, LK_EXCLUSIVE, vpp);
	hammer_put_inode_ref(ip);
	return (error);
}
93
/*
 * Return a locked vnode for the specified inode.  The inode must be
 * referenced but NOT LOCKED on entry and will remain referenced on
 * return.
 *
 * NOTE(review): the lktype parameter is currently ignored -- the vget()
 * below hard-codes LK_EXCLUSIVE.  The single visible caller passes
 * LK_EXCLUSIVE so behavior matches, but confirm before adding callers
 * that request a shared lock.
 */
int
hammer_get_vnode(struct hammer_inode *ip, int lktype, struct vnode **vpp)
{
	struct vnode *vp;
	int error = 0;

	for (;;) {
		if ((vp = ip->vp) == NULL) {
			/*
			 * No vnode attached yet: create one, then re-check
			 * ip->vp because another thread may have attached
			 * a vnode while we were allocating ours.
			 */
			error = getnewvnode(VT_HAMMER, ip->hmp->mp, vpp, 0, 0);
			if (error)
				break;
			if (ip->vp == NULL) {
				/* We won the race; attach our new vnode. */
				vp = *vpp;
				ip->vp = vp;
				vp->v_type = hammer_get_vnode_type(
					ip->ino_rec.base.base.obj_type);
				vp->v_data = (void *)ip;
				/* vnode locked by getnewvnode() */
				break;
			}
			/* Lost the race: discard our unused vnode and retry */
			vp->v_type = VBAD;
			vx_put(vp);
		} else {
			/*
			 * loop if the vget fails (aka races), or if the vp
			 * no longer matches ip->vp.
			 */
			if (vget(vp, LK_EXCLUSIVE) == 0) {
				if (vp == ip->vp)
					break;
				vput(vp);
			}
		}
	}
	return(error);
}
135
/*
 * Get and lock a HAMMER inode.  These functions do not attach or detach
 * the related vnode.
 *
 * Looks the inode up in the per-mount red-black cache first; on a miss
 * it is loaded from the on-disk B-Tree and inserted into the cache.
 * On failure NULL is returned and *errorp holds the error.
 *
 * NOTE(review): the cached-hit path returns with hammer_lock() held on
 * ip->lock, but the freshly-inserted path returns the zeroed inode
 * without taking the lock -- verify callers tolerate this asymmetry.
 */
struct hammer_inode *
hammer_get_inode(struct hammer_mount *hmp, u_int64_t obj_id, int *errorp)
{
	struct hammer_btree_info binfo;
	struct hammer_inode_info iinfo;
	struct hammer_base_elm key;
	struct hammer_inode *ip;

	/*
	 * Determine if we already have an inode cached.  If we do then
	 * we are golden.
	 */
	iinfo.obj_id = obj_id;
	iinfo.obj_asof = HAMMER_MAX_TID;	/* XXX */
loop:
	ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo);
	if (ip) {
		hammer_lock(&ip->lock);
		*errorp = 0;
		return(ip);
	}

	/* Not cached: allocate a zeroed in-memory inode to populate. */
	ip = kmalloc(sizeof(*ip), M_HAMMER, M_WAITOK|M_ZERO);
	ip->obj_id = obj_id;
	ip->obj_asof = iinfo.obj_asof;
	ip->hmp = hmp;

	/*
	 * If we do not have an inode cached search the HAMMER on-disk B-Tree
	 * for it.
	 */
	hammer_btree_info_init(&binfo, hmp->rootcl);
	key.obj_id = ip->obj_id;
	key.key = 0;
	key.create_tid = iinfo.obj_asof;
	key.delete_tid = 0;
	key.rec_type = HAMMER_RECTYPE_INODE;
	key.obj_type = 0;

	*errorp = hammer_btree_lookup(&binfo, &key, HAMMER_BTREE_GET_RECORD |
					  HAMMER_BTREE_GET_DATA);

	/*
	 * On success the B-Tree lookup will hold the appropriate
	 * buffer cache buffers and provide a pointer to the requested
	 * information.  Copy the information to the in-memory inode.
	 */
	if (*errorp == 0) {
		ip->ino_rec = binfo.rec->inode;
		ip->ino_data = binfo.data->inode;
	}
	/* Release the buffers held by the lookup regardless of outcome. */
	hammer_btree_info_done(&binfo);

	/*
	 * On success load the inode's record and data and insert the
	 * inode into the B-Tree.  It is possible to race another lookup
	 * insertion of the same inode so deal with that condition too.
	 */
	if (*errorp == 0) {
		if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
			/* Lost the race: discard ours, use the winner's. */
			kfree(ip, M_HAMMER);
			goto loop;
		}
	} else {
		kfree(ip, M_HAMMER);
		ip = NULL;
	}
	return (ip);
}
209
/*
 * Lock the inode via its embedded lock.  Thin wrapper so callers need
 * not know where the lock lives inside struct hammer_inode.
 */
void
hammer_lock_inode(struct hammer_inode *ip)
{
	hammer_lock(&ip->lock);
}
215
/*
 * Release the inode's lock.  Pairs with hammer_lock_inode(); releases
 * the lock only, not a reference.
 */
void
hammer_put_inode(struct hammer_inode *ip)
{
	hammer_unlock(&ip->lock);
}
221
/*
 * Drop a reference on the inode (does not touch the lock state itself).
 * Counterpart to the reference obtained via hammer_lock_to_ref().
 */
void
hammer_put_inode_ref(struct hammer_inode *ip)
{
	hammer_unref(&ip->lock);
}
227
27ea2398
MD
/*
 * Unload and destroy an in-memory inode: detach any associated vnode,
 * remove the inode from the per-mount cache, and free it.
 *
 * (called via RB_SCAN; also called directly from hammer_vop_reclaim
 * with a NULL data argument)
 */
int
hammer_unload_inode(struct hammer_inode *ip, void *data __unused)
{
	struct vnode *vp;

	/* The inode must no longer be referenced by anyone. */
	KKASSERT(ip->lock.refs == 0);
	/*
	 * Sever the vnode association in both directions before teardown.
	 */
	if ((vp = ip->vp) != NULL) {
		ip->vp = NULL;
		vp->v_data = NULL;
		/* XXX */
	}
	RB_REMOVE(hammer_ino_rb_tree, &ip->hmp->rb_inos_root, ip);
	kfree(ip, M_HAMMER);
	return(0);
}
246
/*
 * A transaction has modified an inode, requiring a new record and possibly
 * also data to be written out.
 *
 * NOTE(review): only the dirty flags are recorded here; the actual
 * write-out path is not implemented yet -- the KKASSERT(0) below
 * panics if this is ever reached.  Intentional stub, do not "fix".
 */
void
hammer_modify_inode(struct hammer_transaction *trans,
		    struct hammer_inode *ip, int flags)
{
	ip->flags |= flags;
	KKASSERT(0);	/* write-out not yet implemented */
}
258
259/*
260 * Access the filesystem buffer containing the cluster-relative byte
261 * offset, validate the buffer type, load *bufferp and return a
262 * pointer to the requested data.
263 *
264 * If buf_type is 0 the buffer is assumed to be a pure-data buffer and
265 * no type or crc check is performed.
266 *
267 * XXX add a flag for the buffer type and check the CRC here XXX
268 */
269void *
270hammer_bread(struct hammer_cluster *cluster, int32_t cloff,
271 u_int64_t buf_type,
272 int *errorp, struct hammer_buffer **bufferp)
273{
274 struct hammer_buffer *buffer;
275 int32_t buf_no;
276 int32_t buf_off;
277
278 /*
279 * Load the correct filesystem buffer, replacing *bufferp.
280 */
281 buf_no = cloff / HAMMER_BUFSIZE;
282 buffer = *bufferp;
283 if (buffer == NULL || buffer->cluster != cluster ||
284 buffer->buf_no != buf_no) {
285 if (buffer)
66325755 286 hammer_put_buffer(buffer, 0);
427e5fc6
MD
287 buffer = hammer_get_buffer(cluster, buf_no, 0, errorp);
288 *bufferp = buffer;
289 if (buffer == NULL)
290 return(NULL);
291 }
292
293 /*
294 * Validate the buffer type and crc XXX
295 */
296 buf_off = cloff & HAMMER_BUFMASK;
297 if (buf_type) {
298 if (buf_type != buffer->ondisk->head.buf_type) {
27ea2398
MD
299 kprintf("BUFFER HEAD TYPE MISMATCH %llx %llx\n",
300 buf_type, buffer->ondisk->head.buf_type);
427e5fc6
MD
301 *errorp = EIO;
302 return(NULL);
303 }
304 if (buf_off < sizeof(buffer->ondisk->head)) {
27ea2398 305 kprintf("BUFFER OFFSET TOO LOW %d\n", buf_off);
427e5fc6
MD
306 *errorp = EIO;
307 return(NULL);
308 }
309 /* XXX crc */
310 }
311
312 /*
313 * Return a pointer to the buffer data.
314 */
315 *errorp = 0;
316 return((char *)buffer->ondisk + buf_off);
317}
318