/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_cursor.c,v 1.36 2008/07/04 07:25:36 dillon Exp $
 */

/*
 * HAMMER B-Tree index - cursor support routines
 */
42 static int hammer_load_cursor_parent(hammer_cursor_t cursor, int try_exclusive);
45 * Initialize a fresh cursor using the B-Tree node cache. If the cache
46 * is not available initialize a fresh cursor at the root of the filesystem.
49 hammer_init_cursor(hammer_transaction_t trans, hammer_cursor_t cursor,
50 hammer_node_cache_t cache, hammer_inode_t ip)
52 hammer_volume_t volume;
56 bzero(cursor, sizeof(*cursor));
58 cursor->trans = trans;
61 * If the cursor operation is on behalf of an inode, lock
64 if ((cursor->ip = ip) != NULL) {
66 if (trans->type == HAMMER_TRANS_FLS)
67 hammer_lock_ex(&ip->lock);
69 hammer_lock_sh(&ip->lock);
73 * Step 1 - acquire a locked node from the cache if possible
75 if (cache && cache->node) {
76 node = hammer_ref_node_safe(trans->hmp, cache, &error);
78 hammer_lock_sh_lowpri(&node->lock);
79 if (node->flags & HAMMER_NODE_DELETED) {
80 hammer_unlock(&node->lock);
81 hammer_rel_node(node);
90 * Step 2 - If we couldn't get a node from the cache, get
91 * the one from the root of the filesystem.
93 while (node == NULL) {
94 volume = hammer_get_root_volume(trans->hmp, &error);
97 node = hammer_get_node(trans->hmp,
98 volume->ondisk->vol0_btree_root,
100 hammer_rel_volume(volume, 0);
103 hammer_lock_sh_lowpri(&node->lock);
106 * If someone got in before we could lock the node, retry.
108 if (node->flags & HAMMER_NODE_DELETED) {
109 hammer_unlock(&node->lock);
110 hammer_rel_node(node);
114 if (volume->ondisk->vol0_btree_root != node->node_offset) {
115 hammer_unlock(&node->lock);
116 hammer_rel_node(node);
123 * Step 3 - finish initializing the cursor by acquiring the parent
127 error = hammer_load_cursor_parent(cursor, 0);
128 KKASSERT(error == 0);
129 /* if (error) hammer_done_cursor(cursor); */
134 * Normalize a cursor. Sometimes cursors can be left in a state
135 * where node is NULL. If the cursor is in this state, cursor up.
138 hammer_normalize_cursor(hammer_cursor_t cursor)
140 if (cursor->node == NULL) {
141 KKASSERT(cursor->parent != NULL);
142 hammer_cursor_up(cursor);
148 * We are finished with a cursor. We NULL out various fields as sanity
149 * check, in case the structure is inappropriately used afterwords.
152 hammer_done_cursor(hammer_cursor_t cursor)
156 if (cursor->parent) {
157 hammer_unlock(&cursor->parent->lock);
158 hammer_rel_node(cursor->parent);
159 cursor->parent = NULL;
162 hammer_unlock(&cursor->node->lock);
163 hammer_rel_node(cursor->node);
166 if (cursor->data_buffer) {
167 hammer_rel_buffer(cursor->data_buffer, 0);
168 cursor->data_buffer = NULL;
170 if (cursor->record_buffer) {
171 hammer_rel_buffer(cursor->record_buffer, 0);
172 cursor->record_buffer = NULL;
174 if ((ip = cursor->ip) != NULL) {
176 hammer_rel_mem_record(cursor->iprec);
177 cursor->iprec = NULL;
179 KKASSERT(ip->cursor_ip_refs > 0);
180 --ip->cursor_ip_refs;
181 hammer_unlock(&ip->lock);
187 * If we deadlocked this node will be referenced. Do a quick
188 * lock/unlock to wait for the deadlock condition to clear.
190 if (cursor->deadlk_node) {
191 hammer_lock_ex_ident(&cursor->deadlk_node->lock, "hmrdlk");
192 hammer_unlock(&cursor->deadlk_node->lock);
193 hammer_rel_node(cursor->deadlk_node);
194 cursor->deadlk_node = NULL;
196 if (cursor->deadlk_rec) {
197 hammer_wait_mem_record_ident(cursor->deadlk_rec, "hmmdlr");
198 hammer_rel_mem_record(cursor->deadlk_rec);
199 cursor->deadlk_rec = NULL;
204 cursor->left_bound = NULL;
205 cursor->right_bound = NULL;
206 cursor->trans = NULL;
210 * Upgrade cursor->node and cursor->parent to exclusive locks. This
211 * function can return EDEADLK.
213 * The lock must already be either held shared or already held exclusively
216 * If we fail to upgrade the lock and cursor->deadlk_node is NULL,
217 * we add another reference to the node that failed and set
218 * cursor->deadlk_node so hammer_done_cursor() can block on it.
221 hammer_cursor_upgrade(hammer_cursor_t cursor)
225 error = hammer_lock_upgrade(&cursor->node->lock);
226 if (error && cursor->deadlk_node == NULL) {
227 cursor->deadlk_node = cursor->node;
228 hammer_ref_node(cursor->deadlk_node);
229 } else if (error == 0 && cursor->parent) {
230 error = hammer_lock_upgrade(&cursor->parent->lock);
231 if (error && cursor->deadlk_node == NULL) {
232 cursor->deadlk_node = cursor->parent;
233 hammer_ref_node(cursor->deadlk_node);
240 hammer_cursor_upgrade_node(hammer_cursor_t cursor)
244 error = hammer_lock_upgrade(&cursor->node->lock);
245 if (error && cursor->deadlk_node == NULL) {
246 cursor->deadlk_node = cursor->node;
247 hammer_ref_node(cursor->deadlk_node);
253 * Downgrade cursor->node and cursor->parent to shared locks. This
254 * function can return EDEADLK.
257 hammer_cursor_downgrade(hammer_cursor_t cursor)
259 if (hammer_lock_excl_owned(&cursor->node->lock, curthread))
260 hammer_lock_downgrade(&cursor->node->lock);
261 if (cursor->parent &&
262 hammer_lock_excl_owned(&cursor->parent->lock, curthread)) {
263 hammer_lock_downgrade(&cursor->parent->lock);
268 * Seek the cursor to the specified node and index.
270 * The caller must ref the node prior to calling this routine and release
271 * it after it returns. If the seek succeeds the cursor will gain its own
275 hammer_cursor_seek(hammer_cursor_t cursor, hammer_node_t node, int index)
279 hammer_cursor_downgrade(cursor);
282 if (cursor->node != node) {
283 hammer_unlock(&cursor->node->lock);
284 hammer_rel_node(cursor->node);
286 hammer_ref_node(node);
287 hammer_lock_sh(&node->lock);
288 KKASSERT ((node->flags & HAMMER_NODE_DELETED) == 0);
290 if (cursor->parent) {
291 hammer_unlock(&cursor->parent->lock);
292 hammer_rel_node(cursor->parent);
293 cursor->parent = NULL;
294 cursor->parent_index = 0;
296 error = hammer_load_cursor_parent(cursor, 0);
298 cursor->index = index;
303 * Load the parent of cursor->node into cursor->parent.
307 hammer_load_cursor_parent(hammer_cursor_t cursor, int try_exclusive)
310 hammer_node_t parent;
312 hammer_btree_elm_t elm;
316 hmp = cursor->trans->hmp;
318 if (cursor->node->ondisk->parent) {
320 parent = hammer_btree_get_parent(node, &parent_index,
321 &error, try_exclusive);
323 elm = &parent->ondisk->elms[parent_index];
324 cursor->parent = parent;
325 cursor->parent_index = parent_index;
326 cursor->left_bound = &elm[0].internal.base;
327 cursor->right_bound = &elm[1].internal.base;
330 cursor->parent = NULL;
331 cursor->parent_index = 0;
332 cursor->left_bound = &hmp->root_btree_beg;
333 cursor->right_bound = &hmp->root_btree_end;
340 * Cursor up to our parent node. Return ENOENT if we are at the root of
344 hammer_cursor_up(hammer_cursor_t cursor)
348 hammer_cursor_downgrade(cursor);
351 * If the parent is NULL we are at the root of the B-Tree and
354 if (cursor->parent == NULL)
358 * Set the node to its parent.
360 hammer_unlock(&cursor->node->lock);
361 hammer_rel_node(cursor->node);
362 cursor->node = cursor->parent;
363 cursor->index = cursor->parent_index;
364 cursor->parent = NULL;
365 cursor->parent_index = 0;
367 error = hammer_load_cursor_parent(cursor, 0);
372 * Special cursor up given a locked cursor. The orignal node is not
373 * unlocked and released and the cursor is not downgraded. If we are
374 * unable to acquire and lock the parent, EDEADLK is returned.
377 hammer_cursor_up_locked(hammer_cursor_t cursor)
383 * If the parent is NULL we are at the root of the B-Tree and
386 if (cursor->parent == NULL)
392 * Set the node to its parent.
394 cursor->node = cursor->parent;
395 cursor->index = cursor->parent_index;
396 cursor->parent = NULL;
397 cursor->parent_index = 0;
400 * load the new parent, attempt to exclusively lock it. Note that
401 * we are still holding the old parent (now cursor->node) exclusively
402 * locked. This can return EDEADLK.
404 error = hammer_load_cursor_parent(cursor, 1);
406 cursor->parent = cursor->node;
407 cursor->parent_index = cursor->index;
416 * Cursor down through the current node, which must be an internal node.
418 * This routine adjusts the cursor and sets index to 0.
/*
 * NOTE(review): this block is a damaged extraction -- original line
 * numbers are fused into the text, several structural lines (braces,
 * declarations, 'else' arms) are missing, and the function continues
 * past the end of this chunk.  Code left byte-identical; comments only.
 */
421 hammer_cursor_down(hammer_cursor_t cursor)
424 hammer_btree_elm_t elm;
428 * The current node becomes the current parent
430 hammer_cursor_downgrade(cursor);
/* index must address a live element of the internal node */
432 KKASSERT(cursor->index >= 0 && cursor->index < node->ondisk->count);
433 if (cursor->parent) {
434 hammer_unlock(&cursor->parent->lock);
435 hammer_rel_node(cursor->parent);
437 cursor->parent = node;
438 cursor->parent_index = cursor->index;
443 * Extract element to push into at (node,index), set bounds.
445 elm = &node->ondisk->elms[cursor->parent_index];
448 * Ok, push down into elm. If elm specifies an internal or leaf
449 * node the current node must be an internal node. If elm specifies
450 * a spike then the current node must be a leaf node.
452 switch(elm->base.btype) {
453 case HAMMER_BTREE_TYPE_INTERNAL:
454 case HAMMER_BTREE_TYPE_LEAF:
455 KKASSERT(node->ondisk->type == HAMMER_BTREE_TYPE_INTERNAL);
456 KKASSERT(elm->internal.subtree_offset != 0);
/* bounds for the child are the bracketing parent elements */
457 cursor->left_bound = &elm[0].internal.base;
458 cursor->right_bound = &elm[1].internal.base;
459 node = hammer_get_node(cursor->trans->hmp,
460 elm->internal.subtree_offset, 0, &error);
462 KASSERT(elm->base.btype == node->ondisk->type, ("BTYPE MISMATCH %c %c NODE %p\n", elm->base.btype, node->ondisk->type, node));
/* child's recorded parent must match the node we descended from */
463 if (node->ondisk->parent != cursor->parent->node_offset)
464 panic("node %p %016llx vs %016llx\n", node, node->ondisk->parent, cursor->parent->node_offset);
465 KKASSERT(node->ondisk->parent == cursor->parent->node_offset);
/* presumably the default case of the switch -- TODO confirm */
469 panic("hammer_cursor_down: illegal btype %02x (%c)\n",
471 (elm->base.btype ? elm->base.btype : '?'));
475 hammer_lock_sh(&node->lock);
476 KKASSERT ((node->flags & HAMMER_NODE_DELETED) == 0);