2 * Copyright (c) 2007-2008 The DragonFly Project. All rights reserved.
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@backplane.com>
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * 3. Neither the name of The DragonFly Project nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific, prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * $DragonFly: src/sys/vfs/hammer/hammer_cursor.c,v 1.28 2008/06/11 22:33:21 dillon Exp $
38 * HAMMER B-Tree index - cursor support routines
42 static int hammer_load_cursor_parent(hammer_cursor_t cursor, int try_exclusive);
/*
 * Initialize a fresh cursor using the B-Tree node cache.  If the cache
 * is not available initialize a fresh cursor at the root of the filesystem.
 *
 * NOTE(review): this extract is missing interior lines (declarations,
 * error checks, retry gotos, closing braces) relative to the full
 * source; the comments below describe only what is visible.
 */
hammer_init_cursor(hammer_transaction_t trans, hammer_cursor_t cursor,
	     struct hammer_node **cache, hammer_inode_t ip)
	hammer_volume_t volume;

	/* Start from a clean slate and bind the cursor to the transaction. */
	bzero(cursor, sizeof(*cursor));
	cursor->trans = trans;
	/*
	 * If the cursor operation is on behalf of an inode, lock
	 * the inode: exclusively for a flush transaction, shared
	 * otherwise (the 'else' keyword is not visible in this extract).
	 */
	if ((cursor->ip = ip) != NULL) {
		if (trans->type == HAMMER_TRANS_FLS)
			hammer_lock_ex(&ip->lock);
			hammer_lock_sh(&ip->lock);
	/*
	 * Step 1 - acquire a locked node from the cache if possible.
	 * A cached node found to be DELETED after we lock it is unlocked
	 * and released, falling back to the root lookup below.
	 */
	if (cache && *cache) {
		node = hammer_ref_node_safe(trans->hmp, cache, &error);
		hammer_lock_sh(&node->lock);
		if (node->flags & HAMMER_NODE_DELETED) {
			hammer_unlock(&node->lock);
			hammer_rel_node(node);
	/*
	 * Step 2 - If we couldn't get a node from the cache, get
	 * the one from the root of the filesystem.
	 */
	while (node == NULL) {
		volume = hammer_get_root_volume(trans->hmp, &error);
		node = hammer_get_node(trans->hmp,
				       volume->ondisk->vol0_btree_root,
		/* Root volume ref no longer needed once the node is held. */
		hammer_rel_volume(volume, 0);
		hammer_lock_sh(&node->lock);
		/*
		 * If someone got in before we could lock the node, retry.
		 */
		if (node->flags & HAMMER_NODE_DELETED) {
			hammer_unlock(&node->lock);
			hammer_rel_node(node);
		/* The root may also have moved while we were locking. */
		if (volume->ondisk->vol0_btree_root != node->node_offset) {
			hammer_unlock(&node->lock);
			hammer_rel_node(node);
	/*
	 * Step 3 - finish initializing the cursor by acquiring the parent
	 * node with a shared lock (second argument 0).  Failure is treated
	 * as a fatal assertion here; the cleanup call is commented out.
	 */
	error = hammer_load_cursor_parent(cursor, 0);
	KKASSERT(error == 0);
	/* if (error) hammer_done_cursor(cursor); */
/*
 * Tear the cursor down and rebuild it, reusing the original transaction
 * and re-seeding from the inode's B-Tree node cache when an inode is
 * associated with the cursor.
 *
 * NOTE(review): the declarations of 'ip' and 'error' and the return
 * statement are not visible in this extract.
 */
hammer_reinit_cursor(hammer_cursor_t cursor)
	hammer_transaction_t trans;
	struct hammer_node **cache;

	/* Save the transaction; hammer_done_cursor() NULLs cursor->trans. */
	trans = cursor->trans;
	hammer_done_cursor(cursor);
	/* Restart from the inode's cached node position when available. */
	cache = ip ? &ip->cache[0] : NULL;
	error = hammer_init_cursor(trans, cursor, cache, ip);
/*
 * Normalize a cursor.  Sometimes cursors can be left in a state
 * where node is NULL.  If the cursor is in this state, cursor up
 * to the parent — which must exist, per the assertion.
 */
hammer_normalize_cursor(hammer_cursor_t cursor)
	if (cursor->node == NULL) {
		KKASSERT(cursor->parent != NULL);
		hammer_cursor_up(cursor);
/*
 * We are finished with a cursor.  We NULL out various fields as a sanity
 * check, in case the structure is inappropriately used afterwards.
 *
 * Releases, in order: parent node, current node, data/record buffers,
 * the inode lock/refcount, then waits out any recorded deadlock
 * objects before clearing the remaining fields.
 *
 * NOTE(review): the declaration of 'ip' and some closing braces are
 * not visible in this extract.
 */
hammer_done_cursor(hammer_cursor_t cursor)
	/* Drop the parent node's lock and reference first. */
	if (cursor->parent) {
		hammer_unlock(&cursor->parent->lock);
		hammer_rel_node(cursor->parent);
		cursor->parent = NULL;
		hammer_unlock(&cursor->node->lock);
		hammer_rel_node(cursor->node);
	/* Release referenced buffers; the 0 flag matches other rel calls. */
	if (cursor->data_buffer) {
		hammer_rel_buffer(cursor->data_buffer, 0);
		cursor->data_buffer = NULL;
	if (cursor->record_buffer) {
		hammer_rel_buffer(cursor->record_buffer, 0);
		cursor->record_buffer = NULL;
	/* Finish the in-memory record scan and drop the inode ref/lock. */
	if ((ip = cursor->ip) != NULL) {
		hammer_mem_done(cursor);
		KKASSERT(ip->cursor_ip_refs > 0);
		--ip->cursor_ip_refs;
		hammer_unlock(&ip->lock);
	/*
	 * If we deadlocked this node will be referenced.  Do a quick
	 * lock/unlock to wait for the deadlock condition to clear.
	 */
	if (cursor->deadlk_node) {
		hammer_lock_ex_ident(&cursor->deadlk_node->lock, "hmrdlk");
		hammer_unlock(&cursor->deadlk_node->lock);
		hammer_rel_node(cursor->deadlk_node);
		/* One-tick sleep lets the competing thread make progress. */
		tsleep(&cursor->deadlk_node, 0, "hmrdel", 1);
		cursor->deadlk_node = NULL;
	/* Same treatment for a deadlocked in-memory record. */
	if (cursor->deadlk_rec) {
		hammer_wait_mem_record_ident(cursor->deadlk_rec, "hmmdlr");
		hammer_rel_mem_record(cursor->deadlk_rec);
		tsleep(&cursor->deadlk_rec, 0, "hmrdel", 1);
		cursor->deadlk_rec = NULL;
	/* Poison the remaining fields against accidental reuse. */
	cursor->left_bound = NULL;
	cursor->right_bound = NULL;
	cursor->trans = NULL;
/*
 * Upgrade cursor->node and cursor->parent to exclusive locks.  This
 * function can return EDEADLK.
 *
 * The lock must already be either held shared or already held
 * exclusively by the caller.
 *
 * If we fail to upgrade the lock and cursor->deadlk_node is NULL,
 * we add another reference to the node that failed and set
 * cursor->deadlk_node so hammer_done_cursor() can block on it.
 *
 * NOTE(review): the declaration of 'error' and the return statement
 * are not visible in this extract.
 */
hammer_cursor_upgrade(hammer_cursor_t cursor)
	/* Try the current node first; record it on failure ... */
	error = hammer_lock_upgrade(&cursor->node->lock);
	if (error && cursor->deadlk_node == NULL) {
		cursor->deadlk_node = cursor->node;
		hammer_ref_node(cursor->deadlk_node);
	} else if (error == 0 && cursor->parent) {
		/* ... then the parent, recording that one instead. */
		error = hammer_lock_upgrade(&cursor->parent->lock);
		if (error && cursor->deadlk_node == NULL) {
			cursor->deadlk_node = cursor->parent;
			hammer_ref_node(cursor->deadlk_node);
/*
 * Downgrade cursor->node and cursor->parent to shared locks.  Only
 * locks we exclusively own are downgraded; shared holds pass through
 * untouched.  (The original header claimed EDEADLK could be returned,
 * but no such path is visible in this function.)
 */
hammer_cursor_downgrade(hammer_cursor_t cursor)
	if (hammer_lock_excl_owned(&cursor->node->lock, curthread))
		hammer_lock_downgrade(&cursor->node->lock);
	if (cursor->parent &&
	    hammer_lock_excl_owned(&cursor->parent->lock, curthread)) {
		hammer_lock_downgrade(&cursor->parent->lock);
/*
 * Seek the cursor to the specified node and index, swapping the
 * current node/parent references and reloading the parent.  Shared
 * locks are used throughout.
 *
 * NOTE(review): the declaration of 'error', some closing braces and
 * the return statement are not visible in this extract.
 */
hammer_cursor_seek(hammer_cursor_t cursor, hammer_node_t node, int index)
	/* Shared locks suffice for a pure reposition. */
	hammer_cursor_downgrade(cursor);
	if (cursor->node != node) {
		/* Exchange the current node reference for the target. */
		hammer_unlock(&cursor->node->lock);
		hammer_rel_node(cursor->node);
		hammer_ref_node(node);
		hammer_lock_sh(&node->lock);
		KKASSERT ((node->flags & HAMMER_NODE_DELETED) == 0);
		/* The old parent no longer applies; drop it and reload. */
		if (cursor->parent) {
			hammer_unlock(&cursor->parent->lock);
			hammer_rel_node(cursor->parent);
			cursor->parent = NULL;
			cursor->parent_index = 0;
		error = hammer_load_cursor_parent(cursor, 0);
	cursor->index = index;
/*
 * Load the parent of cursor->node into cursor->parent and set the
 * parent_index plus the left/right boundary elements.  A node with no
 * on-disk parent link is the B-Tree root and gets the filesystem-wide
 * bounds instead.
 *
 * try_exclusive != 0 requests an exclusive try-lock on the parent
 * (a nonzero hammer_lock_ex_try() result means the lock failed);
 * otherwise the parent is locked shared.
 *
 * NOTE(review): the 'if (try_exclusive)' / 'else' scaffolding around
 * the two locking calls, the declarations of 'hmp', 'node', 'error'
 * and 'i', and several closing braces are not visible in this extract.
 */
hammer_load_cursor_parent(hammer_cursor_t cursor, int try_exclusive)
	hammer_node_t parent;
	hammer_btree_elm_t elm;

	hmp = cursor->trans->hmp;

	if (cursor->node->ondisk->parent) {
		/* Reference the parent node by its on-disk offset. */
		parent = hammer_get_node(hmp, node->ondisk->parent, 0, &error);
		/* Try-lock failure drops the ref (EDEADLK path, presumably). */
		if (hammer_lock_ex_try(&parent->lock)) {
			hammer_rel_node(parent);
		hammer_lock_sh(&parent->lock);
		KKASSERT ((parent->flags & HAMMER_NODE_DELETED) == 0);
		/* Locate the element in the parent pointing back at node. */
		for (i = 0; i < parent->ondisk->count; ++i) {
			elm = &parent->ondisk->elms[i];
			if (parent->ondisk->elms[i].internal.subtree_offset ==
		/* A missing back-link means the B-Tree is corrupt: panic. */
		if (i == parent->ondisk->count) {
			hammer_unlock(&parent->lock);
			panic("Bad B-Tree link: parent %p node %p\n", parent, node);
		KKASSERT(i != parent->ondisk->count);
		cursor->parent = parent;
		cursor->parent_index = i;
		/* Elements i and i+1 bound the subtree we came from. */
		cursor->left_bound = &elm[0].internal.base;
		cursor->right_bound = &elm[1].internal.base;
	/* (else branch) root node: use the filesystem-wide bounds. */
	cursor->parent = NULL;
	cursor->parent_index = 0;
	cursor->left_bound = &hmp->root_btree_beg;
	cursor->right_bound = &hmp->root_btree_end;
/*
 * Cursor up to our parent node.  Return ENOENT if we are at the root of
 * the B-Tree (the return statements are not visible in this extract).
 */
hammer_cursor_up(hammer_cursor_t cursor)
	/* Only shared locks are carried across the transition. */
	hammer_cursor_downgrade(cursor);

	/*
	 * If the parent is NULL we are at the root of the B-Tree and
	 * there is nothing to move up to.
	 */
	if (cursor->parent == NULL)

	/*
	 * Set the node to its parent.  The old node's lock and reference
	 * are dropped; the parent slot is cleared and reloaded below.
	 */
	hammer_unlock(&cursor->node->lock);
	hammer_rel_node(cursor->node);
	cursor->node = cursor->parent;
	cursor->index = cursor->parent_index;
	cursor->parent = NULL;
	cursor->parent_index = 0;

	error = hammer_load_cursor_parent(cursor, 0);
/*
 * Special cursor up given a locked cursor.  The original node is not
 * unlocked and released and the cursor is not downgraded.  If we are
 * unable to acquire and lock the parent, EDEADLK is returned.
 */
hammer_cursor_up_locked(hammer_cursor_t cursor)
	/*
	 * If the parent is NULL we are at the root of the B-Tree and
	 * there is nothing to move up to.
	 */
	if (cursor->parent == NULL)

	/*
	 * Set the node to its parent.  The old node stays locked; on
	 * success it simply drops out of the cursor.
	 */
	cursor->node = cursor->parent;
	cursor->index = cursor->parent_index;
	cursor->parent = NULL;
	cursor->parent_index = 0;

	/*
	 * load the new parent, attempt to exclusively lock it.  Note that
	 * we are still holding the old parent (now cursor->node) exclusively
	 * locked.  This can return EDEADLK.
	 */
	error = hammer_load_cursor_parent(cursor, 1);
	/*
	 * Restore the previous node/parent relationship — presumably
	 * under an 'if (error)' guard not visible in this extract.
	 */
	cursor->parent = cursor->node;
	cursor->parent_index = cursor->index;
/*
 * Cursor down through the current node, which must be an internal node.
 *
 * This routine adjusts the cursor and sets index to 0.
 *
 * NOTE(review): this function continues past the end of the extract;
 * additional case labels, closing braces and the return are missing.
 */
hammer_cursor_down(hammer_cursor_t cursor)
	hammer_btree_elm_t elm;

	/*
	 * The current node becomes the current parent; the old parent's
	 * lock and reference are released first.
	 */
	hammer_cursor_downgrade(cursor);
	KKASSERT(cursor->index >= 0 && cursor->index < node->ondisk->count);
	if (cursor->parent) {
		hammer_unlock(&cursor->parent->lock);
		hammer_rel_node(cursor->parent);
	cursor->parent = node;
	cursor->parent_index = cursor->index;

	/*
	 * Extract element to push into at (node,index), set bounds.
	 */
	elm = &node->ondisk->elms[cursor->parent_index];

	/*
	 * Ok, push down into elm.  If elm specifies an internal or leaf
	 * node the current node must be an internal node.  If elm specifies
	 * a spike then the current node must be a leaf node.
	 */
	switch(elm->base.btype) {
	case HAMMER_BTREE_TYPE_INTERNAL:
	case HAMMER_BTREE_TYPE_LEAF:
		KKASSERT(node->ondisk->type == HAMMER_BTREE_TYPE_INTERNAL);
		KKASSERT(elm->internal.subtree_offset != 0);
		/* Elements i and i+1 bound the child we descend into. */
		cursor->left_bound = &elm[0].internal.base;
		cursor->right_bound = &elm[1].internal.base;
		node = hammer_get_node(cursor->trans->hmp,
				       elm->internal.subtree_offset, 0, &error);
		/* Sanity: child type and parent back-link must agree. */
		KASSERT(elm->base.btype == node->ondisk->type, ("BTYPE MISMATCH %c %c NODE %p\n", elm->base.btype, node->ondisk->type, node));
		if (node->ondisk->parent != cursor->parent->node_offset)
			panic("node %p %016llx vs %016llx\n", node, node->ondisk->parent, cursor->parent->node_offset);
		KKASSERT(node->ondisk->parent == cursor->parent->node_offset);
		/* Unknown element type is fatal (default case, presumably). */
		panic("hammer_cursor_down: illegal btype %02x (%c)\n",
		      (elm->base.btype ? elm->base.btype : '?'));
	/* Lock the child shared; it must not have been deleted. */
	hammer_lock_sh(&node->lock);
	KKASSERT ((node->flags & HAMMER_NODE_DELETED) == 0);