/*
 * Copyright (c) 2009 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "hammer.h"
static int rebalance_node(struct hammer_ioc_rebalance *rebal,
			hammer_cursor_t cursor, hammer_node_lock_t lcache);
static void rebalance_closeout(hammer_node_lock_t base_item, int base_count,
			hammer_btree_elm_t elm);
static void rebalance_parent_ptrs(hammer_node_lock_t base_item, int index,
			hammer_node_lock_t item, hammer_node_lock_t chld_item);
/*
 * Iterate through the specified range of object ids and rebalance B-Tree
 * leaf and internal nodes we encounter.  A forwards iteration is used.
 *
 * All leaves are at the same depth.  We use the B-Tree scan code loosely
 * to position ourselves and create degenerate cases to skip indices
 * that we have rebalanced in bulk.
 */
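
/*
 * Illustrative only: userland normally reaches this code through the
 * HAMMERIOC_REBALANCE ioctl.  A rough sketch (the field values below
 * are assumptions for illustration, not defaults taken from this file):
 *
 *	struct hammer_ioc_rebalance rebal;
 *
 *	bzero(&rebal, sizeof(rebal));
 *	rebal.key_beg.localization = HAMMER_MIN_LOCALIZATION;
 *	rebal.key_beg.obj_id = HAMMER_MIN_OBJID;
 *	rebal.key_end.localization = HAMMER_MAX_LOCALIZATION;
 *	rebal.key_end.obj_id = HAMMER_MAX_OBJID;
 *	rebal.saturation = HAMMER_BTREE_INT_ELMS * 3 / 4;
 *	if (ioctl(fd, HAMMERIOC_REBALANCE, &rebal) < 0)
 *		err(1, "rebalance");
 */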
int
hammer_ioc_rebalance(hammer_transaction_t trans, hammer_inode_t ip,
		 struct hammer_ioc_rebalance *rebal)
{
	struct hammer_cursor cursor;
	struct hammer_node_lock lcache;
	hammer_btree_leaf_elm_t elm;
	int error;
	int seq;
	if ((rebal->key_beg.localization | rebal->key_end.localization) &
	    HAMMER_LOCALIZE_PSEUDOFS_MASK) {
		return(EINVAL);
	}
	if (rebal->key_beg.localization > rebal->key_end.localization)
		return(EINVAL);
	if (rebal->key_beg.localization == rebal->key_end.localization) {
		if (rebal->key_beg.obj_id > rebal->key_end.obj_id)
			return(EINVAL);
		/* key-space limitations - no check needed */
	}
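
	/*
	 * Clamp the requested saturation (the target number of elements
	 * per internal node) to a sane range: at least half-full and at
	 * most completely full.
	 */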
	if (rebal->saturation < HAMMER_BTREE_INT_ELMS / 2)
		rebal->saturation = HAMMER_BTREE_INT_ELMS / 2;
	if (rebal->saturation > HAMMER_BTREE_INT_ELMS)
		rebal->saturation = HAMMER_BTREE_INT_ELMS;
	rebal->key_cur = rebal->key_beg;
	rebal->key_cur.localization &= HAMMER_LOCALIZE_MASK;
	rebal->key_cur.localization += ip->obj_localization;
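
	/*
	 * Pre-allocate a lock cache two levels deep: rebalance_node()
	 * locks both the children and the children's children of each
	 * internal node it processes.
	 */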
	hammer_btree_lcache_init(trans->hmp, &lcache, 2);

	seq = trans->hmp->flusher.done;
	/*
	 * Scan forwards.  Retries typically occur if a deadlock is detected.
	 */
retry:
	error = hammer_init_cursor(trans, &cursor, NULL, NULL);
	if (error) {
		hammer_done_cursor(&cursor);
		goto failed;
	}
	cursor.key_beg = rebal->key_cur;
	cursor.key_end = rebal->key_end;
	cursor.key_end.localization &= HAMMER_LOCALIZE_MASK;
	cursor.key_end.localization += ip->obj_localization;
	cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE;
	cursor.flags |= HAMMER_CURSOR_BACKEND;
	/*
	 * Cause internal nodes to be returned on the way up.  Internal nodes
	 * are not returned on the way down so we can create a degenerate
	 * case to handle internal nodes as a trailing function of their
	 * sub-trees.
	 *
	 * Note that by not setting INSERTING or PRUNING no boundary
	 * corrections will be made and a sync lock is not needed for the
	 * B-Tree scan itself.
	 */
	cursor.flags |= HAMMER_CURSOR_REBLOCKING;
	error = hammer_btree_first(&cursor);
	while (error == 0) {
		/*
		 * Rebalancing can be hard on the memory allocator, make
		 * sure there is enough free memory before doing it.
		 */
		if (vm_test_nominal()) {
			hammer_unlock_cursor(&cursor);
			vm_wait_nominal();
			hammer_lock_cursor(&cursor);
		}
		/*
		 * We only care about internal nodes visited for the last
		 * time on the way up... that is, a trailing scan of the
		 * internal node after all of its children have been recursed
		 * through.
		 */
		if (cursor.node->ondisk->type == HAMMER_BTREE_TYPE_INTERNAL) {
			/*
			 * Leave cursor.index alone, we want to recurse
			 * through all children of the internal node before
			 * visiting it.
			 *
			 * Process the internal node on the way up after
			 * the last child's sub-tree has been balanced.
			 */
			if (cursor.index == cursor.node->ondisk->count - 1) {
				hammer_sync_lock_sh(trans);
				error = rebalance_node(rebal, &cursor, &lcache);
				hammer_sync_unlock(trans);
			}
		} else {
			/*
			 * We don't need to iterate through all the leaf
			 * elements, we only care about the parent (internal)
			 * node.
			 */
			cursor.index = cursor.node->ondisk->count - 1;
		}
		if (error)
			break;
		/*
		 * Update returned scan position and do a flush if
		 * necessary.
		 *
		 * WARNING: We extract the base using the leaf element
		 *	    type but this could be an internal node.  The
		 *	    base is the same either way.
		 *
		 *	    However, due to the rebalancing operation the
		 *	    cursor position may have exceeded the right-hand
		 *	    boundary.
		 *
		 * WARNING: See warnings in hammer_unlock_cursor()
		 *	    function.
		 */
		elm = &cursor.node->ondisk->elms[cursor.index].leaf;
		rebal->key_cur = elm->base;
		++rebal->stat_ncount;
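
		/*
		 * Throttle against the flusher: if meta-data buffers are
		 * heavily loaded or the UNDO FIFO is nearly exhausted,
		 * wait for a flush cycle to complete before continuing.
		 */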
		while (hammer_flusher_meta_halflimit(trans->hmp) ||
		       hammer_flusher_undo_exhausted(trans, 2)) {
			hammer_unlock_cursor(&cursor);
			hammer_flusher_wait(trans->hmp, seq);
			hammer_lock_cursor(&cursor);
			seq = hammer_flusher_async_one(trans->hmp);
		}
		/*
		 * Before iterating check if the rebalance operation caused
		 * the cursor to index past the right-hand boundary and make
		 * sure to stop if it does.  Otherwise the iteration may
		 * panic e.g. due to the key maxing out its fields and no
		 * longer being within the strict bounds of the root node.
		 */
		if (hammer_btree_cmp(&rebal->key_cur, &cursor.key_end) > 0) {
			rebal->key_cur = cursor.key_end;
			break;
		}
		/*
		 * Iterate, stop if a signal was received.
		 */
		if ((error = hammer_signal_check(trans->hmp)) != 0)
			break;
		error = hammer_btree_iterate(&cursor);
	}
	if (error == ENOENT)
		error = 0;
	hammer_done_cursor(&cursor);
	if (error == EDEADLK) {
		++rebal->stat_collisions;
		goto retry;
	}
	if (error == EINTR) {
		rebal->head.flags |= HAMMER_IOC_HEAD_INTR;
		error = 0;
	}
failed:
	rebal->key_cur.localization &= HAMMER_LOCALIZE_MASK;
	hammer_btree_lcache_free(trans->hmp, &lcache);
	return(error);
}
/*
 * Rebalance an internal node, called via a trailing upward recursion.
 * All the children have already been individually rebalanced.
 *
 * To rebalance we scan the elements in the children and pack them,
 * so we actually need to lock the children and the children's children.
 *
 *	INTERNAL_NODE
 *	/ / | | | \ \
 *     C C  C C C  C C	children (first level) (internal or leaf nodes)
 *			children's elements (second level)
 *
 *	<<<----------	pack children's elements, possibly remove excess
 *			children after packing.
 *
 * NOTE: The mirror_tids, parent pointers, and child pointers must be updated.
 *	 Any live tracked B-Tree nodes must be updated (we worm out of that
 *	 by not allowing any).  And boundary elements must be preserved.
 *
 * NOTE: If the children are leaf nodes we may have a degenerate case
 *	 where there are no elements in the leaves.
 */
static int
rebalance_node(struct hammer_ioc_rebalance *rebal, hammer_cursor_t cursor,
	       struct hammer_node_lock *lcache)
{
	struct hammer_node_lock lockroot;
	hammer_node_lock_t base_item;
	hammer_node_lock_t chld_item;
	hammer_node_lock_t item;
	hammer_btree_elm_t elm;
	hammer_node_t node;
	hammer_tid_t tid;
	u_int8_t type1 __debugvar;
	int base_count;
	int root_count;
	int avg_elms;
	int count;
	int error;
	int i;
	int n;
	/*
	 * Lock the parent node via the cursor, collect and lock our
	 * children and children's children.
	 *
	 * By the way, this is a LOT of locks.
	 */
	hammer_node_lock_init(&lockroot, cursor->node);
	error = hammer_cursor_upgrade(cursor);
	if (error)
		goto done;
	error = hammer_btree_lock_children(cursor, 2, &lockroot, lcache);
	if (error)
		goto done;
	/*
	 * Make a copy of all the locked on-disk data to simplify the element
	 * shifting we are going to have to do.  We will modify the copy
	 * first.
	 */
	hammer_btree_lock_copy(cursor, &lockroot);
	/*
	 * Look at the first child node.
	 */
	if (TAILQ_FIRST(&lockroot.list) == NULL)
		goto done;
	type1 = TAILQ_FIRST(&lockroot.list)->node->ondisk->type;
	/*
	 * Figure out the total number of children's children and
	 * calculate the average number of elements per child.
	 *
	 * The minimum avg_elms is 1 when count > 0.  avg_elms * root_elms
	 * is always greater than or equal to count.
	 *
	 * If count == 0 we hit a degenerate case which will cause
	 * avg_elms to also calculate as 0.
	 */
	if (hammer_debug_general & 0x1000)
		kprintf("lockroot %p count %d\n", &lockroot, lockroot.count);
	count = 0;
	TAILQ_FOREACH(item, &lockroot.list, entry) {
		if (hammer_debug_general & 0x1000)
			kprintf("add count %d\n", item->count);
		count += item->count;
		KKASSERT(item->node->ondisk->type == type1);
	}
	avg_elms = (count + (lockroot.count - 1)) / lockroot.count;
	KKASSERT(avg_elms >= 0);
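
	/*
	 * Worked example with hypothetical numbers: count = 10 elements
	 * spread over lockroot.count = 4 children gives the ceiling
	 * division avg_elms = (10 + 3) / 4 = 3, and 3 * 4 >= 10 as the
	 * comment above requires.
	 */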
	/*
	 * If the average number of elements per child is too low then
	 * calculate the desired number of children (n) such that the
	 * average number of elements is reasonable.
	 *
	 * If the desired number of children is 1 then avg_elms will
	 * wind up being count, which may still be smaller than saturation
	 * but that is ok.
	 */
	if (count && avg_elms < rebal->saturation) {
		n = (count + (rebal->saturation - 1)) / rebal->saturation;
		avg_elms = (count + (n - 1)) / n;
	}
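
	/*
	 * Worked example with hypothetical numbers: count = 100 and
	 * saturation = 31 yield n = (100 + 30) / 31 = 4 children,
	 * giving avg_elms = (100 + 3) / 4 = 25 elements per child.
	 */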
	/*
	 * Pack the elements in the children.  Elements for each item are
	 * packed into base_item until avg_elms is reached, then base_item
	 * iterates.
	 *
	 * hammer_cursor_moved_element() is called for each element moved
	 * to update tracked cursors, including the index beyond the last
	 * element (at count).
	 *
	 * Any cursors tracking the internal node itself must also be
	 * updated, potentially repointing them at a leaf and clearing
	 * ATEDISK.
	 */
	base_item = TAILQ_FIRST(&lockroot.list);
	base_count = 0;
	root_count = 0;
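
	/*
	 * Illustration with hypothetical numbers: if avg_elms worked out
	 * to 3 and three children hold 2, 2, and 2 elements, packing
	 * fills the first child with 3 elements, the second with the
	 * remaining 3, and empties the third, which is destroyed below.
	 */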
	TAILQ_FOREACH(item, &lockroot.list, entry) {
		node = item->node;
		KKASSERT(item->count == node->ondisk->count);
		chld_item = TAILQ_FIRST(&item->list);
		for (i = 0; i < item->count; ++i) {
			/*
			 * Closeout.  If the next element is at index 0
			 * just use the existing separator in the parent.
			 */
			if (base_count == avg_elms) {
				if (i == 0) {
					elm = &lockroot.node->ondisk->elms[
						item->index];
				} else {
					elm = &node->ondisk->elms[i];
				}
				rebalance_closeout(base_item, base_count, elm);
				base_item = TAILQ_NEXT(base_item, entry);
				KKASSERT(base_item);
				base_count = 0;
				++root_count;
			}
			/*
			 * Check degenerate no-work case.  Otherwise pack
			 * the element.
			 *
			 * All changes are made to the copy.
			 */
			if (item == base_item && i == base_count) {
				++base_count;
				if (chld_item)
					chld_item = TAILQ_NEXT(chld_item, entry);
				continue;
			}

			/*
			 * Pack element.
			 */
			elm = &base_item->copy->elms[base_count];
			*elm = node->ondisk->elms[i];
			base_item->flags |= HAMMER_NODE_LOCK_UPDATED;
			/*
			 * Adjust the mirror_tid of the target and the
			 * internal element linkage.
			 *
			 * The parent node (lockroot.node) should already
			 * have an aggregate mirror_tid so we do not have
			 * to update that.  However, it is possible for us
			 * to catch a hammer_btree_mirror_propagate() with
			 * its pants down.  Update the parent if necessary.
			 */
			tid = node->ondisk->mirror_tid;

			if (base_item->copy->mirror_tid < tid) {
				base_item->copy->mirror_tid = tid;
				if (lockroot.copy->mirror_tid < tid) {
					lockroot.copy->mirror_tid = tid;
					lockroot.flags |=
						HAMMER_NODE_LOCK_UPDATED;
				}
				if (lockroot.copy->elms[root_count].
				    internal.mirror_tid < tid) {
					lockroot.copy->elms[root_count].
						internal.mirror_tid = tid;
					lockroot.flags |=
						HAMMER_NODE_LOCK_UPDATED;
				}
				base_item->flags |= HAMMER_NODE_LOCK_UPDATED;
			}
			/*
			 * We moved elm.  The parent pointers for any
			 * children of elm must be repointed.
			 */
			if (item != base_item &&
			    node->ondisk->type == HAMMER_BTREE_TYPE_INTERNAL) {
				KKASSERT(chld_item);
				rebalance_parent_ptrs(base_item, base_count,
						      item, chld_item);
			}
			hammer_cursor_moved_element(item->parent->node,
						    item->index,
						    node, i,
						    base_item->node,
						    base_count);
			++base_count;
			if (chld_item)
				chld_item = TAILQ_NEXT(chld_item, entry);
		}

		/*
		 * Always call at the end (i == number of elements) in
		 * case a cursor is sitting indexed there.
		 */
		hammer_cursor_moved_element(item->parent->node, item->index,
					    node, i,
					    base_item->node, base_count);
	}
	/*
	 * Packing complete, close-out base_item using the right-hand
	 * boundary of the original parent.
	 *
	 * If we will be deleting nodes from the root shift the old
	 * right-hand-boundary to the new ending index.
	 */
	elm = &lockroot.node->ondisk->elms[lockroot.node->ondisk->count];
	rebalance_closeout(base_item, base_count, elm);
	++root_count;
	if (lockroot.copy->count != root_count) {
		lockroot.copy->count = root_count;
		lockroot.copy->elms[root_count] = *elm;
		lockroot.flags |= HAMMER_NODE_LOCK_UPDATED;
	}
	/*
	 * Any extra items beyond base_item are now completely empty and
	 * can be destroyed.  Queue the destruction up in the copy.  Note
	 * that none of the destroyed nodes are part of our cursor.
	 *
	 * The cursor is locked so it isn't on the tracking list.  It
	 * should have been pointing at the boundary element (at root_count).
	 * When deleting elements from the root (which is cursor.node), we
	 * have to update the cursor.index manually to keep it in bounds.
	 */
	while ((base_item = TAILQ_NEXT(base_item, entry)) != NULL) {
		hammer_cursor_removed_node(base_item->node, lockroot.node,
					   base_count);
		hammer_cursor_deleted_element(lockroot.node, base_count);
		base_item->copy->type = HAMMER_BTREE_TYPE_DELETED;
		base_item->copy->count = 0;
		base_item->flags |= HAMMER_NODE_LOCK_UPDATED;
		if (cursor->index > lockroot.copy->count)
			--cursor->index;
		++rebal->stat_deletions;
	}
	/*
	 * All done, sync the locked child tree to disk.  This will also
	 * flush and delete deleted nodes.
	 */
	rebal->stat_nrebal += hammer_btree_sync_copy(cursor, &lockroot);
done:
	hammer_btree_unlock_children(cursor->trans->hmp, &lockroot, lcache);
	hammer_cursor_downgrade(cursor);
	return (error);
}
/*
 * Close-out the child base_item.  This node contains base_count
 * elements.
 *
 * If the node is an internal node the right-hand boundary must be
 * assigned to element base_count.
 */
static
void
rebalance_closeout(hammer_node_lock_t base_item, int base_count,
		   hammer_btree_elm_t elm)
{
	hammer_node_lock_t parent;
	hammer_btree_elm_t base_elm;
	hammer_btree_elm_t rbound_elm;
	u_int8_t save;
	/*
	 * Update the count.  NOTE:  base_count can be 0 for the
	 * degenerate leaf case.
	 */
	if (hammer_debug_general & 0x1000) {
		kprintf("rebalance_closeout %016llx:",
			(long long)base_item->node->node_offset);
	}
	if (base_item->copy->count != base_count) {
		base_item->flags |= HAMMER_NODE_LOCK_UPDATED;
		base_item->copy->count = base_count;
		if (hammer_debug_general & 0x1000)
			kprintf(" (count update)");
	}
	/*
	 * If we are closing out an internal node we must assign
	 * a right-hand boundary.  Use the element contents as the
	 * right-hand boundary.
	 *
	 * Internal nodes are required to have at least one child,
	 * otherwise the left and right boundary would end up being
	 * the same element.  Only leaf nodes can be empty.
	 *
	 * Rebalancing may cut-off an internal node such that the
	 * new right hand boundary is the next element anyway, but
	 * we still have to make sure that subtree_offset, btype,
	 * and mirror_tid are all 0.
	 */
	if (base_item->copy->type == HAMMER_BTREE_TYPE_INTERNAL) {
		KKASSERT(base_count != 0);
		base_elm = &base_item->copy->elms[base_count];

		if (bcmp(base_elm, elm, sizeof(*elm)) != 0 ||
		    elm->internal.subtree_offset ||
		    elm->internal.mirror_tid ||
		    elm->base.btype) {
			*base_elm = *elm;
			base_elm->internal.subtree_offset = 0;
			base_elm->internal.mirror_tid = 0;
			base_elm->base.btype = 0;
			base_item->flags |= HAMMER_NODE_LOCK_UPDATED;
			if (hammer_debug_general & 0x1000)
				kprintf(" (rhs update)");
		} else {
			if (hammer_debug_general & 0x1000)
				kprintf(" (rhs same)");
		}
	}
	/*
	 * The parent's boundary must be updated.  Be careful to retain
	 * the btype and non-base internal fields as that information is
	 * important.
	 */
	parent = base_item->parent;
	rbound_elm = &parent->copy->elms[base_item->index + 1];
	if (bcmp(&rbound_elm->base, &elm->base, sizeof(elm->base)) != 0) {
		save = rbound_elm->base.btype;
		rbound_elm->base = elm->base;
		rbound_elm->base.btype = save;
		parent->flags |= HAMMER_NODE_LOCK_UPDATED;
		if (hammer_debug_general & 0x1000) {
			kprintf(" (parent bound update %d)",
				base_item->index + 1);
		}
	}
	if (hammer_debug_general & 0x1000)
		kprintf("\n");
}
/*
 * An element in item has moved to base_item.  We must update the parent
 * pointer of the node the element points to (which is chld_item).
 */
static
void
rebalance_parent_ptrs(hammer_node_lock_t base_item, int index,
		      hammer_node_lock_t item, hammer_node_lock_t chld_item)
{
	KKASSERT(chld_item->node->ondisk->parent == item->node->node_offset);
	chld_item->copy->parent = base_item->node->node_offset;
	chld_item->flags |= HAMMER_NODE_LOCK_UPDATED;
	hammer_cursor_parent_changed(chld_item->node,
				     item->node, base_item->node, index);
}