2 * Copyright (c) 2007 The DragonFly Project. All rights reserved.
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@backplane.com>
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * 3. Neither the name of The DragonFly Project nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific, prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * $DragonFly: src/sys/vfs/hammer/hammer_btree.c,v 1.11 2007/12/29 09:01:27 dillon Exp $
40 * HAMMER implements a modified B+Tree. In documentation this will
41 * simply be referred to as the HAMMER B-Tree. Basically a B-Tree
42 * looks like a B+Tree (A B-Tree which stores its records only at the leafs
43 * of the tree), but adds two additional boundary elements which describe
44 * the left-most and right-most element a node is able to represent. In
45 * other words, we have boundary elements at the two ends of a B-Tree node
46 * instead of sub-tree pointers.
48 * A B-Tree internal node looks like this:
50 * B N N N N N N B <-- boundary and internal elements
51 * S S S S S S S <-- subtree pointers
53 * A B-Tree leaf node basically looks like this:
55 * L L L L L L L L <-- leaf elements
57 * The radix for an internal node is 1 less than a leaf but we get a
58 * number of significant benefits for our troubles.
60 * The big benefit to using a B-Tree containing boundary information
61 * is that it is possible to cache pointers into the middle of the tree
62 * and not have to start searches, insertions, OR deletions at the root
63 * node. In particular, searches are able to progress in a definitive
64 * direction from any point in the tree without revisiting nodes. This
65 * greatly improves the efficiency of many operations, most especially
68 * B-Trees also make the stacking of trees fairly straightforward.
70 * INTER-CLUSTER ELEMENTS: An element of an internal node may reference
71 * the root of another cluster rather than a node in the current cluster.
72 * This is known as an inter-cluster reference. Only B-Tree searches
73 * will cross cluster boundaries. The rebalancing and collapse code does
74 * not attempt to move children between clusters. A major effect of this
75 * is that we have to relax minimum element count requirements and allow
76 * trees to become somewhat unbalanced.
78 * INSERTIONS AND DELETIONS: When inserting we split full nodes on our
79 * way down as an optimization. I originally experimented with rebalancing
80 * nodes on the way down for deletions but it created a huge mess due to
81 * the way inter-cluster linkages work. Instead, now I simply allow
82 * the tree to become unbalanced and allow leaf nodes to become empty.
83 * The delete code will try to clean things up from the bottom-up but
84 * will stop if related elements are not in-core or if it cannot get a node
/*
 * Forward declarations for the file-local B-Tree helper routines
 * defined later in this file.
 */
91 static int btree_search(hammer_cursor_t cursor, int flags);
92 static int btree_split_internal(hammer_cursor_t cursor);
93 static int btree_split_leaf(hammer_cursor_t cursor);
94 static int btree_remove(hammer_cursor_t cursor);
95 static int btree_set_parent(hammer_node_t node, hammer_btree_elm_t elm);
97 static int btree_rebalance(hammer_cursor_t cursor);
98 static int btree_collapse(hammer_cursor_t cursor);
100 static int btree_node_is_full(hammer_node_ondisk_t node);
101 static void hammer_make_separator(hammer_base_elm_t key1,
102 hammer_base_elm_t key2, hammer_base_elm_t dest);
105 * Iterate records after a search. The cursor is iterated forwards past
106 * the current record until a record matching the key-range requirements
107 * is found. ENOENT is returned if the iteration goes past the ending
110 * The iteration is inclusive of key_beg and can be inclusive or exclusive
111 * of key_end depending on whether HAMMER_CURSOR_END_INCLUSIVE is set.
113 * cursor->key_beg may or may not be modified by this function during
114 * the iteration. XXX future - in case of an inverted lock we may have
115 * to reinitiate the lookup and set key_beg to properly pick up where we
119 hammer_btree_iterate(hammer_cursor_t cursor)
121 hammer_node_ondisk_t node;
122 hammer_btree_elm_t elm;
128 * Skip past the current record
130 node = cursor->node->ondisk;
/* ATEDISK means the current element was already consumed by the caller. */
133 if (cursor->index < node->count &&
134 (cursor->flags & HAMMER_CURSOR_ATEDISK)) {
139 * Loop until an element is found or we are done.
143 * We iterate up the tree and then index over one element
144 * while we are at the last element in the current node.
146 * NOTE: This can pop us up to another cluster.
148 * If we are at the root of the root cluster, cursor_up
151 * NOTE: hammer_cursor_up() will adjust cursor->key_beg
152 * when told to re-search for the cluster tag.
154 * XXX this could be optimized by storing the information in
155 * the parent reference.
157 * XXX we can lose the node lock temporarily, this could mess
/* Exhausted this node: pop up one level and continue with the parent. */
160 if (cursor->index == node->count) {
161 error = hammer_cursor_up(cursor, 0);
164 node = cursor->node->ondisk;
165 KKASSERT(cursor->index != node->count);
171 * Check internal or leaf element. Determine if the record
172 * at the cursor has gone beyond the end of our range.
173 * Remember that our key range is end-exclusive.
175 * Generally we recurse down through internal nodes. An
176 * internal node can only be returned if INCLUSTER is set
177 * and the node represents a cluster-push record. Internal
178 * elements do not contain create_tid/delete_tid information.
180 if (node->type == HAMMER_BTREE_TYPE_INTERNAL) {
181 elm = &node->elms[cursor->index];
182 r = hammer_btree_cmp(&cursor->key_end, &elm[0].base);
183 s = hammer_btree_cmp(&cursor->key_beg, &elm[1].base);
/* NOTE(review): debug trace left in the iteration hot path --
 * presumably temporary instrumentation; confirm before release. */
184 kprintf("SCAN %d r/s %d/%d\n", cursor->index, r, s);
189 if (r == 0 && (cursor->flags & HAMMER_CURSOR_END_INCLUSIVE) == 0) {
/* Recurse into the child unless this is a cluster-push element
 * and the caller asked to stay within the cluster (INCLUSTER). */
194 if ((cursor->flags & HAMMER_CURSOR_INCLUSTER) == 0 ||
195 elm->internal.rec_offset == 0) {
196 error = hammer_cursor_down(cursor);
199 KKASSERT(cursor->index == 0);
200 node = cursor->node->ondisk;
204 elm = &node->elms[cursor->index];
205 r = hammer_btree_cmp(&cursor->key_end, &elm->base);
210 if (r == 0 && (cursor->flags & HAMMER_CURSOR_END_INCLUSIVE) == 0) {
214 if ((cursor->flags & HAMMER_CURSOR_ALLHISTORY) == 0 &&
215 hammer_btree_chkts(cursor->key_beg.create_tid,
231 * Lookup cursor->key_beg. 0 is returned on success, ENOENT if the entry
232 * could not be found, and a fatal error otherwise.
234 * The cursor is suitably positioned for a deletion on success, and suitably
235 * positioned for an insertion on ENOENT.
237 * The cursor may begin anywhere, the search will traverse clusters in
238 * either direction to locate the requested element.
241 hammer_btree_lookup(hammer_cursor_t cursor)
245 error = btree_search(cursor, 0);
/* NOTE(review): cursor->flags is used as a bare truth test here --
 * presumably intended to check the GET_RECORD/GET_DATA extract bits
 * before calling hammer_btree_extract(); verify the intent. */
246 if (error == 0 && cursor->flags)
247 error = hammer_btree_extract(cursor, cursor->flags);
252 * Execute the logic required to start an iteration. The first record
253 * located within the specified range is returned and iteration control
254 * flags are adjusted for successive hammer_btree_iterate() calls.
257 hammer_btree_first(hammer_cursor_t cursor)
261 error = hammer_btree_lookup(cursor);
/* On a lookup miss the cursor is positioned at the insertion point;
 * clear ATEDISK so iterate does not skip the element under the cursor,
 * then iterate forward to the first record within the key range. */
262 if (error == ENOENT) {
263 cursor->flags &= ~HAMMER_CURSOR_ATEDISK;
264 error = hammer_btree_iterate(cursor);
/* Mark the returned record as consumed for subsequent iterations. */
266 cursor->flags |= HAMMER_CURSOR_ATEDISK;
271 * Extract the record and/or data associated with the cursor's current
272 * position. Any prior record or data stored in the cursor is replaced.
273 * The cursor must be positioned at a leaf node.
275 * NOTE: Most extractions occur at the leaf of the B-Tree. The only
276 * extraction allowed at an internal element is at a cluster-push.
277 * Cluster-push elements have records but no data.
280 hammer_btree_extract(hammer_cursor_t cursor, int flags)
282 hammer_node_ondisk_t node;
283 hammer_btree_elm_t elm;
284 hammer_cluster_t cluster;
291 * A cluster record type has no data reference, the information
292 * is stored directly in the record and B-Tree element.
294 * The case where the data reference resolves to the same buffer
295 * as the record reference must be handled.
297 node = cursor->node->ondisk;
298 elm = &node->elms[cursor->index];
299 cluster = cursor->node->cluster;
300 cursor->flags &= ~HAMMER_CURSOR_DATA_EMBEDDED;
305 * Internal elements can only be cluster pushes. A cluster push has
308 if (node->type == HAMMER_BTREE_TYPE_INTERNAL) {
/* NOTE(review): this reads through the 'leaf' union arm even though
 * the element is an internal (cluster-push) element -- presumably
 * leaf.rec_offset aliases internal.rec_offset; verify union layout. */
309 cloff = elm->leaf.rec_offset;
310 KKASSERT(cloff != 0);
311 cursor->record = hammer_bread(cluster, cloff,
312 HAMMER_FSBUF_RECORDS, &error,
313 &cursor->record_buffer);
320 if ((flags & HAMMER_CURSOR_GET_RECORD) && error == 0) {
321 cloff = elm->leaf.rec_offset;
322 cursor->record = hammer_bread(cluster, cloff,
323 HAMMER_FSBUF_RECORDS, &error,
324 &cursor->record_buffer);
328 if ((flags & HAMMER_CURSOR_GET_DATA) && error == 0) {
/* Data lives in a different filesystem buffer than the record. */
329 if ((cloff ^ elm->leaf.data_offset) & ~HAMMER_BUFMASK) {
331 * The data is not in the same buffer as the last
332 * record we cached, but it could still be embedded
333 * in a record. Note that we may not have loaded the
334 * record's buffer above, depending on flags.
336 if ((elm->leaf.rec_offset ^ elm->leaf.data_offset) &
338 if (elm->leaf.data_len & HAMMER_BUFMASK)
339 buf_type = HAMMER_FSBUF_DATA;
341 buf_type = 0; /* pure data buffer */
343 buf_type = HAMMER_FSBUF_RECORDS;
345 cursor->data = hammer_bread(cluster,
346 elm->leaf.data_offset,
348 &cursor->data_buffer);
351 * Data in same buffer as record. Note that we
352 * leave any existing data_buffer intact, even
353 * though we don't use it in this case, in case
354 * other records extracted during an iteration
357 * The data must be embedded in the record for this
360 * Just assume the buffer type is correct.
362 cursor->data = (void *)
363 ((char *)cursor->record_buffer->ondisk +
364 (elm->leaf.data_offset & HAMMER_BUFMASK));
365 roff = (char *)cursor->data - (char *)cursor->record;
366 KKASSERT (roff >= 0 && roff < HAMMER_RECORD_SIZE);
367 cursor->flags |= HAMMER_CURSOR_DATA_EMBEDDED;
375 * Insert a leaf element into the B-Tree at the current cursor position.
376 * The cursor is positioned such that the element at and beyond the cursor
377 * are shifted to make room for the new record.
379 * The caller must call hammer_btree_lookup() with the HAMMER_CURSOR_INSERT
380 * flag set and that call must return ENOENT before this function can be
383 * ENOSPC is returned if there is no room to insert a new record.
386 hammer_btree_insert(hammer_cursor_t cursor, hammer_btree_elm_t elm)
388 hammer_node_ondisk_t parent;
389 hammer_node_ondisk_t node;
393 /* HANDLED BY CALLER */
395 * Issue a search to get our cursor at the right place. The search
396 * will get us to a leaf node.
398 * The search also does some setup for our insert, so there is always
/* A successful insert position is reported by the search as ENOENT
 * (the key does not yet exist); any other result aborts the insert. */
401 error = btree_search(cursor, HAMMER_CURSOR_INSERT);
402 if (error != ENOENT) {
410 * Insert the element at the leaf node and update the count in the
411 * parent. It is possible for parent to be NULL, indicating that
412 * the root of the B-Tree in the cluster is a leaf. It is also
413 * possible for the leaf to be empty.
415 * Remember that the right-hand boundary is not included in the
418 node = cursor->node->ondisk;
420 KKASSERT(node->type == HAMMER_BTREE_TYPE_LEAF);
421 KKASSERT(node->count < HAMMER_BTREE_LEAF_ELMS);
/* Shift elements at and beyond the insertion point one slot right. */
422 if (i != node->count) {
423 bcopy(&node->elms[i], &node->elms[i+1],
424 (node->count - i) * sizeof(*elm));
426 node->elms[i] = *elm;
428 hammer_modify_node(cursor->node);
431 * Adjust the sub-tree count in the parent. note that the parent
432 * may be in a different cluster.
434 if (cursor->parent) {
435 parent = cursor->parent->ondisk;
436 i = cursor->parent_index;
437 ++parent->elms[i].internal.subtree_count;
438 KKASSERT(parent->elms[i].internal.subtree_count <= node->count);
439 hammer_modify_node(cursor->parent);
445 * Delete a record from the B-Tree's at the current cursor position.
446 * The cursor is positioned such that the current element is the one
449 * On return the cursor will be positioned after the deleted element and
450 * MAY point to an internal node. It will be suitable for the continuation
451 * of an iteration but not for an insertion or deletion.
453 * Deletions will attempt to partially rebalance the B-Tree in an upward
454 * direction. It is possible to end up with empty leafs. An empty internal
455 * node is impossible (worst case: it has one element pointing to an empty
459 hammer_btree_delete(hammer_cursor_t cursor)
461 hammer_node_ondisk_t ondisk;
463 hammer_node_t parent;
464 hammer_btree_elm_t elm;
469 /* HANDLED BY CALLER */
471 * Locate the leaf element to delete. The search is also responsible
472 * for doing some of the rebalancing work on its way down.
474 error = btree_search(cursor, HAMMER_CURSOR_DELETE);
480 * Delete the element from the leaf node.
482 * Remember that leaf nodes do not have boundaries.
485 ondisk = node->ondisk;
488 KKASSERT(ondisk->type == HAMMER_BTREE_TYPE_LEAF);
/* Close the gap left by the deleted element. */
489 if (i + 1 != ondisk->count) {
490 bcopy(&ondisk->elms[i+1], &ondisk->elms[i],
491 (ondisk->count - i - 1) * sizeof(ondisk->elms[0]));
494 hammer_modify_node(node);
495 if (cursor->parent != NULL) {
497 * Adjust parent's notion of the leaf's count. subtree_count
498 * is only approximate, it is allowed to be too small but
499 * never allowed to be too large. Make sure we don't drop
502 parent = cursor->parent;
503 elm = &parent->ondisk->elms[cursor->parent_index];
504 if (elm->internal.subtree_count)
505 --elm->internal.subtree_count;
506 KKASSERT(elm->internal.subtree_count <= ondisk->count);
507 hammer_modify_node(parent);
511 * It is possible, but not desirable, to stop here. If the element
512 * count drops to 0 (which is allowed for a leaf), try to recursively
513 * remove the B-Tree node.
515 * XXX rebalancing calls would go here too.
517 * This may reposition the cursor at one of the parent's of the
520 if (ondisk->count == 0) {
521 error = btree_remove(cursor);
531 * PRIMARY B-TREE SEARCH SUPPORT PROCEDURE
533 * Search a cluster's B-Tree for cursor->key_beg, return the matching node.
535 * The search can begin ANYWHERE in the B-Tree. As a first step the search
536 * iterates up the tree as necessary to properly position itself prior to
537 * actually doing the search.
539 * INSERTIONS: The search will split full nodes and leaves on its way down
540 * and guarantee that the leaf it ends up on is not full. If we run out
541 * of space the search continues to the leaf (to position the cursor for
542 * the spike), but ENOSPC is returned.
544 * DELETIONS: The search will rebalance the tree on its way down. XXX
546 * The search is only guaranteed to end up on a leaf if an error code of 0
547 * is returned, or if inserting and an error code of ENOENT is returned.
548 * Otherwise it can stop at an internal node. On success a search returns
549 * a leaf node unless INCLUSTER is set and the search located a cluster push
550 * node (which is an internal node).
554 btree_search(hammer_cursor_t cursor, int flags)
556 hammer_node_ondisk_t node;
557 hammer_cluster_t cluster;
/* Merge the caller's cursor flags into the search flags. */
563 flags |= cursor->flags;
566 * Move our cursor up the tree until we find a node whose range covers
567 * the key we are trying to locate. This may move us between
570 * The left bound is inclusive, the right bound is non-inclusive.
571 * It is ok to cursor up too far so when cursoring across a cluster
574 * First see if we can skip the whole cluster. hammer_cursor_up()
575 * handles both cases but this way we don't check the cluster
576 * bounds when going up the tree within a cluster.
578 * NOTE: If INCLUSTER is set and we are at the root of the cluster,
579 * hammer_cursor_up() will return ENOENT.
581 cluster = cursor->node->cluster;
583 hammer_btree_cmp(&cursor->key_beg, &cluster->clu_btree_beg) < 0 ||
584 hammer_btree_cmp(&cursor->key_beg, &cluster->clu_btree_end) >= 0) {
585 error = hammer_cursor_toroot(cursor);
588 error = hammer_cursor_up(cursor, 0);
591 cluster = cursor->node->cluster;
595 * Deal with normal cursoring within a cluster. The right bound
596 * is non-inclusive. That is, the bounds form a separator.
598 while (hammer_btree_cmp(&cursor->key_beg, cursor->left_bound) < 0 ||
599 hammer_btree_cmp(&cursor->key_beg, cursor->right_bound) >= 0) {
600 error = hammer_cursor_up(cursor, 0);
606 * We better have ended up with a node somewhere, and our second
607 * while loop had better not have traversed up a cluster.
609 KKASSERT(cursor->node != NULL && cursor->node->cluster == cluster);
612 * If we are inserting we can't start at a full node if the parent
613 * is also full (because there is no way to split the node),
614 * continue running up the tree until we hit the root of the
615 * root cluster or until the requirement is satisfied.
617 * NOTE: These cursor-up's CAN continue to cross cluster boundaries.
619 * XXX as an optimization it should be possible to unbalance the tree
620 * and stop at the root of the current cluster.
622 while (flags & HAMMER_CURSOR_INSERT) {
623 if (btree_node_is_full(cursor->node->ondisk) == 0)
625 if (cursor->parent == NULL)
627 if (cursor->parent->ondisk->count != HAMMER_BTREE_INT_ELMS)
629 error = hammer_cursor_up(cursor, 0);
630 /* cluster and node may now become stale */
634 /* cluster = cursor->node->cluster; not needed until next cluster = */
638 * If we are deleting we can't start at an internal node with only
639 * one element unless it is root, because all of our code assumes
640 * that internal nodes will never be empty. Just do this generally
641 * for both leaf and internal nodes to get better balance.
643 * This handles the case where the cursor is sitting at a leaf and
644 * either the leaf or parent contain an insufficient number of
647 * NOTE: These cursor-up's CAN continue to cross cluster boundaries.
649 * XXX NOTE: Iterations may not set this flag anyway.
651 while (flags & HAMMER_CURSOR_DELETE) {
652 if (cursor->node->ondisk->count > 1)
654 if (cursor->parent == NULL)
656 KKASSERT(cursor->node->ondisk->count != 0);
657 error = hammer_cursor_up(cursor, 0);
658 /* cluster and node may now become stale */
666 * Push down through internal nodes to locate the requested key.
668 cluster = cursor->node->cluster;
669 node = cursor->node->ondisk;
670 while (node->type == HAMMER_BTREE_TYPE_INTERNAL) {
673 * If we are at the root node and deleting, try to collapse
674 * all of the root's children into the root. This is the
675 * only point where tree depth is reduced.
677 * XXX NOTE: Iterations may not set this flag anyway.
679 if ((flags & HAMMER_CURSOR_DELETE) && cursor->parent == NULL) {
680 error = btree_collapse(cursor);
681 /* node becomes stale after call */
686 node = cursor->node->ondisk;
689 * Scan the node to find the subtree index to push down into.
690 * We go one-past, then back-up.
692 for (i = 0; i < node->count; ++i) {
693 r = hammer_btree_cmp(&cursor->key_beg,
694 &node->elms[i].base);
700 * It is possible for the search to terminate at i == 0,
701 * which is to the LEFT of the LEFT boundary but the RIGHT
702 * of the parent's boundary on the left of the sub-tree
703 * element. This case can occur due to deletions (see
706 * When this case occurs an ENOENT return is guaranteed but
707 * if inserting we must still terminate at a leaf. The
708 * solution is to make the node's left boundary inherit the
709 * boundary stored in the parent.
711 * When doing this inheritance some fields in 'base' are
712 * actually related to the internal element's child
713 * specification and not to the key. These have to be
716 * If we terminate at i == count it is still possible to
717 * be to the RIGHT of the RIGHT boundary but still to the
718 * LEFT of the parent's RIGHT boundary. The solution is to
719 * adjust the RIGHT boundary to match the parent. This
720 * case can occur due to deletions (see btree_remove()).
725 if ((flags & HAMMER_CURSOR_INSERT) == 0) {
/* Preserve the child-specification field while inheriting the
 * parent's left boundary key. */
729 save = node->elms[0].subtree_type;
730 node->elms[0].base = *cursor->left_bound;
731 node->elms[0].subtree_type = save;
732 hammer_modify_node(cursor->node);
733 } else if (i == node->count) {
735 * Terminate early if not inserting and the key is
736 * beyond the uncorrected right hand boundary. The
737 * index must be PAST the last element to prevent an
738 * iteration from returning elements to the left of
741 if ((flags & HAMMER_CURSOR_INSERT) == 0 &&
742 hammer_btree_cmp(&cursor->key_beg,
743 &node->elms[i].base) >= 0
750 * Correct a right-hand boundary mismatch. The push
751 * index is the last element (i-1).
753 if (hammer_btree_cmp(&node->elms[i].base,
754 cursor->right_bound) != 0) {
755 node->elms[i].base = *cursor->right_bound;
756 hammer_modify_node(cursor->node);
761 * The push-down index is now i - 1.
768 * Handle insertion and deletion requirements.
770 * If inserting split full nodes. The split code will
771 * adjust cursor->node and cursor->index if the current
772 * index winds up in the new node.
774 if (flags & HAMMER_CURSOR_INSERT) {
775 if (node->count == HAMMER_BTREE_INT_ELMS) {
776 error = btree_split_internal(cursor);
/* On ENOSPC keep descending so the cursor lands at a leaf for the
 * spike, but stop trying to split further nodes. */
781 flags &= ~HAMMER_CURSOR_INSERT;
784 * reload stale pointers
787 node = cursor->node->ondisk;
793 * If deleting rebalance - do not allow the child to have
794 * just one element or we will not be able to delete it.
796 * Neither internal or leaf nodes (except a root-leaf) are
797 * allowed to drop to 0 elements. (XXX - well, leaf nodes
798 * can at the moment).
800 * Our separators may have been reorganized after rebalancing,
801 * so we have to pop back up and rescan.
803 * XXX test for subtree_count < maxelms / 2, minus 1 or 2
806 * XXX NOTE: Iterations may not set this flag anyway.
808 if (flags & HAMMER_CURSOR_DELETE) {
809 if (node->elms[i].internal.subtree_count <= 1) {
810 error = btree_rebalance(cursor);
813 /* cursor->index is invalid after call */
819 * Cluster pushes are done with internal elements. If this
820 * is a cluster push (rec_offset != 0), and INCLUSTER is set,
823 * However, because this is an internal node we have to
824 * determine whether key_beg is within its range and return
825 * 0 or ENOENT appropriately.
827 if ((flags & HAMMER_CURSOR_INCLUSTER) &&
828 node->elms[i].internal.rec_offset) {
829 r = hammer_btree_cmp(&cursor->key_beg,
830 &node->elms[i+1].base);
831 error = (r < 0) ? 0 : (enospc ? ENOSPC : ENOENT);
836 * Push down (push into new node, existing node becomes
837 * the parent) and continue the search.
839 error = hammer_cursor_down(cursor);
840 /* node and cluster become stale */
843 node = cursor->node->ondisk;
844 cluster = cursor->node->cluster;
848 * We are at a leaf, do a linear search of the key array.
850 * On success the index is set to the matching element and 0
853 * On failure the index is set to the insertion point and ENOENT
856 * Boundaries are not stored in leaf nodes, so the index can wind
857 * up to the left of element 0 (index == 0) or past the end of
858 * the array (index == node->count).
860 KKASSERT(node->count <= HAMMER_BTREE_LEAF_ELMS);
862 for (i = 0; i < node->count; ++i) {
863 r = hammer_btree_cmp(&cursor->key_beg, &node->elms[i].base);
866 * Stop if we've flipped past key_beg
872 * Return an exact match
882 * No exact match was found, i is now at the insertion point.
884 * If inserting split a full leaf before returning. This
885 * may have the side effect of adjusting cursor->node and
889 if ((flags & HAMMER_CURSOR_INSERT) &&
890 node->count == HAMMER_BTREE_LEAF_ELMS) {
891 error = btree_split_leaf(cursor);
896 flags &= ~HAMMER_CURSOR_INSERT;
899 * reload stale pointers
/* NOTE(review): the other stale reloads use cursor->node->ondisk;
 * presumably &cursor->node->internal aliases the same storage --
 * verify the equivalence. */
903 node = &cursor->node->internal;
908 * We reached a leaf but did not find the key we were looking for.
909 * If this is an insert we will be properly positioned for an insert
910 * (ENOENT) or spike (ENOSPC) operation.
912 error = enospc ? ENOSPC : ENOENT;
918 /************************************************************************
919 * SPLITTING AND MERGING *
920 ************************************************************************
922 * These routines do all the dirty work required to split and merge nodes.
926 * Split an internal node into two nodes and move the separator at the split
927 * point to the parent. Note that the parent's parent's element pointing
928 * to our parent will have an incorrect subtree_count (we don't update it).
929 * It will be low, which is ok.
931 * (cursor->node, cursor->index) indicates the element the caller intends
932 * to push into. We will adjust node and index if that element winds
933 * up in the split node.
935 * If we are at the root of a cluster a new root must be created with two
936 * elements, one pointing to the original root and one pointing to the
937 * newly allocated split node.
939 * NOTE! Being at the root of a cluster is different from being at the
940 * root of the root cluster. cursor->parent will not be NULL and
941 * cursor->node->ondisk.parent must be tested against 0. Theoretically
942 * we could propagate the algorithm into the parent and deal with multiple
943 * 'roots' in the cluster header, but it's easier not to.
947 btree_split_internal(hammer_cursor_t cursor)
949 hammer_node_ondisk_t ondisk;
951 hammer_node_t parent;
952 hammer_node_t new_node;
953 hammer_btree_elm_t elm;
954 hammer_btree_elm_t parent_elm;
960 const int esize = sizeof(*elm);
963 * We are splitting but elms[split] will be promoted to the parent,
964 * leaving the right hand node with one less element. If the
965 * insertion point will be on the left-hand side adjust the split
966 * point to give the right hand side one additional node.
969 ondisk = node->ondisk;
970 split = (ondisk->count + 1) / 2;
971 if (cursor->index <= split)
976 * If we are at the root of the cluster, create a new root node with
977 * 1 element and split normally. Avoid making major modifications
978 * until we know the whole operation will work.
980 * The root of the cluster is different from the root of the root
981 * cluster. Use the node's on-disk structure's parent offset to
/* A zero parent offset marks this node as the root of its cluster. */
984 if (ondisk->parent == 0) {
985 parent = hammer_alloc_btree(node->cluster, &error);
988 hammer_lock_ex(&parent->lock);
989 ondisk = parent->ondisk;
992 ondisk->type = HAMMER_BTREE_TYPE_INTERNAL;
993 ondisk->elms[0].base = node->cluster->clu_btree_beg;
994 ondisk->elms[0].internal.subtree_type = node->ondisk->type;
995 ondisk->elms[0].internal.subtree_offset = node->node_offset;
996 ondisk->elms[1].base = node->cluster->clu_btree_end;
998 parent_index = 0; /* index of current node in parent */
1001 parent = cursor->parent;
1002 parent_index = cursor->parent_index;
1003 KKASSERT(parent->cluster == node->cluster);
1007 * Split node into new_node at the split point.
1009 * B O O O P N N B <-- P = node->elms[split]
1010 * 0 1 2 3 4 5 6 <-- subtree indices
1015 * B O O O B B N N B <--- inner boundary points are 'P'
1019 new_node = hammer_alloc_btree(node->cluster, &error);
/* Allocation failed: undo the tentative new root (if one was made)
 * before returning. */
1020 if (new_node == NULL) {
1022 hammer_unlock(&parent->lock);
1023 hammer_free_btree(parent->cluster, parent->node_offset);
1024 hammer_rel_node(parent);
1028 hammer_lock_ex(&new_node->lock);
1031 * Create the new node. P becomes the left-hand boundary in the
1032 * new node. Copy the right-hand boundary as well.
1034 * elm is the new separator.
1036 ondisk = node->ondisk;
1037 elm = &ondisk->elms[split];
1038 bcopy(elm, &new_node->ondisk->elms[0],
1039 (ondisk->count - split + 1) * esize);
1040 new_node->ondisk->count = ondisk->count - split;
1041 new_node->ondisk->parent = parent->node_offset;
1042 new_node->ondisk->type = HAMMER_BTREE_TYPE_INTERNAL;
1043 KKASSERT(ondisk->type == new_node->ondisk->type);
1046 * Cleanup the original node. P becomes the new boundary, its
1047 * subtree_offset was moved to the new node. If we had created
1048 * a new root its parent pointer may have changed.
1050 elm->internal.subtree_offset = 0;
1051 ondisk->count = split;
1054 * Insert the separator into the parent, fixup the parent's
1055 * reference to the original node, and reference the new node.
1056 * The separator is P.
1058 * Remember that base.count does not include the right-hand boundary.
1060 ondisk = parent->ondisk;
1061 KKASSERT(ondisk->count != HAMMER_BTREE_INT_ELMS);
1062 ondisk->elms[parent_index].internal.subtree_count = split;
1063 parent_elm = &ondisk->elms[parent_index+1];
1064 bcopy(parent_elm, parent_elm + 1,
1065 (ondisk->count - parent_index) * esize);
1066 parent_elm->internal.base = elm->base; /* separator P */
1067 parent_elm->internal.subtree_offset = new_node->node_offset;
1068 parent_elm->internal.subtree_count = new_node->ondisk->count;
1069 parent_elm->internal.subtree_type = new_node->ondisk->type;
1073 * The children of new_node need their parent pointer set to new_node.
1075 for (i = 0; i < new_node->ondisk->count; ++i) {
1076 elm = &new_node->ondisk->elms[i];
1077 error = btree_set_parent(new_node, elm);
1079 panic("btree_split_internal: btree-fixup problem");
1084 * The cluster's root pointer may have to be updated.
1087 node->cluster->ondisk->clu_btree_root = parent->node_offset;
1088 hammer_modify_cluster(node->cluster);
1089 node->ondisk->parent = parent->node_offset;
1090 if (cursor->parent) {
1091 hammer_unlock(&cursor->parent->lock);
1092 hammer_rel_node(cursor->parent);
1094 cursor->parent = parent; /* lock'd and ref'd */
1097 hammer_modify_node(node);
1098 hammer_modify_node(new_node);
1099 hammer_modify_node(parent);
1102 * Ok, now adjust the cursor depending on which element the original
1103 * index was pointing at. If we are >= the split point the push node
1104 * is now in the new node.
1106 * NOTE: If we are at the split point itself we cannot stay with the
1107 * original node because the push index will point at the right-hand
1108 * boundary, which is illegal.
1110 * NOTE: The cursor's parent or parent_index must be adjusted for
1111 * the case where a new parent (new root) was created, and the case
1112 * where the cursor is now pointing at the split node.
1114 if (cursor->index >= split) {
1115 cursor->parent_index = parent_index + 1;
1116 cursor->index -= split;
1117 hammer_unlock(&cursor->node->lock);
1118 hammer_rel_node(cursor->node);
1119 cursor->node = new_node; /* locked and ref'd */
1121 cursor->parent_index = parent_index;
1122 hammer_unlock(&new_node->lock);
1123 hammer_rel_node(new_node);
1127 * Fixup left and right bounds
1129 parent_elm = &parent->ondisk->elms[cursor->parent_index];
1130 cursor->left_bound = &parent_elm[0].internal.base;
1131 cursor->right_bound = &parent_elm[1].internal.base;
/*
 * btree_split_leaf() - split a full B-Tree leaf node in half.
 *
 * NOTE(review): this extract is fragmentary (source lines are missing
 * between the surviving ones); code lines below are preserved byte-for-byte
 * and only comments have been added.
 *
 * The leaf at cursor->node is split at (count+1)/2, a new right-hand leaf
 * is allocated, and a separator element is inserted into the parent.  If
 * the leaf is the root of its cluster a new internal root node is created
 * first.  On return the cursor tracks the original insertion point, which
 * may now live in the new leaf.
 */
1137 * Same as the above, but splits a full leaf node.
1141 btree_split_leaf(hammer_cursor_t cursor)
1143 hammer_node_ondisk_t ondisk;
1144 hammer_node_t parent;
1146 hammer_node_t new_leaf;
1147 hammer_btree_elm_t elm;
1148 hammer_btree_elm_t parent_elm;
/* esize: on-disk size of one B-Tree element, used for all bcopy() math */
1153 const size_t esize = sizeof(*elm);
1156 * Calculate the split point. If the insertion point will be on
1157 * the left-hand side adjust the split point to give the right
1158 * hand side one additional node.
1160 leaf = cursor->node;
1161 ondisk = leaf->ondisk;
1162 split = (ondisk->count + 1) / 2;
1163 if (cursor->index <= split)
1168 * If we are at the root of the tree, create a new root node with
1169 * 1 element and split normally. Avoid making major modifications
1170 * until we know the whole operation will work.
1172 if (ondisk->parent == 0) {
1173 parent = hammer_alloc_btree(leaf->cluster, &error);
1176 hammer_lock_ex(&parent->lock);
1177 ondisk = parent->ondisk;
/* new root: one element pointing at the leaf, bracketed by the
 * cluster's overall left/right boundaries */
1180 ondisk->type = HAMMER_BTREE_TYPE_INTERNAL;
1181 ondisk->elms[0].base = leaf->cluster->clu_btree_beg;
1182 ondisk->elms[0].internal.subtree_type = leaf->ondisk->type;
1183 ondisk->elms[0].internal.subtree_offset = leaf->node_offset;
1184 ondisk->elms[1].base = leaf->cluster->clu_btree_end;
1186 parent_index = 0; /* insertion point in parent */
1189 parent = cursor->parent;
1190 parent_index = cursor->parent_index;
1191 KKASSERT(parent->cluster == leaf->cluster);
1195 * Split leaf into new_leaf at the split point. Select a separator
1196 * value in-between the two leafs but with a bent towards the right
1197 * leaf since comparisons use an 'elm >= separator' inequality.
1206 new_leaf = hammer_alloc_btree(leaf->cluster, &error);
1207 if (new_leaf == NULL) {
/* allocation failed: back out the speculatively-created root, if any */
1209 hammer_unlock(&parent->lock);
1210 hammer_free_btree(parent->cluster, parent->node_offset);
1211 hammer_rel_node(parent);
1215 hammer_lock_ex(&new_leaf->lock);
1218 * Create the new node. P become the left-hand boundary in the
1219 * new node. Copy the right-hand boundary as well.
1221 ondisk = leaf->ondisk;
1222 elm = &ondisk->elms[split];
/* move elements [split..count) into the new right-hand leaf */
1223 bcopy(elm, &new_leaf->ondisk->elms[0], (ondisk->count - split) * esize);
1224 new_leaf->ondisk->count = ondisk->count - split;
1225 new_leaf->ondisk->parent = parent->node_offset;
1226 new_leaf->ondisk->type = HAMMER_BTREE_TYPE_LEAF;
1227 KKASSERT(ondisk->type == new_leaf->ondisk->type);
1230 * Cleanup the original node. Because this is a leaf node and
1231 * leaf nodes do not have a right-hand boundary, there
1232 * aren't any special edge cases to clean up. We just fixup the
1235 ondisk->count = split;
1238 * Insert the separator into the parent, fixup the parent's
1239 * reference to the original node, and reference the new node.
1240 * The separator is P.
1242 * Remember that base.count does not include the right-hand boundary.
1243 * We are copying parent_index+1 to parent_index+2, not +0 to +1.
1245 ondisk = parent->ondisk;
1246 KKASSERT(ondisk->count != HAMMER_BTREE_INT_ELMS);
1247 ondisk->elms[parent_index].internal.subtree_count = split;
1248 parent_elm = &ondisk->elms[parent_index+1];
/* shift parent elements right to open a slot for the separator */
1249 bcopy(parent_elm, parent_elm + 1,
1250 (ondisk->count - parent_index) * esize);
1251 hammer_make_separator(&elm[-1].base, &elm[0].base, &parent_elm->base);
1252 parent_elm->internal.subtree_offset = new_leaf->node_offset;
1253 parent_elm->internal.subtree_count = new_leaf->ondisk->count;
1254 parent_elm->internal.subtree_type = new_leaf->ondisk->type;
1258 * The cluster's root pointer may have to be updated.
1261 leaf->cluster->ondisk->clu_btree_root = parent->node_offset;
1262 hammer_modify_cluster(leaf->cluster);
1263 leaf->ondisk->parent = parent->node_offset;
1264 if (cursor->parent) {
1265 hammer_unlock(&cursor->parent->lock);
1266 hammer_rel_node(cursor->parent);
1268 cursor->parent = parent; /* lock'd and ref'd */
/* mark all three touched nodes dirty for write-back */
1271 hammer_modify_node(leaf);
1272 hammer_modify_node(new_leaf);
1273 hammer_modify_node(parent);
1276 * Ok, now adjust the cursor depending on which element the original
1277 * index was pointing at. If we are >= the split point the push node
1278 * is now in the new node.
1280 * NOTE: If we are at the split point itself we cannot stay with the
1281 * original node because the push index will point at the right-hand
1282 * boundary, which is illegal.
1284 if (cursor->index >= split) {
1285 cursor->parent_index = parent_index + 1;
1286 cursor->index -= split;
1287 hammer_unlock(&cursor->node->lock);
1288 hammer_rel_node(cursor->node);
1289 cursor->node = new_leaf;
1291 cursor->parent_index = parent_index;
1292 hammer_unlock(&new_leaf->lock);
1293 hammer_rel_node(new_leaf);
1297 * Fixup left and right bounds
/* cursor bounds come from the parent element bracketing cursor->node */
1299 parent_elm = &parent->ondisk->elms[cursor->parent_index];
1300 cursor->left_bound = &parent_elm[0].internal.base;
1301 cursor->right_bound = &parent_elm[1].internal.base;
/*
 * btree_remove() - remove the empty B-Tree node at cursor->node.
 *
 * NOTE(review): fragmentary extract; code lines preserved byte-for-byte,
 * comments only added.
 *
 * Recursive: if deleting the parent's element would leave the parent with
 * zero elements, the parent itself is removed first.  Returns 0 on success
 * or EAGAIN when the required non-blocking parent lock cannot be acquired.
 */
1307 * Attempt to remove the empty B-Tree node at (cursor->node). Returns 0
1308 * on success, EAGAIN if we could not acquire the necessary locks, or some
1311 * On return the cursor may end up pointing at an internal node, suitable
1312 * for further iteration but not for insertion or deletion.
1314 * cursor->node may be an internal node or a leaf node.
1317 btree_remove(hammer_cursor_t cursor)
1319 hammer_node_ondisk_t ondisk;
1320 hammer_btree_elm_t elm;
1323 hammer_node_t parent;
1328 * If we are at the root of the root cluster there is nothing to
1329 * remove, but an internal node at the root of a cluster is not
1330 * allowed to be empty so convert it to a leaf node.
1332 if (cursor->parent == NULL) {
1333 ondisk = cursor->node->ondisk;
1334 KKASSERT(ondisk->parent == 0);
1335 ondisk->type = HAMMER_BTREE_TYPE_LEAF;
1337 hammer_modify_node(cursor->node);
1338 kprintf("EMPTY ROOT OF ROOT CLUSTER -> LEAF\n");
1343 * Retain a reference to cursor->node, ex-lock again (2 locks now)
1344 * so we do not lose the lock when we cursor around.
1346 save = cursor->node;
1347 hammer_ref_node(save);
1348 hammer_lock_ex(&save->lock);
1351 * We need to be able to lock the parent of the parent. Do this
1352 * non-blocking and return EAGAIN if the lock cannot be acquired.
1353 * non-blocking is required in order to avoid a deadlock.
1355 * After we cursor up, parent is moved to node and the new parent
1356 * is the parent of the parent.
1358 error = hammer_cursor_up(cursor, 1);
1360 kprintf("BTREE_REMOVE: Cannot lock parent\n");
/* lock failure: drop our extra ref/lock on the saved node and bail */
1361 hammer_unlock(&save->lock);
1362 hammer_rel_node(save);
1367 * At this point we want to remove the element at (node, index),
1368 * which is now the (original) parent pointing to the saved node.
1369 * Removing the element allows us to then free the node it was
1372 * However, an internal node is not allowed to have 0 elements, so
1373 * if the count would drop to 0 we have to recurse. It is possible
1374 * for the recursion to fail.
1376 * NOTE: The cursor is in an indeterminant position after recursing,
1377 * but will still be suitable for an iteration.
1379 node = cursor->node;
1380 KKASSERT(node->ondisk->count > 0);
1381 if (node->ondisk->count == 1) {
/* parent would become empty: recursively remove the parent instead */
1382 error = btree_remove(cursor);
1384 kprintf("BTREE_REMOVE: Successful!\n");
1385 hammer_flush_node(save);
1386 hammer_free_btree(save->cluster, save->node_offset);
1388 kprintf("BTREE_REMOVE: Recursion failed %d\n", error);
1390 hammer_unlock(&save->lock);
1391 hammer_rel_node(save);
1396 * Remove the element at (node, index) and adjust the parent's
1399 * NOTE! If removing element 0 an internal node's left-hand boundary
1400 * will no longer match its parent. If removing a mid-element the
1401 * boundary will no longer match a child's left hand or right hand
1404 * BxBxBxB remove a (x[0]): internal node's left-hand
1405 * | | | boundary no longer matches
1408 * remove b (x[1]): a's right hand boundary no
1409 * longer matches parent.
1411 * remove c (x[2]): b's right hand boundary no
1412 * longer matches parent.
1414 * These cases are corrected in btree_search().
1417 kprintf("BTREE_REMOVE: Removing element %d\n", cursor->index);
1419 ondisk = node->ondisk;
/* close the gap left by the removed element */
1421 bcopy(&ondisk->elms[i+1], &ondisk->elms[i],
1422 (ondisk->count - i) * sizeof(ondisk->elms[0]));
1424 hammer_modify_node(cursor->node);
1427 * Adjust the parent-parent's (now parent) reference to the parent
1430 if ((parent = cursor->parent) != NULL) {
1431 elm = &parent->ondisk->elms[cursor->parent_index];
/* keep the parent's cached subtree_count/type in sync, dirtying it
 * only when something actually changed */
1432 if (elm->internal.subtree_count != ondisk->count) {
1433 elm->internal.subtree_count = ondisk->count;
1434 hammer_modify_node(parent);
1436 if (elm->subtree_type != HAMMER_BTREE_TYPE_CLUSTER &&
1437 elm->subtree_type != ondisk->type) {
1438 elm->subtree_type = ondisk->type;
1439 hammer_modify_node(parent);
1444 * Free the saved node.
1446 hammer_flush_node(save);
1447 hammer_free_btree(save->cluster, save->node_offset);
1448 hammer_unlock(&save->lock);
1449 hammer_rel_node(save);
/*
 * btree_set_parent() - repoint a child's parent pointer at (node).
 *
 * NOTE(review): fragmentary extract; code lines preserved byte-for-byte.
 *
 * Dispatches on the element's subtree_type: in-cluster leaf/internal
 * children get their ondisk->parent updated; a cluster reference gets its
 * clu_btree_parent_offset updated instead.  Any other type panics.
 */
1454 * The child represented by the element in internal node node needs
1455 * to have its parent pointer adjusted.
1459 btree_set_parent(hammer_node_t node, hammer_btree_elm_t elm)
1461 hammer_volume_t volume;
1462 hammer_cluster_t cluster;
1463 hammer_node_t child;
1468 switch(elm->internal.subtree_type) {
1469 case HAMMER_BTREE_TYPE_LEAF:
1470 case HAMMER_BTREE_TYPE_INTERNAL:
/* in-cluster child node: lock, rewrite its parent offset, dirty it */
1471 child = hammer_get_node(node->cluster,
1472 elm->internal.subtree_offset, &error);
1474 hammer_lock_ex(&child->lock);
1475 child->ondisk->parent = node->node_offset;
1476 hammer_modify_node(child);
1477 hammer_unlock(&child->lock);
1478 hammer_rel_node(child);
1481 case HAMMER_BTREE_TYPE_CLUSTER:
/* inter-cluster reference: resolve volume then cluster, update the
 * cluster header's parent offset; clu_no/vol_no must already match */
1482 volume = hammer_get_volume(node->cluster->volume->hmp,
1483 elm->internal.subtree_vol_no, &error);
1486 cluster = hammer_get_cluster(volume,
1487 elm->internal.subtree_clu_no,
1489 hammer_rel_volume(volume, 0);
1492 hammer_lock_ex(&cluster->io.lock);
1493 cluster->ondisk->clu_btree_parent_offset = node->node_offset;
1494 hammer_unlock(&cluster->io.lock);
1495 KKASSERT(cluster->ondisk->clu_btree_parent_clu_no ==
1496 node->cluster->clu_no);
1497 KKASSERT(cluster->ondisk->clu_btree_parent_vol_no ==
1498 node->cluster->volume->vol_no);
1499 hammer_modify_cluster(cluster);
1500 hammer_rel_cluster(cluster, 0);
1503 hammer_print_btree_elm(elm, HAMMER_BTREE_TYPE_INTERNAL, -1);
1504 panic("btree_set_parent: bad subtree_type");
1505 break; /* NOT REACHED */
/*
 * btree_rebalance() - redistribute elements among the children of (node).
 *
 * NOTE(review): fragmentary extract; code lines preserved byte-for-byte.
 * This routine also appears to use an older buffer-based API (hammer_bread,
 * child_buffer, XXX placeholders) unlike the node-based code above --
 * presumably mid-conversion code; verify against the full source.
 *
 * Gathers all children's elements into a temporary array, computes a goal
 * count per child, and repacks the elements left-to-right, freeing any
 * children left empty.  Inter-cluster elements are skipped entirely.
 */
1513 * This routine is called on the internal node (node) prior to recursing down
1514 * through (node, index) when the node referenced by (node, index) MIGHT
1515 * have too few elements for the caller to perform a deletion.
1517 * cursor->index is invalid on return because the separators may have gotten
1518 * adjusted, the caller must rescan the node's elements. The caller may set
1519 * cursor->index to -1 if it wants us to do a general rebalancing.
1521 * This routine rebalances the children of the (node), collapsing children
1522 * together if possible. On return each child will have at least L/2-1
1523 * elements unless the node only has one child.
1525 * NOTE: Because we do not update the parent's parent in the split code,
1526 * the subtree_count used by the caller may be incorrect. We correct it
1527 * here. Also note that we cannot change the depth of the tree's leaf
1528 * nodes here (see btree_collapse()).
1530 * NOTE: We make no attempt to rebalance inter-cluster elements.
1534 btree_rebalance(hammer_cursor_t cursor)
1536 hammer_node_ondisk_t ondisk;
1538 hammer_node_t children[HAMMER_BTREE_INT_ELMS];
1539 hammer_node_t child;
1540 hammer_btree_elm_t elm;
1541 hammer_btree_elm_t elms;
1542 int i, j, n, nelms, goal;
1543 int maxelms, halfelms;
1547 * If the elm being recursed through is an inter-cluster reference,
1548 * don't worry about it.
1550 ondisk = cursor->node->ondisk;
1551 elm = &ondisk->elms[cursor->index];
1552 if (elm->internal.subtree_type == HAMMER_BTREE_TYPE_CLUSTER)
1555 KKASSERT(elm->internal.subtree_offset != 0);
1559 * Load the children of node and do any necessary corrections
1560 * to subtree_count. subtree_count may be too low due to the
1561 * way insertions split nodes. Get a count of the total number
1562 * of actual elements held by our children.
1566 for (i = n = 0; i < node->base.count; ++i) {
1567 struct hammer_btree_internal_elm *elm;
1569 elm = &node->elms[i];
1571 child_buffer[i] = NULL; /* must be preinitialized for bread */
1572 if (elm->subtree_offset == 0)
1574 child = hammer_bread(cursor->cluster, elm->subtree_offset,
1575 HAMMER_FSBUF_BTREE, &error,
1576 &child_buffer[i], XXX);
1577 children[i] = child;
1581 KKASSERT(node->base.subtype == child->base.type);
1584 * Accumulate n for a good child, update the node's count
1587 if (node->elms[i].subtree_count != child->base.count) {
1588 node->elms[i].subtree_count = child->base.count;
1590 n += node->elms[i].subtree_count;
1596 * Collect all the children's elements together
/* temporary flat array; +1 slot holds the right boundary element */
1599 elms = kmalloc(sizeof(*elms) * (nelms + 1), M_HAMMER, M_WAITOK|M_ZERO);
1600 for (i = n = 0; i < node->base.count; ++i) {
1601 child = children[i];
1602 for (j = 0; j < child->base.count; ++j) {
1603 elms[n].owner = child;
1604 if (node->base.subtype == HAMMER_BTREE_TYPE_LEAF)
1605 elms[n].u.leaf = child->leaf.elms[j];
1607 elms[n].u.internal = child->internal.elms[j];
1611 KKASSERT(n == nelms);
1614 * Store a boundary in the elms array to ease the code below. This
1615 * is only used if the children are internal nodes.
1617 elms[n].u.internal = node->elms[i];
1620 * Calculate the number of elements each child should have (goal) by
1621 * reducing the number of elements until we achieve at least
1622 * halfelms - 1 per child, unless we are a degenerate case.
1624 maxelms = btree_max_elements(node->base.subtype);
1625 halfelms = maxelms / 2;
1627 goal = halfelms - 1;
1628 while (i && n / i < goal)
1632 * Now rebalance using the specified goal
1634 for (i = n = 0; i < node->base.count; ++i) {
1635 struct hammer_buffer *subchild_buffer = NULL;
1636 struct hammer_btree_internal_node *subchild;
1638 child = children[i];
/* refill this child up to 'goal' elements from the flat array */
1639 for (j = 0; j < goal && n < nelms; ++j) {
1640 if (node->base.subtype == HAMMER_BTREE_TYPE_LEAF) {
1641 child->leaf.elms[j] = elms[n].u.leaf;
1643 child->internal.elms[j] = elms[n].u.internal;
1647 * If the element's parent has changed we have to
1648 * update the parent pointer. This is somewhat
1651 if (elms[n].owner != child &&
1652 node->base.subtype == HAMMER_BTREE_TYPE_INTERNAL) {
1653 subchild = hammer_bread(cursor->cluster,
1654 elms[n].u.internal.subtree_offset,
1657 &subchild_buffer, XXX);
1659 subchild->base.parent =
1660 hammer_bclu_offset(child_buffer[i],
1662 hammer_modify_buffer(subchild_buffer);
1669 * Set right boundary if the children are internal nodes.
1671 if (node->base.subtype == HAMMER_BTREE_TYPE_INTERNAL)
1672 child->internal.elms[j] = elms[n].u.internal;
1673 child->base.count = j;
1674 hammer_modify_buffer(child_buffer[i]);
1675 if (subchild_buffer)
1676 hammer_put_buffer(subchild_buffer, 0);
1679 * If we have run out of elements, break out
1686 * Physically destroy any left-over children. These children's
1687 * elements have been packed into prior children. The node's
1688 * right hand boundary and count gets shifted to index i.
1690 * The subtree count in the node's parent MUST be updated because
1691 * we are removing elements. The subtree_count field is allowed to
1692 * be too small, but not too large!
1694 if (i != node->base.count) {
1696 node->elms[n] = node->elms[node->base.count];
1697 while (i < node->base.count) {
1698 hammer_free_btree_ptr(child_buffer[i], children[i]);
1699 hammer_put_buffer(child_buffer[i], 0);
1702 node->base.count = n;
1703 if (cursor->parent) {
1704 cursor->parent->elms[cursor->parent_index].subtree_count = n;
1705 hammer_modify_buffer(cursor->parent_buffer);
/* release the temporary element array and all child buffers */
1709 kfree(elms, M_HAMMER);
1711 hammer_modify_buffer(cursor->node_buffer);
1712 for (i = 0; i < node->base.count; ++i) {
1713 if (child_buffer[i])
1714 hammer_put_buffer(child_buffer[i], 0);
/*
 * btree_collapse() - reduce tree depth by one at the root.
 *
 * NOTE(review): fragmentary extract; code lines preserved byte-for-byte.
 * Like btree_rebalance() above, this uses the older buffer-based API
 * (hammer_bread/XXX placeholders) -- presumably mid-conversion; verify.
 *
 * Replaces the internal root node's contents with the concatenation of
 * all its children's elements, fixing up grandchildren parent offsets
 * when the children are themselves internal nodes.
 */
1720 * This routine is only called if the cursor is at the root node and the
1721 * root node is an internal node. We attempt to collapse the root node
1722 * by replacing it with all of its children, reducing tree depth by one.
1724 * This is the only way to reduce tree depth in a HAMMER filesystem.
1725 * Note that all leaf nodes are at the same depth.
1727 * This is a fairly expensive operation because we not only have to load
1728 * the root's children, we also have to scan each child and adjust the
1729 * parent offset for each element in each child. Nasty all around.
1733 btree_collapse(hammer_cursor_t cursor)
1735 hammer_btree_node_ondisk_t root, child;
1736 hammer_btree_node_ondisk_t children[HAMMER_BTREE_INT_ELMS];
1737 struct hammer_buffer *child_buffer[HAMMER_BTREE_INT_ELMS];
1742 int32_t root_offset;
1743 u_int8_t subsubtype;
1745 root = cursor->node;
1746 count = root->base.count;
1747 root_offset = hammer_bclu_offset(cursor->node_buffer, root);
1750 * Sum up the number of children each element has. This value is
1751 * only approximate due to the way the insertion node works. It
1752 * may be too small but it will never be too large.
1754 * Quickly terminate the collapse if the elements have too many
1757 KKASSERT(root->base.parent == 0); /* must be root node */
1758 KKASSERT(root->base.type == HAMMER_BTREE_TYPE_INTERNAL);
1759 KKASSERT(count <= HAMMER_BTREE_INT_ELMS);
/* cheap pre-check using the (possibly stale) cached subtree counts */
1761 for (i = n = 0; i < count; ++i) {
1762 n += root->internal.elms[i].subtree_count;
1764 if (n > btree_max_elements(root->base.subtype))
1768 * Iterate through the elements again and correct the subtree_count.
1769 * Terminate the collapse if we wind up with too many.
1774 for (i = n = 0; i < count; ++i) {
1775 struct hammer_btree_internal_elm *elm;
1777 elm = &root->internal.elms[i];
1778 child_buffer[i] = NULL;
1780 if (elm->subtree_offset == 0)
1782 child = hammer_bread(cursor->cluster, elm->subtree_offset,
1783 HAMMER_FSBUF_BTREE, &error,
1784 &child_buffer[i], XXX);
1785 children[i] = child;
1788 KKASSERT(root->base.subtype == child->base.type);
1791 * Accumulate n for a good child, update the root's count
1794 if (root->internal.elms[i].subtree_count != child->base.count) {
1795 root->internal.elms[i].subtree_count = child->base.count;
1798 n += root->internal.elms[i].subtree_count;
1800 if (error || n > btree_max_elements(root->base.subtype))
1804 * Ok, we can collapse the root. If the root's children are leafs
1805 * the collapse is really simple. If they are internal nodes the
1806 * collapse is not so simple because we have to fixup the parent
1807 * pointers for the root's children's children.
1809 * When collapsing an internal node the far left and far right
1810 * element's boundaries should match the root's left and right
1813 if (root->base.subtype == HAMMER_BTREE_TYPE_LEAF) {
/* leaf children: just concatenate their elements into the root */
1814 for (i = n = 0; i < count; ++i) {
1815 child = children[i];
1816 for (j = 0; j < child->base.count; ++j) {
1817 root->leaf.elms[n] = child->leaf.elms[j];
1821 root->base.type = root->base.subtype;
1822 root->base.subtype = 0;
1823 root->base.count = n;
1824 root->leaf.link_left = 0;
1825 root->leaf.link_right = 0;
1827 struct hammer_btree_internal_elm *elm;
1828 struct hammer_btree_internal_node *subchild;
1829 struct hammer_buffer *subchild_buffer = NULL;
/* sanity: outermost child boundaries must match the root's */
1832 child = children[0];
1833 subsubtype = child->base.subtype;
1834 KKASSERT(child->base.count > 0);
1835 KKASSERT(root->internal.elms[0].base.key ==
1836 child->internal.elms[0].base.key);
1837 child = children[count-1];
1838 KKASSERT(child->base.count > 0);
1839 KKASSERT(root->internal.elms[count].base.key ==
1840 child->internal.elms[child->base.count].base.key);
1844 for (i = n = 0; i < count; ++i) {
1845 child = children[i];
1846 KKASSERT(child->base.subtype == subsubtype);
1847 for (j = 0; j < child->base.count; ++j) {
1848 elm = &child->internal.elms[j];
1850 root->internal.elms[n] = *elm;
/* grandchild's parent pointer must now reference the root */
1851 subchild = hammer_bread(cursor->cluster,
1852 elm->subtree_offset,
1858 subchild->base.parent = root_offset;
1859 hammer_modify_buffer(subchild_buffer);
1863 /* make sure the right boundary is correct */
1864 /* (this gets overwritten when the loop continues) */
1865 /* XXX generate a new separator? */
1866 root->internal.elms[n] = child->internal.elms[j];
1868 root->base.type = HAMMER_BTREE_TYPE_INTERNAL;
1869 root->base.subtype = subsubtype;
1870 if (subchild_buffer)
1871 hammer_put_buffer(subchild_buffer, 0);
1880 hammer_modify_buffer(cursor->node_buffer);
1881 for (i = 0; i < count; ++i) {
1882 if (child_buffer[i])
1883 hammer_put_buffer(child_buffer[i], 0);
1890 /************************************************************************
1891 * MISCELLANIOUS SUPPORT *
1892 ************************************************************************/
/*
 * hammer_btree_cmp() - three-way comparison of two B-Tree element keys.
 *
 * NOTE(review): the return statements were elided by the extraction; the
 * comparison order visible here is obj_id, then rec_type, then key.
 * Code lines preserved byte-for-byte.
 */
1895 * Compare two B-Tree elements, return -N, 0, or +N (e.g. similar to strcmp).
1897 * See also hammer_rec_rb_compare() and hammer_rec_cmp() in hammer_object.c.
1900 hammer_btree_cmp(hammer_base_elm_t key1, hammer_base_elm_t key2)
1902 if (key1->obj_id < key2->obj_id)
1904 if (key1->obj_id > key2->obj_id)
1907 if (key1->rec_type < key2->rec_type)
1909 if (key1->rec_type > key2->rec_type)
1912 if (key1->key < key2->key)
1914 if (key1->key > key2->key)
/*
 * hammer_btree_chkts() - visibility check of an element at a given TID.
 *
 * An element is not visible if the query tid predates its create_tid, or
 * if the element has been deleted (delete_tid != 0) at or before the
 * query tid.  (Return values elided by the extraction.)
 */
1920 * Test a non-zero timestamp against an element to determine whether the
1921 * element is visible.
1924 hammer_btree_chkts(hammer_tid_t create_tid, hammer_base_elm_t base)
1926 if (create_tid < base->create_tid)
1928 if (base->delete_tid && create_tid >= base->delete_tid)
/*
 * hammer_make_separator() - build a separator key midway between key1
 * and key2, field by field, biased toward key2 (fields one unit apart
 * yield key2's value).  delete_tid is copied from key2 verbatim per the
 * rationale below.
 */
1934 * Create a separator half way inbetween key1 and key2. For fields just
1935 * one unit apart, the separator will match key2.
1937 * The handling of delete_tid is a little confusing. It is only possible
1938 * to have one record in the B-Tree where all fields match except delete_tid.
1939 * This means, worse case, two adjacent elements may have a create_tid that
1940 * is one-apart and cause the separator to choose the right-hand element's
1941 * create_tid. e.g. (create,delete): (1,x)(2,x) -> separator is (2,x).
1943 * So all we have to do is set delete_tid to the right-hand element to
1944 * guarentee that the separator is properly between the two elements.
/* midpoint with round-up: key1 + ceil((key2 - key1) / 2) */
1946 #define MAKE_SEPARATOR(key1, key2, dest, field) \
1947 dest->field = key1->field + ((key2->field - key1->field + 1) >> 1);
1950 hammer_make_separator(hammer_base_elm_t key1, hammer_base_elm_t key2,
1951 hammer_base_elm_t dest)
1953 bzero(dest, sizeof(*dest));
1954 MAKE_SEPARATOR(key1, key2, dest, obj_id);
1955 MAKE_SEPARATOR(key1, key2, dest, rec_type);
1956 MAKE_SEPARATOR(key1, key2, dest, key);
1957 MAKE_SEPARATOR(key1, key2, dest, create_tid);
1958 dest->delete_tid = key2->delete_tid;
1961 #undef MAKE_SEPARATOR
/*
 * btree_node_is_full() - test whether a node has reached its per-type
 * element capacity (internal vs leaf limits differ).  Panics on an
 * unknown node type.  (Return statements elided by the extraction.)
 */
1964 * Return whether a generic internal or leaf node is full
1967 btree_node_is_full(hammer_node_ondisk_t node)
1969 switch(node->type) {
1970 case HAMMER_BTREE_TYPE_INTERNAL:
1971 if (node->count == HAMMER_BTREE_INT_ELMS)
1974 case HAMMER_BTREE_TYPE_LEAF:
1975 if (node->count == HAMMER_BTREE_LEAF_ELMS)
1979 panic("illegal btree subtype");
/*
 * btree_max_elements() - return the element capacity for a node type
 * (leaf or internal); panics on any other type.
 */
1986 btree_max_elements(u_int8_t type)
1988 if (type == HAMMER_BTREE_TYPE_LEAF)
1989 return(HAMMER_BTREE_LEAF_ELMS);
1990 if (type == HAMMER_BTREE_TYPE_INTERNAL)
1991 return(HAMMER_BTREE_INT_ELMS);
1992 panic("btree_max_elements: bad type %d\n", type);
/*
 * hammer_print_btree_node() - debug dump of a node and its elements.
 *
 * Internal nodes iterate i <= count so the right-hand boundary element
 * is printed too; leaf nodes iterate i < count (no boundary element).
 */
1997 hammer_print_btree_node(hammer_node_ondisk_t ondisk)
1999 hammer_btree_elm_t elm;
2002 kprintf("node %p count=%d parent=%d type=%c\n",
2003 ondisk, ondisk->count, ondisk->parent, ondisk->type);
2006 * Dump both boundary elements if an internal node
2008 if (ondisk->type == HAMMER_BTREE_TYPE_INTERNAL) {
2009 for (i = 0; i <= ondisk->count; ++i) {
2010 elm = &ondisk->elms[i];
2011 hammer_print_btree_elm(elm, ondisk->type, i);
2014 for (i = 0; i < ondisk->count; ++i) {
2015 elm = &ondisk->elms[i];
2016 hammer_print_btree_elm(elm, ondisk->type, i);
2022 hammer_print_btree_elm(hammer_btree_elm_t elm, u_int8_t type, int i)
2025 kprintf("\tobjid = %016llx\n", elm->base.obj_id);
2026 kprintf("\tkey = %016llx\n", elm->base.key);
2027 kprintf("\tcreate_tid = %016llx\n", elm->base.create_tid);
2028 kprintf("\tdelete_tid = %016llx\n", elm->base.delete_tid);
2029 kprintf("\trec_type = %04x\n", elm->base.rec_type);
2030 kprintf("\tobj_type = %02x\n", elm->base.obj_type);
2031 kprintf("\tsubtree_type = %02x\n", elm->subtree_type);
2033 if (type == HAMMER_BTREE_TYPE_INTERNAL) {
2034 if (elm->internal.rec_offset) {
2035 kprintf("\tcluster_rec = %08x\n",
2036 elm->internal.rec_offset);
2037 kprintf("\tcluster_id = %08x\n",
2038 elm->internal.subtree_clu_no);
2039 kprintf("\tvolno = %08x\n",
2040 elm->internal.subtree_vol_no);
2042 kprintf("\tsubtree_off = %08x\n",
2043 elm->internal.subtree_offset);
2045 kprintf("\tsubtree_count= %d\n", elm->internal.subtree_count);
2047 kprintf("\trec_offset = %08x\n", elm->leaf.rec_offset);
2048 kprintf("\tdata_offset = %08x\n", elm->leaf.data_offset);
2049 kprintf("\tdata_len = %08x\n", elm->leaf.data_len);
2050 kprintf("\tdata_crc = %08x\n", elm->leaf.data_crc);