2 * Copyright (c) 2007 The DragonFly Project. All rights reserved.
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@backplane.com>
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * 3. Neither the name of The DragonFly Project nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific, prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * $DragonFly: src/sys/vfs/hammer/hammer_btree.c,v 1.5 2007/11/20 07:16:28 dillon Exp $
40 * HAMMER implements a modified B+Tree. In documentation this will
41 * simply be referred to as the HAMMER B-Tree. Basically a B-Tree
42 * looks like a B+Tree (A B-Tree which stores its records only at the leafs
43 * of the tree), but adds two additional boundary elements which describe
44 * the left-most and right-most element a node is able to represent. In
45 * otherwords, we have boundary elements at the two ends of a B-Tree node
46 * instead of sub-tree pointers.
48 * A B-Tree internal node looks like this:
50 * B N N N N N N B <-- boundary and internal elements
51 * S S S S S S S <-- subtree pointers
53 * A B-Tree leaf node basically looks like this:
55 * L L L L L L L L <-- leaf elements
57 * The radix for an internal node is 1 less then a leaf but we get a
58 * number of significant benefits for our troubles.
60 * The big benefit to using a B-Tree containing boundary information
61 * is that it is possible to cache pointers into the middle of the tree
62 * and not have to start searches, insertions, OR deletions at the root
63 * node. In particular, searches are able to progress in a definitive
64 * direction from any point in the tree without revisiting nodes. This
65 * greatly improves the efficiency of many operations, most especially
68 * B-Trees also make the stacking of trees fairly straightforward.
70 * INTER-CLUSTER ELEMENTS: An element of an internal node may reference
71 * the root of another cluster rather then a node in the current cluster.
72 * This is known as an inter-cluster reference. Only B-Tree searches
73 * will cross cluster boundaries. The rebalancing and collapse code does
74 * not attempt to move children between clusters. A major effect of this
75 * is that we have to relax minimum element count requirements and allow
76 * trees to become somewhat unbalanced.
78 * INSERTIONS AND DELETIONS: When inserting we split full nodes on our
79 * way down as an optimization. I originally experimented with rebalancing
80 * nodes on the way down for deletions but it created a huge mess due to
81 * the way inter-cluster linkages work. Instead, now I simply allow
82 * the tree to become unbalanced and allow leaf nodes to become empty.
83 * The delete code will try to clean things up from the bottom-up but
84 * will stop if related elements are not in-core or if it cannot get a node
91 static int btree_search(hammer_cursor_t cursor, int flags);
92 static int btree_split_internal(hammer_cursor_t cursor);
93 static int btree_split_leaf(hammer_cursor_t cursor);
94 static int btree_remove(hammer_node_t node, int index);
96 static int btree_rebalance(hammer_cursor_t cursor);
97 static int btree_collapse(hammer_cursor_t cursor);
99 static int btree_node_is_full(hammer_node_ondisk_t node);
100 static void hammer_make_separator(hammer_base_elm_t key1,
101 hammer_base_elm_t key2, hammer_base_elm_t dest);
104 * Iterate records after a search. The cursor is iterated forwards past
105 * the current record until a record matching the key-range requirements
106 * is found. ENOENT is returned if the iteration goes past the ending
109 * key_beg/key_end is an INCLUSIVE range. i.e. if you are scanning to load
110 * a 4096 byte buffer key_beg might specify an offset of 0 and key_end an
113 * cursor->key_beg may or may not be modified by this function during
/*
 * Advance the cursor to the next B-Tree element satisfying the inclusive
 * cursor->key_beg/key_end range.  Returns 0 with the cursor positioned on
 * a matching leaf element, ENOENT once iteration passes key_end.
 *
 * NOTE(review): several declarations and braces (error, r, s, save_key,
 * the loop framing) are elided from this chunk; the comments below cover
 * only the visible logic.
 */
117 hammer_btree_iterate(hammer_cursor_t cursor)
119 hammer_node_ondisk_t node;
120 hammer_btree_elm_t elm;
127 * Skip past the current record
129 node = cursor->node->ondisk;
132 if (cursor->index < node->count)
136 * Loop until an element is found or we are done.
140 * We iterate up the tree and then index over one element
141 * while we are at the last element in the current node.
143 * NOTE: This can pop us up to another cluster.
145 * If we are at the root of the root cluster, cursor_up
148 * NOTE: hammer_cursor_up() will adjust cursor->key_beg
149 * when told to re-search for the cluster tag.
151 * XXX this could be optimized by storing the information in
152 * the parent reference.
154 if (cursor->index == node->count) {
155 error = hammer_cursor_up(cursor);
158 node = cursor->node->ondisk;
159 KKASSERT(cursor->index != node->count);
165 * Iterate down the tree while we are at an internal node.
166 * Nodes cannot be empty, assert the case because if one is
167 * we will wind up in an infinite loop.
169 * We can avoid iterating through large swaths of transaction
170 * id space if the left and right separators are the same
171 * except for their transaction spaces. We can then skip
172 * the node if the left and right transaction spaces are the
173 * same sign. This directly optimized accesses to files with
174 * HUGE transactional histories, such as database files,
175 * allowing us to avoid having to iterate through the entire
178 if (node->type == HAMMER_BTREE_TYPE_INTERNAL) {
179 KKASSERT(node->count != 0);
/* elm[0]/elm[1] are the left/right separators of the sub-tree at index */
180 elm = &node->elms[cursor->index];
181 if (elm[0].base.obj_id == elm[1].base.obj_id &&
182 elm[0].base.rec_type == elm[1].base.rec_type &&
183 elm[0].base.key == elm[1].base.key) {
185 * Left side transaction space
/* temporarily substitute the separator key so only txn fields differ */
187 save_key = cursor->key_beg.key;
188 cursor->key_beg.key = elm[0].base.key;
189 r = hammer_btree_cmp(&cursor->key_beg,
191 cursor->key_beg.key = save_key;
194 * Right side transaction space
196 save_key = cursor->key_end.key;
197 cursor->key_end.key = elm[1].base.key;
198 s = hammer_btree_cmp(&cursor->key_end,
200 cursor->key_end.key = save_key;
203 * If our range is entirely on one side or
204 * the other side we can skip the sub-tree.
206 if ((r < 0 && s < 0) || (r > 0 && s > 0)) {
211 error = hammer_cursor_down(cursor);
214 KKASSERT(cursor->index == 0);
215 node = cursor->node->ondisk;
222 * Determine if the record at the cursor has gone beyond the
223 * end of our range. Remember that our key range is inclusive.
225 * When iterating we may have to 'pick out' records matching
226 * our transaction requirements. A comparison return of
227 * +1 or -1 indicates a transactional record that is too
228 * old or too new but does not terminate the search.
230 elm = &node->elms[cursor->index];
231 r = hammer_btree_cmp(&cursor->key_end, &elm->base);
/* r == +/-1: only the transaction space mismatched -- keep scanning */
232 if (r == -1 || r == 1) {
238 * The search ends if the element goes beyond our key_end
239 * (after checking transactional return values above).
240 * Otherwise we have a successful match.
242 error = (r < 0) ? ENOENT : 0;
249 * Lookup cursor->key_beg. 0 is returned on success, ENOENT if the entry
250 * could not be found, and a fatal error otherwise.
252 * The cursor is suitably positioned for a deletion on success, and suitably
253 * positioned for an insertion on ENOENT.
255 * The cursor may begin anywhere, the search will traverse clusters in
256 * either direction to locate the requested element.
/*
 * Look up cursor->key_beg (see the block comment above).  On a successful
 * search, any record/data selected by cursor->flags is extracted so the
 * caller gets both position and payload in one call.
 */
259 hammer_btree_lookup(hammer_cursor_t cursor)
263 error = btree_search(cursor, 0);
/* a non-zero cursor->flags requests record/data extraction on success */
264 if (error == 0 && cursor->flags)
265 error = hammer_btree_extract(cursor, cursor->flags);
270 * Extract the record and/or data associated with the cursor's current
271 * position. Any prior record or data stored in the cursor is replaced.
272 * The cursor must be positioned at a leaf node.
274 * NOTE: Only records can be extracted from internal B-Tree nodes, and
275 * only for inter-cluster references. At the moment we only support
276 * extractions from leaf nodes.
/*
 * Extract the record and/or data (selected by 'flags') for the leaf
 * element at the cursor's current position into cursor->record and
 * cursor->data, reading filesystem buffers via hammer_bread() as needed.
 */
279 hammer_btree_extract(hammer_cursor_t cursor, int flags)
281 hammer_node_ondisk_t node;
282 hammer_btree_elm_t elm;
283 hammer_cluster_t cluster;
288 * A cluster record type has no data reference, the information
289 * is stored directly in the record and B-Tree element.
291 * The case where the data reference resolves to the same buffer
292 * as the record reference must be handled.
294 node = cursor->node->ondisk;
295 KKASSERT(node->type == HAMMER_BTREE_TYPE_LEAF);
296 elm = &node->elms[cursor->index];
297 cluster = cursor->node->cluster;
300 if ((flags & HAMMER_CURSOR_GET_RECORD) && error == 0) {
301 cloff = elm->leaf.rec_offset;
302 cursor->record = hammer_bread(cluster, cloff,
303 HAMMER_FSBUF_RECORDS, &error,
304 &cursor->record_buffer);
/*
 * NOTE(review): if GET_DATA is requested without GET_RECORD, 'cloff'
 * is presumably initialized by code elided from this chunk -- confirm
 * against the full source.
 */
308 if ((flags & HAMMER_CURSOR_GET_DATA) && error == 0) {
/* buffer-aligned offsets differ -> data lives in a separate buffer */
309 if ((cloff ^ elm->leaf.data_offset) & ~HAMMER_BUFMASK) {
311 * Data in different buffer than record
313 cursor->data = hammer_bread(cluster,
314 elm->leaf.data_offset,
315 HAMMER_FSBUF_DATA, &error,
316 &cursor->data_buffer);
319 * Data in same buffer as record. Note that we
320 * leave any existing data_buffer intact, even
321 * though we don't use it in this case, in case
322 * other records extracted during an iteration
325 cursor->data = (void *)
326 ((char *)cursor->record_buffer->ondisk +
327 (elm->leaf.data_offset & HAMMER_BUFMASK));
335 * Insert a leaf element into the B-Tree at the current cursor position.
336 * The cursor is positioned such that the element at and beyond the cursor
337 * are shifted to make room for the new record.
339 * The caller must call hammer_btree_lookup() with the HAMMER_CURSOR_INSERT
340 * flag set and that call must return ENOENT before this function can be
343 * ENOSPC is returned if there is no room to insert a new record.
/*
 * Insert leaf element *elm at the cursor position established by a prior
 * HAMMER_CURSOR_INSERT lookup (which must have returned ENOENT).  Elements
 * at and beyond the insertion index are shifted right to make room, and
 * the parent's approximate subtree_count is bumped.
 */
346 hammer_btree_insert(hammer_cursor_t cursor, hammer_btree_elm_t elm)
348 hammer_node_ondisk_t parent;
349 hammer_node_ondisk_t node;
353 /* HANDLED BY CALLER */
355 * Issue a search to get our cursor at the right place. The search
356 * will get us to a leaf node.
358 * The search also does some setup for our insert, so there is always
361 error = btree_search(cursor, HAMMER_CURSOR_INSERT);
362 if (error != ENOENT) {
370 * Insert the element at the leaf node and update the count in the
371 * parent. It is possible for parent to be NULL, indicating that
372 * the root of the B-Tree in the cluster is a leaf. It is also
373 * possible for the leaf to be empty.
375 * Remember that the right-hand boundary is not included in the
378 node = cursor->node->ondisk;
380 KKASSERT(node->type == HAMMER_BTREE_TYPE_LEAF);
381 KKASSERT(node->count < HAMMER_BTREE_LEAF_ELMS);
/* shift trailing elements right to open a hole at index i */
382 if (i != node->count) {
383 bcopy(&node->elms[i], &node->elms[i+1],
384 (node->count - i) * sizeof(*elm));
386 node->elms[i] = *elm;
388 hammer_modify_node(cursor->node);
/*
 * BUGFIX: the comment above documents that the parent may be NULL (a
 * root-leaf cluster), but the old code dereferenced cursor->parent
 * unconditionally, crashing in exactly that case.  Test the pointer
 * before following it, matching hammer_btree_delete()'s check.
 */
390 if (cursor->parent != NULL && (parent = cursor->parent->ondisk) != NULL) {
391 i = cursor->parent_index;
392 ++parent->elms[i].internal.subtree_count;
393 KKASSERT(parent->elms[i].internal.subtree_count <= node->count);
394 hammer_modify_node(cursor->parent);
400 * Delete a record from the B-Tree's at the current cursor position.
401 * The cursor is positioned such that the current element is the one
404 * The caller must call hammer_btree_lookup() with the HAMMER_CURSOR_DELETE
405 * flag set and that call must return 0 before this function can be
408 * It is possible that we will be asked to delete the last element in a
409 * leaf. This case only occurs if the downward search was unable to
410 * rebalance us, which in turn can occur if our parent has inter-cluster
411 * elements. So the 0-element case for a leaf is allowed.
/*
 * Delete the leaf element at the cursor position established by a prior
 * HAMMER_CURSOR_DELETE lookup.  Shifts the remaining elements left,
 * decrements the parent's approximate subtree_count, and if the leaf
 * becomes empty attempts a bottom-up removal of the subtree reference
 * via btree_remove().
 */
414 hammer_btree_delete(hammer_cursor_t cursor)
416 hammer_node_ondisk_t ondisk;
418 hammer_node_t parent;
419 hammer_btree_elm_t elm;
424 /* HANDLED BY CALLER */
426 * Locate the leaf element to delete. The search is also responsible
427 * for doing some of the rebalancing work on its way down.
429 error = btree_search(cursor, HAMMER_CURSOR_DELETE);
435 * Delete the element from the leaf node.
437 * Remember that leaf nodes do not have boundaries.
440 ondisk = node->ondisk;
443 KKASSERT(ondisk->type == HAMMER_BTREE_TYPE_LEAF);
/* close the hole by shifting the trailing elements left */
444 if (i + 1 != ondisk->count) {
445 bcopy(&ondisk->elms[i+1], &ondisk->elms[i],
446 (ondisk->count - i - 1) * sizeof(ondisk->elms[0]));
449 if (cursor->parent != NULL) {
451 * Adjust parent's notion of the leaf's count. subtree_count
452 * is only approximate, it is allowed to be too small but
453 * never allowed to be too large. Make sure we don't drop
456 parent = cursor->parent;
457 elm = &parent->ondisk->elms[cursor->parent_index];
458 if (elm->internal.subtree_count)
459 --elm->internal.subtree_count;
460 KKASSERT(elm->internal.subtree_count <= ondisk->count);
461 hammer_modify_node(parent);
465 * If the leaf is empty try to remove the subtree reference
466 * in at (parent, parent_index). This will unbalance the
469 * Note that internal nodes must have at least one element
470 * so their boundary information is properly laid out. If
471 * we would cause our parent to become empty we try to
472 * recurse up the tree, but if that doesn't work we just
473 * leave the tree with an empty leaf.
475 if (ondisk->count == 0) {
476 error = btree_remove(cursor->parent, cursor->parent_index);
/* success: the now-unreferenced empty leaf can be freed */
478 hammer_free_btree(node->cluster, node->node_offset);
479 } else if (error == EAGAIN) {
/* locks unavailable; keep the empty leaf, it is still linked */
480 hammer_modify_node(node);
482 } /* else a real error occurred XXX */
484 hammer_modify_node(node);
491 * PRIMARY B-TREE SEARCH SUPPORT PROCEDURE
493 * Search a cluster's B-Tree for cursor->key_beg, return the matching node.
495 * The search begins at the current node and will instantiate a NULL
496 * parent if necessary and if not at the root of the cluster. On return
497 * parent will be non-NULL unless the cursor is sitting at a root-leaf.
499 * The search code may be forced to iterate up the tree if the conditions
500 * required for an insertion or deletion are not met. This does not occur
503 * INSERTIONS: The search will split full nodes and leaves on its way down
504 * and guarantee that the leaf it ends up on is not full.
506 * DELETIONS: The search will rebalance the tree on its way down.
/*
 * Core search: position the cursor at cursor->key_beg within the B-Tree.
 * Cursors up (possibly across clusters) until the current node's range
 * covers the key, satisfies INSERT/DELETE pre-conditions on the way up,
 * then pushes down through internal nodes to a leaf and scans it linearly.
 *
 * NOTE(review): local declarations (error, i, r) and several braces are
 * elided from this chunk; comments describe only the visible logic.
 */
510 btree_search(hammer_cursor_t cursor, int flags)
512 hammer_node_ondisk_t node;
513 hammer_cluster_t cluster;
518 flags |= cursor->flags;
521 * Move our cursor up the tree until we find a node whose range covers
522 * the key we are trying to locate. This may move us between
525 * The left bound is inclusive, the right bound is non-inclusive.
526 * It is ok to cursor up too far so when cursoring across a cluster
529 * First see if we can skip the whole cluster. hammer_cursor_up()
530 * handles both cases but this way we don't check the cluster
531 * bounds when going up the tree within a cluster.
533 cluster = cursor->node->cluster;
535 hammer_btree_cmp(&cursor->key_beg, &cluster->clu_btree_beg) < 0 ||
536 hammer_btree_cmp(&cursor->key_beg, &cluster->clu_btree_end) >= 0) {
537 error = hammer_cursor_toroot(cursor);
540 error = hammer_cursor_up(cursor);
543 cluster = cursor->node->cluster;
547 * Deal with normal cursoring within a cluster. The right bound
548 * is non-inclusive. That is, the bounds form a separator.
550 while (hammer_btree_cmp(&cursor->key_beg, cursor->left_bound) < 0 ||
551 hammer_btree_cmp(&cursor->key_beg, cursor->right_bound) >= 0) {
552 error = hammer_cursor_up(cursor);
558 * We better have ended up with a node somewhere, and our second
559 * while loop had better not have traversed up a cluster.
561 KKASSERT(cursor->node != NULL && cursor->node->cluster == cluster);
564 * If we are inserting we can't start at a full node if the parent
565 * is also full (because there is no way to split the node),
566 * continue running up the tree until we hit the root of the
567 * root cluster or until the requirement is satisfied.
569 * NOTE: These cursor-up's CAN continue to cross cluster boundaries.
571 * XXX as an optimization it should be possible to unbalance the tree
572 * and stop at the root of the current cluster.
574 while (flags & HAMMER_CURSOR_INSERT) {
575 if (btree_node_is_full(cursor->node->ondisk) == 0)
577 if (cursor->parent == NULL)
579 if (cursor->parent->ondisk->count != HAMMER_BTREE_INT_ELMS)
581 error = hammer_cursor_up(cursor);
582 /* cluster and node may now become stale */
586 /* cluster = cursor->node->cluster; not needed until next cluster = */
590 * If we are deleting we can't start at an internal node with only
591 * one element unless it is root, because all of our code assumes
592 * that internal nodes will never be empty. Just do this generally
593 * for both leaf and internal nodes to get better balance.
595 * This handles the case where the cursor is sitting at a leaf and
596 * either the leaf or parent contain an insufficient number of
599 * NOTE: These cursor-up's CAN continue to cross cluster boundaries.
601 * XXX NOTE: Iterations may not set this flag anyway.
603 while (flags & HAMMER_CURSOR_DELETE) {
604 if (cursor->node->ondisk->count > 1)
606 if (cursor->parent == NULL)
608 KKASSERT(cursor->node->ondisk->count != 0);
609 error = hammer_cursor_up(cursor);
610 /* cluster and node may now become stale */
618 * Push down through internal nodes to locate the requested key.
620 cluster = cursor->node->cluster;
621 node = cursor->node->ondisk;
622 while (node->type == HAMMER_BTREE_TYPE_INTERNAL) {
625 * If we are at the root node and deleting, try to collapse
626 * all of the root's children into the root. This is the
627 * only point where tree depth is reduced.
629 * XXX NOTE: Iterations may not set this flag anyway.
631 if ((flags & HAMMER_CURSOR_DELETE) && cursor->parent == NULL) {
632 error = btree_collapse(cursor);
633 /* node becomes stale after call */
637 node = cursor->node->ondisk;
641 * Scan the node to find the subtree index to push down into.
642 * We go one-past, then back-up. The key should never be
643 * less than the left-hand boundary so I should never wind
646 for (i = 0; i < node->count; ++i) {
647 r = hammer_btree_cmp(&cursor->key_beg,
648 &node->elms[i].base);
655 * The push-down index is now i - 1.
661 * Handle insertion and deletion requirements.
663 * If inserting split full nodes. The split code will
664 * adjust cursor->node and cursor->index if the current
665 * index winds up in the new node.
667 if (flags & HAMMER_CURSOR_INSERT) {
668 if (node->count == HAMMER_BTREE_INT_ELMS) {
669 error = btree_split_internal(cursor);
673 * reload stale pointers
676 node = cursor->node->ondisk;
682 * If deleting rebalance - do not allow the child to have
683 * just one element or we will not be able to delete it.
685 * Neither internal or leaf nodes (except a root-leaf) are
686 * allowed to drop to 0 elements. (XXX - well, leaf nodes
687 * can at the moment).
689 * Our separators may have been reorganized after rebalancing,
690 * so we have to pop back up and rescan.
692 * XXX test for subtree_count < maxelms / 2, minus 1 or 2
695 * XXX NOTE: Iterations may not set this flag anyway.
697 if (flags & HAMMER_CURSOR_DELETE) {
698 if (node->elms[i].internal.subtree_count <= 1) {
699 error = btree_rebalance(cursor);
702 /* cursor->index is invalid after call */
709 * Push down (push into new node, existing node becomes
712 error = hammer_cursor_down(cursor);
713 /* node and cluster become stale */
716 node = cursor->node->ondisk;
717 cluster = cursor->node->cluster;
721 * We are at a leaf, do a linear search of the key array.
722 * (XXX do a binary search). On success the index is set to the
723 * matching element, on failure the index is set to the insertion
726 * Boundaries are not stored in leaf nodes, so the index can wind
727 * up to the left of element 0 (index == 0) or past the end of
728 * the array (index == node->count).
730 KKASSERT(node->count <= HAMMER_BTREE_LEAF_ELMS);
732 for (i = 0; i < node->count; ++i) {
733 r = hammer_btree_cmp(&cursor->key_beg, &node->elms[i].base);
736 * Stop if we've flipped past key_beg
742 * Return an exact match
752 * No exact match was found, i is now at the insertion point.
754 * If inserting split a full leaf before returning. This
755 * may have the side effect of adjusting cursor->node and
759 if ((flags & HAMMER_CURSOR_INSERT) &&
760 node->count == HAMMER_BTREE_LEAF_ELMS) {
761 error = btree_split_leaf(cursor);
/*
 * NOTE(review): sibling reload sites use cursor->node->ondisk; verify
 * this '&cursor->node->internal' reload against the full source.
 */
764 node = &cursor->node->internal;
775 /************************************************************************
776 * SPLITTING AND MERGING *
777 ************************************************************************
779 * These routines do all the dirty work required to split and merge nodes.
783 * Split an internal node into two nodes and move the separator at the split
784 * point to the parent. Note that the parent's parent's element pointing
785 * to our parent will have an incorrect subtree_count (we don't update it).
786 * It will be low, which is ok.
788 * (cursor->node, cursor->index) indicates the element the caller intends
789 * to push into. We will adjust node and index if that element winds
790 * up in the split node.
792 * If we are at the root of a cluster a new root must be created with two
793 * elements, one pointing to the original root and one pointing to the
794 * newly allocated split node.
796 * NOTE! Being at the root of a cluster is different from being at the
797 * root of the root cluster. cursor->parent will not be NULL and
798 * cursor->node->ondisk.parent must be tested against 0. Theoretically
799 * we could propagate the algorithm into the parent and deal with multiple
800 * 'roots' in the cluster header, but it's easier not to.
/*
 * Split a full internal node into two, promoting the separator element at
 * the split point into the parent.  If the node is the root of its cluster
 * a new root (with one element plus the right-hand boundary) is created
 * first.  On return cursor->node/cursor->index are re-pointed when the
 * intended insertion point landed in the new right-hand node.
 */
804 btree_split_internal(hammer_cursor_t cursor)
806 hammer_node_ondisk_t ondisk;
808 hammer_node_t parent;
809 hammer_node_t new_node;
810 hammer_btree_elm_t elm;
811 hammer_btree_elm_t parent_elm;
816 const int esize = sizeof(*elm);
819 * We are splitting but elms[split] will be promoted to the parent,
820 * leaving the right hand node with one less element. If the
821 * insertion point will be on the left-hand side adjust the split
822 * point to give the right hand side one additional node.
825 ondisk = node->ondisk;
826 split = (ondisk->count + 1) / 2;
827 if (cursor->index <= split)
832 * If we are at the root of the cluster, create a new root node with
833 * 1 element and split normally. Avoid making major modifications
834 * until we know the whole operation will work.
836 * The root of the cluster is different from the root of the root
837 * cluster. Use the node's on-disk structure's parent offset to
840 if (ondisk->parent == 0) {
841 parent = hammer_alloc_btree(node->cluster, &error);
844 hammer_lock_ex(&parent->lock);
845 ondisk = parent->ondisk;
848 ondisk->type = HAMMER_BTREE_TYPE_INTERNAL;
849 ondisk->elms[0].base = node->cluster->clu_btree_beg;
850 ondisk->elms[0].internal.subtree_type = node->ondisk->type;
851 ondisk->elms[0].internal.subtree_offset = node->node_offset;
852 ondisk->elms[1].base = node->cluster->clu_btree_end;
854 parent_index = 0; /* index of current node in parent */
857 parent = cursor->parent;
858 parent_index = cursor->parent_index;
862 * Split node into new_node at the split point.
864 * B O O O P N N B <-- P = node->elms[split]
865 * 0 1 2 3 4 5 6 <-- subtree indices
870 * B O O O B B N N B <--- inner boundary points are 'P'
874 new_node = hammer_alloc_btree(node->cluster, &error);
875 if (new_node == NULL) {
/* unwind: release (and free, if we created it) the new root */
877 hammer_unlock(&parent->lock);
878 hammer_free_btree(node->cluster, parent->node_offset);
879 hammer_rel_node(parent);
883 hammer_lock_ex(&new_node->lock);
886 * Create the new node. P becomes the left-hand boundary in the
887 * new node. Copy the right-hand boundary as well.
889 * elm is the new separator.
891 ondisk = node->ondisk;
892 elm = &ondisk->elms[split];
/* count - split + 1 elements: the copy includes the right-hand boundary */
893 bcopy(elm, &new_node->ondisk->elms[0],
894 (ondisk->count - split + 1) * esize);
895 new_node->ondisk->count = ondisk->count - split;
896 new_node->ondisk->parent = parent->node_offset;
897 new_node->ondisk->type = HAMMER_BTREE_TYPE_INTERNAL;
898 KKASSERT(ondisk->type == new_node->ondisk->type);
901 * Cleanup the original node. P becomes the new boundary, its
902 * subtree_offset was moved to the new node. If we had created
903 * a new root its parent pointer may have changed.
905 elm->internal.subtree_offset = 0;
908 * Insert the separator into the parent, fixup the parent's
909 * reference to the original node, and reference the new node.
910 * The separator is P.
912 * Remember that base.count does not include the right-hand boundary.
914 ondisk = parent->ondisk;
915 ondisk->elms[parent_index].internal.subtree_count = split;
916 parent_elm = &ondisk->elms[parent_index+1];
917 bcopy(parent_elm, parent_elm + 1,
918 (ondisk->count - parent_index) * esize);
919 parent_elm->internal.base = elm->base; /* separator P */
920 parent_elm->internal.subtree_offset = new_node->node_offset;
921 parent_elm->internal.subtree_count = new_node->ondisk->count;
924 * The cluster's root pointer may have to be updated.
/* NOTE(review): the guard for the created-a-new-root case appears to be
 * elided from this chunk -- confirm against the full source. */
927 node->cluster->ondisk->clu_btree_root = parent->node_offset;
928 hammer_modify_cluster(node->cluster);
929 node->ondisk->parent = parent->node_offset;
930 if (cursor->parent) {
931 hammer_unlock(&cursor->parent->lock);
932 hammer_rel_node(cursor->parent);
934 cursor->parent = parent; /* lock'd and ref'd */
937 hammer_modify_node(node);
938 hammer_modify_node(new_node);
939 hammer_modify_node(parent);
942 * Ok, now adjust the cursor depending on which element the original
943 * index was pointing at. If we are >= the split point the push node
944 * is now in the new node.
946 * NOTE: If we are at the split point itself we cannot stay with the
947 * original node because the push index will point at the right-hand
948 * boundary, which is illegal.
950 * NOTE: The cursor's parent or parent_index must be adjusted for
951 * the case where a new parent (new root) was created, and the case
952 * where the cursor is now pointing at the split node.
954 if (cursor->index >= split) {
955 cursor->parent_index = parent_index + 1;
956 cursor->index -= split;
957 hammer_unlock(&cursor->node->lock);
958 hammer_rel_node(cursor->node);
959 cursor->node = new_node; /* locked and ref'd */
961 cursor->parent_index = parent_index;
962 hammer_unlock(&new_node->lock);
963 hammer_rel_node(new_node);
969 * Same as the above, but splits a full leaf node.
/*
 * Split a full leaf node into two, inserting a synthesized separator
 * (hammer_make_separator of the two straddling elements) into the parent.
 * If the leaf is the root of its cluster a new root is created first.
 * On return cursor->node/cursor->index are re-pointed when the intended
 * insertion point landed in the new right-hand leaf.
 */
973 btree_split_leaf(hammer_cursor_t cursor)
975 hammer_node_ondisk_t ondisk;
976 hammer_node_t parent;
978 hammer_node_t new_leaf;
979 hammer_btree_elm_t elm;
980 hammer_btree_elm_t parent_elm;
985 const size_t esize = sizeof(*elm);
988 * Calculate the split point. If the insertion point will be on
989 * the left-hand side adjust the split point to give the right
990 * hand side one additional node.
993 ondisk = leaf->ondisk;
994 split = (ondisk->count + 1) / 2;
995 if (cursor->index <= split)
1000 * If we are at the root of the tree, create a new root node with
1001 * 1 element and split normally. Avoid making major modifications
1002 * until we know the whole operation will work.
1004 if (ondisk->parent == 0) {
1005 parent = hammer_alloc_btree(leaf->cluster, &error);
1008 hammer_lock_ex(&parent->lock);
1009 ondisk = parent->ondisk;
1012 ondisk->type = HAMMER_BTREE_TYPE_INTERNAL;
1013 ondisk->elms[0].base = leaf->cluster->clu_btree_beg;
1014 ondisk->elms[0].internal.subtree_type = leaf->ondisk->type;
1015 ondisk->elms[0].internal.subtree_offset = leaf->node_offset;
1016 ondisk->elms[1].base = leaf->cluster->clu_btree_end;
1018 parent_index = 0; /* insertion point in parent */
1021 parent = cursor->parent;
1022 parent_index = cursor->parent_index;
1026 * Split leaf into new_leaf at the split point. Select a separator
1027 * value in-between the two leafs but with a bent towards the right
1028 * leaf since comparisons use an 'elm >= separator' inequality.
1037 new_leaf = hammer_alloc_btree(leaf->cluster, &error);
1038 if (new_leaf == NULL) {
/* unwind: release (and free, if we created it) the new root */
1040 hammer_unlock(&parent->lock);
1041 hammer_free_btree(leaf->cluster, parent->node_offset);
1042 hammer_rel_node(parent);
1046 hammer_lock_ex(&new_leaf->lock);
1049 * Create the new node. P become the left-hand boundary in the
1050 * new node. Copy the right-hand boundary as well.
1052 ondisk = leaf->ondisk;
1053 elm = &ondisk->elms[split];
1054 bcopy(elm, &new_leaf->ondisk->elms[0], (ondisk->count - split) * esize);
1055 new_leaf->ondisk->count = ondisk->count - split;
1056 new_leaf->ondisk->parent = parent->node_offset;
1057 new_leaf->ondisk->type = HAMMER_BTREE_TYPE_LEAF;
1058 KKASSERT(ondisk->type == new_leaf->ondisk->type);
1061 * Cleanup the original node. Because this is a leaf node and
1062 * leaf nodes do not have a right-hand boundary, there
1063 * aren't any special edge cases to clean up.
1068 * Insert the separator into the parent, fixup the parent's
1069 * reference to the original node, and reference the new node.
1070 * The separator is P.
1072 * Remember that base.count does not include the right-hand boundary.
1073 * We are copying parent_index+1 to parent_index+2, not +0 to +1.
1075 ondisk = parent->ondisk;
1076 ondisk->elms[parent_index].internal.subtree_count = split;
1077 parent_elm = &ondisk->elms[parent_index+1];
1078 if (parent_index + 1 != ondisk->count) {
1079 bcopy(parent_elm, parent_elm + 1,
1080 (ondisk->count - parent_index - 1) * esize);
1082 hammer_make_separator(&elm[-1].base, &elm[0].base, &parent_elm->base);
1083 parent_elm->internal.subtree_offset = new_leaf->node_offset;
1084 parent_elm->internal.subtree_count = new_leaf->ondisk->count;
1087 * The cluster's root pointer may have to be updated.
1090 leaf->cluster->ondisk->clu_btree_root = parent->node_offset;
1091 hammer_modify_cluster(leaf->cluster);
1092 leaf->ondisk->parent = parent->node_offset;
1093 if (cursor->parent) {
1094 hammer_unlock(&cursor->parent->lock);
1095 hammer_rel_node(cursor->parent);
1097 cursor->parent = parent; /* lock'd and ref'd */
1100 hammer_modify_node(leaf);
1101 hammer_modify_node(new_leaf);
1102 hammer_modify_node(parent);
1105 * Ok, now adjust the cursor depending on which element the original
1106 * index was pointing at. If we are >= the split point the push node
1107 * is now in the new node.
1109 * NOTE: If we are at the split point itself we cannot stay with the
1110 * original node because the push index will point at the right-hand
1111 * boundary, which is illegal.
1113 if (cursor->index >= split) {
1114 cursor->parent_index = parent_index + 1;
1115 cursor->index -= split;
/*
 * BUGFIX: the old code assigned cursor->node = new_leaf BEFORE the
 * unlock/rel pair below, so the new leaf (still in use by the cursor)
 * was unlocked and released while the original leaf's lock and ref
 * leaked.  Release the ORIGINAL node first, then install new_leaf,
 * exactly as btree_split_internal() does.
 */
1117 hammer_unlock(&cursor->node->lock);
1118 hammer_rel_node(cursor->node);
1119 cursor->node = new_leaf; /* locked and ref'd */
1121 cursor->parent_index = parent_index;
1122 hammer_unlock(&new_leaf->lock);
1123 hammer_rel_node(new_leaf);
/*
 * NOTE(review): this listing is line-sampled — braces, the "int error;"
 * declaration, return statements and parts of the comment blocks are
 * missing between the numbered lines.  Verify against the full source.
 */
1129 * Remove the element at (node, index). If the internal node would become
1130 * empty passively recurse up the tree.
1132 * A locked internal node is passed to this function, the node remains
1133 * locked on return. Leaf nodes cannot be passed to this function.
1135 * Returns EAGAIN if we were unable to acquire the needed locks. The caller
1136 * does not deal with the empty leaf until it determines whether this recursion
1137 * has succeeded or not.
1140 btree_remove(hammer_node_t node, int index)
1142 hammer_node_ondisk_t ondisk;
1143 hammer_node_t parent;
1146 ondisk = node->ondisk;
1147 KKASSERT(ondisk->count > 0);
1150 * Remove the element, shifting remaining elements left one.
1151 * Note that our move must include the right-boundary element.
1153 if (ondisk->count != 1) {
/* bcopy is used (not memcpy) because source and destination overlap */
1154 bcopy(&ondisk->elms[index+1], &ondisk->elms[index],
1155 (ondisk->count - index) * sizeof(ondisk->elms[0]));
1157 hammer_modify_node(node);
1162 * Internal nodes cannot drop to 0 elements, so remove the node
1163 * from ITS parent. If the node is the root node, convert it to
1164 * an empty leaf node (which can drop to 0 elements).
1166 if (ondisk->parent == 0) {
/* Root node: demote in place to an empty leaf instead of freeing it */
1168 ondisk->type = HAMMER_BTREE_TYPE_LEAF;
1169 hammer_modify_node(node);
1174 * Try to remove the node from its parent. Return EAGAIN if we
/* Non-blocking exclusive lock attempt; on failure drop the ref
 * (presumably the EAGAIN path mentioned above — confirm in full source) */
1177 parent = hammer_get_node(node->cluster, ondisk->parent, &error);
1178 if (hammer_lock_ex_try(&parent->lock)) {
1179 hammer_rel_node(parent);
/* Locate this node's element in the parent by matching subtree_offset */
1182 ondisk = parent->ondisk;
1183 for (index = 0; index < ondisk->count; ++index) {
1184 if (ondisk->elms[index].internal.subtree_offset ==
1185 node->node_offset) {
/* Defensive check: parent no longer references us — corrupted linkage */
1189 if (index == ondisk->count) {
1190 kprintf("btree_remove: lost parent linkage to node\n");
/* Recurse upward: removing our element may empty the parent as well */
1193 error = btree_remove(parent, index);
1195 hammer_free_btree(node->cluster, node->node_offset);
1196 /* NOTE: node can be reallocated at any time now */
1199 hammer_unlock(&parent->lock);
1200 hammer_rel_node(parent);
/*
 * NOTE(review): line-sampled listing.  Several identifiers used below
 * ("node", "child_buffer", "nelms", "error") have no visible declaration
 * here, and hammer_bread() is called with literal "XXX" placeholder
 * arguments — this appears to be transitional/old-API code; verify
 * against the full source before relying on any detail.
 */
1207 * This routine is called on the internal node (node) prior to recursing down
1208 * through (node, index) when the node referenced by (node, index) MIGHT
1209 * have too few elements for the caller to perform a deletion.
1211 * cursor->index is invalid on return because the separators may have gotten
1212 * adjusted, the caller must rescan the node's elements. The caller may set
1213 * cursor->index to -1 if it wants us to do a general rebalancing.
1215 * This routine rebalances the children of the (node), collapsing children
1216 * together if possible. On return each child will have at least L/2-1
1217 * elements unless the node only has one child.
1219 * NOTE: Because we do not update the parent's parent in the split code,
1220 * the subtree_count used by the caller may be incorrect. We correct it
1221 * here. Also note that we cannot change the depth of the tree's leaf
1222 * nodes here (see btree_collapse()).
1224 * NOTE: We make no attempt to rebalance inter-cluster elements.
1228 btree_rebalance(hammer_cursor_t cursor)
1230 hammer_node_ondisk_t ondisk;
1232 hammer_node_t children[HAMMER_BTREE_INT_ELMS];
1233 hammer_node_t child;
1234 hammer_btree_elm_t elm;
1235 hammer_btree_elm_t elms;
1236 int i, j, n, nelms, goal;
1237 int maxelms, halfelms;
1241 * If the elm being recursed through is an inter-cluster reference,
1242 * don't worry about it.
1244 ondisk = cursor->node->ondisk;
1245 elm = &ondisk->elms[cursor->index];
1246 if (elm->internal.subtree_type == HAMMER_BTREE_TYPE_CLUSTER)
1249 KKASSERT(elm->internal.subtree_offset != 0);
1253 * Load the children of node and do any necessary corrections
1254 * to subtree_count. subtree_count may be too low due to the
1255 * way insertions split nodes. Get a count of the total number
1256 * of actual elements held by our children.
1260 for (i = n = 0; i < node->base.count; ++i) {
/* Inner "elm" intentionally shadows the outer hammer_btree_elm_t elm */
1261 struct hammer_btree_internal_elm *elm;
1263 elm = &node->elms[i];
1265 child_buffer[i] = NULL; /* must be preinitialized for bread */
1266 if (elm->subtree_offset == 0)
1268 child = hammer_bread(cursor->cluster, elm->subtree_offset,
1269 HAMMER_FSBUF_BTREE, &error,
1270 &child_buffer[i], XXX);
1271 children[i] = child;
1275 KKASSERT(node->base.subtype == child->base.type);
1278 * Accumulate n for a good child, update the node's count
/* subtree_count may only ever be too small; snap it to the real count */
1281 if (node->elms[i].subtree_count != child->base.count) {
1282 node->elms[i].subtree_count = child->base.count;
1284 n += node->elms[i].subtree_count;
1290 * Collect all the children's elements together
/* +1 slot reserved for the right-boundary element stored below */
1293 elms = kmalloc(sizeof(*elms) * (nelms + 1), M_HAMMER, M_WAITOK|M_ZERO);
1294 for (i = n = 0; i < node->base.count; ++i) {
1295 child = children[i];
1296 for (j = 0; j < child->base.count; ++j) {
/* Remember which child originally owned the element so parent
 * pointers can be fixed up when an element changes owner */
1297 elms[n].owner = child;
1298 if (node->base.subtype == HAMMER_BTREE_TYPE_LEAF)
1299 elms[n].u.leaf = child->leaf.elms[j];
1301 elms[n].u.internal = child->internal.elms[j];
1305 KKASSERT(n == nelms);
1308 * Store a boundary in the elms array to ease the code below. This
1309 * is only used if the children are internal nodes.
1311 elms[n].u.internal = node->elms[i];
1314 * Calculate the number of elements each child should have (goal) by
1315 * reducing the number of elements until we achieve at least
1316 * halfelms - 1 per child, unless we are a degenerate case.
1318 maxelms = btree_max_elements(node->base.subtype);
1319 halfelms = maxelms / 2;
1321 goal = halfelms - 1;
1322 while (i && n / i < goal)
1326 * Now rebalance using the specified goal
1328 for (i = n = 0; i < node->base.count; ++i) {
1329 struct hammer_buffer *subchild_buffer = NULL;
1330 struct hammer_btree_internal_node *subchild;
1332 child = children[i];
/* Pack up to "goal" elements into this child, stop when drained */
1333 for (j = 0; j < goal && n < nelms; ++j) {
1334 if (node->base.subtype == HAMMER_BTREE_TYPE_LEAF) {
1335 child->leaf.elms[j] = elms[n].u.leaf;
1337 child->internal.elms[j] = elms[n].u.internal;
1341 * If the element's parent has changed we have to
1342 * update the parent pointer. This is somewhat
1345 if (elms[n].owner != child &&
1346 node->base.subtype == HAMMER_BTREE_TYPE_INTERNAL) {
1347 subchild = hammer_bread(cursor->cluster,
1348 elms[n].u.internal.subtree_offset,
1351 &subchild_buffer, XXX);
1353 subchild->base.parent =
1354 hammer_bclu_offset(child_buffer[i],
1356 hammer_modify_buffer(subchild_buffer);
1363 * Set right boundary if the children are internal nodes.
1365 if (node->base.subtype == HAMMER_BTREE_TYPE_INTERNAL)
1366 child->internal.elms[j] = elms[n].u.internal;
1367 child->base.count = j;
1368 hammer_modify_buffer(child_buffer[i]);
1369 if (subchild_buffer)
1370 hammer_put_buffer(subchild_buffer, 0);
1373 * If we have run out of elements, break out
1380 * Physically destroy any left-over children. These children's
1381 * elements have been packed into prior children. The node's
1382 * right hand boundary and count gets shifted to index i.
1384 * The subtree count in the node's parent MUST be updated because
1385 * we are removing elements. The subtree_count field is allowed to
1386 * be too small, but not too large!
1388 if (i != node->base.count) {
1390 node->elms[n] = node->elms[node->base.count];
1391 while (i < node->base.count) {
1392 hammer_free_btree_ptr(child_buffer[i], children[i]);
1393 hammer_put_buffer(child_buffer[i], 0);
1396 node->base.count = n;
1397 if (cursor->parent) {
1398 cursor->parent->elms[cursor->parent_index].subtree_count = n;
1399 hammer_modify_buffer(cursor->parent_buffer);
1403 kfree(elms, M_HAMMER);
1405 hammer_modify_buffer(cursor->node_buffer);
/* Release every child buffer that was successfully loaded */
1406 for (i = 0; i < node->base.count; ++i) {
1407 if (child_buffer[i])
1408 hammer_put_buffer(child_buffer[i], 0);
/*
 * NOTE(review): line-sampled listing.  As with btree_rebalance() above,
 * hammer_bread() is called with a literal "XXX" placeholder argument and
 * some declarations/returns fall in the sampling gaps; verify details
 * against the full source.
 */
1414 * This routine is only called if the cursor is at the root node and the
1415 * root node is an internal node. We attempt to collapse the root node
1416 * by replacing it with all of its children, reducing tree depth by one.
1418 * This is the only way to reduce tree depth in a HAMMER filesystem.
1419 * Note that all leaf nodes are at the same depth.
1421 * This is a fairly expensive operation because we not only have to load
1422 * the root's children, we also have to scan each child and adjust the
1423 * parent offset for each element in each child. Nasty all around.
1427 btree_collapse(hammer_cursor_t cursor)
1429 hammer_btree_node_ondisk_t root, child;
1430 hammer_btree_node_ondisk_t children[HAMMER_BTREE_INT_ELMS];
1431 struct hammer_buffer *child_buffer[HAMMER_BTREE_INT_ELMS];
1436 int32_t root_offset;
1437 u_int8_t subsubtype;
1439 root = cursor->node;
1440 count = root->base.count;
1441 root_offset = hammer_bclu_offset(cursor->node_buffer, root);
1444 * Sum up the number of children each element has. This value is
1445 * only approximate due to the way the insertion node works. It
1446 * may be too small but it will never be too large.
1448 * Quickly terminate the collapse if the elements have too many
1451 KKASSERT(root->base.parent == 0); /* must be root node */
1452 KKASSERT(root->base.type == HAMMER_BTREE_TYPE_INTERNAL);
1453 KKASSERT(count <= HAMMER_BTREE_INT_ELMS);
/* Cheap pre-check using the (possibly stale, never too large) counts */
1455 for (i = n = 0; i < count; ++i) {
1456 n += root->internal.elms[i].subtree_count;
1458 if (n > btree_max_elements(root->base.subtype))
1462 * Iterate through the elements again and correct the subtree_count.
1463 * Terminate the collapse if we wind up with too many.
1468 for (i = n = 0; i < count; ++i) {
1469 struct hammer_btree_internal_elm *elm;
1471 elm = &root->internal.elms[i];
/* must be preinitialized so the cleanup loop can test it safely */
1472 child_buffer[i] = NULL;
1474 if (elm->subtree_offset == 0)
1476 child = hammer_bread(cursor->cluster, elm->subtree_offset,
1477 HAMMER_FSBUF_BTREE, &error,
1478 &child_buffer[i], XXX);
1479 children[i] = child;
1482 KKASSERT(root->base.subtype == child->base.type);
1485 * Accumulate n for a good child, update the root's count
1488 if (root->internal.elms[i].subtree_count != child->base.count) {
1489 root->internal.elms[i].subtree_count = child->base.count;
1492 n += root->internal.elms[i].subtree_count;
1494 if (error || n > btree_max_elements(root->base.subtype))
1498 * Ok, we can collapse the root. If the root's children are leafs
1499 * the collapse is really simple. If they are internal nodes the
1500 * collapse is not so simple because we have to fixup the parent
1501 * pointers for the root's children's children.
1503 * When collapsing an internal node the far left and far right
1504 * element's boundaries should match the root's left and right
1507 if (root->base.subtype == HAMMER_BTREE_TYPE_LEAF) {
/* Leaf children: concatenate all leaf elements into the root */
1508 for (i = n = 0; i < count; ++i) {
1509 child = children[i];
1510 for (j = 0; j < child->base.count; ++j) {
1511 root->leaf.elms[n] = child->leaf.elms[j];
1515 root->base.type = root->base.subtype;
1516 root->base.subtype = 0;
1517 root->base.count = n;
1518 root->leaf.link_left = 0;
1519 root->leaf.link_right = 0;
1521 struct hammer_btree_internal_elm *elm;
1522 struct hammer_btree_internal_node *subchild;
1523 struct hammer_buffer *subchild_buffer = NULL;
/* Sanity: outermost child boundaries must match the root's own */
1526 child = children[0];
1527 subsubtype = child->base.subtype;
1528 KKASSERT(child->base.count > 0);
1529 KKASSERT(root->internal.elms[0].base.key ==
1530 child->internal.elms[0].base.key);
1531 child = children[count-1];
1532 KKASSERT(child->base.count > 0);
1533 KKASSERT(root->internal.elms[count].base.key ==
1534 child->internal.elms[child->base.count].base.key);
1538 for (i = n = 0; i < count; ++i) {
1539 child = children[i];
1540 KKASSERT(child->base.subtype == subsubtype);
1541 for (j = 0; j < child->base.count; ++j) {
1542 elm = &child->internal.elms[j];
1544 root->internal.elms[n] = *elm;
/* Each grandchild must be re-parented to the root's offset */
1545 subchild = hammer_bread(cursor->cluster,
1546 elm->subtree_offset,
1552 subchild->base.parent = root_offset;
1553 hammer_modify_buffer(subchild_buffer);
1557 /* make sure the right boundary is correct */
1558 /* (this gets overwritten when the loop continues) */
1559 /* XXX generate a new separator? */
1560 root->internal.elms[n] = child->internal.elms[j];
1562 root->base.type = HAMMER_BTREE_TYPE_INTERNAL;
1563 root->base.subtype = subsubtype;
1564 if (subchild_buffer)
1565 hammer_put_buffer(subchild_buffer, 0);
1574 hammer_modify_buffer(cursor->node_buffer);
/* Release all child buffers loaded above */
1575 for (i = 0; i < count; ++i) {
1576 if (child_buffer[i])
1577 hammer_put_buffer(child_buffer[i], 0);
1584 /************************************************************************
1585 * MISCELLANEOUS SUPPORT *
1586 ************************************************************************/
/*
 * NOTE(review): line-sampled listing — the return statements for each
 * comparison branch are missing between the numbered lines, and the
 * kprintf() debug block is presumably compiled out (e.g. under #if 0)
 * in the full source; confirm there.
 */
1589 * Compare two B-Tree elements, return -1, 0, or +1 (e.g. similar to strcmp).
1591 * See also hammer_rec_rb_compare() and hammer_rec_cmp() in hammer_object.c.
1593 * Note that key1 and key2 are treated differently. key1 is allowed to
1594 * wildcard some of its fields by setting them to 0, while key2 is expected
1595 * to be in an on-disk form (no wildcards).
1598 hammer_btree_cmp(hammer_base_elm_t key1, hammer_base_elm_t key2)
1601 kprintf("compare obj_id %016llx %016llx\n",
1602 key1->obj_id, key2->obj_id);
1603 kprintf("compare rec_type %04x %04x\n",
1604 key1->rec_type, key2->rec_type);
1605 kprintf("compare key %016llx %016llx\n",
1606 key1->key, key2->key);
1610 * A key1->obj_id of 0 matches any object id
1613 if (key1->obj_id < key2->obj_id)
1615 if (key1->obj_id > key2->obj_id)
1620 * A key1->rec_type of 0 matches any record type.
1622 if (key1->rec_type) {
1623 if (key1->rec_type < key2->rec_type)
1625 if (key1->rec_type > key2->rec_type)
1630 * There is no special case for key. 0 means 0.
1632 if (key1->key < key2->key)
1634 if (key1->key > key2->key)
1638 * This test has a number of special cases. create_tid in key1 is
1639 * the as-of transaction id, and delete_tid in key1 is NOT USED.
1641 * A key1->create_tid of 0 matches any record regardless of when
1642 * it was created or destroyed. 0xFFFFFFFFFFFFFFFFULL should be
1643 * used to search for the most current state of the object.
1645 * key2->create_tid is a HAMMER record and will never be
1646 * 0. key2->delete_tid is the deletion transaction id or 0 if
1647 * the record has not yet been deleted.
1649 if (key1->create_tid) {
1650 if (key1->create_tid < key2->create_tid)
/* Record deleted at-or-before the as-of tid: no longer visible */
1652 if (key2->delete_tid && key1->create_tid >= key2->delete_tid)
1660 * Create a separator half way in between key1 and key2. For fields just
1661 * one unit apart, the separator will match key2.
1663 * The handling of delete_tid is a little confusing. It is only possible
1664 * to have one record in the B-Tree where all fields match except delete_tid.
1665 * This means, worst case, two adjacent elements may have a create_tid that
1666 * is one-apart and cause the separator to choose the right-hand element's
1667 * create_tid. e.g. (create,delete): (1,x)(2,x) -> separator is (2,x).
1669 * So all we have to do is set delete_tid to the right-hand element to
1670 * guarantee that the separator is properly between the two elements.
/*
 * Midpoint with round-up: the "+ 1 >> 1" biases one-apart fields toward
 * key2, as documented above.  NOTE(review): the macro arguments are not
 * parenthesized in the expansion — safe for the simple pointer arguments
 * used below, but do not pass expressions.
 */
1672 #define MAKE_SEPARATOR(key1, key2, dest, field) \
1673 dest->field = key1->field + ((key2->field - key1->field + 1) >> 1);
1676 hammer_make_separator(hammer_base_elm_t key1, hammer_base_elm_t key2,
1677 hammer_base_elm_t dest)
/* Zero first so fields not set below (e.g. obj_type) are well defined */
1679 bzero(dest, sizeof(*dest));
1680 MAKE_SEPARATOR(key1, key2, dest, obj_id);
1681 MAKE_SEPARATOR(key1, key2, dest, rec_type);
1682 MAKE_SEPARATOR(key1, key2, dest, key);
1683 MAKE_SEPARATOR(key1, key2, dest, create_tid);
/* delete_tid copies the right-hand element; see the comment above */
1684 dest->delete_tid = key2->delete_tid;
1687 #undef MAKE_SEPARATOR
1690 * Return whether a generic internal or leaf node is full
/*
 * NOTE(review): line-sampled listing — the "return(1);"/"break;" lines
 * inside each case, the default: label, and the final return are missing
 * between the numbered lines.  The visible logic compares count against
 * the per-type element limit and panics on an unknown node type.
 */
1693 btree_node_is_full(hammer_node_ondisk_t node)
1695 switch(node->type) {
1696 case HAMMER_BTREE_TYPE_INTERNAL:
1697 if (node->count == HAMMER_BTREE_INT_ELMS)
1700 case HAMMER_BTREE_TYPE_LEAF:
1701 if (node->count == HAMMER_BTREE_LEAF_ELMS)
1705 panic("illegal btree subtype");
/*
 * Return the maximum number of elements a node of the given B-Tree type
 * can hold.  NOTE(review): the function's closing lines fall outside
 * this listing; the panic() is the unreachable-type fallthrough.
 */
1712 btree_max_elements(u_int8_t type)
1714 if (type == HAMMER_BTREE_TYPE_LEAF)
1715 return(HAMMER_BTREE_LEAF_ELMS);
1716 if (type == HAMMER_BTREE_TYPE_INTERNAL)
1717 return(HAMMER_BTREE_INT_ELMS);
1718 panic("btree_max_elements: bad type %d\n", type);