1/*
2 * Copyright (c) 2007 The DragonFly Project. All rights reserved.
3 *
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@backplane.com>
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
16 * distribution.
17 * 3. Neither the name of The DragonFly Project nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific, prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
34 * $DragonFly: src/sys/vfs/hammer/hammer_btree.c,v 1.13 2007/12/30 08:49:20 dillon Exp $
35 */
36
37/*
38 * HAMMER B-Tree index
39 *
40 * HAMMER implements a modified B+Tree. In documentation this will
41 * simply be referred to as the HAMMER B-Tree.  Basically the HAMMER B-Tree
42 * looks like a B+Tree (a B-Tree which stores its records only at the leaves
43 * of the tree), but adds two additional boundary elements which describe
44 * the left-most and right-most element a node is able to represent.  In
45 * other words, we have boundary elements at the two ends of a B-Tree node
46 * instead of sub-tree pointers.
47 *
48 * A B-Tree internal node looks like this:
49 *
50 * B N N N N N N B <-- boundary and internal elements
51 * S S S S S S S <-- subtree pointers
52 *
53 * A B-Tree leaf node basically looks like this:
54 *
55 * L L L L L L L L <-- leaf elements
56 *
57 * The radix for an internal node is 1 less than a leaf but we get a
58 * number of significant benefits for our troubles.
59 *
60 * The big benefit to using a B-Tree containing boundary information
61 * is that it is possible to cache pointers into the middle of the tree
62 * and not have to start searches, insertions, OR deletions at the root
63 * node. In particular, searches are able to progress in a definitive
64 * direction from any point in the tree without revisiting nodes.  This
65 * greatly improves the efficiency of many operations, most especially
66 * record appends.
67 *
68 * B-Trees also make the stacking of trees fairly straightforward.
69 *
70 * INTER-CLUSTER ELEMENTS: An element of an internal node may reference
71 * the root of another cluster rather than a node in the current cluster.
72 * This is known as an inter-cluster reference.  Only B-Tree searches
73 * will cross cluster boundaries.  The rebalancing and collapse code does
74 * not attempt to move children between clusters.  A major effect of this
75 * is that we have to relax minimum element count requirements and allow
76 * trees to become somewhat unbalanced.
77 *
78 * INSERTIONS AND DELETIONS: When inserting we split full nodes on our
79 * way down as an optimization. I originally experimented with rebalancing
80 * nodes on the way down for deletions but it created a huge mess due to
81 * the way inter-cluster linkages work. Instead, now I simply allow
82 * the tree to become unbalanced and allow leaf nodes to become empty.
83 * The delete code will try to clean things up from the bottom-up but
84 * will stop if related elements are not in-core or if it cannot get a node
85 * lock.
86 */
87#include "hammer.h"
88#include <sys/buf.h>
89#include <sys/buf2.h>
90
91static int btree_search(hammer_cursor_t cursor, int flags);
92static int btree_split_internal(hammer_cursor_t cursor);
93static int btree_split_leaf(hammer_cursor_t cursor);
94static int btree_remove(hammer_cursor_t cursor);
95static int btree_set_parent(hammer_node_t node, hammer_btree_elm_t elm);
96#if 0
97static int btree_rebalance(hammer_cursor_t cursor);
98static int btree_collapse(hammer_cursor_t cursor);
99#endif
100static int btree_node_is_full(hammer_node_ondisk_t node);
101static void hammer_make_separator(hammer_base_elm_t key1,
102 hammer_base_elm_t key2, hammer_base_elm_t dest);
103
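/*
 * Illustrative sketch, for documentation only (guarded by #if 0 so it is
 * never built): because each internal node carries its own left and right
 * boundary elements, a cached pointer into the middle of the tree can be
 * tested for key coverage without walking down from the root.  The element
 * layout and hammer_btree_cmp() are taken from this file; the helper name
 * itself is hypothetical.
 */
#if 0
static __inline int
hammer_btree_node_covers(hammer_node_ondisk_t node, hammer_base_elm_t key)
{
	/*
	 * The left boundary (elms[0]) is inclusive, the right boundary
	 * (elms[count]) is exclusive.  Leaf nodes do not store boundaries.
	 */
	KKASSERT(node->type == HAMMER_BTREE_TYPE_INTERNAL);
	return (hammer_btree_cmp(key, &node->elms[0].base) >= 0 &&
		hammer_btree_cmp(key, &node->elms[node->count].base) < 0);
}
#endif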
104/*
105 * Iterate records after a search. The cursor is iterated forwards past
106 * the current record until a record matching the key-range requirements
107 * is found. ENOENT is returned if the iteration goes past the ending
108 * key.
109 *
110 * The iteration is inclusive of key_beg and can be inclusive or exclusive
111 * of key_end depending on whether HAMMER_CURSOR_END_INCLUSIVE is set.
112 *
113 * cursor->key_beg may or may not be modified by this function during
114 * the iteration. XXX future - in case of an inverted lock we may have
115 * to reinitiate the lookup and set key_beg to properly pick up where we
116 * left off.
117 */
118int
119hammer_btree_iterate(hammer_cursor_t cursor)
120{
121 hammer_node_ondisk_t node;
122 hammer_btree_elm_t elm;
123 int error;
124 int r;
125 int s;
126
127 /*
128 * Skip past the current record
129 */
130 node = cursor->node->ondisk;
131 if (node == NULL)
132 return(ENOENT);
133 if (cursor->index < node->count &&
134 (cursor->flags & HAMMER_CURSOR_ATEDISK)) {
135 ++cursor->index;
136 }
137
138 /*
139 * Loop until an element is found or we are done.
140 */
141 for (;;) {
142 /*
143 * We iterate up the tree and then index over one element
144 * while we are at the last element in the current node.
145 *
146 * NOTE: This can pop us up to another cluster.
147 *
148 * If we are at the root of the root cluster, cursor_up
149 * returns ENOENT.
150 *
151 * NOTE: hammer_cursor_up() will adjust cursor->key_beg
152 * when told to re-search for the cluster tag.
153 *
154 * XXX this could be optimized by storing the information in
155 * the parent reference.
156 *
157 * XXX we can lose the node lock temporarily, this could mess
158 * up our scan.
159 */
160 if (cursor->index == node->count) {
161 error = hammer_cursor_up(cursor, 0);
162 if (error)
163 break;
164 node = cursor->node->ondisk;
165 KKASSERT(cursor->index != node->count);
166 ++cursor->index;
167 continue;
168 }
169
170 /*
171 * Check internal or leaf element. Determine if the record
172 * at the cursor has gone beyond the end of our range.
173 * Remember that our key range is end-exclusive.
174 *
175 * Generally we recurse down through internal nodes. An
176 * internal node can only be returned if INCLUSTER is set
177 * and the node represents a cluster-push record. Internal
178 * elements do not contain create_tid/delete_tid information.
179 */
180 if (node->type == HAMMER_BTREE_TYPE_INTERNAL) {
181 elm = &node->elms[cursor->index];
182 r = hammer_btree_cmp(&cursor->key_end, &elm[0].base);
183 s = hammer_btree_cmp(&cursor->key_beg, &elm[1].base);
184 if (r < 0) {
185 error = ENOENT;
186 break;
187 }
188 if (r == 0 && (cursor->flags & HAMMER_CURSOR_END_INCLUSIVE) == 0) {
189 error = ENOENT;
190 break;
191 }
192 KKASSERT(s <= 0);
193 if ((cursor->flags & HAMMER_CURSOR_INCLUSTER) == 0 ||
194 elm->internal.rec_offset == 0) {
195 error = hammer_cursor_down(cursor);
196 if (error)
197 break;
198 KKASSERT(cursor->index == 0);
199 node = cursor->node->ondisk;
200 continue;
201 }
202 } else {
203 elm = &node->elms[cursor->index];
204 r = hammer_btree_cmp(&cursor->key_end, &elm->base);
205 if (r < 0) {
206 error = ENOENT;
207 break;
208 }
209 if (r == 0 && (cursor->flags & HAMMER_CURSOR_END_INCLUSIVE) == 0) {
210 error = ENOENT;
211 break;
212 }
213 if ((cursor->flags & HAMMER_CURSOR_ALLHISTORY) == 0 &&
214 hammer_btree_chkts(cursor->key_beg.create_tid,
215 &elm->base) != 0) {
216 ++cursor->index;
217 continue;
218 }
219 }
220
221 /*
222 * Return entry
223 */
224 return(0);
225 }
226 return(error);
227}
228
229/*
230 * Lookup cursor->key_beg. 0 is returned on success, ENOENT if the entry
231 * could not be found, and a fatal error otherwise.
232 *
233 * The cursor is suitably positioned for a deletion on success, and suitably
234 * positioned for an insertion on ENOENT.
235 *
236 * The cursor may begin anywhere; the search will traverse clusters in
237 * either direction to locate the requested element.
238 */
239int
240hammer_btree_lookup(hammer_cursor_t cursor)
241{
242 int error;
243
244 error = btree_search(cursor, 0);
245 if (error == 0 && cursor->flags)
246 error = hammer_btree_extract(cursor, cursor->flags);
247 return(error);
248}
249
250/*
251 * Execute the logic required to start an iteration. The first record
252 * located within the specified range is returned and iteration control
253 * flags are adjusted for successive hammer_btree_iterate() calls.
254 */
255int
256hammer_btree_first(hammer_cursor_t cursor)
257{
258 int error;
259
260 error = hammer_btree_lookup(cursor);
261 if (error == ENOENT) {
262 cursor->flags &= ~HAMMER_CURSOR_ATEDISK;
263 error = hammer_btree_iterate(cursor);
264 }
265 cursor->flags |= HAMMER_CURSOR_ATEDISK;
266 return(error);
267}
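/*
 * Illustrative usage sketch, for documentation only (not compiled): a
 * bounded forward scan built on hammer_btree_first(), hammer_btree_extract()
 * and hammer_btree_iterate().  The cursor setup and teardown helpers named
 * here (hammer_init_cursor_hmp(), hammer_done_cursor()) are assumed to be
 * provided by the cursor code and may not match the real initialization API.
 */
#if 0
static int
hammer_btree_scan_example(hammer_mount_t hmp,
			  hammer_base_elm_t beg, hammer_base_elm_t end)
{
	struct hammer_cursor cursor;
	int error;

	error = hammer_init_cursor_hmp(&cursor, hmp);	/* assumed helper */
	if (error)
		return(error);
	cursor.key_beg = *beg;	/* inclusive start of range */
	cursor.key_end = *end;	/* end of range, exclusive unless
				 * HAMMER_CURSOR_END_INCLUSIVE is set */

	error = hammer_btree_first(&cursor);
	while (error == 0) {
		error = hammer_btree_extract(&cursor, HAMMER_CURSOR_GET_RECORD);
		if (error)
			break;
		/* ... consume cursor.record here ... */
		error = hammer_btree_iterate(&cursor);
	}
	hammer_done_cursor(&cursor);			/* assumed helper */
	return((error == ENOENT) ? 0 : error);
}
#endif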
268
269/*
270 * Extract the record and/or data associated with the cursor's current
271 * position. Any prior record or data stored in the cursor is replaced.
272 * The cursor must be positioned at a leaf node.
273 *
274 * NOTE: Most extractions occur at the leaf of the B-Tree. The only
275 * extraction allowed at an internal element is at a cluster-push.
276 * Cluster-push elements have records but no data.
277 */
278int
279hammer_btree_extract(hammer_cursor_t cursor, int flags)
280{
281 hammer_node_ondisk_t node;
282 hammer_btree_elm_t elm;
283 hammer_cluster_t cluster;
284 u_int64_t buf_type;
285 int32_t cloff;
286 int32_t roff;
287 int error;
288
289 /*
290 * A cluster record type has no data reference, the information
291 * is stored directly in the record and B-Tree element.
292 *
293 * The case where the data reference resolves to the same buffer
294 * as the record reference must be handled.
295 */
296 node = cursor->node->ondisk;
297 elm = &node->elms[cursor->index];
298 cluster = cursor->node->cluster;
299 cursor->flags &= ~HAMMER_CURSOR_DATA_EMBEDDED;
300 cursor->data = NULL;
301 error = 0;
302
303 /*
304 * Internal elements can only be cluster pushes. A cluster push has
305 * no data reference.
306 */
307 if (node->type == HAMMER_BTREE_TYPE_INTERNAL) {
308 cloff = elm->leaf.rec_offset;
309 KKASSERT(cloff != 0);
310 cursor->record = hammer_bread(cluster, cloff,
311 HAMMER_FSBUF_RECORDS, &error,
312 &cursor->record_buffer);
313 return(error);
314 }
315
316 /*
317 * Leaf element.
318 */
319 if ((flags & HAMMER_CURSOR_GET_RECORD) && error == 0) {
320 cloff = elm->leaf.rec_offset;
321 cursor->record = hammer_bread(cluster, cloff,
322 HAMMER_FSBUF_RECORDS, &error,
323 &cursor->record_buffer);
324 } else {
325 cloff = 0;
326 }
327 if ((flags & HAMMER_CURSOR_GET_DATA) && error == 0) {
328 if ((cloff ^ elm->leaf.data_offset) & ~HAMMER_BUFMASK) {
329 /*
330 * The data is not in the same buffer as the last
331 * record we cached, but it could still be embedded
332 * in a record. Note that we may not have loaded the
333 * record's buffer above, depending on flags.
334 */
335 if ((elm->leaf.rec_offset ^ elm->leaf.data_offset) &
336 ~HAMMER_BUFMASK) {
337 if (elm->leaf.data_len & HAMMER_BUFMASK)
338 buf_type = HAMMER_FSBUF_DATA;
339 else
340 buf_type = 0; /* pure data buffer */
341 } else {
342 buf_type = HAMMER_FSBUF_RECORDS;
343 }
344 cursor->data = hammer_bread(cluster,
345 elm->leaf.data_offset,
346 buf_type, &error,
347 &cursor->data_buffer);
348 } else {
349 /*
350 * Data in same buffer as record. Note that we
351 * leave any existing data_buffer intact, even
352 * though we don't use it in this case, in case
353 * other records extracted during an iteration
354 * go back to it.
355 *
356 * The data must be embedded in the record for this
357 * case to be hit.
358 *
359 * Just assume the buffer type is correct.
360 */
361 cursor->data = (void *)
362 ((char *)cursor->record_buffer->ondisk +
363 (elm->leaf.data_offset & HAMMER_BUFMASK));
364 roff = (char *)cursor->data - (char *)cursor->record;
365 KKASSERT (roff >= 0 && roff < HAMMER_RECORD_SIZE);
366 cursor->flags |= HAMMER_CURSOR_DATA_EMBEDDED;
367 }
368 }
369 return(error);
370}
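/*
 * Illustrative sketch, for documentation only (not compiled): pulling both
 * the record and its data through an already-positioned cursor and checking
 * whether the data turned out to be embedded in the record's buffer.  Only
 * flags and cursor fields visible in this file are used.
 */
#if 0
static int
hammer_btree_extract_example(hammer_cursor_t cursor)
{
	int error;

	error = hammer_btree_extract(cursor, HAMMER_CURSOR_GET_RECORD |
					     HAMMER_CURSOR_GET_DATA);
	if (error == 0) {
		/* cursor->record and cursor->data are now valid */
		if (cursor->flags & HAMMER_CURSOR_DATA_EMBEDDED) {
			/* the data shares the record's buffer */
		}
	}
	return(error);
}
#endif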
371
372
373/*
374 * Insert a leaf element into the B-Tree at the current cursor position.
375 * The cursor is positioned such that the elements at and beyond the cursor
376 * are shifted to make room for the new record.
377 *
378 * The caller must call hammer_btree_lookup() with the HAMMER_CURSOR_INSERT
379 * flag set and that call must return ENOENT before this function can be
380 * called.
381 *
382 * ENOSPC is returned if there is no room to insert a new record.
383 */
384int
385hammer_btree_insert(hammer_cursor_t cursor, hammer_btree_elm_t elm)
386{
387 hammer_node_ondisk_t parent;
388 hammer_node_ondisk_t node;
389 int i;
390
391#if 0
392 /* HANDLED BY CALLER */
393 /*
394 * Issue a search to get our cursor at the right place. The search
395 * will get us to a leaf node.
396 *
397 * The search also does some setup for our insert, so there is always
398 * room in the leaf.
399 */
400 error = btree_search(cursor, HAMMER_CURSOR_INSERT);
401 if (error != ENOENT) {
402 if (error == 0)
403 error = EEXIST;
404 return (error);
405 }
406#endif
407
408 /*
409 * Insert the element at the leaf node and update the count in the
410 * parent. It is possible for parent to be NULL, indicating that
411 * the root of the B-Tree in the cluster is a leaf. It is also
412 * possible for the leaf to be empty.
413 *
414 * Remember that the right-hand boundary is not included in the
415 * count.
416 */
417 hammer_modify_node(cursor->node);
418 node = cursor->node->ondisk;
419 i = cursor->index;
420 KKASSERT(node->type == HAMMER_BTREE_TYPE_LEAF);
421 KKASSERT(node->count < HAMMER_BTREE_LEAF_ELMS);
422 if (i != node->count) {
423 bcopy(&node->elms[i], &node->elms[i+1],
424 (node->count - i) * sizeof(*elm));
425 }
426 node->elms[i] = *elm;
427 ++node->count;
428 hammer_modify_node_done(cursor->node);
429
430 /*
431 * Adjust the sub-tree count in the parent.  Note that the parent
432 * may be in a different cluster.
433 */
434 if (cursor->parent) {
435 hammer_modify_node(cursor->parent);
436 parent = cursor->parent->ondisk;
437 i = cursor->parent_index;
438 ++parent->elms[i].internal.subtree_count;
439 hammer_modify_node_done(cursor->parent);
440 KKASSERT(parent->elms[i].internal.subtree_count <= node->count);
441 }
442 return(0);
443}
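/*
 * Illustrative sketch, for documentation only (not compiled): the
 * lookup-then-insert protocol described above.  The caller is assumed to
 * have initialized the cursor; constructing the new element is elided.
 */
#if 0
static int
hammer_btree_insert_example(hammer_cursor_t cursor, hammer_btree_elm_t elm)
{
	int error;

	cursor->key_beg = elm->base;
	cursor->flags |= HAMMER_CURSOR_INSERT;
	error = hammer_btree_lookup(cursor);
	if (error == ENOENT)
		error = hammer_btree_insert(cursor, elm);
	else if (error == 0)
		error = EEXIST;
	return(error);
}
#endif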
444
445/*
446 * Delete a record from the B-Tree at the current cursor position.
447 * The cursor is positioned such that the current element is the one
448 * to be deleted.
449 *
450 * On return the cursor will be positioned after the deleted element and
451 * MAY point to an internal node. It will be suitable for the continuation
452 * of an iteration but not for an insertion or deletion.
453 *
454 * Deletions will attempt to partially rebalance the B-Tree in an upward
455 * direction.  It is possible to end up with empty leaves.  An empty internal
456 * node is impossible (worst case: it has one element pointing to an empty
457 * leaf).
458 */
459int
460hammer_btree_delete(hammer_cursor_t cursor)
461{
462 hammer_node_ondisk_t ondisk;
463 hammer_node_t node;
464 hammer_node_t parent;
465 hammer_btree_elm_t elm;
466 int error;
467 int i;
468
469#if 0
470 /* HANDLED BY CALLER */
471 /*
472 * Locate the leaf element to delete. The search is also responsible
473 * for doing some of the rebalancing work on its way down.
474 */
475 error = btree_search(cursor, HAMMER_CURSOR_DELETE);
476 if (error)
477 return (error);
478#endif
479
480 /*
481 * Delete the element from the leaf node.
482 *
483 * Remember that leaf nodes do not have boundaries.
484 */
485 node = cursor->node;
486 ondisk = node->ondisk;
487 i = cursor->index;
488
489 KKASSERT(ondisk->type == HAMMER_BTREE_TYPE_LEAF);
490 hammer_modify_node(node);
491 if (i + 1 != ondisk->count) {
492 bcopy(&ondisk->elms[i+1], &ondisk->elms[i],
493 (ondisk->count - i - 1) * sizeof(ondisk->elms[0]));
494 }
495 --ondisk->count;
496 hammer_modify_node_done(node);
497 if (cursor->parent != NULL) {
498 /*
499 * Adjust parent's notion of the leaf's count. subtree_count
500 * is only approximate; it is allowed to be too small but
501 * never allowed to be too large. Make sure we don't drop
502 * the count below 0.
503 */
504 parent = cursor->parent;
505 hammer_modify_node(parent);
506 elm = &parent->ondisk->elms[cursor->parent_index];
507 if (elm->internal.subtree_count)
508 --elm->internal.subtree_count;
509 hammer_modify_node_done(parent);
510 KKASSERT(elm->internal.subtree_count <= ondisk->count);
511 }
512
513 /*
514 * It is possible, but not desirable, to stop here.  If the element
515 * count drops to 0 (which is allowed for a leaf), try to recursively
516 * remove the B-Tree node.
517 *
518 * XXX rebalancing calls would go here too.
519 *
520 * This may reposition the cursor at one of the parents of the
521 * current node.
522 */
523 if (ondisk->count == 0) {
524 error = btree_remove(cursor);
525 if (error == EAGAIN)
526 error = 0;
527 } else {
528 error = 0;
529 }
530 return(error);
531}
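/*
 * Illustrative sketch, for documentation only (not compiled): deleting the
 * record at cursor->key_beg using the lookup/delete protocol described
 * above.  The cursor is assumed to have been initialized by the caller.
 */
#if 0
static int
hammer_btree_delete_example(hammer_cursor_t cursor)
{
	int error;

	error = hammer_btree_lookup(cursor);
	if (error == 0)
		error = hammer_btree_delete(cursor);
	return(error);
}
#endif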
532
533/*
534 * PRIMARY B-TREE SEARCH SUPPORT PROCEDURE
535 *
536 * Search a cluster's B-Tree for cursor->key_beg, return the matching node.
537 *
538 * The search can begin ANYWHERE in the B-Tree. As a first step the search
539 * iterates up the tree as necessary to properly position itself prior to
540 * actually doing the search.
541 *
542 * INSERTIONS: The search will split full nodes and leaves on its way down
543 * and guarantee that the leaf it ends up on is not full.  If we run out
544 * of space the search continues to the leaf (to position the cursor for
545 * the spike), but ENOSPC is returned.
546 *
547 * DELETIONS: The search will rebalance the tree on its way down. XXX
548 *
549 * The search is only guaranteed to end up on a leaf if an error code of 0
550 * is returned, or if inserting and an error code of ENOENT is returned.
551 * Otherwise it can stop at an internal node. On success a search returns
552 * a leaf node unless INCLUSTER is set and the search located a cluster push
553 * node (which is an internal node).
554 */
555static
556int
557btree_search(hammer_cursor_t cursor, int flags)
558{
559 hammer_node_ondisk_t node;
560 hammer_cluster_t cluster;
561 int error;
562 int enospc = 0;
563 int i;
564 int r;
565
566 flags |= cursor->flags;
567
568 /*
569 * Move our cursor up the tree until we find a node whose range covers
570 * the key we are trying to locate. This may move us between
571 * clusters.
572 *
573 * The left bound is inclusive, the right bound is non-inclusive.
574 * It is ok to cursor up too far, as can happen when cursoring
575 * across a cluster boundary.
576 *
577 * First see if we can skip the whole cluster. hammer_cursor_up()
578 * handles both cases but this way we don't check the cluster
579 * bounds when going up the tree within a cluster.
580 *
581 * NOTE: If INCLUSTER is set and we are at the root of the cluster,
582 * hammer_cursor_up() will return ENOENT.
583 */
584 cluster = cursor->node->cluster;
585 while (
586 hammer_btree_cmp(&cursor->key_beg, &cluster->clu_btree_beg) < 0 ||
587 hammer_btree_cmp(&cursor->key_beg, &cluster->clu_btree_end) >= 0) {
588 error = hammer_cursor_toroot(cursor);
589 if (error)
590 goto done;
591 error = hammer_cursor_up(cursor, 0);
592 if (error)
593 goto done;
594 cluster = cursor->node->cluster;
595 }
596
597 /*
598 * Deal with normal cursoring within a cluster. The right bound
599 * is non-inclusive. That is, the bounds form a separator.
600 */
601 while (hammer_btree_cmp(&cursor->key_beg, cursor->left_bound) < 0 ||
602 hammer_btree_cmp(&cursor->key_beg, cursor->right_bound) >= 0) {
603 error = hammer_cursor_up(cursor, 0);
604 if (error)
605 goto done;
606 }
607
608 /*
609 * We better have ended up with a node somewhere, and our second
610 * while loop had better not have traversed up a cluster.
611 */
612 KKASSERT(cursor->node != NULL && cursor->node->cluster == cluster);
613
614 /*
615 * If we are inserting we can't start at a full node if the parent
616 * is also full (because there is no way to split the node),
617 * so continue running up the tree until we hit the root of the
618 * root cluster or until the requirement is satisfied.
619 *
620 * NOTE: These cursor-up's CAN continue to cross cluster boundaries.
621 *
622 * XXX as an optimization it should be possible to unbalance the tree
623 * and stop at the root of the current cluster.
624 */
625 while (flags & HAMMER_CURSOR_INSERT) {
626 if (btree_node_is_full(cursor->node->ondisk) == 0)
627 break;
628 if (cursor->parent == NULL)
629 break;
630 if (cursor->parent->ondisk->count != HAMMER_BTREE_INT_ELMS)
631 break;
632 error = hammer_cursor_up(cursor, 0);
633 /* cluster and node may now become stale */
634 if (error)
635 goto done;
636 }
637 /* cluster = cursor->node->cluster; not needed until next cluster = */
638
639#if 0
640 /*
641 * If we are deleting we can't start at an internal node with only
642 * one element unless it is root, because all of our code assumes
643 * that internal nodes will never be empty. Just do this generally
644 * for both leaf and internal nodes to get better balance.
645 *
646 * This handles the case where the cursor is sitting at a leaf and
647 * either the leaf or parent contain an insufficient number of
648 * elements.
649 *
650 * NOTE: These cursor-up's CAN continue to cross cluster boundaries.
651 *
652 * XXX NOTE: Iterations may not set this flag anyway.
653 */
654 while (flags & HAMMER_CURSOR_DELETE) {
655 if (cursor->node->ondisk->count > 1)
656 break;
657 if (cursor->parent == NULL)
658 break;
659 KKASSERT(cursor->node->ondisk->count != 0);
660 error = hammer_cursor_up(cursor, 0);
661 /* cluster and node may now become stale */
662 if (error)
663 goto done;
664 }
665#endif
666
667/*new_cluster:*/
668 /*
669 * Push down through internal nodes to locate the requested key.
670 */
671 cluster = cursor->node->cluster;
672 node = cursor->node->ondisk;
673 while (node->type == HAMMER_BTREE_TYPE_INTERNAL) {
674#if 0
675 /*
676 * If we are at the root node and deleting, try to collapse
677 * all of the root's children into the root. This is the
678 * only point where tree depth is reduced.
679 *
680 * XXX NOTE: Iterations may not set this flag anyway.
681 */
682 if ((flags & HAMMER_CURSOR_DELETE) && cursor->parent == NULL) {
683 error = btree_collapse(cursor);
684 /* node becomes stale after call */
685 /* XXX ENOSPC */
686 if (error)
687 goto done;
688 }
689 node = cursor->node->ondisk;
690#endif
691 /*
692 * Scan the node to find the subtree index to push down into.
693 * We go one-past, then back-up.
694 */
695 for (i = 0; i < node->count; ++i) {
696 r = hammer_btree_cmp(&cursor->key_beg,
697 &node->elms[i].base);
698 if (r < 0)
699 break;
700 }
701
702 /*
703 * It is possible for the search to terminate at i == 0,
704 * which is to the LEFT of the LEFT boundary but to the RIGHT
705 * of the parent's boundary on the left of the sub-tree
706 * element. This case can occur due to deletions (see
707 * btree_remove()).
708 *
709 * When this case occurs an ENOENT return is guaranteed but
710 * if inserting we must still terminate at a leaf. The
711 * solution is to make the node's left boundary inherit the
712 * boundary stored in the parent.
713 *
714 * When doing this inheritance some fields in 'base' are
715 * actually related to the internal element's child
716 * specification and not to the key. These have to be
717 * retained.
718 *
719 * If we terminate at i == count it is still possible to
720 * be to the RIGHT of the RIGHT boundary but still to the
721 * LEFT of the parent's RIGHT boundary. The solution is to
722 * adjust the RIGHT boundary to match the parent. This
723 * case can occur due to deletions (see btree_remove()).
724 */
725 if (i == 0) {
726 u_int8_t save;
727
728 if ((flags & HAMMER_CURSOR_INSERT) == 0) {
729 cursor->index = 0;
730 return(ENOENT);
731 }
732 hammer_modify_node(cursor->node);
733 save = node->elms[0].subtree_type;
734 node->elms[0].base = *cursor->left_bound;
735 node->elms[0].subtree_type = save;
736 hammer_modify_node_done(cursor->node);
737 } else if (i == node->count) {
738 /*
739 * Terminate early if not inserting and the key is
740 * beyond the uncorrected right hand boundary. The
741 * index must be PAST the last element to prevent an
742 * iteration from returning elements to the left of
743 * key_beg.
744 */
745 if ((flags & HAMMER_CURSOR_INSERT) == 0 &&
746 hammer_btree_cmp(&cursor->key_beg,
747 &node->elms[i].base) >= 0
748 ) {
749 cursor->index = i;
750 return(ENOENT);
751 }
752
753 /*
754 * Correct a right-hand boundary mismatch. The push
755 * index is the last element (i-1).
756 */
757 if (hammer_btree_cmp(&node->elms[i].base,
758 cursor->right_bound) != 0) {
759 hammer_modify_node(cursor->node);
760 node->elms[i].base = *cursor->right_bound;
761 hammer_modify_node_done(cursor->node);
762 }
763 --i;
764 } else {
765 /*
766 * The push-down index is now i - 1.
767 */
768 --i;
769 }
770 cursor->index = i;
771
772 /*
773 * Handle insertion and deletion requirements.
774 *
775 * If inserting, split full nodes.  The split code will
776 * adjust cursor->node and cursor->index if the current
777 * index winds up in the new node.
778 */
779 if (flags & HAMMER_CURSOR_INSERT) {
780 if (node->count == HAMMER_BTREE_INT_ELMS) {
781 error = btree_split_internal(cursor);
782 if (error) {
783 if (error != ENOSPC)
784 goto done;
785 enospc = 1;
786 flags &= ~HAMMER_CURSOR_INSERT;
787 }
788 /*
789 * reload stale pointers
790 */
791 i = cursor->index;
792 node = cursor->node->ondisk;
793 }
794 }
795
796#if 0
797 /*
798 * If deleting rebalance - do not allow the child to have
799 * just one element or we will not be able to delete it.
800 *
801 * Neither internal nor leaf nodes (except a root-leaf) are
802 * allowed to drop to 0 elements. (XXX - well, leaf nodes
803 * can at the moment).
804 *
805 * Our separators may have been reorganized after rebalancing,
806 * so we have to pop back up and rescan.
807 *
808 * XXX test for subtree_count < maxelms / 2, minus 1 or 2
809 * for hysteresis?
810 *
811 * XXX NOTE: Iterations may not set this flag anyway.
812 */
813 if (flags & HAMMER_CURSOR_DELETE) {
814 if (node->elms[i].internal.subtree_count <= 1) {
815 error = btree_rebalance(cursor);
816 if (error)
817 goto done;
818 /* cursor->index is invalid after call */
819 goto new_cluster;
820 }
821 }
822#endif
823 /*
824 * Cluster pushes are done with internal elements. If this
825 * is a cluster push (rec_offset != 0), and INCLUSTER is set,
826 * we stop here.
827 *
828 * However, because this is an internal node we have to
829 * determine whether key_beg is within its range and return
830 * 0 or ENOENT appropriately.
831 */
832 if ((flags & HAMMER_CURSOR_INCLUSTER) &&
833 node->elms[i].internal.rec_offset) {
834 r = hammer_btree_cmp(&cursor->key_beg,
835 &node->elms[i+1].base);
836 error = (r < 0) ? 0 : (enospc ? ENOSPC : ENOENT);
837 goto done;
838 }
839
840 /*
841 * Push down (push into new node, existing node becomes
842 * the parent) and continue the search.
843 */
844 error = hammer_cursor_down(cursor);
845 /* node and cluster become stale */
846 if (error)
847 goto done;
848 node = cursor->node->ondisk;
849 cluster = cursor->node->cluster;
850 }
851
852 /*
853 * We are at a leaf, do a linear search of the key array.
854 *
855 * On success the index is set to the matching element and 0
856 * is returned.
857 *
858 * On failure the index is set to the insertion point and ENOENT
859 * is returned.
860 *
861 * Boundaries are not stored in leaf nodes, so the index can wind
862 * up to the left of element 0 (index == 0) or past the end of
863 * the array (index == node->count).
864 */
865 KKASSERT(node->count <= HAMMER_BTREE_LEAF_ELMS);
866
867 for (i = 0; i < node->count; ++i) {
868 r = hammer_btree_cmp(&cursor->key_beg, &node->elms[i].base);
869
870 /*
871 * Stop if we've flipped past key_beg
872 */
873 if (r < 0)
874 break;
875
876 /*
877 * Return an exact match
878 */
879 if (r == 0) {
880 cursor->index = i;
881 error = 0;
882 goto done;
883 }
884 }
885
886 /*
887 * No exact match was found, i is now at the insertion point.
888 *
889 * If inserting, split a full leaf before returning.  This
890 * may have the side effect of adjusting cursor->node and
891 * cursor->index.
892 */
893 cursor->index = i;
894 if ((flags & HAMMER_CURSOR_INSERT) &&
895 node->count == HAMMER_BTREE_LEAF_ELMS) {
896 error = btree_split_leaf(cursor);
897 if (error) {
898 if (error != ENOSPC)
899 goto done;
900 enospc = 1;
901 flags &= ~HAMMER_CURSOR_INSERT;
902 }
903 /*
904 * reload stale pointers
905 */
906 /* NOT USED
907 i = cursor->index;
908 node = &cursor->node->internal;
909 */
910 }
911
912 /*
913 * We reached a leaf but did not find the key we were looking for.
914 * If this is an insert we will be properly positioned for an insert
915 * (ENOENT) or spike (ENOSPC) operation.
916 */
917 error = enospc ? ENOSPC : ENOENT;
918done:
919 return(error);
920}
921
922
923/************************************************************************
924 * SPLITTING AND MERGING *
925 ************************************************************************
926 *
927 * These routines do all the dirty work required to split and merge nodes.
928 */
929
930/*
931 * Split an internal node into two nodes and move the separator at the split
932 * point to the parent. Note that the parent's parent's element pointing
933 * to our parent will have an incorrect subtree_count (we don't update it).
934 * It will be low, which is ok.
935 *
936 * (cursor->node, cursor->index) indicates the element the caller intends
937 * to push into. We will adjust node and index if that element winds
938 * up in the split node.
939 *
940 * If we are at the root of a cluster a new root must be created with two
941 * elements, one pointing to the original root and one pointing to the
942 * newly allocated split node.
943 *
944 * NOTE! Being at the root of a cluster is different from being at the
945 * root of the root cluster. cursor->parent will not be NULL and
946 * cursor->node->ondisk.parent must be tested against 0. Theoretically
947 * we could propagate the algorithm into the parent and deal with multiple
948 * 'roots' in the cluster header, but it's easier not to.
949 */
950static
951int
952btree_split_internal(hammer_cursor_t cursor)
953{
954 hammer_node_ondisk_t ondisk;
955 hammer_node_t node;
956 hammer_node_t parent;
957 hammer_node_t new_node;
958 hammer_btree_elm_t elm;
959 hammer_btree_elm_t parent_elm;
960 int parent_index;
961 int made_root;
962 int split;
963 int error;
964 int i;
965 const int esize = sizeof(*elm);
966
967 /*
968 * We are splitting but elms[split] will be promoted to the parent,
969 * leaving the right hand node with one less element. If the
970 * insertion point will be on the left-hand side, adjust the split
971 * point to give the right hand side one additional element.
972 */
973 node = cursor->node;
974 ondisk = node->ondisk;
975 split = (ondisk->count + 1) / 2;
976 if (cursor->index <= split)
977 --split;
978 error = 0;
979
980 /*
981 * If we are at the root of the cluster, create a new root node with
982 * 1 element and split normally. Avoid making major modifications
983 * until we know the whole operation will work.
984 *
985 * The root of the cluster is different from the root of the root
986 * cluster. Use the node's on-disk structure's parent offset to
987 * detect the case.
988 */
989 if (ondisk->parent == 0) {
990 parent = hammer_alloc_btree(node->cluster, &error);
991 if (parent == NULL)
992 return(error);
993 hammer_lock_ex(&parent->lock);
994 hammer_modify_node(parent);
995 ondisk = parent->ondisk;
996 ondisk->count = 1;
997 ondisk->parent = 0;
998 ondisk->type = HAMMER_BTREE_TYPE_INTERNAL;
999 ondisk->elms[0].base = node->cluster->clu_btree_beg;
1000 ondisk->elms[0].internal.subtree_type = node->ondisk->type;
1001 ondisk->elms[0].internal.subtree_offset = node->node_offset;
1002 ondisk->elms[1].base = node->cluster->clu_btree_end;
1003 made_root = 1;
1004 parent_index = 0; /* index of current node in parent */
1005 hammer_modify_node_done(parent);
1006 } else {
1007 made_root = 0;
1008 parent = cursor->parent;
1009 parent_index = cursor->parent_index;
1010 KKASSERT(parent->cluster == node->cluster);
1011 }
1012
1013 /*
1014 * Split node into new_node at the split point.
1015 *
1016 * B O O O P N N B <-- P = node->elms[split]
1017 * 0 1 2 3 4 5 6 <-- subtree indices
1018 *
1019 * x x P x x
1020 * s S S s
1021 * / \
1022 * B O O O B B N N B <--- inner boundary points are 'P'
1023 * 0 1 2 3 4 5 6
1024 *
1025 */
1026 new_node = hammer_alloc_btree(node->cluster, &error);
1027 if (new_node == NULL) {
1028 if (made_root) {
1029 hammer_unlock(&parent->lock);
1030 hammer_free_btree(parent->cluster, parent->node_offset);
1031 hammer_rel_node(parent);
1032 }
1033 return(error);
1034 }
1035 hammer_lock_ex(&new_node->lock);
1036
1037 /*
1038 * Create the new node. P becomes the left-hand boundary in the
1039 * new node. Copy the right-hand boundary as well.
1040 *
1041 * elm is the new separator.
1042 */
1043 hammer_modify_node(new_node);
1044 hammer_modify_node(node);
1045 ondisk = node->ondisk;
1046 elm = &ondisk->elms[split];
1047 bcopy(elm, &new_node->ondisk->elms[0],
1048 (ondisk->count - split + 1) * esize);
1049 new_node->ondisk->count = ondisk->count - split;
1050 new_node->ondisk->parent = parent->node_offset;
1051 new_node->ondisk->type = HAMMER_BTREE_TYPE_INTERNAL;
1052 KKASSERT(ondisk->type == new_node->ondisk->type);
1053
1054 /*
1055 * Cleanup the original node. P becomes the new boundary, its
1056 * subtree_offset was moved to the new node. If we had created
1057 * a new root its parent pointer may have changed.
1058 */
1059 elm->internal.subtree_offset = 0;
1060 elm->internal.rec_offset = 0;
1061 ondisk->count = split;
1062
1063 /*
1064 * Insert the separator into the parent, fixup the parent's
1065 * reference to the original node, and reference the new node.
1066 * The separator is P.
1067 *
1068 * Remember that base.count does not include the right-hand boundary.
1069 */
1070 hammer_modify_node(parent);
1071 ondisk = parent->ondisk;
1072 KKASSERT(ondisk->count != HAMMER_BTREE_INT_ELMS);
1073 ondisk->elms[parent_index].internal.subtree_count = split;
1074 parent_elm = &ondisk->elms[parent_index+1];
1075 bcopy(parent_elm, parent_elm + 1,
1076 (ondisk->count - parent_index) * esize);
1077 parent_elm->internal.base = elm->base; /* separator P */
1078 parent_elm->internal.subtree_offset = new_node->node_offset;
1079 parent_elm->internal.subtree_count = new_node->ondisk->count;
1080 parent_elm->internal.subtree_type = new_node->ondisk->type;
1081 parent_elm->internal.subtree_vol_no = 0;
1082 parent_elm->internal.rec_offset = 0;
1083 ++ondisk->count;
1084 hammer_modify_node_done(parent);
1085
1086 /*
1087 * The children of new_node need their parent pointer set to new_node.
1088 */
1089 for (i = 0; i < new_node->ondisk->count; ++i) {
1090 elm = &new_node->ondisk->elms[i];
1091 error = btree_set_parent(new_node, elm);
1092 if (error) {
1093 panic("btree_split_internal: btree-fixup problem");
1094 }
1095 }
1096
1097 /*
1098 * The cluster's root pointer may have to be updated.
1099 */
1100 if (made_root) {
1101 hammer_modify_cluster(node->cluster);
1102 node->cluster->ondisk->clu_btree_root = parent->node_offset;
1103 hammer_modify_cluster_done(node->cluster);
1104 node->ondisk->parent = parent->node_offset;
1105 if (cursor->parent) {
1106 hammer_unlock(&cursor->parent->lock);
1107 hammer_rel_node(cursor->parent);
1108 }
1109 cursor->parent = parent; /* lock'd and ref'd */
1110 }
1111 hammer_modify_node_done(new_node);
1112 hammer_modify_node_done(node);
1113
1114
1115 /*
1116 * Ok, now adjust the cursor depending on which element the original
1117 * index was pointing at. If we are >= the split point the push node
1118 * is now in the new node.
1119 *
1120 * NOTE: If we are at the split point itself we cannot stay with the
1121 * original node because the push index will point at the right-hand
1122 * boundary, which is illegal.
1123 *
1124 * NOTE: The cursor's parent or parent_index must be adjusted for
1125 * the case where a new parent (new root) was created, and the case
1126 * where the cursor is now pointing at the split node.
1127 */
1128 if (cursor->index >= split) {
1129 cursor->parent_index = parent_index + 1;
1130 cursor->index -= split;
1131 hammer_unlock(&cursor->node->lock);
1132 hammer_rel_node(cursor->node);
1133 cursor->node = new_node; /* locked and ref'd */
1134 } else {
1135 cursor->parent_index = parent_index;
1136 hammer_unlock(&new_node->lock);
1137 hammer_rel_node(new_node);
1138 }
1139
1140 /*
1141 * Fixup left and right bounds
1142 */
1143 parent_elm = &parent->ondisk->elms[cursor->parent_index];
1144 cursor->left_bound = &parent_elm[0].internal.base;
1145 cursor->right_bound = &parent_elm[1].internal.base;
1146
1147 return (0);
1148}
1149
1150/*
1151 * Same as the above, but splits a full leaf node.
1152 */
1153static
1154int
1155btree_split_leaf(hammer_cursor_t cursor)
1156{
1157 hammer_node_ondisk_t ondisk;
1158 hammer_node_t parent;
1159 hammer_node_t leaf;
1160 hammer_node_t new_leaf;
1161 hammer_btree_elm_t elm;
1162 hammer_btree_elm_t parent_elm;
1163 int parent_index;
1164 int made_root;
1165 int split;
1166 int error;
1167 const size_t esize = sizeof(*elm);
1168
1169 /*
1170 * Calculate the split point. If the insertion point will be on
1171 * the left-hand side, adjust the split point to give the right
1172 * hand side one additional element.
1173 */
1174 leaf = cursor->node;
1175 ondisk = leaf->ondisk;
1176 split = (ondisk->count + 1) / 2;
1177 if (cursor->index <= split)
1178 --split;
1179 error = 0;
1180
1181 /*
1182 * If we are at the root of the tree, create a new root node with
1183 * 1 element and split normally. Avoid making major modifications
1184 * until we know the whole operation will work.
1185 */
1186 if (ondisk->parent == 0) {
1187 parent = hammer_alloc_btree(leaf->cluster, &error);
1188 if (parent == NULL)
1189 return(error);
1190 hammer_lock_ex(&parent->lock);
1191 hammer_modify_node(parent);
1192 ondisk = parent->ondisk;
1193 ondisk->count = 1;
1194 ondisk->parent = 0;
1195 ondisk->type = HAMMER_BTREE_TYPE_INTERNAL;
1196 ondisk->elms[0].base = leaf->cluster->clu_btree_beg;
1197 ondisk->elms[0].internal.subtree_type = leaf->ondisk->type;
1198 ondisk->elms[0].internal.subtree_offset = leaf->node_offset;
1199 ondisk->elms[1].base = leaf->cluster->clu_btree_end;
1200 hammer_modify_node_done(parent);
1201 made_root = 1;
1202 parent_index = 0; /* insertion point in parent */
1203 } else {
1204 made_root = 0;
1205 parent = cursor->parent;
1206 parent_index = cursor->parent_index;
1207 KKASSERT(parent->cluster == leaf->cluster);
1208 }
1209
1210 /*
1211 * Split leaf into new_leaf at the split point. Select a separator
1212 * value in-between the two leaves but with a bent towards the right
1213 * leaf since comparisons use an 'elm >= separator' inequality.
1214 *
1215 * L L L L L L L L
1216 *
1217 * x x P x x
1218 * s S S s
1219 * / \
1220 * L L L L L L L L
1221 */
1222 new_leaf = hammer_alloc_btree(leaf->cluster, &error);
1223 if (new_leaf == NULL) {
1224 if (made_root) {
1225 hammer_unlock(&parent->lock);
1226 hammer_free_btree(parent->cluster, parent->node_offset);
1227 hammer_rel_node(parent);
1228 }
1229 return(error);
1230 }
1231 hammer_lock_ex(&new_leaf->lock);
1232
1233 /*
1234 * Create the new node.  P becomes the left-hand boundary in the
1235 * new node. Copy the right-hand boundary as well.
1236 */
1237 hammer_modify_node(leaf);
1238 hammer_modify_node(new_leaf);
1239 ondisk = leaf->ondisk;
1240 elm = &ondisk->elms[split];
1241 bcopy(elm, &new_leaf->ondisk->elms[0], (ondisk->count - split) * esize);
1242 new_leaf->ondisk->count = ondisk->count - split;
1243 new_leaf->ondisk->parent = parent->node_offset;
1244 new_leaf->ondisk->type = HAMMER_BTREE_TYPE_LEAF;
1245 KKASSERT(ondisk->type == new_leaf->ondisk->type);
1246
1247 /*
1248 * Cleanup the original node. Because this is a leaf node and
1249 * leaf nodes do not have a right-hand boundary, there
1250 * aren't any special edge cases to clean up. We just fixup the
1251 * count.
1252 */
1253 ondisk->count = split;
1254
1255 /*
1256 * Insert the separator into the parent, fixup the parent's
1257 * reference to the original node, and reference the new node.
1258 * The separator is P.
1259 *
1260 * Remember that base.count does not include the right-hand boundary.
1261 * We are copying parent_index+1 to parent_index+2, not +0 to +1.
1262 */
1263 hammer_modify_node(parent);
1264 ondisk = parent->ondisk;
1265 KKASSERT(ondisk->count != HAMMER_BTREE_INT_ELMS);
1266 ondisk->elms[parent_index].internal.subtree_count = split;
1267 parent_elm = &ondisk->elms[parent_index+1];
1268 bcopy(parent_elm, parent_elm + 1,
1269 (ondisk->count - parent_index) * esize);
1270 hammer_make_separator(&elm[-1].base, &elm[0].base, &parent_elm->base);
1271 parent_elm->internal.subtree_offset = new_leaf->node_offset;
1272 parent_elm->internal.subtree_count = new_leaf->ondisk->count;
1273 parent_elm->internal.subtree_type = new_leaf->ondisk->type;
1274 parent_elm->internal.subtree_vol_no = 0;
1275 parent_elm->internal.rec_offset = 0;
1276 ++ondisk->count;
1277 hammer_modify_node_done(parent);
1278
1279 /*
1280 * The cluster's root pointer may have to be updated.
1281 */
1282 if (made_root) {
1283 hammer_modify_cluster(leaf->cluster);
1284 leaf->cluster->ondisk->clu_btree_root = parent->node_offset;
1285 hammer_modify_cluster_done(leaf->cluster);
1286 leaf->ondisk->parent = parent->node_offset;
1287 if (cursor->parent) {
1288 hammer_unlock(&cursor->parent->lock);
1289 hammer_rel_node(cursor->parent);
1290 }
1291 cursor->parent = parent; /* lock'd and ref'd */
1292 }
1293 hammer_modify_node_done(leaf);
1294 hammer_modify_node_done(new_leaf);
1295
1296 /*
1297 * Ok, now adjust the cursor depending on which element the original
1298 * index was pointing at. If we are >= the split point the push node
1299 * is now in the new node.
1300 *
1301 * NOTE: If we are at the split point itself we cannot stay with the
1302 * original node because the push index will point at the right-hand
1303 * boundary, which is illegal.
1304 */
1305 if (cursor->index >= split) {
1306 cursor->parent_index = parent_index + 1;
1307 cursor->index -= split;
1308 hammer_unlock(&cursor->node->lock);
1309 hammer_rel_node(cursor->node);
1310 cursor->node = new_leaf;
1311 } else {
1312 cursor->parent_index = parent_index;
1313 hammer_unlock(&new_leaf->lock);
1314 hammer_rel_node(new_leaf);
1315 }
1316
1317 /*
1318 * Fixup left and right bounds
1319 */
1320 parent_elm = &parent->ondisk->elms[cursor->parent_index];
1321 cursor->left_bound = &parent_elm[0].internal.base;
1322 cursor->right_bound = &parent_elm[1].internal.base;
1323
1324 return (0);
1325}
1326
1327/*
1328 * Attempt to remove the empty B-Tree node at (cursor->node). Returns 0
1329 * on success, EAGAIN if we could not acquire the necessary locks, or some
1330 * other error.
1331 *
1332 * On return the cursor may end up pointing at an internal node, suitable
1333 * for further iteration but not for insertion or deletion.
1334 *
1335 * cursor->node may be an internal node or a leaf node.
1336 */
1337int
1338btree_remove(hammer_cursor_t cursor)
1339{
1340 hammer_node_ondisk_t ondisk;
1341 hammer_btree_elm_t elm;
1342 hammer_node_t save;
1343 hammer_node_t node;
1344 hammer_node_t parent;
1345 int error;
1346 int i;
1347
1348 /*
1349 * If we are at the root of the root cluster there is nothing to
1350 * remove, but an internal node at the root of a cluster is not
1351 * allowed to be empty so convert it to a leaf node.
1352 */
1353 if (cursor->parent == NULL) {
1354 hammer_modify_node(cursor->node);
1355 ondisk = cursor->node->ondisk;
1356 KKASSERT(ondisk->parent == 0);
1357 ondisk->type = HAMMER_BTREE_TYPE_LEAF;
1358 ondisk->count = 0;
1359 hammer_modify_node_done(cursor->node);
1360 kprintf("EMPTY ROOT OF ROOT CLUSTER -> LEAF\n");
1361 return(0);
1362 }
1363
1364 /*
1365 * Retain a reference to cursor->node, ex-lock again (2 locks now)
1366 * so we do not lose the lock when we cursor around.
1367 */
1368 save = cursor->node;
1369 hammer_ref_node(save);
1370 hammer_lock_ex(&save->lock);
1371
1372 /*
1373 * We need to be able to lock the parent of the parent. Do this
1374 * non-blocking and return EAGAIN if the lock cannot be acquired.
1375 * Non-blocking is required in order to avoid a deadlock.
1376 *
1377 * After we cursor up, parent is moved to node and the new parent
1378 * is the parent of the parent.
1379 */
1380 error = hammer_cursor_up(cursor, 1);
1381 if (error) {
1382 kprintf("BTREE_REMOVE: Cannot lock parent\n");
1383 hammer_unlock(&save->lock);
1384 hammer_rel_node(save);
1385 return(error);
1386 }
1387
1388 /*
1389 * At this point we want to remove the element at (node, index),
1390 * which is now the (original) parent pointing to the saved node.
1391 * Removing the element allows us to then free the node it was
1392 * pointing to.
1393 *
1394 * However, an internal node is not allowed to have 0 elements, so
1395 * if the count would drop to 0 we have to recurse. It is possible
1396 * for the recursion to fail.
1397 *
1398 * NOTE: The cursor is in an indeterminate position after recursing,
1399 * but will still be suitable for an iteration.
1400 */
1401 node = cursor->node;
1402 KKASSERT(node->ondisk->count > 0);
1403 if (node->ondisk->count == 1) {
1404 error = btree_remove(cursor);
1405 if (error == 0) {
1406 kprintf("BTREE_REMOVE: Successful!\n");
1407 hammer_flush_node(save);
1408 hammer_free_btree(save->cluster, save->node_offset);
1409 } else {
1410 kprintf("BTREE_REMOVE: Recursion failed %d\n", error);
1411 }
1412 hammer_unlock(&save->lock);
1413 hammer_rel_node(save);
1414 return(error);
1415 }
1416
1417 /*
1418 * Remove the element at (node, index) and adjust the parent's
1419 * subtree_count.
1420 *
1421 * NOTE! If removing element 0 an internal node's left-hand boundary
1422 * will no longer match its parent. If removing a mid-element the
1423 * boundary will no longer match a child's left hand or right hand
1424 * boundary.
1425 *
1426 * BxBxBxB remove a (x[0]): internal node's left-hand
1427 * | | | boundary no longer matches
1428 * a b c parent.
1429 *
1430 * remove b (x[1]): a's right hand boundary no
1431 * longer matches parent.
1432 *
1433 * remove c (x[2]): b's right hand boundary no
1434 * longer matches parent.
1435 *
1436 * These cases are corrected in btree_search().
1437 */
1438#if 0
1439 kprintf("BTREE_REMOVE: Removing element %d\n", cursor->index);
1440#endif
1441 hammer_modify_node(node);
1442 ondisk = node->ondisk;
1443 i = cursor->index;
1444 bcopy(&ondisk->elms[i+1], &ondisk->elms[i],
1445 (ondisk->count - i) * sizeof(ondisk->elms[0]));
1446 --ondisk->count;
1447 hammer_modify_node_done(node);
1448
1449 /*
1450 * Adjust the parent-parent's (now parent) reference to the parent
1451 * (now node).
1452 */
1453 if ((parent = cursor->parent) != NULL) {
1454 elm = &parent->ondisk->elms[cursor->parent_index];
1455 if (elm->internal.subtree_count != ondisk->count) {
1456 hammer_modify_node(parent);
1457 elm->internal.subtree_count = ondisk->count;
1458 hammer_modify_node_done(parent);
1459 }
1460 if (elm->subtree_type != HAMMER_BTREE_TYPE_CLUSTER &&
1461 elm->subtree_type != ondisk->type) {
1462 hammer_modify_node(parent);
1463 elm->subtree_type = ondisk->type;
1464 hammer_modify_node_done(parent);
1465 }
1466 }
1467
1468 /*
1469 * Free the saved node.
1470 */
1471 hammer_flush_node(save);
1472 hammer_free_btree(save->cluster, save->node_offset);
1473 hammer_unlock(&save->lock);
1474 hammer_rel_node(save);
1475 return(0);
1476}
1477
1478/*
1479 * The child represented by the element in the internal node (node) needs
1480 * to have its parent pointer adjusted.
1481 */
1482static
1483int
1484btree_set_parent(hammer_node_t node, hammer_btree_elm_t elm)
1485{
1486 hammer_volume_t volume;
1487 hammer_cluster_t cluster;
1488 hammer_node_t child;
1489 int error;
1490
1491 error = 0;
1492
1493 switch(elm->internal.subtree_type) {
1494 case HAMMER_BTREE_TYPE_LEAF:
1495 case HAMMER_BTREE_TYPE_INTERNAL:
1496 child = hammer_get_node(node->cluster,
1497 elm->internal.subtree_offset, &error);
1498 if (error == 0) {
1499 hammer_modify_node(child);
1500 hammer_lock_ex(&child->lock);
1501 child->ondisk->parent = node->node_offset;
1502 hammer_unlock(&child->lock);
1503 hammer_modify_node_done(child);
1504 hammer_rel_node(child);
1505 }
1506 break;
1507 case HAMMER_BTREE_TYPE_CLUSTER:
1508 volume = hammer_get_volume(node->cluster->volume->hmp,
1509 elm->internal.subtree_vol_no, &error);
1510 if (error)
1511 break;
1512 cluster = hammer_get_cluster(volume,
1513 elm->internal.subtree_clu_no,
1514 &error, 0);
1515 hammer_rel_volume(volume, 0);
1516 if (error)
1517 break;
1518 hammer_modify_cluster(cluster);
1519 hammer_lock_ex(&cluster->io.lock);
1520 cluster->ondisk->clu_btree_parent_offset = node->node_offset;
1521 hammer_unlock(&cluster->io.lock);
1522 hammer_modify_cluster_done(cluster);
1523 KKASSERT(cluster->ondisk->clu_btree_parent_clu_no ==
1524 node->cluster->clu_no);
1525 KKASSERT(cluster->ondisk->clu_btree_parent_vol_no ==
1526 node->cluster->volume->vol_no);
1527 hammer_rel_cluster(cluster, 0);
1528 break;
1529 default:
1530 hammer_print_btree_elm(elm, HAMMER_BTREE_TYPE_INTERNAL, -1);
1531 panic("btree_set_parent: bad subtree_type");
1532 break; /* NOT REACHED */
1533 }
1534 return(error);
1535}
1536
1537#if 0
1538
1539/*
1540 * This routine is called on the internal node (node) prior to recursing down
1541 * through (node, index) when the node referenced by (node, index) MIGHT
1542 * have too few elements for the caller to perform a deletion.
1543 *
1544 * cursor->index is invalid on return because the separators may have gotten
1545 * adjusted; the caller must rescan the node's elements.  The caller may set
1546 * cursor->index to -1 if it wants us to do a general rebalancing.
1547 *
1548 * This routine rebalances the children of the (node), collapsing children
1549 * together if possible. On return each child will have at least L/2-1
1550 * elements unless the node only has one child.
1551 *
1552 * NOTE: Because we do not update the parent's parent in the split code,
1553 * the subtree_count used by the caller may be incorrect. We correct it
1554 * here. Also note that we cannot change the depth of the tree's leaf
1555 * nodes here (see btree_collapse()).
1556 *
1557 * NOTE: We make no attempt to rebalance inter-cluster elements.
1558 */
1559static
1560int
1561btree_rebalance(hammer_cursor_t cursor)
1562{
1563 hammer_node_ondisk_t ondisk;
1564 hammer_node_t node;
1565 hammer_node_t children[HAMMER_BTREE_INT_ELMS];
1566 hammer_node_t child;
1567 hammer_btree_elm_t elm;
1568 hammer_btree_elm_t elms;
1569 int i, j, n, nelms, goal;
1570 int maxelms, halfelms;
1571 int error;
1572
1573 /*
1574 * If the elm being recursed through is an inter-cluster reference,
1575 * don't worry about it.
1576 */
1577 ondisk = cursor->node->ondisk;
1578 elm = &ondisk->elms[cursor->index];
1579 if (elm->internal.subtree_type == HAMMER_BTREE_TYPE_CLUSTER)
1580 return(0);
1581
1582 KKASSERT(elm->internal.subtree_offset != 0);
1583 error = 0;
1584
1585 /*
1586 * Load the children of node and do any necessary corrections
1587 * to subtree_count. subtree_count may be too low due to the
1588 * way insertions split nodes. Get a count of the total number
1589 * of actual elements held by our children.
1590 */
1591 error = 0;
1592
1593 for (i = n = 0; i < node->base.count; ++i) {
1594 struct hammer_btree_internal_elm *elm;
1595
1596 elm = &node->elms[i];
1597 children[i] = NULL;
1598 child_buffer[i] = NULL; /* must be preinitialized for bread */
1599 if (elm->subtree_offset == 0)
1600 continue;
1601 child = hammer_bread(cursor->cluster, elm->subtree_offset,
1602 HAMMER_FSBUF_BTREE, &error,
1603 &child_buffer[i], XXX);
1604 children[i] = child;
1605 if (child == NULL)
1606 continue;
1607 XXX
1608 KKASSERT(node->base.subtype == child->base.type);
1609
1610 /*
1611 * Accumulate n for a good child, update the node's count
1612 * if it was wrong.
1613 */
1614 if (node->elms[i].subtree_count != child->base.count) {
1615 node->elms[i].subtree_count = child->base.count;
1616 }
1617 n += node->elms[i].subtree_count;
1618 }
1619 if (error)
1620 goto failed;
1621
1622 /*
1623 * Collect all the children's elements together
1624 */
1625 nelms = n;
1626 elms = kmalloc(sizeof(*elms) * (nelms + 1), M_HAMMER, M_WAITOK|M_ZERO);
1627 for (i = n = 0; i < node->base.count; ++i) {
1628 child = children[i];
1629 for (j = 0; j < child->base.count; ++j) {
1630 elms[n].owner = child;
1631 if (node->base.subtype == HAMMER_BTREE_TYPE_LEAF)
1632 elms[n].u.leaf = child->leaf.elms[j];
1633 else
1634 elms[n].u.internal = child->internal.elms[j];
1635 ++n;
1636 }
1637 }
1638 KKASSERT(n == nelms);
1639
1640 /*
1641 * Store a boundary in the elms array to ease the code below. This
1642 * is only used if the children are internal nodes.
1643 */
1644 elms[n].u.internal = node->elms[i];
1645
1646 /*
1647 * Calculate the number of elements each child should have (goal) by
1648 * reducing the number of elements until we achieve at least
1649 * halfelms - 1 per child, unless we are a degenerate case.
1650 */
1651 maxelms = btree_max_elements(node->base.subtype);
1652 halfelms = maxelms / 2;
1653
1654 goal = halfelms - 1;
1655 while (i && n / i < goal)
1656 --i;
1657
1658 /*
1659 * Now rebalance using the specified goal
1660 */
1661 for (i = n = 0; i < node->base.count; ++i) {
1662 struct hammer_buffer *subchild_buffer = NULL;
1663 struct hammer_btree_internal_node *subchild;
1664
1665 child = children[i];
1666 for (j = 0; j < goal && n < nelms; ++j) {
1667 if (node->base.subtype == HAMMER_BTREE_TYPE_LEAF) {
1668 child->leaf.elms[j] = elms[n].u.leaf;
1669 } else {
1670 child->internal.elms[j] = elms[n].u.internal;
1671 }
1672
1673 /*
1674 * If the element's parent has changed we have to
1675 * update the parent pointer. This is somewhat
1676 * expensive.
1677 */
1678 if (elms[n].owner != child &&
1679 node->base.subtype == HAMMER_BTREE_TYPE_INTERNAL) {
1680 subchild = hammer_bread(cursor->cluster,
1681 elms[n].u.internal.subtree_offset,
1682 HAMMER_FSBUF_BTREE,
1683 &error,
1684 &subchild_buffer, XXX);
1685 if (subchild) {
1686 hammer_modify_buffer(subchild_buffer);
1687 subchild->base.parent =
1688 hammer_bclu_offset(child_buffer[i],
1689 child);
1690 hammer_modify_buffer_done(subchild_buffer);
1691 }
1692 /* XXX error */
1693 }
1694 ++n;
1695 }
1696 /*
1697 * Set right boundary if the children are internal nodes.
1698 */
1699 if (node->base.subtype == HAMMER_BTREE_TYPE_INTERNAL)
1700 child->internal.elms[j] = elms[n].u.internal;
1701 child->base.count = j;
1702 hammer_modify_buffer(child_buffer[i]);
1703 if (subchild_buffer)
1704 hammer_put_buffer(subchild_buffer, 0);
1705
1706 /*
1707 * If we have run out of elements, break out
1708 */
1709 if (n == nelms)
1710 break;
1711 }
1712
1713 /*
1714 * Physically destroy any left-over children. These children's
1715 * elements have been packed into prior children. The node's
1716 * right hand boundary and count gets shifted to index i.
1717 *
1718 * The subtree count in the node's parent MUST be updated because
1719 * we are removing elements. The subtree_count field is allowed to
1720 * be too small, but not too large!
1721 */
1722 if (i != node->base.count) {
1723 n = i;
1724 node->elms[n] = node->elms[node->base.count];
1725 while (i < node->base.count) {
1726 hammer_free_btree_ptr(child_buffer[i], children[i]);
1727 hammer_put_buffer(child_buffer[i], 0);
1728 ++i;
1729 }
1730 node->base.count = n;
1731 if (cursor->parent) {
1732 cursor->parent->elms[cursor->parent_index].subtree_count = n;
1733 hammer_modify_buffer(cursor->parent_buffer);
1734 }
1735 }
1736
1737 kfree(elms, M_HAMMER);
1738failed:
1739 hammer_modify_buffer(cursor->node_buffer);
1740 for (i = 0; i < node->base.count; ++i) {
1741 if (child_buffer[i])
1742 hammer_put_buffer(child_buffer[i], 0);
1743 }
1744	return(error);
1745}
1746
1747/*
1748 * This routine is only called if the cursor is at the root node and the
1749 * root node is an internal node.  We attempt to collapse the root node
1750 * by pulling its children's elements up into it, reducing tree depth by one.
1751 *
1752 * This is the only way to reduce tree depth in a HAMMER filesystem.
1753 * Note that all leaf nodes are at the same depth.
1754 *
1755 * This is a fairly expensive operation because we not only have to load
1756 * the root's children, we also have to scan each child and fix up the
1757 * parent offset of every node referenced by its elements.  Nasty all around.
1758 */
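/*
 * Rough sketch with arbitrary element counts: a root whose three children
 * hold [a b c], [d e] and [f g h] ends up holding [a b c d e f g h]
 * itself, so every surviving node sits one level closer to the root.
 */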
1759static
1760int
1761btree_collapse(hammer_cursor_t cursor)
1762{
1763 hammer_btree_node_ondisk_t root, child;
1764 hammer_btree_node_ondisk_t children[HAMMER_BTREE_INT_ELMS];
1765 struct hammer_buffer *child_buffer[HAMMER_BTREE_INT_ELMS];
1766 int count;
1767 int i, j, n;
1768 int root_modified;
1769 int error;
1770 int32_t root_offset;
1771 u_int8_t subsubtype;
1772
1773 root = cursor->node;
1774 count = root->base.count;
1775 root_offset = hammer_bclu_offset(cursor->node_buffer, root);
1776
1777 /*
1778	 * Sum up the number of elements stored under each of the root's
1779	 * elements (subtree_count).  This value is only approximate due to the
1780	 * way the insertion code works; it may be too small but never too large.
1781	 *
1782	 * Quickly terminate the collapse if the children hold too many
1783	 * elements in total.
1784 */
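	/*
	 * e.g. (illustrative values): subtree_counts of 4, 5 and 6 sum to
	 * n = 15; if 15 exceeds the capacity of a single child-level node
	 * the collapse is abandoned without error.
	 */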
1785 KKASSERT(root->base.parent == 0); /* must be root node */
1786 KKASSERT(root->base.type == HAMMER_BTREE_TYPE_INTERNAL);
1787 KKASSERT(count <= HAMMER_BTREE_INT_ELMS);
1788
1789 for (i = n = 0; i < count; ++i) {
1790 n += root->internal.elms[i].subtree_count;
1791 }
1792 if (n > btree_max_elements(root->base.subtype))
1793 return(0);
1794
1795 /*
1796 * Iterate through the elements again and correct the subtree_count.
1797 * Terminate the collapse if we wind up with too many.
1798 */
1799 error = 0;
1800 root_modified = 0;
1801
1802 for (i = n = 0; i < count; ++i) {
1803 struct hammer_btree_internal_elm *elm;
1804
1805 elm = &root->internal.elms[i];
1806 child_buffer[i] = NULL;
1807 children[i] = NULL;
1808 if (elm->subtree_offset == 0)
1809 continue;
1810 child = hammer_bread(cursor->cluster, elm->subtree_offset,
1811 HAMMER_FSBUF_BTREE, &error,
1812 &child_buffer[i], XXX);
1813 children[i] = child;
1814 if (child == NULL)
1815 continue;
1816 KKASSERT(root->base.subtype == child->base.type);
1817
1818 /*
1819 * Accumulate n for a good child, update the root's count
1820 * if it was wrong.
1821 */
1822 if (root->internal.elms[i].subtree_count != child->base.count) {
1823 root->internal.elms[i].subtree_count = child->base.count;
1824 root_modified = 1;
1825 }
1826 n += root->internal.elms[i].subtree_count;
1827 }
1828 if (error || n > btree_max_elements(root->base.subtype))
1829 goto done;
1830
1831 /*
1832	 * Ok, we can collapse the root.  If the root's children are leaves
1833	 * the collapse is really simple.  If they are internal nodes the
1834	 * collapse is not so simple because we have to fix up the parent
1835	 * pointers for the root's children's children.
1836 *
1837 * When collapsing an internal node the far left and far right
1838 * element's boundaries should match the root's left and right
1839 * boundaries.
1840 */
1841 if (root->base.subtype == HAMMER_BTREE_TYPE_LEAF) {
1842 for (i = n = 0; i < count; ++i) {
1843 child = children[i];
1844 for (j = 0; j < child->base.count; ++j) {
1845 root->leaf.elms[n] = child->leaf.elms[j];
1846 ++n;
1847 }
1848 }
1849 root->base.type = root->base.subtype;
1850 root->base.subtype = 0;
1851 root->base.count = n;
1852 root->leaf.link_left = 0;
1853 root->leaf.link_right = 0;
1854 } else {
1855 struct hammer_btree_internal_elm *elm;
1856 struct hammer_btree_internal_node *subchild;
1857 struct hammer_buffer *subchild_buffer = NULL;
1858
1859 if (count) {
1860 child = children[0];
1861 subsubtype = child->base.subtype;
1862 KKASSERT(child->base.count > 0);
1863 KKASSERT(root->internal.elms[0].base.key ==
1864 child->internal.elms[0].base.key);
1865 child = children[count-1];
1866 KKASSERT(child->base.count > 0);
1867 KKASSERT(root->internal.elms[count].base.key ==
1868 child->internal.elms[child->base.count].base.key);
1869 } else {
1870 subsubtype = 0;
1871 }
1872 for (i = n = 0; i < count; ++i) {
1873 child = children[i];
1874 KKASSERT(child->base.subtype == subsubtype);
1875 for (j = 0; j < child->base.count; ++j) {
1876 elm = &child->internal.elms[j];
1877
1878 root->internal.elms[n] = *elm;
1879 subchild = hammer_bread(cursor->cluster,
1880 elm->subtree_offset,
1881 HAMMER_FSBUF_BTREE,
1882 &error,
1883 &subchild_buffer,
1884 XXX);
1885 if (subchild) {
1886 subchild->base.parent = root_offset;
1887 hammer_modify_buffer(subchild_buffer);
1888 }
1889 ++n;
1890 }
1891 /* make sure the right boundary is correct */
1892 /* (this gets overwritten when the loop continues) */
1893 /* XXX generate a new separator? */
1894 root->internal.elms[n] = child->internal.elms[j];
1895 }
1896 root->base.type = HAMMER_BTREE_TYPE_INTERNAL;
1897 root->base.subtype = subsubtype;
1898 if (subchild_buffer)
1899 hammer_put_buffer(subchild_buffer, 0);
1900 }
1901 root_modified = 1;
1902
1903 /*
1904 * Cleanup
1905 */
1906done:
1907 if (root_modified)
1908 hammer_modify_buffer(cursor->node_buffer);
1909 for (i = 0; i < count; ++i) {
1910 if (child_buffer[i])
1911 hammer_put_buffer(child_buffer[i], 0);
1912 }
1913 return(error);
1914}
1915
1916#endif
1917
1918/************************************************************************
1919 * MISCELLANEOUS SUPPORT *
1920 ************************************************************************/
1921
1922/*
1923 * Compare two B-Tree elements, return -N, 0, or +N (similar to strcmp).
1924 *
1925 * See also hammer_rec_rb_compare() and hammer_rec_cmp() in hammer_object.c.
1926 */
1927int
1928hammer_btree_cmp(hammer_base_elm_t key1, hammer_base_elm_t key2)
1929{
1930 if (key1->obj_id < key2->obj_id)
1931 return(-4);
1932 if (key1->obj_id > key2->obj_id)
1933 return(4);
1934
1935 if (key1->rec_type < key2->rec_type)
1936 return(-3);
1937 if (key1->rec_type > key2->rec_type)
1938 return(3);
1939
1940 if (key1->key < key2->key)
1941 return(-2);
1942 if (key1->key > key2->key)
1943 return(2);
1944 return(0);
1945}
1946
1947/*
1948 * Test a non-zero transaction id against an element to determine whether
1949 * the element is visible as of that transaction.
1950 */
1951int
1952hammer_btree_chkts(hammer_tid_t create_tid, hammer_base_elm_t base)
1953{
1954 if (create_tid < base->create_tid)
1955 return(-1);
1956 if (base->delete_tid && create_tid >= base->delete_tid)
1957 return(1);
1958 return(0);
1959}
1960
1961/*
1962 * Create a separator halfway between key1 and key2.  For fields just
1963 * one unit apart, the separator will match key2.
1964 *
1965 * The handling of delete_tid is a little confusing.  It is only possible
1966 * to have one record in the B-Tree where all fields match except delete_tid.
1967 * This means, worst case, two adjacent elements may have a create_tid that
1968 * is one-apart and cause the separator to choose the right-hand element's
1969 * create_tid.  e.g. (create,delete):  (1,x)(2,x) -> separator is (2,x).
1970 *
1971 * So all we have to do is set delete_tid to the right-hand element's
1972 * delete_tid to guarantee that the separator is properly between the two elements.
1973 */
1974#define MAKE_SEPARATOR(key1, key2, dest, field) \
1975 dest->field = key1->field + ((key2->field - key1->field + 1) >> 1);
1976
1977static void
1978hammer_make_separator(hammer_base_elm_t key1, hammer_base_elm_t key2,
1979 hammer_base_elm_t dest)
1980{
1981 bzero(dest, sizeof(*dest));
1982 MAKE_SEPARATOR(key1, key2, dest, obj_id);
1983 MAKE_SEPARATOR(key1, key2, dest, rec_type);
1984 MAKE_SEPARATOR(key1, key2, dest, key);
1985 MAKE_SEPARATOR(key1, key2, dest, create_tid);
1986 dest->delete_tid = key2->delete_tid;
1987}
1988
1989#undef MAKE_SEPARATOR
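
/*
 * Worked example (illustrative values): obj_ids 0x10 and 0x20 produce a
 * separator obj_id of 0x10 + ((0x20 - 0x10 + 1) >> 1) = 0x18, while
 * adjacent values such as 5 and 6 produce 5 + ((6 - 5 + 1) >> 1) = 6,
 * i.e. the separator collapses onto key2 as noted above.
 */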
1990
1991/*
1992 * Return whether a generic internal or leaf node is full
1993 */
1994static int
1995btree_node_is_full(hammer_node_ondisk_t node)
1996{
1997 switch(node->type) {
1998 case HAMMER_BTREE_TYPE_INTERNAL:
1999 if (node->count == HAMMER_BTREE_INT_ELMS)
2000 return(1);
2001 break;
2002 case HAMMER_BTREE_TYPE_LEAF:
2003 if (node->count == HAMMER_BTREE_LEAF_ELMS)
2004 return(1);
2005 break;
2006 default:
2007		panic("illegal btree type");
2008 }
2009 return(0);
2010}
2011
2012#if 0
2013static int
2014btree_max_elements(u_int8_t type)
2015{
2016 if (type == HAMMER_BTREE_TYPE_LEAF)
2017 return(HAMMER_BTREE_LEAF_ELMS);
2018 if (type == HAMMER_BTREE_TYPE_INTERNAL)
2019 return(HAMMER_BTREE_INT_ELMS);
2020 panic("btree_max_elements: bad type %d\n", type);
2021}
2022#endif
2023
2024void
2025hammer_print_btree_node(hammer_node_ondisk_t ondisk)
2026{
2027 hammer_btree_elm_t elm;
2028 int i;
2029
2030 kprintf("node %p count=%d parent=%d type=%c\n",
2031 ondisk, ondisk->count, ondisk->parent, ondisk->type);
2032
2033 /*
2034 * Dump both boundary elements if an internal node
2035 */
2036 if (ondisk->type == HAMMER_BTREE_TYPE_INTERNAL) {
2037 for (i = 0; i <= ondisk->count; ++i) {
2038 elm = &ondisk->elms[i];
2039 hammer_print_btree_elm(elm, ondisk->type, i);
2040 }
2041 } else {
2042 for (i = 0; i < ondisk->count; ++i) {
2043 elm = &ondisk->elms[i];
2044 hammer_print_btree_elm(elm, ondisk->type, i);
2045 }
2046 }
2047}
2048
2049void
2050hammer_print_btree_elm(hammer_btree_elm_t elm, u_int8_t type, int i)
2051{
2052 kprintf(" %2d", i);
2053 kprintf("\tobjid = %016llx\n", elm->base.obj_id);
2054 kprintf("\tkey = %016llx\n", elm->base.key);
2055 kprintf("\tcreate_tid = %016llx\n", elm->base.create_tid);
2056 kprintf("\tdelete_tid = %016llx\n", elm->base.delete_tid);
2057 kprintf("\trec_type = %04x\n", elm->base.rec_type);
2058 kprintf("\tobj_type = %02x\n", elm->base.obj_type);
2059 kprintf("\tsubtree_type = %02x\n", elm->subtree_type);
2060
2061 if (type == HAMMER_BTREE_TYPE_INTERNAL) {
2062 if (elm->internal.rec_offset) {
2063 kprintf("\tcluster_rec = %08x\n",
2064 elm->internal.rec_offset);
2065 kprintf("\tcluster_id = %08x\n",
2066 elm->internal.subtree_clu_no);
2067 kprintf("\tvolno = %08x\n",
2068 elm->internal.subtree_vol_no);
2069 } else {
2070 kprintf("\tsubtree_off = %08x\n",
2071 elm->internal.subtree_offset);
2072 }
2073 kprintf("\tsubtree_count= %d\n", elm->internal.subtree_count);
2074 } else {
2075 kprintf("\trec_offset = %08x\n", elm->leaf.rec_offset);
2076 kprintf("\tdata_offset = %08x\n", elm->leaf.data_offset);
2077 kprintf("\tdata_len = %08x\n", elm->leaf.data_len);
2078 kprintf("\tdata_crc = %08x\n", elm->leaf.data_crc);
2079 }
2080}