1/*
2 * Copyright (c) 2007 The DragonFly Project. All rights reserved.
3 *
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@backplane.com>
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
16 * distribution.
17 * 3. Neither the name of The DragonFly Project nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific, prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
34 * $DragonFly: src/sys/vfs/hammer/hammer_btree.c,v 1.20 2008/01/17 05:06:09 dillon Exp $
35 */
36
37/*
38 * HAMMER B-Tree index
39 *
40 * HAMMER implements a modified B+Tree. In documentation this will
41 * simply be referred to as the HAMMER B-Tree. Basically a HAMMER B-Tree
42 * looks like a B+Tree (a B-Tree which stores its records only at the leaves
43 * of the tree), but adds two additional boundary elements which describe
44 * the left-most and right-most element a node is able to represent. In
45 * other words, we have boundary elements at the two ends of a B-Tree node
46 * instead of sub-tree pointers.
47 *
48 * A B-Tree internal node looks like this:
49 *
50 * B N N N N N N B <-- boundary and internal elements
51 * S S S S S S S <-- subtree pointers
52 *
53 * A B-Tree leaf node basically looks like this:
54 *
55 * L L L L L L L L <-- leaf elements
56 *
57 * The radix for an internal node is 1 less than that of a leaf, but we get a
58 * number of significant benefits for our troubles.
59 *
60 * The big benefit to using a B-Tree containing boundary information
61 * is that it is possible to cache pointers into the middle of the tree
62 * and not have to start searches, insertions, OR deletions at the root
63 * node. In particular, searches are able to progress in a definitive
64 * direction from any point in the tree without revisiting nodes. This
65 * greatly improves the efficiency of many operations, most especially
66 * record appends.
67 *
68 * B-Trees also make the stacking of trees fairly straightforward.
69 *
70 * SPIKES: Two leaf elements denoting an inclusive sub-range of keys
71 * may represent a spike, or a recursion into another cluster. Most
72 * standard B-Tree searches traverse spikes.
73 *
74 * INSERTIONS: A search performed with the intention of doing
75 * an insert will guarantee that the terminal leaf node is not full by
76 * splitting full nodes. Splits occur top-down during the dive down the
77 * B-Tree.
78 *
79 * DELETIONS: A deletion makes no attempt to proactively balance the
80 * tree and will recursively remove nodes that become empty. Empty
81 * nodes are not allowed and a deletion may recurse upwards from the leaf.
82 * Rather than allow a deadlock, a deletion may terminate early by setting
83 * an internal node's element's subtree_offset to 0. The deletion will
84 * then be resumed the next time a search encounters the element.
85 */
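/*
 * Illustrative sketch (a restatement of the layout above, not new on-disk
 * structure): in an internal node the element bases bracket each subtree
 * pointer, so elms[i].base and elms[i+1].base bound the subtree referenced
 * by elms[i].internal.subtree_offset.  The descent decision made by the
 * search code therefore boils down to roughly:
 *
 *	for (i = 0; i < node->count; ++i) {
 *		if (hammer_btree_cmp(key, &node->elms[i].base) >= 0 &&
 *		    hammer_btree_cmp(key, &node->elms[i+1].base) < 0) {
 *			break;	(push down via elms[i].internal.subtree_offset)
 *		}
 *	}
 *
 * The left bound is inclusive and the right bound is exclusive.  The real
 * search below also handles boundary mismatches and splits on insert.
 */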
86#include "hammer.h"
87#include <sys/buf.h>
88#include <sys/buf2.h>
89
90static int btree_search(hammer_cursor_t cursor, int flags);
91static int btree_split_internal(hammer_cursor_t cursor);
92static int btree_split_leaf(hammer_cursor_t cursor);
93static int btree_remove(hammer_cursor_t cursor);
94static int btree_set_parent(hammer_node_t node, hammer_btree_elm_t elm);
95#if 0
96static int btree_rebalance(hammer_cursor_t cursor);
97static int btree_collapse(hammer_cursor_t cursor);
98static int btree_node_is_almost_full(hammer_node_ondisk_t node);
99#endif
100static int btree_node_is_full(hammer_node_ondisk_t node);
101static void hammer_make_separator(hammer_base_elm_t key1,
102 hammer_base_elm_t key2, hammer_base_elm_t dest);
103
104/*
105 * Iterate records after a search. The cursor is iterated forwards past
106 * the current record until a record matching the key-range requirements
107 * is found. ENOENT is returned if the iteration goes past the ending
108 * key.
109 *
110 * The iteration is inclusive of key_beg and can be inclusive or exclusive
111 * of key_end depending on whether HAMMER_CURSOR_END_INCLUSIVE is set.
112 *
113 * When doing an as-of search (cursor->asof != 0), key_beg.delete_tid
114 * may be modified by B-Tree functions.
115 *
116 * cursor->key_beg may or may not be modified by this function during
117 * the iteration. XXX future - in case of an inverted lock we may have
118 * to reinitiate the lookup and set key_beg to properly pick up where we
119 * left off.
120 */
121int
122hammer_btree_iterate(hammer_cursor_t cursor)
123{
124 hammer_node_ondisk_t node;
125 hammer_btree_elm_t elm;
126 int error;
127 int r;
128 int s;
129
130 /*
131 * Skip past the current record
132 */
133 node = cursor->node->ondisk;
134 if (node == NULL)
135 return(ENOENT);
136 if (cursor->index < node->count &&
137 (cursor->flags & HAMMER_CURSOR_ATEDISK)) {
138 ++cursor->index;
139 }
140
141 /*
142 * Loop until an element is found or we are done.
143 */
144 for (;;) {
145 /*
146 * We iterate up the tree and then index over one element
147 * while we are at the last element in the current node.
148 *
149 * NOTE: This can pop us up to another cluster.
150 *
151 * If we are at the root of the root cluster, cursor_up
152 * returns ENOENT.
153 *
154 * NOTE: hammer_cursor_up() will adjust cursor->key_beg
155 * when told to re-search for the cluster tag.
156 *
157 * XXX this could be optimized by storing the information in
158 * the parent reference.
159 *
160 * XXX we can lose the node lock temporarily, this could mess
161 * up our scan.
162 */
163 if (cursor->index == node->count) {
164 error = hammer_cursor_up(cursor, 0);
165 if (error)
166 break;
167 node = cursor->node->ondisk;
168 KKASSERT(cursor->index != node->count);
169 ++cursor->index;
170 continue;
171 }
172
173 /*
174 * Check internal or leaf element. Determine if the record
175 * at the cursor has gone beyond the end of our range.
176 *
177 * Generally we recurse down through internal nodes. An
178 * internal node can only be returned if INCLUSTER is set
179 * and the node represents a cluster-push record.
180 */
181 if (node->type == HAMMER_BTREE_TYPE_INTERNAL) {
182 elm = &node->elms[cursor->index];
183 r = hammer_btree_cmp(&cursor->key_end, &elm[0].base);
184 s = hammer_btree_cmp(&cursor->key_beg, &elm[1].base);
185 if (hammer_debug_btree) {
186 kprintf("BRACKETL %p:%d %016llx %02x %016llx %d\n",
187 cursor->node, cursor->index,
188 elm[0].internal.base.obj_id,
189 elm[0].internal.base.rec_type,
190 elm[0].internal.base.key,
191 r
192 );
193 kprintf("BRACKETR %p:%d %016llx %02x %016llx %d\n",
194 cursor->node, cursor->index + 1,
195 elm[1].internal.base.obj_id,
196 elm[1].internal.base.rec_type,
197 elm[1].internal.base.key,
198 s
199 );
200 }
201
202 if (r < 0) {
203 error = ENOENT;
204 break;
205 }
206 if (r == 0 && (cursor->flags &
207 HAMMER_CURSOR_END_INCLUSIVE) == 0) {
208 error = ENOENT;
209 break;
210 }
211 KKASSERT(s <= 0);
212 KKASSERT(elm->internal.subtree_offset != 0);
213 error = hammer_cursor_down(cursor);
214 if (error)
215 break;
216 KKASSERT(cursor->index == 0);
217 node = cursor->node->ondisk;
218 continue;
219 } else {
220 elm = &node->elms[cursor->index];
221 r = hammer_btree_cmp(&cursor->key_end, &elm->base);
222 if (hammer_debug_btree) {
223 kprintf("ELEMENT %p:%d %016llx %02x %016llx %d\n",
224 cursor->node, cursor->index,
225 elm[0].leaf.base.obj_id,
226 elm[0].leaf.base.rec_type,
227 elm[0].leaf.base.key,
228 r
229 );
230 }
231 if (r < 0) {
232 error = ENOENT;
233 break;
234 }
235 if (r == 0 && (cursor->flags &
236 HAMMER_CURSOR_END_INCLUSIVE) == 0) {
237 error = ENOENT;
238 break;
239 }
240 switch(elm->leaf.base.btype) {
241 case HAMMER_BTREE_TYPE_RECORD:
242 if ((cursor->flags & HAMMER_CURSOR_ASOF) &&
243 hammer_btree_chkts(cursor->asof, &elm->base)) {
244 ++cursor->index;
245 continue;
246 }
247 break;
248 case HAMMER_BTREE_TYPE_SPIKE_BEG:
249 /*
250 * We must cursor-down via the SPIKE_END
251 * element, otherwise cursor->parent will
252 * not be set correctly for deletions.
253 */
254 KKASSERT(cursor->index + 1 < node->count);
255 ++cursor->index;
256 /* fall through */
257 case HAMMER_BTREE_TYPE_SPIKE_END:
258 if (cursor->flags & HAMMER_CURSOR_INCLUSTER)
259 break;
260 error = hammer_cursor_down(cursor);
261 if (error)
262 break;
263 KKASSERT(cursor->index == 0);
264 node = cursor->node->ondisk;
265 continue;
266 default:
267 error = EINVAL;
268 break;
269 }
270 if (error)
271 break;
272 }
273
274 /*
275 * Return entry
276 */
277 if (hammer_debug_btree) {
278 int i = cursor->index;
279 hammer_btree_elm_t elm = &cursor->node->ondisk->elms[i];
280 kprintf("ITERATE %p:%d %016llx %02x %016llx\n",
281 cursor->node, i,
282 elm->internal.base.obj_id,
283 elm->internal.base.rec_type,
284 elm->internal.base.key
285 );
286 }
287 return(0);
288 }
289 return(error);
290}
291
292/*
293 * Lookup cursor->key_beg. 0 is returned on success, ENOENT if the entry
294 * could not be found, and a fatal error otherwise.
295 *
296 * The cursor is suitably positioned for a deletion on success, and suitably
297 * positioned for an insertion on ENOENT.
298 *
299 * The cursor may begin anywhere, the search will traverse clusters in
300 * either direction to locate the requested element.
301 */
302int
303hammer_btree_lookup(hammer_cursor_t cursor)
304{
305 int error;
306
307 if (cursor->flags & HAMMER_CURSOR_ASOF) {
308 cursor->key_beg.delete_tid = cursor->asof;
309 do {
310 error = btree_search(cursor, 0);
311 } while (error == EAGAIN);
312 } else {
313 error = btree_search(cursor, 0);
314 }
315 if (error == 0 && cursor->flags)
316 error = hammer_btree_extract(cursor, cursor->flags);
317 return(error);
318}
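/*
 * Illustrative usage sketch: an as-of lookup is set up by the caller
 * loading key_beg, cursor->asof, and the HAMMER_CURSOR_ASOF flag and then
 * simply calling hammer_btree_lookup().  The delete_tid adjustment and
 * EAGAIN retry are handled above (snapshot_tid below is a placeholder for
 * the caller's as-of TID).
 *
 *	cursor->key_beg = ...desired base key...;
 *	cursor->asof = snapshot_tid;
 *	cursor->flags |= HAMMER_CURSOR_ASOF;
 *	error = hammer_btree_lookup(cursor);
 */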
319
320/*
321 * Execute the logic required to start an iteration. The first record
322 * located within the specified range is returned and iteration control
323 * flags are adjusted for successive hammer_btree_iterate() calls.
324 */
325int
326hammer_btree_first(hammer_cursor_t cursor)
327{
328 int error;
329
330 error = hammer_btree_lookup(cursor);
331 if (error == ENOENT) {
332 cursor->flags &= ~HAMMER_CURSOR_ATEDISK;
333 error = hammer_btree_iterate(cursor);
334 }
335 cursor->flags |= HAMMER_CURSOR_ATEDISK;
336 return(error);
337}
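/*
 * Illustrative usage sketch: a ranged scan positions the cursor with
 * hammer_btree_first() and then loops on hammer_btree_iterate(), setting
 * HAMMER_CURSOR_ATEDISK after consuming each element so the next iteration
 * skips past it.  ENOENT marks the end of the range.
 *
 *	error = hammer_btree_first(cursor);
 *	while (error == 0) {
 *		...consume the element at cursor->node / cursor->index...
 *		cursor->flags |= HAMMER_CURSOR_ATEDISK;
 *		error = hammer_btree_iterate(cursor);
 *	}
 *	if (error == ENOENT)
 *		error = 0;
 */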
338
339/*
340 * Extract the record and/or data associated with the cursor's current
341 * position. Any prior record or data stored in the cursor is replaced.
342 * The cursor must be positioned at a leaf node.
343 *
344 * NOTE: Most extractions occur at the leaf of the B-Tree. The only
345 * extraction allowed at an internal element is at a cluster-push.
346 * Cluster-push elements have records but no data.
347 */
348int
349hammer_btree_extract(hammer_cursor_t cursor, int flags)
350{
351 hammer_node_ondisk_t node;
352 hammer_btree_elm_t elm;
353 hammer_cluster_t cluster;
354 u_int64_t buf_type;
355 int32_t cloff;
356 int32_t roff;
357 int error;
358
359 /*
360 * A cluster record type has no data reference, the information
361 * is stored directly in the record and B-Tree element.
362 *
363 * The case where the data reference resolves to the same buffer
364 * as the record reference must be handled.
365 */
366 node = cursor->node->ondisk;
367 elm = &node->elms[cursor->index];
368 cluster = cursor->node->cluster;
369 cursor->flags &= ~HAMMER_CURSOR_DATA_EMBEDDED;
370 cursor->data = NULL;
371
372 /*
373 * There is nothing to extract for an internal element.
374 */
375 if (node->type == HAMMER_BTREE_TYPE_INTERNAL)
376 return(EINVAL);
377
378 KKASSERT(node->type == HAMMER_BTREE_TYPE_LEAF);
379
380 /*
381 * Leaf element.
382 */
383 if ((flags & HAMMER_CURSOR_GET_RECORD)) {
384 cloff = elm->leaf.rec_offset;
385 cursor->record = hammer_bread(cluster, cloff,
386 HAMMER_FSBUF_RECORDS, &error,
387 &cursor->record_buffer);
388 } else {
389 cloff = 0;
390 error = 0;
391 }
392 if ((flags & HAMMER_CURSOR_GET_DATA) && error == 0) {
393 if (elm->leaf.base.btype != HAMMER_BTREE_TYPE_RECORD) {
394 /*
395 * Only records have data references. Spike elements
396 * do not.
397 */
398 cursor->data = NULL;
399 } else if ((cloff ^ elm->leaf.data_offset) & ~HAMMER_BUFMASK) {
400 /*
401 * The data is not in the same buffer as the last
402 * record we cached, but it could still be embedded
403 * in a record. Note that we may not have loaded the
404 * record's buffer above, depending on flags.
405 */
406 if ((elm->leaf.rec_offset ^ elm->leaf.data_offset) &
407 ~HAMMER_BUFMASK) {
408 if (elm->leaf.data_len & HAMMER_BUFMASK)
409 buf_type = HAMMER_FSBUF_DATA;
410 else
411 buf_type = 0; /* pure data buffer */
412 } else {
413 buf_type = HAMMER_FSBUF_RECORDS;
414 }
415 cursor->data = hammer_bread(cluster,
416 elm->leaf.data_offset,
417 buf_type, &error,
418 &cursor->data_buffer);
419 } else {
420 /*
421 * Data in same buffer as record. Note that we
422 * leave any existing data_buffer intact, even
423 * though we don't use it in this case, in case
424 * other records extracted during an iteration
425 * go back to it.
426 *
427 * The data must be embedded in the record for this
428 * case to be hit.
429 *
430 * Just assume the buffer type is correct.
431 */
432 cursor->data = (void *)
433 ((char *)cursor->record_buffer->ondisk +
434 (elm->leaf.data_offset & HAMMER_BUFMASK));
435 roff = (char *)cursor->data - (char *)cursor->record;
436 KKASSERT (roff >= 0 && roff < HAMMER_RECORD_SIZE);
437 cursor->flags |= HAMMER_CURSOR_DATA_EMBEDDED;
438 }
439 }
440 return(error);
441}
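/*
 * Illustrative usage sketch: once positioned on a leaf record the caller
 * extracts the record and/or data and can test HAMMER_CURSOR_DATA_EMBEDDED
 * afterwards to tell whether the data was found embedded in the record's
 * buffer.
 *
 *	error = hammer_btree_extract(cursor, HAMMER_CURSOR_GET_RECORD |
 *					     HAMMER_CURSOR_GET_DATA);
 *	if (error == 0) {
 *		...use cursor->record and cursor->data...
 *		if (cursor->flags & HAMMER_CURSOR_DATA_EMBEDDED)
 *			...data shares the record's buffer...
 *	}
 */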
442
443
444/*
445 * Insert a leaf element into the B-Tree at the current cursor position.
446 * The cursor is positioned such that the elements at and beyond the cursor
447 * are shifted to make room for the new record.
448 *
449 * The caller must call hammer_btree_lookup() with the HAMMER_CURSOR_INSERT
450 * flag set and that call must return ENOENT before this function can be
451 * called.
452 *
453 * ENOSPC is returned if there is no room to insert a new record.
454 */
455int
456hammer_btree_insert(hammer_cursor_t cursor, hammer_btree_elm_t elm)
457{
458 hammer_node_ondisk_t parent;
459 hammer_node_ondisk_t node;
460 int i;
461
462 /*
463 * Insert the element at the leaf node and update the count in the
464 * parent. It is possible for parent to be NULL, indicating that
465 * the root of the B-Tree in the cluster is a leaf. It is also
466 * possible for the leaf to be empty.
467 *
468 * Remember that the right-hand boundary is not included in the
469 * count.
470 */
471 hammer_modify_node(cursor->node);
472 node = cursor->node->ondisk;
473 i = cursor->index;
474 KKASSERT(elm->base.btype != 0);
475 KKASSERT(node->type == HAMMER_BTREE_TYPE_LEAF);
476 KKASSERT(node->count < HAMMER_BTREE_LEAF_ELMS);
477 if (i != node->count) {
478 bcopy(&node->elms[i], &node->elms[i+1],
479 (node->count - i) * sizeof(*elm));
480 }
481 node->elms[i] = *elm;
482 ++node->count;
483
484 KKASSERT(hammer_btree_cmp(cursor->left_bound, &elm->leaf.base) <= 0);
485 KKASSERT(hammer_btree_cmp(cursor->right_bound, &elm->leaf.base) > 0);
486 if (i)
487 KKASSERT(hammer_btree_cmp(&node->elms[i-1].leaf.base, &elm->leaf.base) < 0);
488 if (i != node->count - 1)
489 KKASSERT(hammer_btree_cmp(&node->elms[i+1].leaf.base, &elm->leaf.base) > 0);
490
491 /*
492 * Adjust the sub-tree count in the parent. Note that the parent
493 * may be in a different cluster.
494 */
495 if (cursor->parent) {
496 hammer_modify_node(cursor->parent);
497 parent = cursor->parent->ondisk;
498 i = cursor->parent_index;
499 }
500 return(0);
501}
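/*
 * Illustrative usage sketch of the insertion protocol described above: the
 * lookup must be issued with HAMMER_CURSOR_INSERT set and must return
 * ENOENT, which leaves the cursor at the insertion point in a leaf that is
 * guaranteed not to be full.
 *
 *	cursor->key_beg = elm->leaf.base;
 *	cursor->flags |= HAMMER_CURSOR_INSERT;
 *	error = hammer_btree_lookup(cursor);
 *	if (error == ENOENT)
 *		error = hammer_btree_insert(cursor, elm);
 */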
502
503#if 0
504
505/*
506 * Insert a cluster push into the B-Tree at the current cursor position.
507 * The cursor is positioned at a leaf after a failed btree_lookup.
508 *
509 * The caller must call hammer_btree_lookup() with the HAMMER_CURSOR_INSERT
510 * flag set and that call must return ENOENT before this function can be
511 * called.
512 *
513 * This routine is used ONLY during a recovery pass while the originating
514 * cluster is serialized. The leaf is broken up into up to three pieces,
515 * causing up to two additional internal elements to be added to the parent.
516 *
517 * ENOSPC is returned if there is no room to insert a new record.
518 */
519int
520hammer_btree_insert_cluster(hammer_cursor_t cursor, hammer_cluster_t ncluster,
521 int32_t rec_offset)
522{
523 hammer_cluster_t ocluster;
524 hammer_node_ondisk_t parent;
525 hammer_node_ondisk_t node;
526 hammer_node_ondisk_t xnode; /* additional leaf node */
527 hammer_node_t new_node;
528 hammer_btree_elm_t elm;
529 const int esize = sizeof(*elm);
530 u_int8_t save;
531 int error = 0;
532 int pi, i;
533
534 kprintf("cursor %p ncluster %p\n", cursor, ncluster);
535 hammer_modify_node(cursor->node);
536 node = cursor->node->ondisk;
537 i = cursor->index;
538 KKASSERT(node->type == HAMMER_BTREE_TYPE_LEAF);
539 KKASSERT(node->count < HAMMER_BTREE_LEAF_ELMS);
540
541 /*
542 * Make sure the spike is legal or the B-Tree code will get really
543 * confused.
544 */
545 KKASSERT(hammer_btree_cmp(&ncluster->ondisk->clu_btree_beg,
546 cursor->left_bound) >= 0);
547 KKASSERT(hammer_btree_cmp(&ncluster->ondisk->clu_btree_end,
548 cursor->right_bound) <= 0);
549 if (i != node->count) {
550 KKASSERT(hammer_btree_cmp(&ncluster->ondisk->clu_btree_end,
551 &node->elms[i].leaf.base) <= 0);
552 }
553
554 /*
555 * If we are at the local root of the cluster a new root node
556 * must be created, because we need an internal node. The
557 * caller has already marked the source cluster as undergoing
558 * modification.
559 */
560 ocluster = cursor->node->cluster;
561 if (cursor->parent == NULL) {
562 cursor->parent = hammer_alloc_btree(ocluster, &error);
563 if (error)
564 return(error);
565 hammer_lock_ex(&cursor->parent->lock);
566 hammer_modify_node(cursor->parent);
567 parent = cursor->parent->ondisk;
568 parent->count = 1;
569 parent->parent = 0;
570 parent->type = HAMMER_BTREE_TYPE_INTERNAL;
571 parent->elms[0].base = ocluster->clu_btree_beg;
572 parent->elms[0].base.subtree_type = node->type;
573 parent->elms[0].internal.subtree_offset = cursor->node->node_offset;
574 parent->elms[1].base = ocluster->clu_btree_end;
575 cursor->parent_index = 0;
576 cursor->left_bound = &parent->elms[0].base;
577 cursor->right_bound = &parent->elms[1].base;
578 node->parent = cursor->parent->node_offset;
579 ocluster->ondisk->clu_btree_root = cursor->parent->node_offset;
580 kprintf("no parent\n");
581 } else {
582 kprintf("has parent\n");
583 }
584
585
586 KKASSERT(cursor->parent->ondisk->count <= HAMMER_BTREE_INT_ELMS - 2);
587
588 hammer_modify_node(cursor->parent);
589 parent = cursor->parent->ondisk;
590 pi = cursor->parent_index;
591
592 kprintf("%d node %d/%d (%c) offset=%d parent=%d\n",
593 cursor->node->cluster->clu_no,
594 i, node->count, node->type, cursor->node->node_offset, node->parent);
595
596 /*
597 * If the insertion point bisects the node we will need to allocate
598 * a second leaf node to copy the right hand side into.
599 */
600 if (i != 0 && i != node->count) {
601 new_node = hammer_alloc_btree(cursor->node->cluster, &error);
602 if (error)
603 return(error);
604 xnode = new_node->ondisk;
605 bcopy(&node->elms[i], &xnode->elms[0],
606 (node->count - i) * esize);
607 xnode->count = node->count - i;
608 xnode->parent = cursor->parent->node_offset;
609 xnode->type = HAMMER_BTREE_TYPE_LEAF;
610 node->count = i;
611 } else {
612 new_node = NULL;
613 xnode = NULL;
614 }
615
616 /*
617 * Adjust the parent and set pi to point at the internal element
618 * which we intended to hold the spike.
619 */
620 if (new_node) {
621 /*
622 * Insert spike after parent index. Spike is at pi + 1.
623 * Also include room after the spike for new_node
624 */
625 ++pi;
626 bcopy(&parent->elms[pi], &parent->elms[pi+2],
627 (parent->count - pi + 1) * esize);
628 parent->count += 2;
629 } else if (i == 0) {
630 /*
631 * Insert spike before parent index. Spike is at pi.
632 *
633 * cursor->node's index in the parent (cursor->parent_index)
634 * has now shifted over by one.
635 */
636 bcopy(&parent->elms[pi], &parent->elms[pi+1],
637 (parent->count - pi + 1) * esize);
638 ++parent->count;
639 ++cursor->parent_index;
640 } else {
641 /*
642 * Insert spike after parent index. Spike is at pi + 1.
643 */
644 ++pi;
645 bcopy(&parent->elms[pi], &parent->elms[pi+1],
646 (parent->count - pi + 1) * esize);
647 ++parent->count;
648 }
649
650 /*
651 * Load the spike into the parent at (pi).
652 *
653 * WARNING: subtree_type is actually overloaded within base.
654 * WARNING: subtree_clu_no is overloaded on subtree_offset
655 */
656 elm = &parent->elms[pi];
657 elm[0].internal.base = ncluster->ondisk->clu_btree_beg;
658 elm[0].internal.base.subtree_type = HAMMER_BTREE_TYPE_CLUSTER;
659 elm[0].internal.rec_offset = rec_offset;
660 elm[0].internal.subtree_clu_no = ncluster->clu_no;
661 elm[0].internal.subtree_vol_no = ncluster->volume->vol_no;
662
663 /*
664 * Load the new node into parent at (pi+1) if non-NULL, and also
665 * set the right-hand boundary for the spike.
666 *
667 * Because new_node is a leaf its elements do not point to any
668 * nodes so we don't have to scan it to adjust parent pointers.
669 *
670 * WARNING: subtree_type is actually overloaded within base.
671 * WARNING: subtree_clu_no is overloaded on subtree_offset
672 *
673 * XXX right-boundary may not match clu_btree_end if spike is
674 * at the end of the internal node. For now the cursor search
675 * insertion code will deal with it.
676 */
677 if (new_node) {
678 elm[1].internal.base = ncluster->ondisk->clu_btree_end;
679 elm[1].internal.base.subtree_type = HAMMER_BTREE_TYPE_LEAF;
680 elm[1].internal.subtree_offset = new_node->node_offset;
681 elm[1].internal.subtree_vol_no = -1;
682 elm[1].internal.rec_offset = 0;
683 } else {
684 /*
685 * The right boundary is only the base part of elm[1].
686 * The rest belongs to elm[1]'s recursion. Note however
687 * that subtree_type is overloaded within base so we
688 * have to retain it as well.
689 */
690 save = elm[1].internal.base.subtree_type;
691 elm[1].internal.base = ncluster->ondisk->clu_btree_end;
692 elm[1].internal.base.subtree_type = save;
693 }
694
695 /*
696 * The boundaries stored in the cursor for node are probably all
697 * messed up now, fix them.
698 */
699 cursor->left_bound = &parent->elms[cursor->parent_index].base;
700 cursor->right_bound = &parent->elms[cursor->parent_index+1].base;
701
702 KKASSERT(hammer_btree_cmp(&ncluster->ondisk->clu_btree_end,
703 &elm[1].internal.base) <= 0);
704
705
706 /*
707 * Adjust the target cluster's parent offset
708 */
709 hammer_modify_cluster(ncluster);
710 ncluster->ondisk->clu_btree_parent_offset = cursor->parent->node_offset;
711
712 if (new_node)
713 hammer_rel_node(new_node);
714
715 return(0);
716}
717
718#endif
719
720/*
721 * Delete a record from the B-Tree at the current cursor position.
722 * The cursor is positioned such that the current element is the one
723 * to be deleted.
724 *
725 * On return the cursor will be positioned after the deleted element and
726 * MAY point to an internal node. It will be suitable for the continuation
727 * of an iteration but not for an insertion or deletion.
728 *
729 * Deletions will attempt to partially rebalance the B-Tree in an upward
730 * direction, but will terminate rather than deadlock. Empty leaves are
731 * not allowed except at the root node of a cluster. An early termination
732 * will leave an internal node with an element whose subtree_offset is 0,
733 * a case detected and handled by btree_search().
734 */
735int
736hammer_btree_delete(hammer_cursor_t cursor)
737{
738 hammer_node_ondisk_t ondisk;
739 hammer_node_t node;
740 hammer_node_t parent;
741 int error;
742 int i;
743
744 /*
745 * Delete the element from the leaf node.
746 *
747 * Remember that leaf nodes do not have boundaries.
748 */
749 node = cursor->node;
750 ondisk = node->ondisk;
751 i = cursor->index;
752
753 KKASSERT(ondisk->type == HAMMER_BTREE_TYPE_LEAF);
754 KKASSERT(i >= 0 && i < ondisk->count);
755 hammer_modify_node(node);
756 if (i + 1 != ondisk->count) {
757 bcopy(&ondisk->elms[i+1], &ondisk->elms[i],
758 (ondisk->count - i - 1) * sizeof(ondisk->elms[0]));
759 }
760 --ondisk->count;
761
762 /*
763 * Validate local parent
764 */
765 if (ondisk->parent) {
766 parent = cursor->parent;
767
768 KKASSERT(parent != NULL);
769 KKASSERT(parent->node_offset == ondisk->parent);
770 KKASSERT(parent->cluster == node->cluster);
771 }
772
773 /*
774 * If the leaf becomes empty it must be detached from the parent,
775 * potentially recursing through to the cluster root.
776 *
777 * This may reposition the cursor at one of the parents of the
778 * current node.
779 */
780 KKASSERT(cursor->index <= ondisk->count);
781 if (ondisk->count == 0) {
782 do {
783 error = btree_remove(cursor);
784 } while (error == EAGAIN);
785 } else {
786 error = 0;
787 }
788 KKASSERT(cursor->parent == NULL || cursor->parent_index < cursor->parent->ondisk->count);
789 return(error);
790}
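/*
 * Illustrative usage sketch: a deletion first positions the cursor on the
 * exact element with hammer_btree_lookup() (a 0 return leaves the cursor
 * suitably positioned for deletion) and then removes it; the recursive
 * removal of empty nodes is handled above.
 *
 *	error = hammer_btree_lookup(cursor);
 *	if (error == 0)
 *		error = hammer_btree_delete(cursor);
 */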
791
792/*
793 * PRIMARY B-TREE SEARCH SUPPORT PROCEDURE
794 *
795 * Search a cluster's B-Tree for cursor->key_beg, return the matching node.
796 *
797 * The search can begin ANYWHERE in the B-Tree. As a first step the search
798 * iterates up the tree as necessary to properly position itself prior to
799 * actually doing the search.
800 *
801 * INSERTIONS: The search will split full nodes and leaves on its way down
802 * and guarantee that the leaf it ends up on is not full. If we run out
803 * of space the search continues to the leaf (to position the cursor for
804 * the spike), but ENOSPC is returned.
805 *
806 * XXX this isn't optimal - we really need to just locate the end point and
807 * insert space going up, and if we get a deadlock just release and retry
808 * the operation. Or something like that. The insertion code can transit
809 * multiple clusters and run splits in unnecessary clusters.
810 *
811 * DELETIONS: The search will rebalance the tree on its way down. XXX
812 *
813 * The search is only guaranteed to end up on a leaf if an error code of 0
814 * is returned, or if inserting and an error code of ENOENT is returned.
815 * Otherwise it can stop at an internal node. On success a search returns
816 * a leaf node unless INCLUSTER is set and the search located a cluster push
817 * node (which is an internal node).
818 */
819static
820int
821btree_search(hammer_cursor_t cursor, int flags)
822{
823 hammer_node_ondisk_t node;
824 hammer_cluster_t cluster;
825 hammer_btree_elm_t elm;
826 int error;
827 int enospc = 0;
828 int i;
829 int r;
830
831 flags |= cursor->flags;
832
833 if (hammer_debug_btree) {
834 kprintf("SEARCH %p:%d %016llx %02x key=%016llx did=%016llx\n",
835 cursor->node, cursor->index,
836 cursor->key_beg.obj_id,
837 cursor->key_beg.rec_type,
838 cursor->key_beg.key,
839 cursor->key_beg.delete_tid
840 );
841 }
842
843 /*
844 * Move our cursor up the tree until we find a node whose range covers
845 * the key we are trying to locate. This may move us between
846 * clusters.
847 *
848 * The left bound is inclusive, the right bound is non-inclusive.
849 * It is ok to cursor up too far when cursoring across a cluster
850 * boundary.
851 *
852 * First see if we can skip the whole cluster. hammer_cursor_up()
853 * handles both cases but this way we don't check the cluster
854 * bounds when going up the tree within a cluster.
855 *
856 * NOTE: If INCLUSTER is set and we are at the root of the cluster,
857 * hammer_cursor_up() will return ENOENT.
858 */
859 cluster = cursor->node->cluster;
860 while (
861 hammer_btree_cmp(&cursor->key_beg, &cluster->clu_btree_beg) < 0 ||
862 hammer_btree_cmp(&cursor->key_beg, &cluster->clu_btree_end) >= 0) {
863 error = hammer_cursor_toroot(cursor);
864 if (error)
865 goto done;
866 KKASSERT(cursor->parent);
867 error = hammer_cursor_up(cursor, 0);
868 if (error)
869 goto done;
870 cluster = cursor->node->cluster;
871 }
872
873 /*
874 * Deal with normal cursoring within a cluster. The right bound
875 * is non-inclusive. That is, the bounds form a separator.
876 */
877 while (hammer_btree_cmp(&cursor->key_beg, cursor->left_bound) < 0 ||
878 hammer_btree_cmp(&cursor->key_beg, cursor->right_bound) >= 0) {
879 KKASSERT(cursor->parent);
880 error = hammer_cursor_up(cursor, 0);
881 if (error)
882 goto done;
883 }
884
885 /*
886 * We better have ended up with a node somewhere, and our second
887 * while loop had better not have traversed up a cluster.
888 */
889 KKASSERT(cursor->node != NULL && cursor->node->cluster == cluster);
890
891 /*
892 * If we are inserting we can't start at a full node if the parent
893 * is also full (because there is no way to split the node),
894 * continue running up the tree until we hit the root of the
895 * root cluster or until the requirement is satisfied.
896 *
897 * NOTE: These cursor-up's CAN continue to cross cluster boundaries.
898 *
899 * NOTE: We must guarantee at least two open spots in the parent
900 * to deal with hammer_btree_insert_cluster().
901 *
902 * XXX as an optimization it should be possible to unbalance the tree
903 * and stop at the root of the current cluster.
904 */
905 while ((flags & HAMMER_CURSOR_INSERT) && enospc == 0) {
906 if (btree_node_is_full(cursor->node->ondisk) == 0)
907 break;
908 if (cursor->parent == NULL)
909 break;
910 if (cursor->parent->ondisk->count != HAMMER_BTREE_INT_ELMS)
911 break;
912 error = hammer_cursor_up(cursor, 0);
913 /* cluster and node may now be stale */
914 if (error)
915 goto done;
916 }
917 /* cluster = cursor->node->cluster; not needed until next cluster = */
918
919#if 0
920 /*
921 * If we are deleting we can't start at an internal node with only
922 * one element unless it is root, because all of our code assumes
923 * that internal nodes will never be empty. Just do this generally
924 * for both leaf and internal nodes to get better balance.
925 *
926 * This handles the case where the cursor is sitting at a leaf and
927 * either the leaf or parent contain an insufficient number of
928 * elements.
929 *
930 * NOTE: These cursor-up's CAN continue to cross cluster boundaries.
931 *
932 * XXX NOTE: Iterations may not set this flag anyway.
933 */
934 while (flags & HAMMER_CURSOR_DELETE) {
935 if (cursor->node->ondisk->count > 1)
936 break;
937 if (cursor->parent == NULL)
938 break;
939 KKASSERT(cursor->node->ondisk->count != 0);
940 error = hammer_cursor_up(cursor, 0);
941 /* cluster and node may now be stale */
942 if (error)
943 goto done;
944 }
945#endif
946
947new_cluster:
948 /*
949 * Push down through internal nodes to locate the requested key.
950 */
951 cluster = cursor->node->cluster;
952 node = cursor->node->ondisk;
953 while (node->type == HAMMER_BTREE_TYPE_INTERNAL) {
954#if 0
955 /*
956 * If we are at the root node and deleting, try to collapse
957 * all of the root's children into the root. This is the
958 * only point where tree depth is reduced.
959 *
960 * XXX NOTE: Iterations may not set this flag anyway.
961 */
962 if ((flags & HAMMER_CURSOR_DELETE) && cursor->parent == NULL) {
963 error = btree_collapse(cursor);
964 /* node becomes stale after call */
965 /* XXX ENOSPC */
966 if (error)
967 goto done;
968 }
969 node = cursor->node->ondisk;
970#endif
971 /*
972 * Scan the node to find the subtree index to push down into.
973 * We go one-past, then back-up.
974 *
975 * We must proactively remove deleted elements which may
976 * have been left over from a deadlocked btree_remove().
977 *
978 * The left and right boundaries are included in the loop
979 * in order to detect edge cases.
980 *
981 * If the separator only differs by delete_tid (r == -1)
982 * we may end up going down a branch to the left of the
983 * one containing the desired key. Flag it.
984 */
985 for (i = 0; i <= node->count; ++i) {
986 elm = &node->elms[i];
987
988 KKASSERT(i == node->count ||
989 elm->internal.subtree_offset != 0);
990#if 0
991 if (i < node->count &&
992 elm->internal.subtree_offset == 0){
993 btree_remove_deleted_elements(cursor);
994 goto new_cluster;
995 }
996#endif
997 r = hammer_btree_cmp(&cursor->key_beg, &elm->base);
998 if (r < 0)
999 break;
1000 }
1001
1002 /*
1003 * These cases occur when the parent's idea of the boundary
1004 * is wider than the child's idea of the boundary, and
1005 * require special handling. If not inserting we can
1006 * terminate the search early for these cases but the
1007 * child's boundaries cannot be unconditionally modified.
1008 */
1009 if (i == 0) {
1010 /*
1011 * If i == 0 the search terminated to the LEFT of the
1012 * left_boundary but to the RIGHT of the parent's left
1013 * boundary.
1014 */
1015 u_int8_t save;
1016
1017 if ((flags & HAMMER_CURSOR_INSERT) == 0) {
1018 cursor->index = 0;
1019 return(ENOENT);
1020 }
1021 elm = &node->elms[0];
1022
1023 /*
1024 * Correct a left-hand boundary mismatch.
1025 */
1026 hammer_modify_node(cursor->node);
1027 save = node->elms[0].base.btype;
1028 node->elms[0].base = *cursor->left_bound;
1029 node->elms[0].base.btype = save;
1030 } else if (i == node->count + 1) {
1031 /*
1032 * If i == node->count + 1 the search terminated to
1033 * the RIGHT of the right boundary but to the LEFT
1034 * of the parent's right boundary.
1035 *
1036 * Note that the last element in this case is
1037 * elms[i-2] prior to adjustments to 'i'.
1038 */
1039 --i;
1040 if ((flags & HAMMER_CURSOR_INSERT) == 0) {
1041 cursor->index = i;
1042 return(ENOENT);
1043 }
1044
1045 /*
1046 * Correct a right-hand boundary mismatch.
1047 * (actual push-down record is i-2 prior to
1048 * adjustments to i).
1049 */
1050 elm = &node->elms[i];
1051 hammer_modify_node(cursor->node);
1052 elm->base = *cursor->right_bound;
1053 --i;
1054 } else {
1055 /*
1056 * The push-down index is now i - 1. If we had
1057 * terminated on the right boundary this will point
1058 * us at the last element.
1059 */
1060 --i;
1061 }
1062 cursor->index = i;
1063
1064 if (hammer_debug_btree) {
1065 elm = &node->elms[i];
1066 kprintf("SEARCH-I %p:%d %016llx %02x key=%016llx did=%016llx\n",
1067 cursor->node, i,
1068 elm->internal.base.obj_id,
1069 elm->internal.base.rec_type,
1070 elm->internal.base.key,
1071 elm->internal.base.delete_tid
1072 );
1073 }
1074
1075 /*
1076 * Handle insertion and deletion requirements.
1077 *
1078 * If inserting split full nodes. The split code will
1079 * adjust cursor->node and cursor->index if the current
1080 * index winds up in the new node.
1081 *
1082 * If inserting and a left or right edge case was detected,
1083 * we cannot correct the left or right boundary and must
1084 * prepend and append an empty leaf node in order to make
1085 * the boundary correction.
1086 *
1087 * If we run out of space we set enospc and continue on
1088 * to a leaf to provide the spike code with a good point
1089 * of entry. Enospc is reset if we cross a cluster boundary.
1090 */
1091 if ((flags & HAMMER_CURSOR_INSERT) && enospc == 0) {
1092 if (btree_node_is_full(node)) {
1093 error = btree_split_internal(cursor);
1094 if (error) {
1095 if (error != ENOSPC)
1096 goto done;
1097 enospc = 1;
1098 }
1099 /*
1100 * reload stale pointers
1101 */
1102 i = cursor->index;
1103 node = cursor->node->ondisk;
1104 }
1105 }
1106
1107 /*
1108 * Push down (push into new node, existing node becomes
1109 * the parent) and continue the search.
1110 */
1111 error = hammer_cursor_down(cursor);
1112 /* node and cluster become stale */
1113 if (error)
1114 goto done;
1115 node = cursor->node->ondisk;
1116 cluster = cursor->node->cluster;
1117 }
1118
1119 /*
1120 * We are at a leaf, do a linear search of the key array.
1121 *
1122 * If we encounter a spike element type within the necessary
1123 * range we push into it.
1124 *
1125 * On success the index is set to the matching element and 0
1126 * is returned.
1127 *
1128 * On failure the index is set to the insertion point and ENOENT
1129 * is returned.
1130 *
1131 * Boundaries are not stored in leaf nodes, so the index can wind
1132 * up to the left of element 0 (index == 0) or past the end of
1133 * the array (index == node->count).
1134 */
1135 KKASSERT (node->type == HAMMER_BTREE_TYPE_LEAF);
1136 KKASSERT(node->count <= HAMMER_BTREE_LEAF_ELMS);
1137
1138 for (i = 0; i < node->count; ++i) {
1139 elm = &node->elms[i];
1140
1141 r = hammer_btree_cmp(&cursor->key_beg, &elm->leaf.base);
1142
1143 if (hammer_debug_btree > 1)
1144 kprintf(" ELM %p %d r=%d\n", &node->elms[i], i, r);
1145
1146 if (elm->leaf.base.btype == HAMMER_BTREE_TYPE_SPIKE_BEG) {
1147 /*
1148 * SPIKE_BEG. Stop if we are to the left of the
1149 * spike begin element.
1150 *
1151 * If we are not the last element in the leaf continue
1152 * the loop looking for the SPIKE_END. If we are
1153 * the last element, however, then push into the
1154 * spike.
1155 *
1156 * A Spike demark on a delete_tid boundary must be
1157 * pushed into. An as-of search failure will force
1158 * an iteration.
1159 *
1160 * enospc must be reset because we have crossed a
1161 * cluster boundary.
1162 */
1163 if (r < -1)
1164 goto failed;
1165 if (i != node->count - 1)
1166 continue;
1167 panic("btree_search: illegal spike, no SPIKE_END "
1168 "in leaf node! %p\n", cursor->node);
1169 /*
1170 * XXX This is not currently legal, you can only
1171 * cursor_down() from a SPIKE_END element, otherwise
1172 * the cursor parent is pointing at the wrong element
1173 * for deletions.
1174 */
1175 if (cursor->flags & HAMMER_CURSOR_INCLUSTER)
1176 goto success;
1177 cursor->index = i;
1178 error = hammer_cursor_down(cursor);
1179 enospc = 0;
1180 if (error)
1181 goto done;
1182 goto new_cluster;
1183 }
1184 if (elm->leaf.base.btype == HAMMER_BTREE_TYPE_SPIKE_END) {
1185 /*
1186 * SPIKE_END. We can only hit this case if we are
1187 * greater or equal to SPIKE_BEG.
1188 *
1189 * If we are less than or equal to the SPIKE_END
1190 * we must push into it, otherwise continue the
1191 * search.
1192 *
1193 * enospc must be reset because we have crossed a
1194 * cluster boundary.
1195 */
1196 if (r > 0)
1197 continue;
1198 if (cursor->flags & HAMMER_CURSOR_INCLUSTER)
1199 goto success;
1200 cursor->index = i;
1201 error = hammer_cursor_down(cursor);
1202 enospc = 0;
1203 if (error)
1204 goto done;
1205 goto new_cluster;
1206 }
1207
1208 /*
1209 * We are at a record element. Stop if we've flipped past
1210 * key_beg, not counting the delete_tid test.
1211 */
1212 KKASSERT (elm->leaf.base.btype == HAMMER_BTREE_TYPE_RECORD);
1213
1214 if (r < -1)
1215 goto failed;
1216 if (r > 0)
1217 continue;
1218
1219 /*
1220 * Check our as-of timestamp against the element.
1221 */
1222 if (r == -1) {
1223 if ((cursor->flags & HAMMER_CURSOR_ASOF) == 0)
1224 goto failed;
1225 if (hammer_btree_chkts(cursor->asof,
1226 &node->elms[i].base) != 0) {
1227 continue;
1228 }
1229 }
1230success:
1231 cursor->index = i;
1232 error = 0;
1233 if (hammer_debug_btree)
1234 kprintf("SEARCH-L %p:%d (SUCCESS)\n", cursor->node, i);
1235 goto done;
1236 }
1237
1238 /*
1239 * The search failed but due to the way we handle delete_tid we may
1240 * have to iterate. Here is why: If a center separator differs
1241 * only by its delete_tid as shown below and we are looking for, say,
1242 * a record with an as-of TID of 12, we will traverse LEAF1. LEAF1
1243 * might contain element 11 and thus not match, and LEAF2 might
1244 * contain element 17 which we DO want to match (i.e. that record
1245 * will be visible to us).
1246 *
1247 * delete_tid: 10 15 20
1248 * L1 L2
1249 *
1250 *
1251 * It's easiest to adjust delete_tid and to tell the caller to
1252 * retry, because this may be an insertion search and require
1253 * more than just a simple iteration.
1254 */
1255 if ((flags & (HAMMER_CURSOR_INSERT|HAMMER_CURSOR_ASOF)) ==
1256 HAMMER_CURSOR_ASOF &&
1257 cursor->key_beg.obj_id == cursor->right_bound->obj_id &&
1258 cursor->key_beg.rec_type == cursor->right_bound->rec_type &&
1259 cursor->key_beg.key == cursor->right_bound->key &&
1260 (cursor->right_bound->delete_tid == 0 ||
1261 cursor->key_beg.delete_tid < cursor->right_bound->delete_tid)
1262 ) {
1263 kprintf("MUST ITERATE\n");
1264 cursor->key_beg.delete_tid = cursor->right_bound->delete_tid;
1265 return(EAGAIN);
1266 }
1267
1268failed:
1269 if (hammer_debug_btree) {
1270 kprintf("SEARCH-L %p:%d (FAILED)\n",
1271 cursor->node, i);
1272 }
1273
1274 /*
1275 * No exact match was found, i is now at the insertion point.
1276 *
1277 * If inserting split a full leaf before returning. This
1278 * may have the side effect of adjusting cursor->node and
1279 * cursor->index.
1280 */
1281 cursor->index = i;
1282 if ((flags & HAMMER_CURSOR_INSERT) && btree_node_is_full(node)) {
1283 error = btree_split_leaf(cursor);
1284 if (error) {
1285 if (error != ENOSPC)
1286 goto done;
1287 enospc = 1;
1288 flags &= ~HAMMER_CURSOR_INSERT;
1289 }
1290 /*
1291 * reload stale pointers
1292 */
1293 /* NOT USED
1294 i = cursor->index;
1295 node = &cursor->node->internal;
1296 */
1297 }
1298
1299 /*
1300 * We reached a leaf but did not find the key we were looking for.
1301 * If this is an insert we will be properly positioned for an insert
1302 * (ENOENT) or spike (ENOSPC) operation.
1303 */
1304 error = enospc ? ENOSPC : ENOENT;
1305done:
1306 return(error);
1307}
1308
1309
1310/************************************************************************
1311 * SPLITTING AND MERGING *
1312 ************************************************************************
1313 *
1314 * These routines do all the dirty work required to split and merge nodes.
1315 */
1316
1317/*
1318 * Split an internal node into two nodes and move the separator at the split
1319 * point to the parent.
1320 *
1321 * (cursor->node, cursor->index) indicates the element the caller intends
1322 * to push into. We will adjust node and index if that element winds
1323 * up in the split node.
1324 *
1325 * If we are at the root of a cluster a new root must be created with two
1326 * elements, one pointing to the original root and one pointing to the
1327 * newly allocated split node.
1328 *
1329 * NOTE! Being at the root of a cluster is different from being at the
1330 * root of the root cluster. cursor->parent will not be NULL and
1331 * cursor->node->ondisk.parent must be tested against 0. Theoretically
1332 * we could propagate the algorithm into the parent and deal with multiple
1333 * 'roots' in the cluster header, but it's easier not to.
1334 */
1335static
1336int
1337btree_split_internal(hammer_cursor_t cursor)
1338{
1339 hammer_node_ondisk_t ondisk;
1340 hammer_node_t node;
1341 hammer_node_t parent;
1342 hammer_node_t new_node;
1343 hammer_btree_elm_t elm;
1344 hammer_btree_elm_t parent_elm;
1345 int parent_index;
1346 int made_root;
1347 int split;
1348 int error;
1349 int i;
1350 const int esize = sizeof(*elm);
1351
1352 /*
1353 * We are splitting but elms[split] will be promoted to the parent,
1354 * leaving the right hand node with one less element. If the
1355 * insertion point will be on the left-hand side adjust the split
1356 * point to give the right hand side one additional node.
1357 */
1358 node = cursor->node;
1359 ondisk = node->ondisk;
1360 split = (ondisk->count + 1) / 2;
1361 if (cursor->index <= split)
1362 --split;
1363 error = 0;
1364
1365 /*
1366 * If we are at the root of the cluster, create a new root node with
1367 * 1 element and split normally. Avoid making major modifications
1368 * until we know the whole operation will work.
1369 *
1370 * The root of the cluster is different from the root of the root
1371 * cluster. Use the node's on-disk structure's parent offset to
1372 * detect the case.
1373 */
1374 if (ondisk->parent == 0) {
1375 parent = hammer_alloc_btree(node->cluster, &error);
1376 if (parent == NULL)
1377 return(error);
1378 hammer_lock_ex(&parent->lock);
1379 hammer_modify_node(parent);
1380 ondisk = parent->ondisk;
1381 ondisk->count = 1;
1382 ondisk->parent = 0;
1383 ondisk->type = HAMMER_BTREE_TYPE_INTERNAL;
1384 ondisk->elms[0].base = node->cluster->clu_btree_beg;
1385 ondisk->elms[0].base.btype = node->ondisk->type;
1386 ondisk->elms[0].internal.subtree_offset = node->node_offset;
1387 ondisk->elms[1].base = node->cluster->clu_btree_end;
1388 /* ondisk->elms[1].base.btype - not used */
1389 made_root = 1;
1390 parent_index = 0; /* index of current node in parent */
1391 } else {
1392 made_root = 0;
1393 parent = cursor->parent;
1394 parent_index = cursor->parent_index;
1395 KKASSERT(parent->cluster == node->cluster);
1396 }
1397
1398 /*
1399 * Split node into new_node at the split point.
1400 *
1401 * B O O O P N N B <-- P = node->elms[split]
1402 * 0 1 2 3 4 5 6 <-- subtree indices
1403 *
1404 * x x P x x
1405 * s S S s
1406 * / \
1407 * B O O O B B N N B <--- inner boundary points are 'P'
1408 * 0 1 2 3 4 5 6
1409 *
1410 */
1411 new_node = hammer_alloc_btree(node->cluster, &error);
1412 if (new_node == NULL) {
1413 if (made_root) {
1414 hammer_unlock(&parent->lock);
1415 parent->flags |= HAMMER_NODE_DELETED;
1416 hammer_rel_node(parent);
1417 }
1418 return(error);
1419 }
1420 hammer_lock_ex(&new_node->lock);
1421
1422 /*
1423 * Create the new node. P becomes the left-hand boundary in the
1424 * new node. Copy the right-hand boundary as well.
1425 *
1426 * elm is the new separator.
1427 */
1428 hammer_modify_node(new_node);
1429 hammer_modify_node(node);
1430 ondisk = node->ondisk;
1431 elm = &ondisk->elms[split];
1432 bcopy(elm, &new_node->ondisk->elms[0],
1433 (ondisk->count - split + 1) * esize);
1434 new_node->ondisk->count = ondisk->count - split;
1435 new_node->ondisk->parent = parent->node_offset;
1436 new_node->ondisk->type = HAMMER_BTREE_TYPE_INTERNAL;
1437 KKASSERT(ondisk->type == new_node->ondisk->type);
1438
1439 /*
1440 * Cleanup the original node. Elm (P) becomes the new boundary,
1441 * its subtree_offset was moved to the new node. If we had created
1442 * a new root its parent pointer may have changed.
1443 */
1444 elm->internal.subtree_offset = 0;
1445 ondisk->count = split;
1446
1447 /*
1448 * Insert the separator into the parent, fixup the parent's
1449 * reference to the original node, and reference the new node.
1450 * The separator is P.
1451 *
1452 * Remember that base.count does not include the right-hand boundary.
1453 */
1454 hammer_modify_node(parent);
1455 ondisk = parent->ondisk;
1456 KKASSERT(ondisk->count != HAMMER_BTREE_INT_ELMS);
1457 parent_elm = &ondisk->elms[parent_index+1];
1458 bcopy(parent_elm, parent_elm + 1,
1459 (ondisk->count - parent_index) * esize);
1460 parent_elm->internal.base = elm->base; /* separator P */
1461 parent_elm->internal.base.btype = new_node->ondisk->type;
1462 parent_elm->internal.subtree_offset = new_node->node_offset;
1463 ++ondisk->count;
1464
1465 /*
1466 * The children of new_node need their parent pointer set to new_node.
1467 */
1468 for (i = 0; i < new_node->ondisk->count; ++i) {
1469 elm = &new_node->ondisk->elms[i];
1470 error = btree_set_parent(new_node, elm);
1471 if (error) {
1472 panic("btree_split_internal: btree-fixup problem");
1473 }
1474 }
1475
1476 /*
1477 * The cluster's root pointer may have to be updated.
1478 */
1479 if (made_root) {
1480 hammer_modify_cluster(node->cluster);
1481 node->cluster->ondisk->clu_btree_root = parent->node_offset;
1482 node->ondisk->parent = parent->node_offset;
1483 if (cursor->parent) {
1484 hammer_unlock(&cursor->parent->lock);
1485 hammer_rel_node(cursor->parent);
1486 }
1487 cursor->parent = parent; /* lock'd and ref'd */
1488 }
1489
1490
1491 /*
1492 * Ok, now adjust the cursor depending on which element the original
1493 * index was pointing at. If we are >= the split point the push node
1494 * is now in the new node.
1495 *
1496 * NOTE: If we are at the split point itself we cannot stay with the
1497 * original node because the push index will point at the right-hand
1498 * boundary, which is illegal.
1499 *
1500 * NOTE: The cursor's parent or parent_index must be adjusted for
1501 * the case where a new parent (new root) was created, and the case
1502 * where the cursor is now pointing at the split node.
1503 */
1504 if (cursor->index >= split) {
1505 cursor->parent_index = parent_index + 1;
1506 cursor->index -= split;
1507 hammer_unlock(&cursor->node->lock);
1508 hammer_rel_node(cursor->node);
1509 cursor->node = new_node; /* locked and ref'd */
1510 } else {
1511 cursor->parent_index = parent_index;
1512 hammer_unlock(&new_node->lock);
1513 hammer_rel_node(new_node);
1514 }
1515
1516 /*
1517 * Fixup left and right bounds
1518 */
1519 parent_elm = &parent->ondisk->elms[cursor->parent_index];
1520 cursor->left_bound = &parent_elm[0].internal.base;
1521 cursor->right_bound = &parent_elm[1].internal.base;
1522 KKASSERT(hammer_btree_cmp(cursor->left_bound,
1523 &cursor->node->ondisk->elms[0].internal.base) <= 0);
1524 KKASSERT(hammer_btree_cmp(cursor->right_bound,
1525 &cursor->node->ondisk->elms[cursor->node->ondisk->count].internal.base) >= 0);
1526
1527 return (0);
1528}
1529
1530/*
1531 * Same as the above, but splits a full leaf node.
1532 */
1533static
1534int
1535btree_split_leaf(hammer_cursor_t cursor)
1536{
1537 hammer_node_ondisk_t ondisk;
1538 hammer_node_t parent;
1539 hammer_node_t leaf;
1540 hammer_node_t new_leaf;
1541 hammer_btree_elm_t elm;
1542 hammer_btree_elm_t parent_elm;
1543 hammer_base_elm_t mid_boundary;
1544 int parent_index;
1545 int made_root;
1546 int split;
1547 int error;
1548 int i;
1549 const size_t esize = sizeof(*elm);
1550
1551 /*
1552 * Calculate the split point. If the insertion point will be on
1553 * the left-hand side adjust the split point to give the right
1554 * hand side one additional node.
1555 *
1556 * Spikes are made up of two leaf elements which cannot be
1557 * safely split.
1558 */
1559 leaf = cursor->node;
1560 ondisk = leaf->ondisk;
1561 split = (ondisk->count + 1) / 2;
1562 if (cursor->index <= split)
1563 --split;
1564 error = 0;
1565
1566 elm = &ondisk->elms[split];
1567 if (elm->leaf.base.btype == HAMMER_BTREE_TYPE_SPIKE_END) {
1568 KKASSERT(split &&
1569 elm[-1].leaf.base.btype == HAMMER_BTREE_TYPE_SPIKE_BEG);
1570 --split;
1571 }
1572
1573 /*
1574 * If we are at the root of the tree, create a new root node with
1575 * 1 element and split normally. Avoid making major modifications
1576 * until we know the whole operation will work.
1577 */
1578 if (ondisk->parent == 0) {
1579 parent = hammer_alloc_btree(leaf->cluster, &error);
1580 if (parent == NULL)
1581 return(error);
1582 hammer_lock_ex(&parent->lock);
1583 hammer_modify_node(parent);
1584 ondisk = parent->ondisk;
1585 ondisk->count = 1;
1586 ondisk->parent = 0;
1587 ondisk->type = HAMMER_BTREE_TYPE_INTERNAL;
1588 ondisk->elms[0].base = leaf->cluster->clu_btree_beg;
1589 ondisk->elms[0].base.btype = leaf->ondisk->type;
1590 ondisk->elms[0].internal.subtree_offset = leaf->node_offset;
1591 ondisk->elms[1].base = leaf->cluster->clu_btree_end;
1592 /* ondisk->elms[1].base.btype = not used */
1593 made_root = 1;
1594 parent_index = 0; /* insertion point in parent */
1595 } else {
1596 made_root = 0;
1597 parent = cursor->parent;
1598 parent_index = cursor->parent_index;
1599 KKASSERT(parent->cluster == leaf->cluster);
1600 }
1601
1602 /*
1603 * Split leaf into new_leaf at the split point. Select a separator
1604 * value in-between the two leaves but with a bias towards the right
1605 * leaf since comparisons use an 'elm >= separator' inequality.
1606 *
1607 * L L L L L L L L
1608 *
1609 * x x P x x
1610 * s S S s
1611 * / \
1612 * L L L L L L L L
1613 */
1614 new_leaf = hammer_alloc_btree(leaf->cluster, &error);
1615 if (new_leaf == NULL) {
1616 if (made_root) {
1617 hammer_unlock(&parent->lock);
1618 parent->flags |= HAMMER_NODE_DELETED;
1619 hammer_rel_node(parent);
1620 }
1621 return(error);
1622 }
1623 hammer_lock_ex(&new_leaf->lock);
1624
1625 /*
1626 * Create the new node. P becomes the left-hand boundary in the
1627 * new node. Copy the right-hand boundary as well.
1628 */
1629 hammer_modify_node(leaf);
1630 hammer_modify_node(new_leaf);
1631 ondisk = leaf->ondisk;
1632 elm = &ondisk->elms[split];
1633 bcopy(elm, &new_leaf->ondisk->elms[0], (ondisk->count - split) * esize);
1634 new_leaf->ondisk->count = ondisk->count - split;
1635 new_leaf->ondisk->parent = parent->node_offset;
1636 new_leaf->ondisk->type = HAMMER_BTREE_TYPE_LEAF;
1637 KKASSERT(ondisk->type == new_leaf->ondisk->type);
1638
1639 /*
1640 * Cleanup the original node. Because this is a leaf node and
1641 * leaf nodes do not have a right-hand boundary, there
1642 * aren't any special edge cases to clean up. We just fixup the
1643 * count.
1644 */
1645 ondisk->count = split;
1646
1647 /*
1648 * Insert the separator into the parent, fixup the parent's
1649 * reference to the original node, and reference the new node.
1650 * The separator is P.
1651 *
1652 * Remember that base.count does not include the right-hand boundary.
1653 * We are copying parent_index+1 to parent_index+2, not +0 to +1.
1654 */
1655 hammer_modify_node(parent);
1656 ondisk = parent->ondisk;
1657 KKASSERT(ondisk->count != HAMMER_BTREE_INT_ELMS);
1658 parent_elm = &ondisk->elms[parent_index+1];
1659 bcopy(parent_elm, parent_elm + 1,
1660 (ondisk->count - parent_index) * esize);
1661 hammer_make_separator(&elm[-1].base, &elm[0].base, &parent_elm->base);
1662 parent_elm->internal.base.btype = new_leaf->ondisk->type;
1663 parent_elm->internal.subtree_offset = new_leaf->node_offset;
1664 mid_boundary = &parent_elm->base;
1665 ++ondisk->count;
1666
1667 /*
1668 * The children of new_leaf need their parent pointer set to new_leaf.
1669 *
1670 * The leaf's elements are either TYPE_RECORD or TYPE_SPIKE_*. Only
1671 * elements of BTREE_TYPE_SPIKE_END really require any action.
1672 */
1673 for (i = 0; i < new_leaf->ondisk->count; ++i) {
1674 elm = &new_leaf->ondisk->elms[i];
1675 error = btree_set_parent(new_leaf, elm);
1676 if (error) {
1677 panic("btree_split_internal: btree-fixup problem");
1678 }
1679 }
1680
1681 /*
1682 * The cluster's root pointer may have to be updated.
1683 */
1684 if (made_root) {
1685 hammer_modify_cluster(leaf->cluster);
1686 leaf->cluster->ondisk->clu_btree_root = parent->node_offset;
1687 leaf->ondisk->parent = parent->node_offset;
1688 if (cursor->parent) {
1689 hammer_unlock(&cursor->parent->lock);
1690 hammer_rel_node(cursor->parent);
1691 }
1692 cursor->parent = parent; /* lock'd and ref'd */
1693 }
1694
1695 /*
1696 * Ok, now adjust the cursor depending on which element the original
1697 * index was pointing at. If we are >= the split point the push node
1698 * is now in the new node.
1699 *
1700 * NOTE: If we are at the split point itself we need to select the
1701 * old or new node based on where key_beg's insertion point will be.
1702 * If we pick the wrong side the inserted element will wind up in
1703 * the wrong leaf node and outside that node's bounds.
1704 */
1705 if (cursor->index > split ||
1706 (cursor->index == split &&
1707 hammer_btree_cmp(&cursor->key_beg, mid_boundary) >= 0)) {
1708 cursor->parent_index = parent_index + 1;
1709 cursor->index -= split;
1710 hammer_unlock(&cursor->node->lock);
1711 hammer_rel_node(cursor->node);
1712 cursor->node = new_leaf;
1713 } else {
1714 cursor->parent_index = parent_index;
1715 hammer_unlock(&new_leaf->lock);
1716 hammer_rel_node(new_leaf);
1717 }
1718
1719 /*
1720 * Fixup left and right bounds
1721 */
1722 parent_elm = &parent->ondisk->elms[cursor->parent_index];
1723 cursor->left_bound = &parent_elm[0].internal.base;
1724 cursor->right_bound = &parent_elm[1].internal.base;
1725 KKASSERT(hammer_btree_cmp(cursor->left_bound,
1726 &cursor->node->ondisk->elms[0].leaf.base) <= 0);
1727 KKASSERT(hammer_btree_cmp(cursor->right_bound,
1728 &cursor->node->ondisk->elms[cursor->node->ondisk->count-1].leaf.base) > 0);
1729
1730 return (0);
1731}
1732
1733/*
1734 * Attempt to remove the empty B-Tree node at (cursor->node). Returns 0
1735 * on success, EAGAIN if we could not acquire the necessary locks, or some
1736 * other error. This node can be a leaf node or an internal node.
1737 *
1738 * On return the cursor may end up pointing at an internal node, suitable
1739 * for further iteration but not for an immediate insertion or deletion.
1740 *
1741 * cursor->node may be an internal node or a leaf node.
1742 *
1743 * NOTE: If cursor->node has one element, it is the parent trying to delete
1744 * that element; make sure cursor->index is properly adjusted on success.
1745 */
1746int
1747btree_remove(hammer_cursor_t cursor)
1748{
1749 hammer_node_ondisk_t ondisk;
1750 hammer_btree_elm_t elm;
1751 hammer_node_t node;
1752 hammer_node_t save;
1753 hammer_node_t parent;
1754 const int esize = sizeof(*elm);
1755 int error;
1756
1757 /*
1758 * If we are at the root of the cluster we must be able to
1759 * successfully delete the HAMMER_BTREE_SPIKE_* leaf elements in
1760 * the parent in order to be able to destroy the cluster.
1761 */
1762 node = cursor->node;
1763
1764 if (node->ondisk->parent == 0) {
1765 hammer_modify_node(node);
1766 ondisk = node->ondisk;
1767 ondisk->type = HAMMER_BTREE_TYPE_LEAF;
1768 ondisk->count = 0;
1769 cursor->index = 0;
1770 error = 0;
1771
1772 /*
1773 * When trying to delete a cluster retain a lock on the
1774 * cluster's root node (node) to prevent insertions while
1775 * we try to undo the spike.
1776 */
1777 if ((parent = cursor->parent) != NULL) {
1778 save = node;
1779 hammer_ref_node(save);
1780 hammer_lock_ex(&save->lock);
1781 error = hammer_cursor_up(cursor, 1);
1782 if (error) {
1783 kprintf("BTREE_REMOVE: Cannot delete cluster\n");
1784 Debugger("BTREE_REMOVE");
1785 if (error == EAGAIN)
1786 error = 0;
1787 } else {
1788 /*
1789 * cursor->node is now the leaf in the parent
1790 * cluster containing the spike elements.
1791 *
1792 * The cursor should be pointing at the
1793 * SPIKE_END element.
1794 *
1795 * Remove the spike elements and recurse
1796 * if the leaf becomes empty.
1797 */
1798 node = cursor->node;
1799 hammer_modify_node(node);
1800 ondisk = node->ondisk;
1801 KKASSERT(cursor->index > 0);
1802 --cursor->index;
1803 elm = &ondisk->elms[cursor->index];
1804 KKASSERT(elm[0].leaf.base.btype ==
1805 HAMMER_BTREE_TYPE_SPIKE_BEG);
1806 KKASSERT(elm[1].leaf.base.btype ==
1807 HAMMER_BTREE_TYPE_SPIKE_END);
1808 bcopy(elm + 2, elm, (ondisk->count -
1809 cursor->index - 2) * esize);
1810 ondisk->count -= 2;
1811 if (ondisk->count == 0)
1812 error = btree_remove(cursor);
1813 hammer_flush_node(save);
1814 save->flags |= HAMMER_NODE_DELETED;
1815 }
1816 hammer_unlock(&save->lock);
1817 hammer_rel_node(save);
1818 }
1819 return(error);
1820 }
1821
1822 /*
1823 * Zero-out the parent's reference to the child and flag the
1824 * child for destruction. This ensures that the child is not
1825 * reused while other references to it exist.
1826 */
1827 parent = cursor->parent;
1828 hammer_modify_node(parent);
1829 ondisk = parent->ondisk;
1830 KKASSERT(ondisk->type == HAMMER_BTREE_TYPE_INTERNAL);
1831 elm = &ondisk->elms[cursor->parent_index];
1832 KKASSERT(elm->internal.subtree_offset == node->node_offset);
1833 elm->internal.subtree_offset = 0;
1834
1835 hammer_flush_node(node);
1836 node->flags |= HAMMER_NODE_DELETED;
1837
1838 /*
1839 * If the parent would otherwise not become empty we can physically
1840 * remove the zero'd element. Note however that in order to
1841 * guarantee a valid cursor we still need to be able to cursor up
1842 * because we no longer have a node.
1843 *
1844 * This collapse will change the parent's boundary elements, making
1845 * them wider. The new boundaries are recursively corrected in
1846 * btree_search().
1847 *
1848 * XXX we can theoretically recalculate the midpoint but there isn't
1849 * much of a reason to do it.
1850 */
1851 error = hammer_cursor_up(cursor, 1);
1852 if (error) {
1853 kprintf("BTREE_REMOVE: Cannot lock parent, skipping\n");
1854 Debugger("BTREE_REMOVE");
1855 return (0);
1856 }
1857
1858 /*
1859 * Remove the internal element from the parent. The bcopy must
1860 * include the right boundary element.
1861 */
1862 KKASSERT(parent == cursor->node && ondisk == parent->ondisk);
1863 node = parent;
1864 parent = NULL;
1865 /* ondisk is node's ondisk */
1866 /* elm is node's element */
1867
1868 KKASSERT(ondisk->count > 0);
1869 bcopy(&elm[1], &elm[0], (ondisk->count - cursor->index) * esize);
1870 --ondisk->count;
1871 if (ondisk->count == 0)
1872 error = EAGAIN;
1873 return(error);
1874}
1875
1876/*
1877 * The element (elm) has been moved to a new internal node (node).
1878 *
1879 * If the element represents a pointer to an internal node that node's
1880 * parent must be adjusted to the element's new location.
1881 *
1882 * If the element represents a spike the target cluster's header must
1883 * be adjusted to point to the element's new location. This only
1884 * applies to HAMMER_SPIKE_END.
1885 */
1886static
1887int
1888btree_set_parent(hammer_node_t node, hammer_btree_elm_t elm)
1889{
1890 hammer_volume_t volume;
1891 hammer_cluster_t cluster;
1892 hammer_node_t child;
1893 int error;
1894
1895 error = 0;
1896
1897 switch(elm->base.btype) {
1898 case HAMMER_BTREE_TYPE_INTERNAL:
1899 case HAMMER_BTREE_TYPE_LEAF:
1900 child = hammer_get_node(node->cluster,
1901 elm->internal.subtree_offset, &error);
1902 if (error == 0) {
1903 hammer_modify_node(child);
1904 hammer_lock_ex(&child->lock);
1905 child->ondisk->parent = node->node_offset;
1906 hammer_unlock(&child->lock);
1907 hammer_rel_node(child);
1908 }
1909 break;
1910 case HAMMER_BTREE_TYPE_SPIKE_END:
1911 volume = hammer_get_volume(node->cluster->volume->hmp,
1912 elm->leaf.spike_vol_no, &error);
1913 if (error)
1914 break;
1915 cluster = hammer_get_cluster(volume, elm->leaf.spike_clu_no,
1916 &error, 0);
1917 hammer_rel_volume(volume, 0);
1918 if (error)
1919 break;
1920 hammer_modify_cluster(cluster);
1921 hammer_lock_ex(&cluster->io.lock);
1922 cluster->ondisk->clu_btree_parent_offset = node->node_offset;
1923 hammer_unlock(&cluster->io.lock);
1924 KKASSERT(cluster->ondisk->clu_btree_parent_clu_no ==
1925 node->cluster->clu_no);
1926 KKASSERT(cluster->ondisk->clu_btree_parent_vol_no ==
1927 node->cluster->volume->vol_no);
1928 hammer_rel_cluster(cluster, 0);
1929 break;
1930 default:
1931 break;
1932 }
1933 return(error);
1934}
1935
1936/************************************************************************
1937 * MISCELLANEOUS SUPPORT *
1938 ************************************************************************/
1939
1940/*
1941 * Compare two B-Tree elements, return -N, 0, or +N (i.e. similar to strcmp).
1942 *
1943 * Note that for this particular function a return value of -1, 0, or +1
1944 * can denote a match if delete_tid is otherwise discounted. A delete_tid
1945 * of zero is considered to be 'infinity' in comparisons.
1946 *
1947 * See also hammer_rec_rb_compare() and hammer_rec_cmp() in hammer_object.c.
1948 */
1949int
1950hammer_btree_cmp(hammer_base_elm_t key1, hammer_base_elm_t key2)
1951{
1952 if (key1->obj_id < key2->obj_id)
1953 return(-4);
1954 if (key1->obj_id > key2->obj_id)
1955 return(4);
1956
1957 if (key1->rec_type < key2->rec_type)
1958 return(-3);
1959 if (key1->rec_type > key2->rec_type)
1960 return(3);
1961
1962 if (key1->key < key2->key)
1963 return(-2);
1964 if (key1->key > key2->key)
1965 return(2);
1966
1967 /*
1968 * A delete_tid of zero indicates a record which has not been
1969 * deleted yet and must be considered to have a value of positive
1970 * infinity.
1971 */
1972 if (key1->delete_tid == 0) {
1973 if (key2->delete_tid == 0)
1974 return(0);
1975 return(1);
1976 }
1977 if (key2->delete_tid == 0)
1978 return(-1);
1979 if (key1->delete_tid < key2->delete_tid)
1980 return(-1);
1981 if (key1->delete_tid > key2->delete_tid)
1982 return(1);
1983 return(0);
1984}
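/*
 * Summary of the return magnitudes above, for illustration:
 *
 *	obj_id differs			-> +/- 4
 *	rec_type differs		-> +/- 3
 *	key differs			-> +/- 2
 *	only delete_tid differs		-> +/- 1 (0 sorts as infinity)
 *	all fields match		->  0
 */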
1985
1986/*
1987 * Test a timestamp against an element to determine whether the
1988 * element is visible. A timestamp of 0 means 'infinity'.
1989 */
1990int
1991hammer_btree_chkts(hammer_tid_t asof, hammer_base_elm_t base)
1992{
1993 if (asof == 0) {
1994 if (base->delete_tid)
1995 return(1);
1996 return(0);
1997 }
1998 if (asof < base->create_tid)
1999 return(-1);
2000 if (base->delete_tid && asof >= base->delete_tid)
2001 return(1);
2002 return(0);
2003}
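/*
 * Example with illustrative values: an element with create_tid 100 and
 * delete_tid 200 yields -1 for asof < 100, 0 for asof in [100, 200), and
 * 1 for asof >= 200.  With delete_tid 0 the element remains visible for
 * any asof >= 100, as well as for the special asof of 0.
 */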
2004
2005/*
2006 * Create a separator halfway between key1 and key2. For fields just
2007 * one unit apart, the separator will match key2. key1 is on the left-hand
2008 * side and key2 is on the right-hand side.
2009 *
2010 * delete_tid has to be special cased because a value of 0 represents
2011 * infinity, and records with a delete_tid of 0 can be replaced with
2012 * a non-zero delete_tid when deleted and must maintain their proper
2013 * (as in the same) position in the B-Tree.
2014 */
2015#define MAKE_SEPARATOR(key1, key2, dest, field) \
2016 dest->field = key1->field + ((key2->field - key1->field + 1) >> 1);
2017
2018static void
2019hammer_make_separator(hammer_base_elm_t key1, hammer_base_elm_t key2,
2020 hammer_base_elm_t dest)
2021{
2022 bzero(dest, sizeof(*dest));
2023 MAKE_SEPARATOR(key1, key2, dest, obj_id);
2024 MAKE_SEPARATOR(key1, key2, dest, rec_type);
2025 MAKE_SEPARATOR(key1, key2, dest, key);
2026
2027 if (key1->obj_id == key2->obj_id &&
2028 key1->rec_type == key2->rec_type &&
2029 key1->key == key2->key) {
2030 if (key1->delete_tid == 0) {
2031 /*
2032 * key1 cannot be on the left hand side if everything
2033 * matches but it has an infinite delete_tid!
2034 */
2035 panic("hammer_make_separator: illegal delete_tid");
2036 } else if (key2->delete_tid == 0) {
2037 dest->delete_tid = key1->delete_tid + 1;
2038 } else {
2039 MAKE_SEPARATOR(key1, key2, dest, delete_tid);
2040 }
2041 } else {
2042 dest->delete_tid = 0;
2043 }
2044}
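/*
 * Worked example with illustrative values: for key1->key = 0x10 and
 * key2->key = 0x20 the macro computes 0x10 + ((0x20 - 0x10 + 1) >> 1) = 0x18.
 * For adjacent values (5 and 6) it computes 5 + ((6 - 5 + 1) >> 1) = 6,
 * collapsing onto key2 as noted above.  delete_tid is only split this way
 * when obj_id, rec_type, and key all match; otherwise it is forced to 0
 * (infinity).
 */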
2045
2046/*
2047 * This adjusts a right-hand key from being exclusive to being inclusive.
2048 *
2049 * A delete_tid of 0 represents infinity. Decrementing it results in
2050 * (u_int64_t)-1 which is the largest value possible prior to infinity.
2051 */
2052void
2053hammer_make_base_inclusive(hammer_base_elm_t key)
2054{
2055 --key->delete_tid;
2056}
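/*
 * E.g. a right-hand boundary with delete_tid 0 becomes 0xffffffffffffffff
 * and one with delete_tid 100 becomes 99 (illustrative values), so a
 * previously exclusive upper bound can be compared as an inclusive key.
 */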
2057
2058#undef MAKE_SEPARATOR
2059
2060/*
2061 * Return whether a generic internal or leaf node is full
2062 */
2063static int
2064btree_node_is_full(hammer_node_ondisk_t node)
2065{
2066 switch(node->type) {
2067 case HAMMER_BTREE_TYPE_INTERNAL:
2068 if (node->count == HAMMER_BTREE_INT_ELMS)
2069 return(1);
2070 break;
2071 case HAMMER_BTREE_TYPE_LEAF:
2072 if (node->count == HAMMER_BTREE_LEAF_ELMS)
2073 return(1);
2074 break;
2075 default:
2076 panic("illegal btree subtype");
2077 }
2078 return(0);
2079}
2080
2081#if 0
2082/*
2083 * Return whether a generic internal or leaf node is almost full. This
2084 * routine is used as a helper for search insertions to guarentee at
2085 * least 2 available slots in the internal node(s) leading up to a leaf,
2086 * so hammer_btree_insert_cluster() will function properly.
2087 */
2088static int
2089btree_node_is_almost_full(hammer_node_ondisk_t node)
2090{
2091 switch(node->type) {
2092 case HAMMER_BTREE_TYPE_INTERNAL:
2093 if (node->count > HAMMER_BTREE_INT_ELMS - 2)
2094 return(1);
2095 break;
2096 case HAMMER_BTREE_TYPE_LEAF:
2097 if (node->count > HAMMER_BTREE_LEAF_ELMS - 2)
2098 return(1);
2099 break;
2100 default:
2101 panic("illegal btree subtype");
2102 }
2103 return(0);
2104}
2105#endif
2106
2107#if 0
2108static int
2109btree_max_elements(u_int8_t type)
2110{
2111 if (type == HAMMER_BTREE_TYPE_LEAF)
2112 return(HAMMER_BTREE_LEAF_ELMS);
2113 if (type == HAMMER_BTREE_TYPE_INTERNAL)
2114 return(HAMMER_BTREE_INT_ELMS);
2115 panic("btree_max_elements: bad type %d\n", type);
2116}
2117#endif
2118
2119void
2120hammer_print_btree_node(hammer_node_ondisk_t ondisk)
2121{
2122 hammer_btree_elm_t elm;
2123 int i;
2124
2125 kprintf("node %p count=%d parent=%d type=%c\n",
2126 ondisk, ondisk->count, ondisk->parent, ondisk->type);
2127
2128 /*
2129 * Dump both boundary elements if an internal node
2130 */
2131 if (ondisk->type == HAMMER_BTREE_TYPE_INTERNAL) {
2132 for (i = 0; i <= ondisk->count; ++i) {
2133 elm = &ondisk->elms[i];
2134 hammer_print_btree_elm(elm, ondisk->type, i);
2135 }
2136 } else {
2137 for (i = 0; i < ondisk->count; ++i) {
2138 elm = &ondisk->elms[i];
2139 hammer_print_btree_elm(elm, ondisk->type, i);
2140 }
2141 }
2142}
2143
2144void
2145hammer_print_btree_elm(hammer_btree_elm_t elm, u_int8_t type, int i)
2146{
2147 kprintf(" %2d", i);
2148 kprintf("\tobjid = %016llx\n", elm->base.obj_id);
2149 kprintf("\tkey = %016llx\n", elm->base.key);
2150 kprintf("\tcreate_tid = %016llx\n", elm->base.create_tid);
2151 kprintf("\tdelete_tid = %016llx\n", elm->base.delete_tid);
2152 kprintf("\trec_type = %04x\n", elm->base.rec_type);
2153 kprintf("\tobj_type = %02x\n", elm->base.obj_type);
2154 kprintf("\tbtype = %02x (%c)\n",
2155 elm->base.btype,
2156 (elm->base.btype ? elm->base.btype : '?'));
2157
2158 switch(type) {
2159 case HAMMER_BTREE_TYPE_INTERNAL:
2160 kprintf("\tsubtree_off = %08x\n",
2161 elm->internal.subtree_offset);
2162 break;
2163 case HAMMER_BTREE_TYPE_SPIKE_BEG:
2164 case HAMMER_BTREE_TYPE_SPIKE_END:
2165 kprintf("\tspike_clu_no = %d\n", elm->leaf.spike_clu_no);
2166 kprintf("\tspike_vol_no = %d\n", elm->leaf.spike_vol_no);
2167 break;
2168 case HAMMER_BTREE_TYPE_RECORD:
2169 kprintf("\trec_offset = %08x\n", elm->leaf.rec_offset);
2170 kprintf("\tdata_offset = %08x\n", elm->leaf.data_offset);
2171 kprintf("\tdata_len = %08x\n", elm->leaf.data_len);
2172 kprintf("\tdata_crc = %08x\n", elm->leaf.data_crc);
2173 break;
2174 }
2175}