HAMMER VFS - Implement swapcache for HAMMER data in double_buffer mode
[dragonfly.git] / sys / vfs / hammer / hammer_btree.c
427e5fc6 1/*
b84de5af 2 * Copyright (c) 2007-2008 The DragonFly Project. All rights reserved.
427e5fc6
MD
3 *
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@backplane.com>
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
16 * distribution.
17 * 3. Neither the name of The DragonFly Project nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific, prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
5c8d05e2 34 * $DragonFly: src/sys/vfs/hammer/hammer_btree.c,v 1.76 2008/08/06 15:38:58 dillon Exp $
427e5fc6
MD
35 */
36
37/*
8cd0a023 38 * HAMMER B-Tree index
427e5fc6
MD
39 *
40 * HAMMER implements a modified B+Tree. In documentation this will
9944ae54 41 * simply be referred to as the HAMMER B-Tree. Basically a HAMMER B-Tree
427e5fc6
MD
42 * looks like a B+Tree (a B-Tree which stores its records only at the leaves
43 * of the tree), but adds two additional boundary elements which describe
44 * the left-most and right-most element a node is able to represent. In
8cd0a023 45 * other words, we have boundary elements at the two ends of a B-Tree node
427e5fc6
MD
46 * instead of sub-tree pointers.
47 *
8cd0a023 48 * A B-Tree internal node looks like this:
427e5fc6
MD
49 *
50 * B N N N N N N B <-- boundary and internal elements
51 * S S S S S S S <-- subtree pointers
52 *
8cd0a023 53 * A B-Tree leaf node basically looks like this:
427e5fc6
MD
54 *
55 L L L L L L L L <-- leaf elements
56 *
8cd0a023
MD
57 * The radix for an internal node is 1 less than a leaf but we get a
58 * number of significant benefits for our troubles.
427e5fc6 59 *
8cd0a023
MD
60 * The big benefit to using a B-Tree containing boundary information
61 * is that it is possible to cache pointers into the middle of the tree
62 * and not have to start searches, insertions, OR deletions at the root
63 * node. In particular, searches are able to progress in a definitive
64 * direction from any point in the tree without revisiting nodes. This
65 * greatly improves the efficiency of many operations, most especially
66 * record appends.
427e5fc6 67 *
8cd0a023
MD
68 * B-Trees also make the stacking of trees fairly straightforward.
69 *
fe7678ee
MD
70 * INSERTIONS: A search performed with the intention of doing
71 * an insert will guarantee that the terminal leaf node is not full by
72 * splitting full nodes. Splits occur top-down during the dive down the
73 * B-Tree.
74 *
75 * DELETIONS: A deletion makes no attempt to proactively balance the
f36a9737
MD
76 * tree and will recursively remove nodes that become empty. If a
77 * deadlock occurs a deletion may not be able to remove an empty leaf.
78 * Deletions never allow internal nodes to become empty (that would blow
79 * up the boundaries).
8cd0a023
MD
80 */
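/*
 * Illustrative sketch (not part of the build): a simplified model of the
 * two node layouts described above. The names and the radix below are
 * hypothetical stand-ins, not the real on-disk hammer_node_ondisk layout;
 * only the shape is meant to match the description.
 *
 *	#define DEMO_RADIX	8	// placeholder radix, not HAMMER's
 *
 *	// Internal node: elms[0] and elms[count] are the left and right
 *	// boundaries; elms[i].subtree covers [elms[i].key, elms[i+1].key).
 *	struct demo_internal {
 *		int	count;			// sub-trees in use
 *		struct {
 *			int64_t	key;		// boundary / separator
 *			int	subtree;	// child reference, -1 if none
 *		} elms[DEMO_RADIX + 1];		// one extra: right boundary
 *	};
 *
 *	// Leaf node: records only, no boundary elements are stored.
 *	struct demo_leaf {
 *		int	count;
 *		int64_t	recs[DEMO_RADIX];
 *	};
 */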
81#include "hammer.h"
82#include <sys/buf.h>
83#include <sys/buf2.h>
66325755 84
8cd0a023
MD
85static int btree_search(hammer_cursor_t cursor, int flags);
86static int btree_split_internal(hammer_cursor_t cursor);
87static int btree_split_leaf(hammer_cursor_t cursor);
46fe7ae1 88static int btree_remove(hammer_cursor_t cursor);
fe7678ee 89static int btree_node_is_full(hammer_node_ondisk_t node);
adf01747
MD
90static int hammer_btree_mirror_propagate(hammer_cursor_t cursor,
91 hammer_tid_t mirror_tid);
8cd0a023
MD
92static void hammer_make_separator(hammer_base_elm_t key1,
93 hammer_base_elm_t key2, hammer_base_elm_t dest);
4c038e17 94static void hammer_cursor_mirror_filter(hammer_cursor_t cursor);
66325755
MD
95
96/*
8cd0a023
MD
97 * Iterate records after a search. The cursor is iterated forwards past
98 * the current record until a record matching the key-range requirements
99 * is found. ENOENT is returned if the iteration goes past the ending
6a37e7e4 100 * key.
66325755 101 *
d26d0ae9
MD
102 * The iteration is inclusive of key_beg and can be inclusive or exclusive
103 * of key_end depending on whether HAMMER_CURSOR_END_INCLUSIVE is set.
66325755 104 *
eaeff70d 105 * When doing an as-of search (cursor->asof != 0), key_beg.create_tid
9582c7da 106 * may be modified by B-Tree functions.
d5530d22 107 *
8cd0a023 108 * cursor->key_beg may or may not be modified by this function during
d26d0ae9
MD
109 * the iteration. XXX future - in case of an inverted lock we may have
110 * to reinitiate the lookup and set key_beg to properly pick up where we
111 * left off.
6a37e7e4 112 *
6dc17446
MD
113 * If HAMMER_CURSOR_ITERATE_CHECK is set it is possible that the cursor
114 * was reverse indexed due to being moved to a parent while unlocked,
115 * and something else might have inserted an element outside the iteration
116 * range. When this case occurs the iterator just keeps iterating until
117 * it gets back into the iteration range (instead of asserting).
118 *
6a37e7e4 119 * NOTE! EDEADLK *CANNOT* be returned by this procedure.
66325755
MD
120 */
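/*
 * Minimal usage sketch (illustrative only) of the low-level scan loop
 * implied by the comments above. It assumes cursor->key_beg/key_end and
 * the END_INCLUSIVE/ASOF flags were already set up by the caller, and it
 * ignores the EDEADLK handling a real caller of the lookup path needs.
 *
 *	error = hammer_btree_first(cursor);
 *	while (error == 0) {
 *		// The current element is at cursor->node/cursor->index and
 *		// can be loaded with hammer_btree_extract(cursor, cursor->flags).
 *		...
 *		// ATEDISK is left set by hammer_btree_first(), so the next
 *		// call steps past the element we just processed.
 *		error = hammer_btree_iterate(cursor);
 *	}
 *	if (error == ENOENT)
 *		error = 0;		// ran off the end of the key range
 */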
121int
8cd0a023 122hammer_btree_iterate(hammer_cursor_t cursor)
66325755 123{
8cd0a023
MD
124 hammer_node_ondisk_t node;
125 hammer_btree_elm_t elm;
3e583440 126 hammer_mount_t hmp;
1d4077f7 127 int error = 0;
66325755
MD
128 int r;
129 int s;
130
131 /*
8cd0a023 132 * Skip past the current record
66325755 133 */
3e583440 134 hmp = cursor->trans->hmp;
8cd0a023 135 node = cursor->node->ondisk;
a89aec1b
MD
136 if (node == NULL)
137 return(ENOENT);
c0ade690
MD
138 if (cursor->index < node->count &&
139 (cursor->flags & HAMMER_CURSOR_ATEDISK)) {
66325755 140 ++cursor->index;
c0ade690 141 }
66325755 142
3e583440
MD
143 /*
144 * HAMMER can wind up being cpu-bound.
145 */
146 if (++hmp->check_yield > hammer_yield_check) {
147 hmp->check_yield = 0;
148 lwkt_user_yield();
149 }
150
151
8cd0a023
MD
152 /*
153 * Loop until an element is found or we are done.
154 */
66325755
MD
155 for (;;) {
156 /*
8cd0a023
MD
157 * We iterate up the tree and then index over one element
158 * while we are at the last element in the current node.
159 *
47197d71 160 * If we are at the root of the filesystem, cursor_up
8cd0a023
MD
161 * returns ENOENT.
162 *
66325755
MD
163 * XXX this could be optimized by storing the information in
164 * the parent reference.
195c19a1
MD
165 *
166 * XXX we can lose the node lock temporarily, this could mess
167 * up our scan.
66325755 168 */
47637bff 169 ++hammer_stats_btree_iterations;
3e583440 170 hammer_flusher_clean_loose_ios(hmp);
77fec802 171
8cd0a023 172 if (cursor->index == node->count) {
a84a197d
MD
173 if (hammer_debug_btree) {
174 kprintf("BRACKETU %016llx[%d] -> %016llx[%d] (td=%p)\n",
973c11b9 175 (long long)cursor->node->node_offset,
a84a197d 176 cursor->index,
973c11b9 177 (long long)(cursor->parent ? cursor->parent->node_offset : -1),
a84a197d
MD
178 cursor->parent_index,
179 curthread);
180 }
181 KKASSERT(cursor->parent == NULL || cursor->parent->ondisk->elms[cursor->parent_index].internal.subtree_offset == cursor->node->node_offset);
6a37e7e4 182 error = hammer_cursor_up(cursor);
8cd0a023
MD
183 if (error)
184 break;
46fe7ae1 185 /* reload stale pointer */
8cd0a023
MD
186 node = cursor->node->ondisk;
187 KKASSERT(cursor->index != node->count);
2f85fa4d
MD
188
189 /*
190 * If we are reblocking we want to return internal
1775b6a0
MD
191 * nodes. Note that the internal node will be
192 * returned multiple times, on each upward recursion
193 * from its children. The caller selects which
194 * revisit it cares about (usually first or last only).
2f85fa4d
MD
195 */
196 if (cursor->flags & HAMMER_CURSOR_REBLOCKING) {
197 cursor->flags |= HAMMER_CURSOR_ATEDISK;
198 return(0);
199 }
8cd0a023
MD
200 ++cursor->index;
201 continue;
66325755
MD
202 }
203
204 /*
d26d0ae9
MD
205 * Check internal or leaf element. Determine if the record
206 * at the cursor has gone beyond the end of our range.
66325755 207 *
47197d71 208 * We recurse down through internal nodes.
66325755 209 */
8cd0a023
MD
210 if (node->type == HAMMER_BTREE_TYPE_INTERNAL) {
211 elm = &node->elms[cursor->index];
c82af904 212
d26d0ae9
MD
213 r = hammer_btree_cmp(&cursor->key_end, &elm[0].base);
214 s = hammer_btree_cmp(&cursor->key_beg, &elm[1].base);
b3deaf57 215 if (hammer_debug_btree) {
2f85fa4d 216 kprintf("BRACKETL %016llx[%d] %016llx %02x %016llx lo=%02x %d (td=%p)\n",
973c11b9 217 (long long)cursor->node->node_offset,
eaeff70d 218 cursor->index,
973c11b9 219 (long long)elm[0].internal.base.obj_id,
b3deaf57 220 elm[0].internal.base.rec_type,
973c11b9 221 (long long)elm[0].internal.base.key,
2f85fa4d 222 elm[0].internal.base.localization,
a84a197d
MD
223 r,
224 curthread
b3deaf57 225 );
2f85fa4d 226 kprintf("BRACKETR %016llx[%d] %016llx %02x %016llx lo=%02x %d\n",
973c11b9 227 (long long)cursor->node->node_offset,
eaeff70d 228 cursor->index + 1,
973c11b9 229 (long long)elm[1].internal.base.obj_id,
b3deaf57 230 elm[1].internal.base.rec_type,
973c11b9 231 (long long)elm[1].internal.base.key,
2f85fa4d 232 elm[1].internal.base.localization,
b3deaf57
MD
233 s
234 );
235 }
236
d26d0ae9
MD
237 if (r < 0) {
238 error = ENOENT;
239 break;
66325755 240 }
fe7678ee
MD
241 if (r == 0 && (cursor->flags &
242 HAMMER_CURSOR_END_INCLUSIVE) == 0) {
d26d0ae9 243 error = ENOENT;
8cd0a023 244 break;
d26d0ae9 245 }
6a37e7e4
MD
246
247 /*
f36a9737 248 * Better not be zero
6a37e7e4 249 */
f36a9737
MD
250 KKASSERT(elm->internal.subtree_offset != 0);
251
6dc17446
MD
252 if (s <= 0) {
253 /*
254 * If running the mirror filter see if we
255 * can skip one or more entire sub-trees.
256 * If we can we return the internal node
257 * and the caller processes the skipped
258 * range (see mirror_read).
259 */
260 if (cursor->flags &
261 HAMMER_CURSOR_MIRROR_FILTERED) {
262 if (elm->internal.mirror_tid <
263 cursor->cmirror->mirror_tid) {
264 hammer_cursor_mirror_filter(cursor);
265 return(0);
266 }
c82af904 267 }
6dc17446
MD
268 } else {
269 /*
270 * Normally it would be impossible for the
271 * cursor to have gotten back-indexed,
272 * but it can happen if a node is deleted
273 * and the cursor is moved to its parent
274 * internal node. ITERATE_CHECK will be set.
275 */
276 KKASSERT(cursor->flags &
277 HAMMER_CURSOR_ITERATE_CHECK);
278 kprintf("hammer_btree_iterate: "
279 "DEBUG: Caught parent seek "
280 "in internal iteration\n");
c82af904
MD
281 }
282
f36a9737
MD
283 error = hammer_cursor_down(cursor);
284 if (error)
285 break;
286 KKASSERT(cursor->index == 0);
46fe7ae1
MD
287 /* reload stale pointer */
288 node = cursor->node->ondisk;
fe7678ee 289 continue;
d26d0ae9
MD
290 } else {
291 elm = &node->elms[cursor->index];
292 r = hammer_btree_cmp(&cursor->key_end, &elm->base);
b3deaf57 293 if (hammer_debug_btree) {
2f85fa4d 294 kprintf("ELEMENT %016llx:%d %c %016llx %02x %016llx lo=%02x %d\n",
973c11b9 295 (long long)cursor->node->node_offset,
eaeff70d
MD
296 cursor->index,
297 (elm[0].leaf.base.btype ?
298 elm[0].leaf.base.btype : '?'),
973c11b9 299 (long long)elm[0].leaf.base.obj_id,
b3deaf57 300 elm[0].leaf.base.rec_type,
973c11b9 301 (long long)elm[0].leaf.base.key,
2f85fa4d 302 elm[0].leaf.base.localization,
b3deaf57
MD
303 r
304 );
305 }
d26d0ae9
MD
306 if (r < 0) {
307 error = ENOENT;
308 break;
309 }
b33e2cc0
MD
310
311 /*
312 * We support both end-inclusive and
313 * end-exclusive searches.
314 */
315 if (r == 0 &&
316 (cursor->flags & HAMMER_CURSOR_END_INCLUSIVE) == 0) {
317 error = ENOENT;
318 break;
319 }
320
6dc17446
MD
321 /*
322 * If ITERATE_CHECK is set an unlocked cursor may
323 * have been moved to a parent and the iterate can
324 * happen upon elements that are not in the requested
325 * range.
326 */
327 if (cursor->flags & HAMMER_CURSOR_ITERATE_CHECK) {
328 s = hammer_btree_cmp(&cursor->key_beg,
329 &elm->base);
330 if (s > 0) {
331 kprintf("hammer_btree_iterate: "
332 "DEBUG: Caught parent seek "
333 "in leaf iteration\n");
334 ++cursor->index;
335 continue;
336 }
337 }
338 cursor->flags &= ~HAMMER_CURSOR_ITERATE_CHECK;
339
340 /*
341 * Return the element
342 */
fe7678ee
MD
343 switch(elm->leaf.base.btype) {
344 case HAMMER_BTREE_TYPE_RECORD:
345 if ((cursor->flags & HAMMER_CURSOR_ASOF) &&
346 hammer_btree_chkts(cursor->asof, &elm->base)) {
347 ++cursor->index;
348 continue;
349 }
1d4077f7 350 error = 0;
fe7678ee 351 break;
fe7678ee
MD
352 default:
353 error = EINVAL;
354 break;
d26d0ae9 355 }
fe7678ee
MD
356 if (error)
357 break;
66325755 358 }
46fe7ae1
MD
359 /*
360 * node pointer invalid after loop
361 */
66325755
MD
362
363 /*
d26d0ae9 364 * Return entry
66325755 365 */
b3deaf57
MD
366 if (hammer_debug_btree) {
367 int i = cursor->index;
368 hammer_btree_elm_t elm = &cursor->node->ondisk->elms[i];
2f85fa4d 369 kprintf("ITERATE %p:%d %016llx %02x %016llx lo=%02x\n",
b3deaf57 370 cursor->node, i,
973c11b9 371 (long long)elm->internal.base.obj_id,
b3deaf57 372 elm->internal.base.rec_type,
973c11b9 373 (long long)elm->internal.base.key,
2f85fa4d 374 elm->internal.base.localization
b3deaf57
MD
375 );
376 }
d26d0ae9 377 return(0);
427e5fc6 378 }
66325755 379 return(error);
427e5fc6
MD
380}
381
4c038e17
MD
382/*
383 * We hit an internal element that we could skip as part of a mirroring
384 * scan. Calculate the entire range being skipped.
385 *
386 * It is important to include any gaps between the parent's left_bound
387 * and the node's left_bound, and the same goes for the right side.
388 */
389static void
390hammer_cursor_mirror_filter(hammer_cursor_t cursor)
391{
392 struct hammer_cmirror *cmirror;
393 hammer_node_ondisk_t ondisk;
394 hammer_btree_elm_t elm;
395
396 ondisk = cursor->node->ondisk;
397 cmirror = cursor->cmirror;
398
399 /*
400 * Calculate the skipped range
401 */
402 elm = &ondisk->elms[cursor->index];
403 if (cursor->index == 0)
404 cmirror->skip_beg = *cursor->left_bound;
405 else
406 cmirror->skip_beg = elm->internal.base;
407 while (cursor->index < ondisk->count) {
408 if (elm->internal.mirror_tid >= cmirror->mirror_tid)
409 break;
410 ++cursor->index;
411 ++elm;
412 }
413 if (cursor->index == ondisk->count)
414 cmirror->skip_end = *cursor->right_bound;
415 else
416 cmirror->skip_end = elm->internal.base;
417
418 /*
419 * clip the returned result.
420 */
421 if (hammer_btree_cmp(&cmirror->skip_beg, &cursor->key_beg) < 0)
422 cmirror->skip_beg = cursor->key_beg;
423 if (hammer_btree_cmp(&cmirror->skip_end, &cursor->key_end) > 0)
424 cmirror->skip_end = cursor->key_end;
425}
426
32c90105
MD
427/*
428 * Iterate in the reverse direction. This is used by the pruning code to
429 * avoid overlapping records.
430 */
431int
432hammer_btree_iterate_reverse(hammer_cursor_t cursor)
433{
434 hammer_node_ondisk_t node;
435 hammer_btree_elm_t elm;
bb5add8c 436 hammer_mount_t hmp;
f75df937 437 int error = 0;
32c90105
MD
438 int r;
439 int s;
440
4c038e17
MD
441 /* mirror filtering not supported for reverse iteration */
442 KKASSERT ((cursor->flags & HAMMER_CURSOR_MIRROR_FILTERED) == 0);
443
32c90105
MD
444 /*
445 * Skip past the current record. For various reasons the cursor
446 * may end up set to -1 or set to point at the end of the current
447 * node. These cases must be addressed.
448 */
449 node = cursor->node->ondisk;
450 if (node == NULL)
451 return(ENOENT);
452 if (cursor->index != -1 &&
453 (cursor->flags & HAMMER_CURSOR_ATEDISK)) {
454 --cursor->index;
455 }
456 if (cursor->index == cursor->node->ondisk->count)
457 --cursor->index;
458
bb5add8c
MD
459 /*
460 * HAMMER can wind up being cpu-bound.
461 */
462 hmp = cursor->trans->hmp;
463 if (++hmp->check_yield > hammer_yield_check) {
464 hmp->check_yield = 0;
465 lwkt_user_yield();
466 }
467
32c90105
MD
468 /*
469 * Loop until an element is found or we are done.
470 */
471 for (;;) {
77fec802 472 ++hammer_stats_btree_iterations;
bb5add8c 473 hammer_flusher_clean_loose_ios(hmp);
77fec802 474
32c90105
MD
475 /*
476 * We iterate up the tree and then index over one element
477 * while we are at the last element in the current node.
32c90105
MD
478 */
479 if (cursor->index == -1) {
480 error = hammer_cursor_up(cursor);
481 if (error) {
482 cursor->index = 0; /* sanity */
483 break;
484 }
485 /* reload stale pointer */
486 node = cursor->node->ondisk;
487 KKASSERT(cursor->index != node->count);
488 --cursor->index;
489 continue;
490 }
491
492 /*
493 * Check internal or leaf element. Determine if the record
494 * at the cursor has gone beyond the end of our range.
495 *
47197d71 496 * We recurse down through internal nodes.
32c90105
MD
497 */
498 KKASSERT(cursor->index != node->count);
499 if (node->type == HAMMER_BTREE_TYPE_INTERNAL) {
500 elm = &node->elms[cursor->index];
501 r = hammer_btree_cmp(&cursor->key_end, &elm[0].base);
502 s = hammer_btree_cmp(&cursor->key_beg, &elm[1].base);
503 if (hammer_debug_btree) {
2f85fa4d 504 kprintf("BRACKETL %016llx[%d] %016llx %02x %016llx lo=%02x %d\n",
973c11b9 505 (long long)cursor->node->node_offset,
32c90105 506 cursor->index,
973c11b9 507 (long long)elm[0].internal.base.obj_id,
32c90105 508 elm[0].internal.base.rec_type,
973c11b9 509 (long long)elm[0].internal.base.key,
2f85fa4d 510 elm[0].internal.base.localization,
32c90105
MD
511 r
512 );
2f85fa4d 513 kprintf("BRACKETR %016llx[%d] %016llx %02x %016llx lo=%02x %d\n",
973c11b9 514 (long long)cursor->node->node_offset,
32c90105 515 cursor->index + 1,
973c11b9 516 (long long)elm[1].internal.base.obj_id,
32c90105 517 elm[1].internal.base.rec_type,
973c11b9 518 (long long)elm[1].internal.base.key,
2f85fa4d 519 elm[1].internal.base.localization,
32c90105
MD
520 s
521 );
522 }
523
524 if (s >= 0) {
525 error = ENOENT;
526 break;
527 }
6dc17446
MD
528
529 /*
530 * It shouldn't be possible to seek past key_end,
531 * even if the cursor got moved to a parent.
532 */
32c90105
MD
533 KKASSERT(r >= 0);
534
535 /*
f36a9737 536 * Better not be zero
32c90105 537 */
f36a9737
MD
538 KKASSERT(elm->internal.subtree_offset != 0);
539
540 error = hammer_cursor_down(cursor);
541 if (error)
542 break;
543 KKASSERT(cursor->index == 0);
32c90105
MD
544 /* reload stale pointer */
545 node = cursor->node->ondisk;
f36a9737
MD
546
547 /* this can assign -1 if the leaf was empty */
548 cursor->index = node->count - 1;
32c90105
MD
549 continue;
550 } else {
551 elm = &node->elms[cursor->index];
552 s = hammer_btree_cmp(&cursor->key_beg, &elm->base);
553 if (hammer_debug_btree) {
2f85fa4d 554 kprintf("ELEMENT %016llx:%d %c %016llx %02x %016llx lo=%02x %d\n",
973c11b9 555 (long long)cursor->node->node_offset,
32c90105
MD
556 cursor->index,
557 (elm[0].leaf.base.btype ?
558 elm[0].leaf.base.btype : '?'),
973c11b9 559 (long long)elm[0].leaf.base.obj_id,
32c90105 560 elm[0].leaf.base.rec_type,
973c11b9 561 (long long)elm[0].leaf.base.key,
2f85fa4d 562 elm[0].leaf.base.localization,
32c90105
MD
563 s
564 );
565 }
566 if (s > 0) {
567 error = ENOENT;
568 break;
569 }
570
6dc17446
MD
571 /*
572 * It shouldn't be possible to seek past key_end,
573 * even if the cursor got moved to a parent.
574 */
575 cursor->flags &= ~HAMMER_CURSOR_ITERATE_CHECK;
576
577 /*
578 * Return the element
579 */
32c90105
MD
580 switch(elm->leaf.base.btype) {
581 case HAMMER_BTREE_TYPE_RECORD:
582 if ((cursor->flags & HAMMER_CURSOR_ASOF) &&
583 hammer_btree_chkts(cursor->asof, &elm->base)) {
584 --cursor->index;
585 continue;
586 }
f75df937 587 error = 0;
32c90105 588 break;
32c90105
MD
589 default:
590 error = EINVAL;
591 break;
592 }
593 if (error)
594 break;
595 }
596 /*
597 * node pointer invalid after loop
598 */
599
600 /*
601 * Return entry
602 */
603 if (hammer_debug_btree) {
604 int i = cursor->index;
605 hammer_btree_elm_t elm = &cursor->node->ondisk->elms[i];
2f85fa4d 606 kprintf("ITERATE %p:%d %016llx %02x %016llx lo=%02x\n",
32c90105 607 cursor->node, i,
973c11b9 608 (long long)elm->internal.base.obj_id,
32c90105 609 elm->internal.base.rec_type,
973c11b9 610 (long long)elm->internal.base.key,
2f85fa4d 611 elm->internal.base.localization
32c90105
MD
612 );
613 }
614 return(0);
615 }
616 return(error);
617}
618
427e5fc6 619/*
8cd0a023 620 * Lookup cursor->key_beg. 0 is returned on success, ENOENT if the entry
6a37e7e4
MD
621 * could not be found, EDEADLK if inserting and a retry is needed, and a
622 * fatal error otherwise. When retrying, the caller must terminate the
eaeff70d 623 * cursor and reinitialize it. EDEADLK cannot be returned if not inserting.
8cd0a023
MD
624 *
625 * The cursor is suitably positioned for a deletion on success, and suitably
eaeff70d
MD
626 * positioned for an insertion on ENOENT if HAMMER_CURSOR_INSERT was
627 * specified.
427e5fc6 628 *
47197d71 629 * The cursor may begin anywhere, the search will traverse the tree in
8cd0a023 630 * either direction to locate the requested element.
eaeff70d
MD
631 *
632 * Most of the logic implementing historical searches is handled here. We
9582c7da
MD
633 * do an initial lookup with create_tid set to the asof TID. Due to the
634 * way records are laid out, a backwards iteration may be required if
eaeff70d
MD
635 * ENOENT is returned to locate the historical record. Here's the
636 * problem:
637 *
9582c7da 638 * create_tid: 10 15 20
eaeff70d
MD
639 * LEAF1 LEAF2
640 * records: (11) (18)
641 *
9582c7da
MD
642 * Let's say we want to do a lookup AS-OF timestamp 17. We will traverse
643 * LEAF2 but the only record in LEAF2 has a create_tid of 18, which is
644 * not visible and thus causes ENOENT to be returned. We really need
645 * to check record 11 in LEAF1. If it also fails then the search fails
646 * (e.g. it might represent the range 11-16 and thus still not match our
f36a9737
MD
647 * AS-OF timestamp of 17). Note that LEAF1 could be empty, requiring
648 * further iterations.
b33e2cc0 649 *
9582c7da
MD
650 * If this case occurs btree_search() will set HAMMER_CURSOR_CREATE_CHECK
651 * and the cursor->create_check TID if an iteration might be needed.
652 * In the above example create_check would be set to 14.
427e5fc6
MD
653 */
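/*
 * Worked sequence for the example above (illustrative): an as-of lookup
 * at TID 17 against records with create_tids 11 (LEAF1) and 18 (LEAF2):
 *
 *	1. key_beg.create_tid = 17; btree_search() dives toward LEAF2, the
 *	   only record there (create_tid 18) is not yet visible, so ENOENT
 *	   is returned with HAMMER_CURSOR_CREATE_CHECK set and
 *	   cursor->create_check = 14 (the separator's create_tid - 1).
 *	2. key_beg.create_tid = 14; the retry dives into LEAF1 and the
 *	   create_tid 11 record is found and checked against the as-of
 *	   TID in the usual way.
 */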
654int
8cd0a023 655hammer_btree_lookup(hammer_cursor_t cursor)
427e5fc6 656{
66325755
MD
657 int error;
658
6dc17446 659 cursor->flags &= ~HAMMER_CURSOR_ITERATE_CHECK;
98da6d8c
MD
660 KKASSERT ((cursor->flags & HAMMER_CURSOR_INSERT) == 0 ||
661 cursor->trans->sync_lock_refs > 0);
cb51be26 662 ++hammer_stats_btree_lookups;
d5530d22 663 if (cursor->flags & HAMMER_CURSOR_ASOF) {
eaeff70d 664 KKASSERT((cursor->flags & HAMMER_CURSOR_INSERT) == 0);
9582c7da 665 cursor->key_beg.create_tid = cursor->asof;
eaeff70d 666 for (;;) {
9582c7da 667 cursor->flags &= ~HAMMER_CURSOR_CREATE_CHECK;
d5530d22 668 error = btree_search(cursor, 0);
b33e2cc0 669 if (error != ENOENT ||
9582c7da 670 (cursor->flags & HAMMER_CURSOR_CREATE_CHECK) == 0) {
b33e2cc0
MD
671 /*
672 * Stop if no error.
673 * Stop if error other then ENOENT.
674 * Stop if ENOENT and not special case.
675 */
eaeff70d
MD
676 break;
677 }
32c90105
MD
678 if (hammer_debug_btree) {
679 kprintf("CREATE_CHECK %016llx\n",
973c11b9 680 (long long)cursor->create_check);
32c90105 681 }
9582c7da 682 cursor->key_beg.create_tid = cursor->create_check;
eaeff70d
MD
683 /* loop */
684 }
d5530d22
MD
685 } else {
686 error = btree_search(cursor, 0);
687 }
bf3b416b 688 if (error == 0)
8cd0a023 689 error = hammer_btree_extract(cursor, cursor->flags);
66325755
MD
690 return(error);
691}
692
d26d0ae9
MD
693/*
694 * Execute the logic required to start an iteration. The first record
695 * located within the specified range is returned and iteration control
696 * flags are adjusted for successive hammer_btree_iterate() calls.
3214ade6
MD
697 *
698 * Set ATEDISK so a low-level caller can call btree_first/btree_iterate
699 * in a loop without worrying about it. Higher-level merged searches will
700 * adjust the flag appropriately.
d26d0ae9
MD
701 */
702int
703hammer_btree_first(hammer_cursor_t cursor)
704{
705 int error;
706
707 error = hammer_btree_lookup(cursor);
708 if (error == ENOENT) {
709 cursor->flags &= ~HAMMER_CURSOR_ATEDISK;
710 error = hammer_btree_iterate(cursor);
711 }
712 cursor->flags |= HAMMER_CURSOR_ATEDISK;
713 return(error);
714}
715
32c90105
MD
716/*
717 * Similarly but for an iteration in the reverse direction.
814387f6
MD
718 *
719 * Set ATEDISK when iterating backwards to skip the current entry,
720 * which after an ENOENT lookup will be pointing beyond our end point.
3214ade6
MD
721 *
722 * Set ATEDISK so a low-level caller can call btree_last/btree_iterate_reverse
723 * in a loop without worrying about it. Higher-level merged searches will
724 * adjust the flag appropriately.
32c90105
MD
725 */
726int
727hammer_btree_last(hammer_cursor_t cursor)
728{
729 struct hammer_base_elm save;
730 int error;
731
732 save = cursor->key_beg;
733 cursor->key_beg = cursor->key_end;
734 error = hammer_btree_lookup(cursor);
735 cursor->key_beg = save;
736 if (error == ENOENT ||
737 (cursor->flags & HAMMER_CURSOR_END_INCLUSIVE) == 0) {
814387f6 738 cursor->flags |= HAMMER_CURSOR_ATEDISK;
32c90105
MD
739 error = hammer_btree_iterate_reverse(cursor);
740 }
741 cursor->flags |= HAMMER_CURSOR_ATEDISK;
742 return(error);
743}
744
8cd0a023
MD
745/*
746 * Extract the record and/or data associated with the cursor's current
747 * position. Any prior record or data stored in the cursor is replaced.
748 * The cursor must be positioned at a leaf node.
749 *
47197d71 750 * NOTE: All extractions occur at the leaf of the B-Tree.
8cd0a023 751 */
66325755 752int
8cd0a023 753hammer_btree_extract(hammer_cursor_t cursor, int flags)
66325755 754{
8cd0a023
MD
755 hammer_node_ondisk_t node;
756 hammer_btree_elm_t elm;
47197d71 757 hammer_off_t data_off;
bac808fe 758 hammer_mount_t hmp;
19619882 759 int32_t data_len;
427e5fc6 760 int error;
427e5fc6 761
8cd0a023 762 /*
427e5fc6
MD
763 * The case where the data reference resolves to the same buffer
764 * as the record reference must be handled.
765 */
8cd0a023 766 node = cursor->node->ondisk;
8cd0a023 767 elm = &node->elms[cursor->index];
40043e7f
MD
768 cursor->data = NULL;
769 hmp = cursor->node->hmp;
66325755 770
d26d0ae9 771 /*
fe7678ee 772 * There is nothing to extract for an internal element.
d26d0ae9 773 */
fe7678ee
MD
774 if (node->type == HAMMER_BTREE_TYPE_INTERNAL)
775 return(EINVAL);
776
47197d71
MD
777 /*
778 * Only record types have data.
779 */
fe7678ee 780 KKASSERT(node->type == HAMMER_BTREE_TYPE_LEAF);
11ad5ade 781 cursor->leaf = &elm->leaf;
4a2796f3
MD
782
783 if ((flags & HAMMER_CURSOR_GET_DATA) == 0)
784 return(0);
47197d71 785 if (elm->leaf.base.btype != HAMMER_BTREE_TYPE_RECORD)
4a2796f3 786 return(0);
47197d71 787 data_off = elm->leaf.data_offset;
19619882 788 data_len = elm->leaf.data_len;
47197d71 789 if (data_off == 0)
4a2796f3 790 return(0);
d26d0ae9 791
4a2796f3
MD
792 /*
793 * Load the data
794 */
795 KKASSERT(data_len >= 0 && data_len <= HAMMER_XBUFSIZE);
796 cursor->data = hammer_bread_ext(hmp, data_off, data_len,
797 &error, &cursor->data_buffer);
b8a41159
MD
798
799 /*
800 * Mark the data buffer as not being meta-data if it isn't
801 * meta-data (sometimes bulk data is accessed via a volume
802 * block device).
803 */
804 if (error == 0) {
805 switch(elm->leaf.base.rec_type) {
806 case HAMMER_RECTYPE_DATA:
807 case HAMMER_RECTYPE_DB:
18bee4a2
MD
808 if ((data_off & HAMMER_ZONE_LARGE_DATA) == 0)
809 break;
810 if (hammer_double_buffer == 0 ||
811 (cursor->flags & HAMMER_CURSOR_NOSWAPCACHE)) {
812 hammer_io_notmeta(cursor->data_buffer);
813 }
b8a41159
MD
814 break;
815 default:
816 break;
817 }
818 }
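/*
 * Illustrative sketch (not part of the build): for records whose data
 * lives in the large-data zone, the policy applied in the switch above
 * could be expressed as a hypothetical helper using the same fields;
 * buffers left flagged as meta-data remain eligible for the swapcache
 * when double buffering is enabled.
 *
 *	// Non-zero when the extracted data buffer should keep its
 *	// meta-data flag (and thus stay swapcache-backed).
 *	static __inline int
 *	demo_keep_swapcache(hammer_cursor_t cursor)
 *	{
 *		return (hammer_double_buffer != 0 &&
 *			(cursor->flags & HAMMER_CURSOR_NOSWAPCACHE) == 0);
 *	}
 */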
819
820 /*
821 * Deal with CRC errors on the extracted data.
822 */
2faf0737
MD
823 if (error == 0 &&
824 hammer_crc_test_leaf(cursor->data, &elm->leaf) == 0) {
e469566b 825 kprintf("CRC DATA @ %016llx/%d FAILED\n",
973c11b9 826 (long long)elm->leaf.data_offset, elm->leaf.data_len);
fc73edd8 827 if (hammer_debug_critical)
4c286c36
MD
828 Debugger("CRC FAILED: DATA");
829 if (cursor->trans->flags & HAMMER_TRANSF_CRCDOM)
830 error = EDOM; /* less critical (mirroring) */
831 else
832 error = EIO; /* critical */
e469566b 833 }
427e5fc6
MD
834 return(error);
835}
836
837
838/*
8cd0a023
MD
839 * Insert a leaf element into the B-Tree at the current cursor position.
840 * The cursor is positioned such that the elements at and beyond the cursor
841 * are shifted to make room for the new record.
842 *
a89aec1b 843 * The caller must call hammer_btree_lookup() with the HAMMER_CURSOR_INSERT
8cd0a023
MD
844 * flag set and that call must return ENOENT before this function can be
845 * called.
846 *
d36ec43b 847 * The caller may depend on the cursor's exclusive lock after return to
1f07f686 848 * interlock frontend visibility (see HAMMER_RECF_CONVERT_DELETE).
d36ec43b 849 *
8cd0a023 850 * ENOSPC is returned if there is no room to insert a new record.
427e5fc6
MD
851 */
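/*
 * Illustrative calling sequence (not part of the build) for the contract
 * described above; error handling is simplified and the EEXIST mapping is
 * just one way a caller might react to finding an existing record.
 *
 *	cursor->flags |= HAMMER_CURSOR_INSERT;
 *	error = hammer_btree_lookup(cursor);
 *	if (error == ENOENT)
 *		error = hammer_btree_insert(cursor, &leaf, &doprop);
 *	else if (error == 0)
 *		error = EEXIST;		// key already present
 */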
852int
602c6cb8
MD
853hammer_btree_insert(hammer_cursor_t cursor, hammer_btree_leaf_elm_t elm,
854 int *doprop)
427e5fc6 855{
8cd0a023 856 hammer_node_ondisk_t node;
427e5fc6 857 int i;
6a37e7e4
MD
858 int error;
859
602c6cb8 860 *doprop = 0;
7bc5b8c2 861 if ((error = hammer_cursor_upgrade_node(cursor)) != 0)
6a37e7e4 862 return(error);
cb51be26 863 ++hammer_stats_btree_inserts;
427e5fc6 864
427e5fc6
MD
865 /*
866 * Insert the element at the leaf node and update the count in the
867 * parent. It is possible for parent to be NULL, indicating that
47197d71
MD
868 * the filesystem's ROOT B-Tree node is itself a leaf. The root
869 * inode can never be deleted, so the leaf should
870 * never be empty.
427e5fc6
MD
871 *
872 * Remember that the right-hand boundary is not included in the
873 * count.
874 */
36f82b23 875 hammer_modify_node_all(cursor->trans, cursor->node);
8cd0a023 876 node = cursor->node->ondisk;
427e5fc6 877 i = cursor->index;
fe7678ee 878 KKASSERT(elm->base.btype != 0);
8cd0a023
MD
879 KKASSERT(node->type == HAMMER_BTREE_TYPE_LEAF);
880 KKASSERT(node->count < HAMMER_BTREE_LEAF_ELMS);
881 if (i != node->count) {
882 bcopy(&node->elms[i], &node->elms[i+1],
883 (node->count - i) * sizeof(*elm));
884 }
11ad5ade 885 node->elms[i].leaf = *elm;
8cd0a023 886 ++node->count;
e4a5ff06 887 hammer_cursor_inserted_element(cursor->node, i);
c82af904
MD
888
889 /*
890 * Update the leaf node's aggregate mirror_tid for mirroring
891 * support.
892 */
602c6cb8 893 if (node->mirror_tid < elm->base.delete_tid) {
c82af904 894 node->mirror_tid = elm->base.delete_tid;
602c6cb8
MD
895 *doprop = 1;
896 }
897 if (node->mirror_tid < elm->base.create_tid) {
c82af904 898 node->mirror_tid = elm->base.create_tid;
602c6cb8 899 *doprop = 1;
c82af904 900 }
602c6cb8 901 hammer_modify_node_done(cursor->node);
c82af904 902
eaeff70d 903 /*
47197d71 904 * Debugging sanity checks.
eaeff70d 905 */
11ad5ade
MD
906 KKASSERT(hammer_btree_cmp(cursor->left_bound, &elm->base) <= 0);
907 KKASSERT(hammer_btree_cmp(cursor->right_bound, &elm->base) > 0);
eaeff70d 908 if (i) {
11ad5ade 909 KKASSERT(hammer_btree_cmp(&node->elms[i-1].leaf.base, &elm->base) < 0);
eaeff70d 910 }
b3deaf57 911 if (i != node->count - 1)
11ad5ade 912 KKASSERT(hammer_btree_cmp(&node->elms[i+1].leaf.base, &elm->base) > 0);
b3deaf57 913
427e5fc6
MD
914 return(0);
915}
916
917/*
fe7678ee 918 * Delete a record from the B-Tree at the current cursor position.
8cd0a023
MD
919 * The cursor is positioned such that the current element is the one
920 * to be deleted.
921 *
195c19a1
MD
922 * On return the cursor will be positioned after the deleted element and
923 * MAY point to an internal node. It will be suitable for the continuation
924 * of an iteration but not for an insertion or deletion.
8cd0a023 925 *
195c19a1 926 * Deletions will attempt to partially rebalance the B-Tree in an upward
f36a9737
MD
927 * direction, but will terminate rather than deadlock. Empty internal nodes
928 * are never allowed; a deletion that deadlocks may end up giving us an
929 * empty leaf. The pruner will clean up and rebalance the tree.
46fe7ae1
MD
930 *
931 * This function can return EDEADLK, requiring the caller to retry the
932 * operation after clearing the deadlock.
427e5fc6
MD
933 */
934int
8cd0a023 935hammer_btree_delete(hammer_cursor_t cursor)
427e5fc6 936{
8cd0a023
MD
937 hammer_node_ondisk_t ondisk;
938 hammer_node_t node;
939 hammer_node_t parent;
8cd0a023 940 int error;
427e5fc6
MD
941 int i;
942
98da6d8c 943 KKASSERT (cursor->trans->sync_lock_refs > 0);
6a37e7e4
MD
944 if ((error = hammer_cursor_upgrade(cursor)) != 0)
945 return(error);
cb51be26 946 ++hammer_stats_btree_deletes;
6a37e7e4 947
427e5fc6 948 /*
8cd0a023 949 * Delete the element from the leaf node.
427e5fc6 950 *
8cd0a023 951 * Remember that leaf nodes do not have boundaries.
427e5fc6 952 */
8cd0a023
MD
953 node = cursor->node;
954 ondisk = node->ondisk;
427e5fc6
MD
955 i = cursor->index;
956
8cd0a023 957 KKASSERT(ondisk->type == HAMMER_BTREE_TYPE_LEAF);
fe7678ee 958 KKASSERT(i >= 0 && i < ondisk->count);
36f82b23 959 hammer_modify_node_all(cursor->trans, node);
8cd0a023
MD
960 if (i + 1 != ondisk->count) {
961 bcopy(&ondisk->elms[i+1], &ondisk->elms[i],
962 (ondisk->count - i - 1) * sizeof(ondisk->elms[0]));
963 }
964 --ondisk->count;
10a5d1ba 965 hammer_modify_node_done(node);
b3bad96f 966 hammer_cursor_deleted_element(node, i);
fe7678ee
MD
967
968 /*
969 * Validate local parent
970 */
971 if (ondisk->parent) {
8cd0a023 972 parent = cursor->parent;
fe7678ee
MD
973
974 KKASSERT(parent != NULL);
975 KKASSERT(parent->node_offset == ondisk->parent);
427e5fc6 976 }
427e5fc6 977
8cd0a023 978 /*
fe7678ee 979 * If the leaf becomes empty it must be detached from the parent,
47197d71 980 * potentially recursing through to the filesystem root.
195c19a1
MD
981 *
982 * This may reposition the cursor at one of the parent's of the
983 * current node.
6a37e7e4
MD
984 *
985 * Ignore deadlock errors, that simply means that btree_remove
f36a9737 986 * was unable to recurse and had to leave us with an empty leaf.
8cd0a023 987 */
b3deaf57 988 KKASSERT(cursor->index <= ondisk->count);
8cd0a023 989 if (ondisk->count == 0) {
f36a9737 990 error = btree_remove(cursor);
6a37e7e4
MD
991 if (error == EDEADLK)
992 error = 0;
8cd0a023 993 } else {
8cd0a023
MD
994 error = 0;
995 }
eaeff70d
MD
996 KKASSERT(cursor->parent == NULL ||
997 cursor->parent_index < cursor->parent->ondisk->count);
8cd0a023
MD
998 return(error);
999}
427e5fc6
MD
1000
1001/*
8cd0a023
MD
1002 * PRIMARY B-TREE SEARCH SUPPORT PROCEDURE
1003 *
47197d71 1004 * Search the filesystem B-Tree for cursor->key_beg, return the matching node.
8cd0a023 1005 *
d26d0ae9
MD
1006 * The search can begin ANYWHERE in the B-Tree. As a first step the search
1007 * iterates up the tree as necessary to properly position itself prior to
1008 * actually doing the search.
1009 *
8cd0a023 1010 * INSERTIONS: The search will split full nodes and leaves on its way down
d26d0ae9
MD
1011 * and guarantee that the leaf it ends up on is not full. If we run out
1012 * of space the search continues to the leaf (to position the cursor for
1013 * the spike), but ENOSPC is returned.
427e5fc6 1014 *
fbc6e32a
MD
1015 * The search is only guaranteed to end up on a leaf if an error code of 0
1016 * is returned, or if inserting and an error code of ENOENT is returned.
d26d0ae9 1017 * Otherwise it can stop at an internal node. On success a search returns
47197d71 1018 * a leaf node.
eaeff70d
MD
1019 *
1020 * COMPLEXITY WARNING! This is the core B-Tree search code for the entire
1021 * filesystem, and it is not simple code. Please note the following facts:
1022 *
1023 * - Internal node recursions have a boundary on the left AND right. The
9582c7da 1024 * right boundary is non-inclusive. The create_tid is a generic part
eaeff70d
MD
1025 * of the key for internal nodes.
1026 *
47197d71 1027 * - Leaf nodes contain terminal elements only now.
eaeff70d
MD
1028 *
1029 * - Filesystem lookups typically set HAMMER_CURSOR_ASOF, indicating a
b33e2cc0
MD
1030 * historical search. ASOF and INSERT are mutually exclusive. When
1031 * doing an as-of lookup btree_search() checks for a right-edge boundary
9582c7da
MD
1032 * case. If, while recursing down, the left edge differs from the key
1033 * by ONLY its create_tid, HAMMER_CURSOR_CREATE_CHECK is set along
1034 * with cursor->create_check. This is used by btree_lookup() to iterate.
1035 * The iteration runs backwards because as-of searches can wind up going
b33e2cc0 1036 * down the wrong branch of the B-Tree.
427e5fc6 1037 */
8cd0a023 1038static
427e5fc6 1039int
8cd0a023 1040btree_search(hammer_cursor_t cursor, int flags)
427e5fc6 1041{
8cd0a023 1042 hammer_node_ondisk_t node;
61aeeb33 1043 hammer_btree_elm_t elm;
8cd0a023 1044 int error;
d26d0ae9 1045 int enospc = 0;
8cd0a023
MD
1046 int i;
1047 int r;
b33e2cc0 1048 int s;
8cd0a023
MD
1049
1050 flags |= cursor->flags;
cb51be26 1051 ++hammer_stats_btree_searches;
8cd0a023 1052
b3deaf57 1053 if (hammer_debug_btree) {
2f85fa4d 1054 kprintf("SEARCH %016llx[%d] %016llx %02x key=%016llx cre=%016llx lo=%02x (td = %p)\n",
973c11b9 1055 (long long)cursor->node->node_offset,
eaeff70d 1056 cursor->index,
973c11b9 1057 (long long)cursor->key_beg.obj_id,
b3deaf57 1058 cursor->key_beg.rec_type,
973c11b9
MD
1059 (long long)cursor->key_beg.key,
1060 (long long)cursor->key_beg.create_tid,
2f85fa4d 1061 cursor->key_beg.localization,
a84a197d 1062 curthread
b3deaf57 1063 );
a84a197d
MD
1064 if (cursor->parent)
1065 kprintf("SEARCHP %016llx[%d] (%016llx/%016llx %016llx/%016llx) (%p/%p %p/%p)\n",
973c11b9
MD
1066 (long long)cursor->parent->node_offset,
1067 cursor->parent_index,
1068 (long long)cursor->left_bound->obj_id,
1069 (long long)cursor->parent->ondisk->elms[cursor->parent_index].internal.base.obj_id,
1070 (long long)cursor->right_bound->obj_id,
1071 (long long)cursor->parent->ondisk->elms[cursor->parent_index+1].internal.base.obj_id,
a84a197d
MD
1072 cursor->left_bound,
1073 &cursor->parent->ondisk->elms[cursor->parent_index],
1074 cursor->right_bound,
1075 &cursor->parent->ondisk->elms[cursor->parent_index+1]
1076 );
b3deaf57
MD
1077 }
1078
8cd0a023
MD
1079 /*
1080 * Move our cursor up the tree until we find a node whose range covers
47197d71 1081 * the key we are trying to locate.
8cd0a023
MD
1082 *
1083 * The left bound is inclusive, the right bound is non-inclusive.
47197d71 1084 * It is ok to cursor up too far.
8cd0a023 1085 */
b33e2cc0
MD
1086 for (;;) {
1087 r = hammer_btree_cmp(&cursor->key_beg, cursor->left_bound);
1088 s = hammer_btree_cmp(&cursor->key_beg, cursor->right_bound);
1089 if (r >= 0 && s < 0)
1090 break;
9944ae54 1091 KKASSERT(cursor->parent);
cb51be26 1092 ++hammer_stats_btree_iterations;
6a37e7e4 1093 error = hammer_cursor_up(cursor);
8cd0a023
MD
1094 if (error)
1095 goto done;
427e5fc6 1096 }
427e5fc6 1097
b33e2cc0
MD
1098 /*
1099 * The delete-checks below are based on node, not parent. Set the
1100 * initial delete-check based on the parent.
1101 */
9582c7da
MD
1102 if (r == 1) {
1103 KKASSERT(cursor->left_bound->create_tid != 1);
1104 cursor->create_check = cursor->left_bound->create_tid - 1;
1105 cursor->flags |= HAMMER_CURSOR_CREATE_CHECK;
b33e2cc0
MD
1106 }
1107
8cd0a023 1108 /*
47197d71 1109 * We better have ended up with a node somewhere.
8cd0a023 1110 */
47197d71 1111 KKASSERT(cursor->node != NULL);
8cd0a023
MD
1112
1113 /*
1114 * If we are inserting we can't start at a full node if the parent
1115 * is also full (because there is no way to split the node),
b33e2cc0 1116 * continue running up the tree until the requirement is satisfied
47197d71 1117 * or we hit the root of the filesystem.
9582c7da
MD
1118 *
1119 * (If inserting we aren't doing an as-of search so we don't have
1120 * to worry about create_check).
8cd0a023 1121 */
61aeeb33 1122 while ((flags & HAMMER_CURSOR_INSERT) && enospc == 0) {
eaeff70d
MD
1123 if (cursor->node->ondisk->type == HAMMER_BTREE_TYPE_INTERNAL) {
1124 if (btree_node_is_full(cursor->node->ondisk) == 0)
1125 break;
1126 } else {
47197d71 1127 if (btree_node_is_full(cursor->node->ondisk) == 0)
eaeff70d
MD
1128 break;
1129 }
b33e2cc0
MD
1130 if (cursor->node->ondisk->parent == 0 ||
1131 cursor->parent->ondisk->count != HAMMER_BTREE_INT_ELMS) {
8cd0a023 1132 break;
b33e2cc0 1133 }
cb51be26 1134 ++hammer_stats_btree_iterations;
6a37e7e4 1135 error = hammer_cursor_up(cursor);
47197d71 1136 /* node may have become stale */
8cd0a023
MD
1137 if (error)
1138 goto done;
427e5fc6 1139 }
427e5fc6 1140
8cd0a023
MD
1141 /*
1142 * Push down through internal nodes to locate the requested key.
1143 */
8cd0a023
MD
1144 node = cursor->node->ondisk;
1145 while (node->type == HAMMER_BTREE_TYPE_INTERNAL) {
8cd0a023
MD
1146 /*
1147 * Scan the node to find the subtree index to push down into.
fbc6e32a 1148 * We go one-past, then back-up.
d113fda1 1149 *
fe7678ee
MD
1150 * We must proactively remove deleted elements which may
1151 * have been left over from a deadlocked btree_remove().
1152 *
eaeff70d 1153 * The left and right boundaries are included in the loop
d5530d22 1154 * in order to detect edge cases.
9944ae54 1155 *
9582c7da 1156 * If the separator only differs by create_tid (r == 1)
eaeff70d
MD
1157 * and we are doing an as-of search, we may end up going
1158 * down a branch to the left of the one containing the
1159 * desired key. This requires numerous special cases.
8cd0a023 1160 */
47637bff 1161 ++hammer_stats_btree_iterations;
46fe7ae1 1162 if (hammer_debug_btree) {
47197d71 1163 kprintf("SEARCH-I %016llx count=%d\n",
973c11b9 1164 (long long)cursor->node->node_offset,
46fe7ae1
MD
1165 node->count);
1166 }
af209b0f
MD
1167
1168 /*
1169 * Try to shortcut the search before dropping into the
1170 * linear loop. Locate the first element where r <= 1.
1171 */
1172 i = hammer_btree_search_node(&cursor->key_beg, node);
1173 while (i <= node->count) {
cb51be26 1174 ++hammer_stats_btree_elements;
61aeeb33
MD
1175 elm = &node->elms[i];
1176 r = hammer_btree_cmp(&cursor->key_beg, &elm->base);
b33e2cc0
MD
1177 if (hammer_debug_btree > 2) {
1178 kprintf(" IELM %p %d r=%d\n",
1179 &node->elms[i], i, r);
1180 }
9582c7da 1181 if (r < 0)
8cd0a023 1182 break;
9582c7da
MD
1183 if (r == 1) {
1184 KKASSERT(elm->base.create_tid != 1);
1185 cursor->create_check = elm->base.create_tid - 1;
1186 cursor->flags |= HAMMER_CURSOR_CREATE_CHECK;
b33e2cc0 1187 }
af209b0f 1188 ++i;
8cd0a023 1189 }
eaeff70d 1190 if (hammer_debug_btree) {
46fe7ae1
MD
1191 kprintf("SEARCH-I preI=%d/%d r=%d\n",
1192 i, node->count, r);
eaeff70d 1193 }
8cd0a023
MD
1194
1195 /*
9944ae54
MD
1196 * These cases occur when the parent's idea of the boundary
1197 * is wider than the child's idea of the boundary, and
1198 * require special handling. If not inserting we can
1199 * terminate the search early for these cases but the
1200 * child's boundaries cannot be unconditionally modified.
8cd0a023 1201 */
fbc6e32a 1202 if (i == 0) {
9944ae54
MD
1203 /*
1204 * If i == 0 the search terminated to the LEFT of the
1205 * left_boundary but to the RIGHT of the parent's left
1206 * boundary.
1207 */
fbc6e32a 1208 u_int8_t save;
d26d0ae9 1209
eaeff70d
MD
1210 elm = &node->elms[0];
1211
1212 /*
1213 * If we aren't inserting we can stop here.
1214 */
11ad5ade
MD
1215 if ((flags & (HAMMER_CURSOR_INSERT |
1216 HAMMER_CURSOR_PRUNING)) == 0) {
fbc6e32a
MD
1217 cursor->index = 0;
1218 return(ENOENT);
1219 }
9944ae54 1220
d5530d22
MD
1221 /*
1222 * Correct a left-hand boundary mismatch.
6a37e7e4 1223 *
f36a9737
MD
1224 * We can only do this if we can upgrade the lock,
1225 * and synchronized as a background cursor (i.e.
1226 * inserting or pruning).
10a5d1ba
MD
1227 *
1228 * WARNING: We can only do this if inserting, i.e.
1229 * we are running on the backend.
d5530d22 1230 */
eaeff70d
MD
1231 if ((error = hammer_cursor_upgrade(cursor)) != 0)
1232 return(error);
10a5d1ba 1233 KKASSERT(cursor->flags & HAMMER_CURSOR_BACKEND);
c9b9e29d
MD
1234 hammer_modify_node_field(cursor->trans, cursor->node,
1235 elms[0]);
fe7678ee 1236 save = node->elms[0].base.btype;
d5530d22 1237 node->elms[0].base = *cursor->left_bound;
fe7678ee 1238 node->elms[0].base.btype = save;
10a5d1ba 1239 hammer_modify_node_done(cursor->node);
9944ae54 1240 } else if (i == node->count + 1) {
d26d0ae9 1241 /*
9944ae54
MD
1242 * If i == node->count + 1 the search terminated to
1243 * the RIGHT of the right boundary but to the LEFT
eaeff70d
MD
1244 * of the parent's right boundary. If we aren't
1245 * inserting we can stop here.
d113fda1 1246 *
9944ae54
MD
1247 * Note that the last element in this case is
1248 * elms[i-2] prior to adjustments to 'i'.
d26d0ae9 1249 */
9944ae54 1250 --i;
11ad5ade
MD
1251 if ((flags & (HAMMER_CURSOR_INSERT |
1252 HAMMER_CURSOR_PRUNING)) == 0) {
9944ae54 1253 cursor->index = i;
eaeff70d 1254 return (ENOENT);
d26d0ae9
MD
1255 }
1256
d5530d22
MD
1257 /*
1258 * Correct a right-hand boundary mismatch.
1259 * (actual push-down record is i-2 prior to
1260 * adjustments to i).
6a37e7e4 1261 *
f36a9737
MD
1262 * We can only do this if we can upgrade the lock,
1263 * and synchronized as a background cursor (i.e.
1264 * inserting or pruning).
10a5d1ba
MD
1265 *
1266 * WARNING: We can only do this if inserting, i.e.
1267 * we are running on the backend.
d5530d22 1268 */
eaeff70d
MD
1269 if ((error = hammer_cursor_upgrade(cursor)) != 0)
1270 return(error);
9944ae54 1271 elm = &node->elms[i];
10a5d1ba 1272 KKASSERT(cursor->flags & HAMMER_CURSOR_BACKEND);
36f82b23
MD
1273 hammer_modify_node(cursor->trans, cursor->node,
1274 &elm->base, sizeof(elm->base));
d5530d22 1275 elm->base = *cursor->right_bound;
10a5d1ba 1276 hammer_modify_node_done(cursor->node);
d5530d22 1277 --i;
fbc6e32a
MD
1278 } else {
1279 /*
9944ae54
MD
1280 * The push-down index is now i - 1. If we had
1281 * terminated on the right boundary this will point
1282 * us at the last element.
fbc6e32a
MD
1283 */
1284 --i;
1285 }
8cd0a023 1286 cursor->index = i;
6a37e7e4 1287 elm = &node->elms[i];
8cd0a023 1288
b3deaf57 1289 if (hammer_debug_btree) {
47197d71 1290 kprintf("RESULT-I %016llx[%d] %016llx %02x "
2f85fa4d 1291 "key=%016llx cre=%016llx lo=%02x\n",
973c11b9 1292 (long long)cursor->node->node_offset,
eaeff70d 1293 i,
973c11b9 1294 (long long)elm->internal.base.obj_id,
b3deaf57 1295 elm->internal.base.rec_type,
973c11b9
MD
1296 (long long)elm->internal.base.key,
1297 (long long)elm->internal.base.create_tid,
2f85fa4d 1298 elm->internal.base.localization
b3deaf57
MD
1299 );
1300 }
1301
6a37e7e4 1302 /*
f36a9737 1303 * We better have a valid subtree offset.
6a37e7e4 1304 */
f36a9737 1305 KKASSERT(elm->internal.subtree_offset != 0);
6a37e7e4 1306
8cd0a023
MD
1307 /*
1308 * Handle insertion and deletion requirements.
1309 *
1310 * If inserting split full nodes. The split code will
1311 * adjust cursor->node and cursor->index if the current
1312 * index winds up in the new node.
61aeeb33 1313 *
9944ae54
MD
1314 * If inserting and a left or right edge case was detected,
1315 * we cannot correct the left or right boundary and must
1316 * prepend and append an empty leaf node in order to make
1317 * the boundary correction.
1318 *
61aeeb33
MD
1319 * If we run out of space we set enospc and continue on
1320 * to a leaf to provide the spike code with a good point
47197d71 1321 * of entry.
8cd0a023 1322 */
61aeeb33 1323 if ((flags & HAMMER_CURSOR_INSERT) && enospc == 0) {
fe7678ee 1324 if (btree_node_is_full(node)) {
8cd0a023 1325 error = btree_split_internal(cursor);
d26d0ae9
MD
1326 if (error) {
1327 if (error != ENOSPC)
1328 goto done;
1329 enospc = 1;
d26d0ae9 1330 }
8cd0a023
MD
1331 /*
1332 * reload stale pointers
1333 */
1334 i = cursor->index;
1335 node = cursor->node->ondisk;
1336 }
d26d0ae9 1337 }
427e5fc6
MD
1338
1339 /*
8cd0a023 1340 * Push down (push into new node, existing node becomes
d26d0ae9 1341 * the parent) and continue the search.
427e5fc6 1342 */
8cd0a023 1343 error = hammer_cursor_down(cursor);
47197d71 1344 /* node may have become stale */
8cd0a023
MD
1345 if (error)
1346 goto done;
1347 node = cursor->node->ondisk;
427e5fc6 1348 }
427e5fc6 1349
8cd0a023
MD
1350 /*
1351 * We are at a leaf, do a linear search of the key array.
d26d0ae9
MD
1352 *
1353 * On success the index is set to the matching element and 0
1354 * is returned.
1355 *
1356 * On failure the index is set to the insertion point and ENOENT
1357 * is returned.
8cd0a023
MD
1358 *
1359 * Boundaries are not stored in leaf nodes, so the index can wind
1360 * up to the left of element 0 (index == 0) or past the end of
f36a9737
MD
1361 * the array (index == node->count). It is also possible that the
1362 * leaf might be empty.
8cd0a023 1363 */
47637bff 1364 ++hammer_stats_btree_iterations;
fe7678ee 1365 KKASSERT (node->type == HAMMER_BTREE_TYPE_LEAF);
8cd0a023 1366 KKASSERT(node->count <= HAMMER_BTREE_LEAF_ELMS);
46fe7ae1 1367 if (hammer_debug_btree) {
47197d71 1368 kprintf("SEARCH-L %016llx count=%d\n",
973c11b9 1369 (long long)cursor->node->node_offset,
46fe7ae1
MD
1370 node->count);
1371 }
8cd0a023 1372
af209b0f
MD
1373 /*
1374 * Try to shortcut the search before dropping into the
1375 * linear loop. Locate the first element where r <= 1.
1376 */
1377 i = hammer_btree_search_node(&cursor->key_beg, node);
1378 while (i < node->count) {
cb51be26 1379 ++hammer_stats_btree_elements;
fe7678ee
MD
1380 elm = &node->elms[i];
1381
1382 r = hammer_btree_cmp(&cursor->key_beg, &elm->leaf.base);
427e5fc6 1383
d5530d22
MD
1384 if (hammer_debug_btree > 1)
1385 kprintf(" ELM %p %d r=%d\n", &node->elms[i], i, r);
1386
427e5fc6 1387 /*
fe7678ee 1388 * We are at a record element. Stop if we've flipped past
9582c7da
MD
1389 * key_beg, not counting the create_tid test. Allow the
1390 * r == 1 case (key_beg > element but differs only by its
1391 * create_tid) to fall through to the AS-OF check.
427e5fc6 1392 */
fe7678ee
MD
1393 KKASSERT (elm->leaf.base.btype == HAMMER_BTREE_TYPE_RECORD);
1394
9582c7da 1395 if (r < 0)
d5530d22 1396 goto failed;
af209b0f
MD
1397 if (r > 1) {
1398 ++i;
d5530d22 1399 continue;
af209b0f 1400 }
427e5fc6 1401
66325755 1402 /*
9582c7da 1403 * Check our as-of timestamp against the element.
66325755 1404 */
eaeff70d 1405 if (flags & HAMMER_CURSOR_ASOF) {
fe7678ee 1406 if (hammer_btree_chkts(cursor->asof,
d113fda1 1407 &node->elms[i].base) != 0) {
af209b0f 1408 ++i;
d113fda1
MD
1409 continue;
1410 }
eaeff70d
MD
1411 /* success */
1412 } else {
af209b0f
MD
1413 if (r > 0) { /* can only be +1 */
1414 ++i;
9582c7da 1415 continue;
af209b0f 1416 }
eaeff70d 1417 /* success */
66325755 1418 }
d5530d22
MD
1419 cursor->index = i;
1420 error = 0;
eaeff70d 1421 if (hammer_debug_btree) {
47197d71 1422 kprintf("RESULT-L %016llx[%d] (SUCCESS)\n",
973c11b9 1423 (long long)cursor->node->node_offset, i);
eaeff70d 1424 }
d5530d22
MD
1425 goto done;
1426 }
1427
1428 /*
eaeff70d 1429 * The search of the leaf node failed. i is the insertion point.
d5530d22 1430 */
d5530d22 1431failed:
b3deaf57 1432 if (hammer_debug_btree) {
47197d71 1433 kprintf("RESULT-L %016llx[%d] (FAILED)\n",
973c11b9 1434 (long long)cursor->node->node_offset, i);
b3deaf57
MD
1435 }
1436
8cd0a023
MD
1437 /*
1438 * No exact match was found, i is now at the insertion point.
1439 *
1440 * If inserting split a full leaf before returning. This
1441 * may have the side effect of adjusting cursor->node and
1442 * cursor->index.
1443 */
1444 cursor->index = i;
eaeff70d 1445 if ((flags & HAMMER_CURSOR_INSERT) && enospc == 0 &&
47197d71 1446 btree_node_is_full(node)) {
8cd0a023 1447 error = btree_split_leaf(cursor);
d26d0ae9
MD
1448 if (error) {
1449 if (error != ENOSPC)
1450 goto done;
1451 enospc = 1;
d26d0ae9
MD
1452 }
1453 /*
1454 * reload stale pointers
1455 */
8cd0a023
MD
1456 /* NOT USED
1457 i = cursor->index;
1458 node = &cursor->node->internal;
1459 */
8cd0a023 1460 }
d26d0ae9
MD
1461
1462 /*
1463 * We reached a leaf but did not find the key we were looking for.
1464 * If this is an insert we will be properly positioned for an insert
1465 * (ENOENT) or spike (ENOSPC) operation.
1466 */
1467 error = enospc ? ENOSPC : ENOENT;
8cd0a023 1468done:
427e5fc6
MD
1469 return(error);
1470}
1471
af209b0f
MD
1472/*
1473 * Heuristic search for the first element whose comparison is <= 1. May
1474 * return an index whose compare result is > 1 but may only return an index
1475 * whose compare result is <= 1 if it is the first element with that result.
1476 */
bcac4bbb 1477int
af209b0f
MD
1478hammer_btree_search_node(hammer_base_elm_t elm, hammer_node_ondisk_t node)
1479{
1480 int b;
1481 int s;
1482 int i;
1483 int r;
1484
1485 /*
1486 * Don't bother if the node does not have very many elements
1487 */
1488 b = 0;
1489 s = node->count;
1490 while (s - b > 4) {
1491 i = b + (s - b) / 2;
cb51be26 1492 ++hammer_stats_btree_elements;
af209b0f
MD
1493 r = hammer_btree_cmp(elm, &node->elms[i].leaf.base);
1494 if (r <= 1) {
1495 s = i;
1496 } else {
1497 b = i;
1498 }
1499 }
1500 return(b);
1501}
1502
8cd0a023 1503
427e5fc6 1504/************************************************************************
8cd0a023 1505 * SPLITTING AND MERGING *
427e5fc6
MD
1506 ************************************************************************
1507 *
1508 * These routines do all the dirty work required to split and merge nodes.
1509 */
1510
1511/*
8cd0a023 1512 * Split an internal node into two nodes and move the separator at the split
fe7678ee 1513 * point to the parent.
427e5fc6 1514 *
8cd0a023
MD
1515 * (cursor->node, cursor->index) indicates the element the caller intends
1516 * to push into. We will adjust node and index if that element winds
427e5fc6 1517 * up in the split node.
8cd0a023 1518 *
47197d71
MD
1519 * If we are at the root of the filesystem a new root must be created with
1520 * two elements, one pointing to the original root and one pointing to the
8cd0a023 1521 * newly allocated split node.
427e5fc6
MD
1522 */
1523static
1524int
8cd0a023 1525btree_split_internal(hammer_cursor_t cursor)
427e5fc6 1526{
8cd0a023
MD
1527 hammer_node_ondisk_t ondisk;
1528 hammer_node_t node;
1529 hammer_node_t parent;
1530 hammer_node_t new_node;
1531 hammer_btree_elm_t elm;
1532 hammer_btree_elm_t parent_elm;
1775b6a0 1533 struct hammer_node_lock lockroot;
36f82b23 1534 hammer_mount_t hmp = cursor->trans->hmp;
df2ccbac 1535 hammer_off_t hint;
427e5fc6
MD
1536 int parent_index;
1537 int made_root;
1538 int split;
1539 int error;
7f7c1f84 1540 int i;
8cd0a023 1541 const int esize = sizeof(*elm);
427e5fc6 1542
1775b6a0 1543 hammer_node_lock_init(&lockroot, cursor->node);
24cf83d2 1544 error = hammer_btree_lock_children(cursor, 1, &lockroot, NULL);
47197d71
MD
1545 if (error)
1546 goto done;
7bc5b8c2
MD
1547 if ((error = hammer_cursor_upgrade(cursor)) != 0)
1548 goto done;
cb51be26 1549 ++hammer_stats_btree_splits;
6a37e7e4 1550
427e5fc6 1551 /*
fa2b9a03
MD
1552 * Calculate the split point. If the insertion point is at the
1553 * end of the leaf we adjust the split point significantly to the
1554 * right to try to optimize node fill and flag it. If we hit
1555 * that same leaf again our heuristic failed and we don't try
1556 * to optimize node fill (it could lead to a degenerate case).
427e5fc6 1557 */
8cd0a023
MD
1558 node = cursor->node;
1559 ondisk = node->ondisk;
fa2b9a03
MD
1560 KKASSERT(ondisk->count > 4);
1561 if (cursor->index == ondisk->count &&
1562 (node->flags & HAMMER_NODE_NONLINEAR) == 0) {
1563 split = (ondisk->count + 1) * 3 / 4;
1564 node->flags |= HAMMER_NODE_NONLINEAR;
1565 } else {
1566 /*
1567 * We are splitting but elms[split] will be promoted to
1568 * the parent, leaving the right hand node with one less
1569 * element. If the insertion point will be on the
1570 * left-hand side adjust the split point to give the
1571 * right hand side one additional node.
1572 */
1573 split = (ondisk->count + 1) / 2;
1574 if (cursor->index <= split)
1575 --split;
1576 }
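/*
 * Worked example of the split point computed above (illustrative): for an
 * internal node with ondisk->count == 8,
 *
 *	- appending at the end of a node not yet flagged NONLINEAR:
 *	  split = (8 + 1) * 3 / 4 = 6, leaving the left node nearly full so
 *	  further sequential inserts land in the mostly-empty right node.
 *	- otherwise: split = (8 + 1) / 2 = 4, and if the insertion point is
 *	  at index <= 4 the split backs up to 3, giving the right-hand node
 *	  (which also loses elms[split] to the parent) one more element.
 */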
427e5fc6
MD
1577
1578 /*
47197d71
MD
1579 * If we are at the root of the filesystem, create a new root node
1580 * with 1 element and split normally. Avoid making major
1581 * modifications until we know the whole operation will work.
427e5fc6 1582 */
8cd0a023 1583 if (ondisk->parent == 0) {
b4f86ea3 1584 parent = hammer_alloc_btree(cursor->trans, 0, &error);
427e5fc6 1585 if (parent == NULL)
6a37e7e4 1586 goto done;
8cd0a023 1587 hammer_lock_ex(&parent->lock);
36f82b23 1588 hammer_modify_node_noundo(cursor->trans, parent);
8cd0a023
MD
1589 ondisk = parent->ondisk;
1590 ondisk->count = 1;
1591 ondisk->parent = 0;
a56cb012 1592 ondisk->mirror_tid = node->ondisk->mirror_tid;
8cd0a023 1593 ondisk->type = HAMMER_BTREE_TYPE_INTERNAL;
47197d71 1594 ondisk->elms[0].base = hmp->root_btree_beg;
fe7678ee 1595 ondisk->elms[0].base.btype = node->ondisk->type;
8cd0a023 1596 ondisk->elms[0].internal.subtree_offset = node->node_offset;
47197d71 1597 ondisk->elms[1].base = hmp->root_btree_end;
10a5d1ba 1598 hammer_modify_node_done(parent);
fe7678ee 1599 /* ondisk->elms[1].base.btype - not used */
427e5fc6 1600 made_root = 1;
8cd0a023 1601 parent_index = 0; /* index of current node in parent */
427e5fc6
MD
1602 } else {
1603 made_root = 0;
8cd0a023
MD
1604 parent = cursor->parent;
1605 parent_index = cursor->parent_index;
427e5fc6 1606 }
427e5fc6 1607
df2ccbac
MD
1608 /*
1609 * Calculate a hint for the allocation of the new B-Tree node.
1610 * The most likely expansion is coming from the insertion point
1611 * at cursor->index, so try to localize the allocation of our
1612 * new node to accommodate that sub-tree.
1613 *
1614 * Use the right-most sub-tree when expanding on the right edge.
1615 * This is a very common case when copying a directory tree.
1616 */
1617 if (cursor->index == ondisk->count)
1618 hint = ondisk->elms[cursor->index - 1].internal.subtree_offset;
1619 else
1620 hint = ondisk->elms[cursor->index].internal.subtree_offset;
1621
427e5fc6
MD
1622 /*
1623 * Split node into new_node at the split point.
1624 *
df2ccbac 1625 * B O O O P N N B <-- P = node->elms[split] (index 4)
427e5fc6
MD
1626 * 0 1 2 3 4 5 6 <-- subtree indices
1627 *
1628 * x x P x x
1629 * s S S s
1630 * / \
1631 * B O O O B B N N B <--- inner boundary points are 'P'
1632 * 0 1 2 3 4 5 6
427e5fc6 1633 */
b4f86ea3 1634 new_node = hammer_alloc_btree(cursor->trans, 0, &error);
427e5fc6 1635 if (new_node == NULL) {
8cd0a023
MD
1636 if (made_root) {
1637 hammer_unlock(&parent->lock);
36f82b23 1638 hammer_delete_node(cursor->trans, parent);
8cd0a023
MD
1639 hammer_rel_node(parent);
1640 }
6a37e7e4 1641 goto done;
427e5fc6 1642 }
8cd0a023 1643 hammer_lock_ex(&new_node->lock);
427e5fc6
MD
1644
1645 /*
8cd0a023 1646 * Create the new node. P becomes the left-hand boundary in the
427e5fc6
MD
1647 * new node. Copy the right-hand boundary as well.
1648 *
1649 * elm is the new separator.
1650 */
36f82b23
MD
1651 hammer_modify_node_noundo(cursor->trans, new_node);
1652 hammer_modify_node_all(cursor->trans, node);
8cd0a023
MD
1653 ondisk = node->ondisk;
1654 elm = &ondisk->elms[split];
1655 bcopy(elm, &new_node->ondisk->elms[0],
1656 (ondisk->count - split + 1) * esize);
1657 new_node->ondisk->count = ondisk->count - split;
1658 new_node->ondisk->parent = parent->node_offset;
1659 new_node->ondisk->type = HAMMER_BTREE_TYPE_INTERNAL;
a56cb012 1660 new_node->ondisk->mirror_tid = ondisk->mirror_tid;
8cd0a023 1661 KKASSERT(ondisk->type == new_node->ondisk->type);
b3bad96f 1662 hammer_cursor_split_node(node, new_node, split);
427e5fc6
MD
1663
1664 /*
fe7678ee
MD
1665 * Cleanup the original node. Elm (P) becomes the new boundary,
1666 * its subtree_offset was moved to the new node. If we had created
427e5fc6
MD
1667 * a new root its parent pointer may have changed.
1668 */
8cd0a023 1669 elm->internal.subtree_offset = 0;
c0ade690 1670 ondisk->count = split;
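/*
 * Illustrative bookkeeping check (hypothetical counts): if the original
 * count was 63 and split == 31, new_node now holds the old elements
 * 31..62 plus the copied right-hand boundary (count 32), the original
 * node keeps elements 0..30 (count 31) with P's base left behind as its
 * new right-hand boundary, and P is inserted into the parent as the
 * separator below.
 */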
427e5fc6
MD
1671
1672 /*
1673 * Insert the separator into the parent, fixup the parent's
1674 * reference to the original node, and reference the new node.
1675 * The separator is P.
1676 *
1677 * Remember that base.count does not include the right-hand boundary.
1678 */
36f82b23 1679 hammer_modify_node_all(cursor->trans, parent);
8cd0a023 1680 ondisk = parent->ondisk;
d26d0ae9 1681 KKASSERT(ondisk->count != HAMMER_BTREE_INT_ELMS);
8cd0a023 1682 parent_elm = &ondisk->elms[parent_index+1];
427e5fc6 1683 bcopy(parent_elm, parent_elm + 1,
8cd0a023
MD
1684 (ondisk->count - parent_index) * esize);
1685 parent_elm->internal.base = elm->base; /* separator P */
fe7678ee 1686 parent_elm->internal.base.btype = new_node->ondisk->type;
8cd0a023 1687 parent_elm->internal.subtree_offset = new_node->node_offset;
a56cb012 1688 parent_elm->internal.mirror_tid = new_node->ondisk->mirror_tid;
76376933 1689 ++ondisk->count;
10a5d1ba 1690 hammer_modify_node_done(parent);
b3bad96f 1691 hammer_cursor_inserted_element(parent, parent_index + 1);
427e5fc6 1692
7f7c1f84
MD
1693 /*
1694 * The children of new_node need their parent pointer set to new_node.
b33e2cc0
MD
1695 * The children have already been locked by
1696 * hammer_btree_lock_children().
7f7c1f84
MD
1697 */
1698 for (i = 0; i < new_node->ondisk->count; ++i) {
1699 elm = &new_node->ondisk->elms[i];
36f82b23 1700 error = btree_set_parent(cursor->trans, new_node, elm);
7f7c1f84
MD
1701 if (error) {
1702 panic("btree_split_internal: btree-fixup problem");
1703 }
1704 }
10a5d1ba 1705 hammer_modify_node_done(new_node);
7f7c1f84 1706
427e5fc6 1707 /*
47197d71 1708 * The filesystem's root B-Tree pointer may have to be updated.
427e5fc6
MD
1709 */
1710 if (made_root) {
47197d71
MD
1711 hammer_volume_t volume;
1712
1713 volume = hammer_get_root_volume(hmp, &error);
1714 KKASSERT(error == 0);
1715
e8599db1
MD
1716 hammer_modify_volume_field(cursor->trans, volume,
1717 vol0_btree_root);
47197d71 1718 volume->ondisk->vol0_btree_root = parent->node_offset;
10a5d1ba 1719 hammer_modify_volume_done(volume);
8cd0a023
MD
1720 node->ondisk->parent = parent->node_offset;
1721 if (cursor->parent) {
1722 hammer_unlock(&cursor->parent->lock);
1723 hammer_rel_node(cursor->parent);
1724 }
1725 cursor->parent = parent; /* lock'd and ref'd */
47197d71 1726 hammer_rel_volume(volume, 0);
427e5fc6 1727 }
10a5d1ba 1728 hammer_modify_node_done(node);
427e5fc6
MD
1729
1730 /*
1731 * Ok, now adjust the cursor depending on which element the original
1732 * index was pointing at. If we are >= the split point the push node
1733 * is now in the new node.
1734 *
1735 * NOTE: If we are at the split point itself we cannot stay with the
1736 * original node because the push index will point at the right-hand
1737 * boundary, which is illegal.
8cd0a023
MD
1738 *
1739 * NOTE: The cursor's parent or parent_index must be adjusted for
1740 * the case where a new parent (new root) was created, and the case
1741 * where the cursor is now pointing at the split node.
427e5fc6
MD
1742 */
1743 if (cursor->index >= split) {
8cd0a023 1744 cursor->parent_index = parent_index + 1;
427e5fc6 1745 cursor->index -= split;
8cd0a023
MD
1746 hammer_unlock(&cursor->node->lock);
1747 hammer_rel_node(cursor->node);
1748 cursor->node = new_node; /* locked and ref'd */
1749 } else {
1750 cursor->parent_index = parent_index;
1751 hammer_unlock(&new_node->lock);
1752 hammer_rel_node(new_node);
427e5fc6 1753 }
76376933
MD
1754
1755 /*
1756 * Fixup left and right bounds
1757 */
1758 parent_elm = &parent->ondisk->elms[cursor->parent_index];
fbc6e32a
MD
1759 cursor->left_bound = &parent_elm[0].internal.base;
1760 cursor->right_bound = &parent_elm[1].internal.base;
b3deaf57
MD
1761 KKASSERT(hammer_btree_cmp(cursor->left_bound,
1762 &cursor->node->ondisk->elms[0].internal.base) <= 0);
1763 KKASSERT(hammer_btree_cmp(cursor->right_bound,
9944ae54 1764 &cursor->node->ondisk->elms[cursor->node->ondisk->count].internal.base) >= 0);
76376933 1765
6a37e7e4 1766done:
24cf83d2 1767 hammer_btree_unlock_children(cursor->trans->hmp, &lockroot, NULL);
6a37e7e4
MD
1768 hammer_cursor_downgrade(cursor);
1769 return (error);
427e5fc6
MD
1770}
1771
1772/*
1773 * Same as the above, but splits a full leaf node.
6a37e7e4
MD
1774 *
1775 * This function synthesizes the parent separator with hammer_make_separator().
427e5fc6
MD
1776 */
1777static
1778int
8cd0a023 1779btree_split_leaf(hammer_cursor_t cursor)
427e5fc6 1780{
8cd0a023
MD
1781 hammer_node_ondisk_t ondisk;
1782 hammer_node_t parent;
1783 hammer_node_t leaf;
47197d71 1784 hammer_mount_t hmp;
8cd0a023
MD
1785 hammer_node_t new_leaf;
1786 hammer_btree_elm_t elm;
1787 hammer_btree_elm_t parent_elm;
b3deaf57 1788 hammer_base_elm_t mid_boundary;
df2ccbac 1789 hammer_off_t hint;
427e5fc6
MD
1790 int parent_index;
1791 int made_root;
1792 int split;
1793 int error;
8cd0a023 1794 const size_t esize = sizeof(*elm);
427e5fc6 1795
6a37e7e4
MD
1796 if ((error = hammer_cursor_upgrade(cursor)) != 0)
1797 return(error);
cb51be26 1798 ++hammer_stats_btree_splits;
6a37e7e4 1799
36f82b23
MD
1800 KKASSERT(hammer_btree_cmp(cursor->left_bound,
1801 &cursor->node->ondisk->elms[0].leaf.base) <= 0);
1802 KKASSERT(hammer_btree_cmp(cursor->right_bound,
1803 &cursor->node->ondisk->elms[cursor->node->ondisk->count-1].leaf.base) > 0);
1804
427e5fc6 1805 /*
fa2b9a03
MD
1806 * Calculate the split point. If the insertion point is at the
1807 * end of the leaf we adjust the split point significantly to the
1808 * right to try to optimize node fill and flag it. If we hit
1809 * that same leaf again our heuristic failed and we don't try
1810 * to optimize node fill (it could lead to a degenerate case).
fe7678ee
MD
1811 *
1812 * Spikes are made up of two leaf elements which cannot be
1813 * safely split.
427e5fc6 1814 */
8cd0a023
MD
1815 leaf = cursor->node;
1816 ondisk = leaf->ondisk;
fa2b9a03
MD
1817 KKASSERT(ondisk->count > 4);
1818 if (cursor->index == ondisk->count &&
1819 (leaf->flags & HAMMER_NODE_NONLINEAR) == 0) {
1820 split = (ondisk->count + 1) * 3 / 4;
1821 leaf->flags |= HAMMER_NODE_NONLINEAR;
1822 } else {
1823 split = (ondisk->count + 1) / 2;
1824 }
1825
1826#if 0
1827 /*
1828 * If the insertion point is at the split point shift the
1829 * split point left so we don't have to worry about
1830 */
1831 if (cursor->index == split)
427e5fc6 1832 --split;
fa2b9a03
MD
1833#endif
1834 KKASSERT(split > 0 && split < ondisk->count);
1835
427e5fc6 1836 error = 0;
40043e7f 1837 hmp = leaf->hmp;
427e5fc6 1838
fe7678ee 1839 elm = &ondisk->elms[split];
fe7678ee 1840
36f82b23
MD
1841 KKASSERT(hammer_btree_cmp(cursor->left_bound, &elm[-1].leaf.base) <= 0);
1842 KKASSERT(hammer_btree_cmp(cursor->left_bound, &elm->leaf.base) <= 0);
1843 KKASSERT(hammer_btree_cmp(cursor->right_bound, &elm->leaf.base) > 0);
1844 KKASSERT(hammer_btree_cmp(cursor->right_bound, &elm[1].leaf.base) > 0);
1845
427e5fc6
MD
1846 /*
1847 * If we are at the root of the tree, create a new root node with
1848 * 1 element and split normally. Avoid making major modifications
1849 * until we know the whole operation will work.
1850 */
8cd0a023 1851 if (ondisk->parent == 0) {
b4f86ea3 1852 parent = hammer_alloc_btree(cursor->trans, 0, &error);
427e5fc6 1853 if (parent == NULL)
6a37e7e4 1854 goto done;
8cd0a023 1855 hammer_lock_ex(&parent->lock);
36f82b23 1856 hammer_modify_node_noundo(cursor->trans, parent);
8cd0a023
MD
1857 ondisk = parent->ondisk;
1858 ondisk->count = 1;
1859 ondisk->parent = 0;
a56cb012 1860 ondisk->mirror_tid = leaf->ondisk->mirror_tid;
8cd0a023 1861 ondisk->type = HAMMER_BTREE_TYPE_INTERNAL;
47197d71 1862 ondisk->elms[0].base = hmp->root_btree_beg;
fe7678ee 1863 ondisk->elms[0].base.btype = leaf->ondisk->type;
8cd0a023 1864 ondisk->elms[0].internal.subtree_offset = leaf->node_offset;
47197d71 1865 ondisk->elms[1].base = hmp->root_btree_end;
fe7678ee 1866 /* ondisk->elms[1].base.btype = not used */
10a5d1ba 1867 hammer_modify_node_done(parent);
427e5fc6 1868 made_root = 1;
8cd0a023 1869 parent_index = 0; /* insertion point in parent */
427e5fc6
MD
1870 } else {
1871 made_root = 0;
8cd0a023
MD
1872 parent = cursor->parent;
1873 parent_index = cursor->parent_index;
427e5fc6 1874 }
427e5fc6 1875
df2ccbac
MD
1876 /*
1877 * Calculate a hint for the allocation of the new B-Tree leaf node.
1878 * For now just try to localize it within the same bigblock as
1879 * the current leaf.
1880 *
1881 * If the insertion point is at the end of the leaf we recognize a
1882 * likely append sequence of some sort (data, meta-data, inodes,
1883 * whatever). Set the hint to zero to allocate out of linear space
1884 * instead of trying to completely fill previously hinted space.
1885 *
1886 * This also sets the stage for recursive splits to localize using
1887 * the new space.
1888 */
1889 ondisk = leaf->ondisk;
1890 if (cursor->index == ondisk->count)
1891 hint = 0;
1892 else
1893 hint = leaf->node_offset;
1894
427e5fc6
MD
1895 /*
1896 * Split leaf into new_leaf at the split point. Select a separator
1897 * value in between the two leafs but with a bias toward the right
1898 * leaf, since comparisons use an 'elm >= separator' inequality.
1899 *
1900 * L L L L L L L L
1901 *
1902 * x x P x x
1903 * s S S s
1904 * / \
1905 * L L L L L L L L
1906 */
b4f86ea3 1907 new_leaf = hammer_alloc_btree(cursor->trans, 0, &error);
427e5fc6 1908 if (new_leaf == NULL) {
8cd0a023
MD
1909 if (made_root) {
1910 hammer_unlock(&parent->lock);
36f82b23 1911 hammer_delete_node(cursor->trans, parent);
8cd0a023
MD
1912 hammer_rel_node(parent);
1913 }
6a37e7e4 1914 goto done;
427e5fc6 1915 }
8cd0a023 1916 hammer_lock_ex(&new_leaf->lock);
427e5fc6
MD
1917
1918 /*
36f82b23
MD
1919 * Create the new node and copy the leaf elements from the split
1920 * point on to the new node.
427e5fc6 1921 */
36f82b23
MD
1922 hammer_modify_node_all(cursor->trans, leaf);
1923 hammer_modify_node_noundo(cursor->trans, new_leaf);
8cd0a023
MD
1924 ondisk = leaf->ondisk;
1925 elm = &ondisk->elms[split];
1926 bcopy(elm, &new_leaf->ondisk->elms[0], (ondisk->count - split) * esize);
1927 new_leaf->ondisk->count = ondisk->count - split;
1928 new_leaf->ondisk->parent = parent->node_offset;
1929 new_leaf->ondisk->type = HAMMER_BTREE_TYPE_LEAF;
a56cb012 1930 new_leaf->ondisk->mirror_tid = ondisk->mirror_tid;
8cd0a023 1931 KKASSERT(ondisk->type == new_leaf->ondisk->type);
10a5d1ba 1932 hammer_modify_node_done(new_leaf);
b3bad96f 1933 hammer_cursor_split_node(leaf, new_leaf, split);
427e5fc6
MD
1934
1935 /*
8cd0a023
MD
1936 * Cleanup the original node. Because this is a leaf node and
1937 * leaf nodes do not have a right-hand boundary, there
c0ade690
MD
1938 * aren't any special edge cases to clean up. We just fixup the
1939 * count.
427e5fc6 1940 */
c0ade690 1941 ondisk->count = split;
427e5fc6
MD
1942
1943 /*
1944 * Insert the separator into the parent, fixup the parent's
1945 * reference to the original node, and reference the new node.
1946 * The separator is P.
1947 *
1948 * Remember that base.count does not include the right-hand boundary.
1949 * We are copying parent_index+1 to parent_index+2, not +0 to +1.
1950 */
36f82b23 1951 hammer_modify_node_all(cursor->trans, parent);
8cd0a023 1952 ondisk = parent->ondisk;
36f82b23 1953 KKASSERT(split != 0);
d26d0ae9 1954 KKASSERT(ondisk->count != HAMMER_BTREE_INT_ELMS);
8cd0a023 1955 parent_elm = &ondisk->elms[parent_index+1];
d26d0ae9
MD
1956 bcopy(parent_elm, parent_elm + 1,
1957 (ondisk->count - parent_index) * esize);
eaeff70d 1958
47197d71 1959 hammer_make_separator(&elm[-1].base, &elm[0].base, &parent_elm->base);
fe7678ee 1960 parent_elm->internal.base.btype = new_leaf->ondisk->type;
8cd0a023 1961 parent_elm->internal.subtree_offset = new_leaf->node_offset;
a56cb012 1962 parent_elm->internal.mirror_tid = new_leaf->ondisk->mirror_tid;
b3deaf57 1963 mid_boundary = &parent_elm->base;
76376933 1964 ++ondisk->count;
10a5d1ba 1965 hammer_modify_node_done(parent);
b3bad96f 1966 hammer_cursor_inserted_element(parent, parent_index + 1);
427e5fc6 1967
fe7678ee 1968 /*
47197d71 1969 * The filesystem's root B-Tree pointer may have to be updated.
427e5fc6
MD
1970 */
1971 if (made_root) {
47197d71
MD
1972 hammer_volume_t volume;
1973
1974 volume = hammer_get_root_volume(hmp, &error);
1975 KKASSERT(error == 0);
1976
e8599db1
MD
1977 hammer_modify_volume_field(cursor->trans, volume,
1978 vol0_btree_root);
47197d71 1979 volume->ondisk->vol0_btree_root = parent->node_offset;
10a5d1ba 1980 hammer_modify_volume_done(volume);
8cd0a023
MD
1981 leaf->ondisk->parent = parent->node_offset;
1982 if (cursor->parent) {
1983 hammer_unlock(&cursor->parent->lock);
1984 hammer_rel_node(cursor->parent);
1985 }
1986 cursor->parent = parent; /* lock'd and ref'd */
47197d71 1987 hammer_rel_volume(volume, 0);
427e5fc6 1988 }
10a5d1ba 1989 hammer_modify_node_done(leaf);
8cd0a023 1990
427e5fc6
MD
1991 /*
1992 * Ok, now adjust the cursor depending on which element the original
1993 * index was pointing at. If we are >= the split point the push node
1994 * is now in the new node.
1995 *
b3deaf57
MD
1996 * NOTE: If we are at the split point itself we need to select the
1997 * old or new node based on where key_beg's insertion point will be.
1998 * If we pick the wrong side the inserted element will wind up in
1999 * the wrong leaf node and outside that node's bounds.
427e5fc6 2000 */
b3deaf57
MD
2001 if (cursor->index > split ||
2002 (cursor->index == split &&
2003 hammer_btree_cmp(&cursor->key_beg, mid_boundary) >= 0)) {
8cd0a023 2004 cursor->parent_index = parent_index + 1;
427e5fc6 2005 cursor->index -= split;
8cd0a023
MD
2006 hammer_unlock(&cursor->node->lock);
2007 hammer_rel_node(cursor->node);
2008 cursor->node = new_leaf;
2009 } else {
2010 cursor->parent_index = parent_index;
2011 hammer_unlock(&new_leaf->lock);
2012 hammer_rel_node(new_leaf);
427e5fc6 2013 }
76376933
MD
2014
2015 /*
2016 * Fixup left and right bounds
2017 */
2018 parent_elm = &parent->ondisk->elms[cursor->parent_index];
fbc6e32a
MD
2019 cursor->left_bound = &parent_elm[0].internal.base;
2020 cursor->right_bound = &parent_elm[1].internal.base;
eaeff70d
MD
2021
2022 /*
47197d71 2023 * Assert that the bounds are correct.
eaeff70d 2024 */
b3deaf57
MD
2025 KKASSERT(hammer_btree_cmp(cursor->left_bound,
2026 &cursor->node->ondisk->elms[0].leaf.base) <= 0);
2027 KKASSERT(hammer_btree_cmp(cursor->right_bound,
47197d71 2028 &cursor->node->ondisk->elms[cursor->node->ondisk->count-1].leaf.base) > 0);
36f82b23
MD
2029 KKASSERT(hammer_btree_cmp(cursor->left_bound, &cursor->key_beg) <= 0);
2030 KKASSERT(hammer_btree_cmp(cursor->right_bound, &cursor->key_beg) > 0);
76376933 2031
6a37e7e4
MD
2032done:
2033 hammer_cursor_downgrade(cursor);
2034 return (error);
427e5fc6
MD
2035}
2036
adf01747
MD
2037#if 0
2038
32c90105
MD
2039/*
2040 * Recursively correct the right-hand boundary's create_tid to (tid) as
2041 * long as the rest of the key matches. We have to recurse upward in
2042 * the tree as well as down the left side of each parent's right node.
2043 *
2044 * Return EDEADLK if we were only partially successful, forcing the caller
2045 * to try again. The original cursor is not modified. This routine can
2046 * also fail with EDEADLK if it is forced to throw away a portion of its
2047 * record history.
2048 *
2049 * The caller must pass a downgraded cursor to us (otherwise we can't dup it).
2050 */
2051struct hammer_rhb {
2052 TAILQ_ENTRY(hammer_rhb) entry;
2053 hammer_node_t node;
2054 int index;
2055};
2056
2057TAILQ_HEAD(hammer_rhb_list, hammer_rhb);
2058
2059int
2060hammer_btree_correct_rhb(hammer_cursor_t cursor, hammer_tid_t tid)
2061{
bac808fe 2062 struct hammer_mount *hmp;
32c90105
MD
2063 struct hammer_rhb_list rhb_list;
2064 hammer_base_elm_t elm;
2065 hammer_node_t orig_node;
2066 struct hammer_rhb *rhb;
2067 int orig_index;
2068 int error;
2069
2070 TAILQ_INIT(&rhb_list);
bac808fe 2071 hmp = cursor->trans->hmp;
32c90105
MD
2072
2073 /*
2074 * Save our position so we can restore it on return. This also
2075 * gives us a stable 'elm'.
2076 */
2077 orig_node = cursor->node;
2078 hammer_ref_node(orig_node);
2079 hammer_lock_sh(&orig_node->lock);
2080 orig_index = cursor->index;
2081 elm = &orig_node->ondisk->elms[orig_index].base;
2082
2083 /*
2084 * Now build a list of parents going up, allocating a rhb
2085 * structure for each one.
2086 */
2087 while (cursor->parent) {
2088 /*
2089 * Stop if we no longer have any right-bounds to fix up
2090 */
2091 if (elm->obj_id != cursor->right_bound->obj_id ||
2092 elm->rec_type != cursor->right_bound->rec_type ||
2093 elm->key != cursor->right_bound->key) {
2094 break;
2095 }
2096
2097 /*
2098 * Stop if the right-hand bound's create_tid does not
47197d71 2099 * need to be corrected.
32c90105
MD
2100 */
2101 if (cursor->right_bound->create_tid >= tid)
2102 break;
2103
bac808fe 2104 rhb = kmalloc(sizeof(*rhb), hmp->m_misc, M_WAITOK|M_ZERO);
32c90105
MD
2105 rhb->node = cursor->parent;
2106 rhb->index = cursor->parent_index;
2107 hammer_ref_node(rhb->node);
2108 hammer_lock_sh(&rhb->node->lock);
2109 TAILQ_INSERT_HEAD(&rhb_list, rhb, entry);
2110
2111 hammer_cursor_up(cursor);
2112 }
2113
2114 /*
2115 * now safely adjust the right hand bound for each rhb. This may
2116 * also require taking the right side of the tree and iterating down
2117 * ITS left side.
2118 */
2119 error = 0;
2120 while (error == 0 && (rhb = TAILQ_FIRST(&rhb_list)) != NULL) {
2121 error = hammer_cursor_seek(cursor, rhb->node, rhb->index);
32c90105
MD
2122 if (error)
2123 break;
2124 TAILQ_REMOVE(&rhb_list, rhb, entry);
2125 hammer_unlock(&rhb->node->lock);
2126 hammer_rel_node(rhb->node);
bac808fe 2127 kfree(rhb, hmp->m_misc);
32c90105
MD
2128
2129 switch (cursor->node->ondisk->type) {
2130 case HAMMER_BTREE_TYPE_INTERNAL:
2131 /*
2132 * Right-boundary for parent at internal node
2133 * is one element to the right of the element whose
2134 * right boundary needs adjusting. We must then
2135 * traverse down the left side correcting any left
2136 * bounds (which may now be too far to the left).
2137 */
2138 ++cursor->index;
2139 error = hammer_btree_correct_lhb(cursor, tid);
2140 break;
32c90105
MD
2141 default:
2142 panic("hammer_btree_correct_rhb(): Bad node type");
2143 error = EINVAL;
2144 break;
2145 }
2146 }
2147
2148 /*
2149 * Cleanup
2150 */
2151 while ((rhb = TAILQ_FIRST(&rhb_list)) != NULL) {
2152 TAILQ_REMOVE(&rhb_list, rhb, entry);
2153 hammer_unlock(&rhb->node->lock);
2154 hammer_rel_node(rhb->node);
bac808fe 2155 kfree(rhb, hmp->m_misc);
32c90105
MD
2156 }
2157 error = hammer_cursor_seek(cursor, orig_node, orig_index);
2158 hammer_unlock(&orig_node->lock);
2159 hammer_rel_node(orig_node);
2160 return (error);
2161}
2162
2163/*
2164 * Similar to rhb (in fact, rhb calls lhb), but corrects the left hand
2165 * bound going downward starting at the current cursor position.
2166 *
2167 * This function does not restore the cursor after use.
2168 */
2169int
2170hammer_btree_correct_lhb(hammer_cursor_t cursor, hammer_tid_t tid)
2171{
2172 struct hammer_rhb_list rhb_list;
2173 hammer_base_elm_t elm;
2174 hammer_base_elm_t cmp;
2175 struct hammer_rhb *rhb;
bac808fe 2176 struct hammer_mount *hmp;
32c90105
MD
2177 int error;
2178
2179 TAILQ_INIT(&rhb_list);
bac808fe 2180 hmp = cursor->trans->hmp;
32c90105
MD
2181
2182 cmp = &cursor->node->ondisk->elms[cursor->index].base;
2183
2184 /*
2185 * Record the node and traverse down the left-hand side for all
2186 * matching records needing a boundary correction.
2187 */
2188 error = 0;
2189 for (;;) {
bac808fe 2190 rhb = kmalloc(sizeof(*rhb), hmp->m_misc, M_WAITOK|M_ZERO);
32c90105
MD
2191 rhb->node = cursor->node;
2192 rhb->index = cursor->index;
2193 hammer_ref_node(rhb->node);
2194 hammer_lock_sh(&rhb->node->lock);
2195 TAILQ_INSERT_HEAD(&rhb_list, rhb, entry);
2196
2197 if (cursor->node->ondisk->type == HAMMER_BTREE_TYPE_INTERNAL) {
2198 /*
2199 * Nothing to traverse down if we are at the right
2200 * boundary of an internal node.
2201 */
2202 if (cursor->index == cursor->node->ondisk->count)
2203 break;
2204 } else {
2205 elm = &cursor->node->ondisk->elms[cursor->index].base;
2206 if (elm->btype == HAMMER_BTREE_TYPE_RECORD)
2207 break;
47197d71 2208 panic("Illegal leaf record type %02x", elm->btype);
32c90105
MD
2209 }
2210 error = hammer_cursor_down(cursor);
2211 if (error)
2212 break;
2213
2214 elm = &cursor->node->ondisk->elms[cursor->index].base;
2215 if (elm->obj_id != cmp->obj_id ||
2216 elm->rec_type != cmp->rec_type ||
2217 elm->key != cmp->key) {
2218 break;
2219 }
2220 if (elm->create_tid >= tid)
2221 break;
2222
2223 }
2224
2225 /*
2226 * Now we can safely adjust the left-hand boundary from the bottom-up.
2227 * The last element we remove from the list is the caller's right hand
2228 * boundary, which must also be adjusted.
2229 */
2230 while (error == 0 && (rhb = TAILQ_FIRST(&rhb_list)) != NULL) {
2231 error = hammer_cursor_seek(cursor, rhb->node, rhb->index);
2232 if (error)
2233 break;
2234 TAILQ_REMOVE(&rhb_list, rhb, entry);
2235 hammer_unlock(&rhb->node->lock);
2236 hammer_rel_node(rhb->node);
bac808fe 2237 kfree(rhb, hmp->m_misc);
32c90105
MD
2238
2239 elm = &cursor->node->ondisk->elms[cursor->index].base;
2240 if (cursor->node->ondisk->type == HAMMER_BTREE_TYPE_INTERNAL) {
36f82b23 2241 hammer_modify_node(cursor->trans, cursor->node,
19619882
MD
2242 &elm->create_tid,
2243 sizeof(elm->create_tid));
32c90105 2244 elm->create_tid = tid;
10a5d1ba 2245 hammer_modify_node_done(cursor->node);
32c90105
MD
2246 } else {
2247 panic("hammer_btree_correct_lhb(): Bad element type");
2248 }
2249 }
2250
2251 /*
2252 * Cleanup
2253 */
2254 while ((rhb = TAILQ_FIRST(&rhb_list)) != NULL) {
2255 TAILQ_REMOVE(&rhb_list, rhb, entry);
2256 hammer_unlock(&rhb->node->lock);
2257 hammer_rel_node(rhb->node);
bac808fe 2258 kfree(rhb, hmp->m_misc);
32c90105
MD
2259 }
2260 return (error);
2261}
2262
adf01747
MD
2263#endif
2264
427e5fc6 2265/*
f36a9737
MD
2266 * Attempt to remove the locked, empty or want-to-be-empty B-Tree node at
2267 * (cursor->node). Returns 0 on success, EDEADLK if we could not complete
2268 * the operation due to a deadlock, or some other error.
8cd0a023 2269 *
5c8d05e2
MD
2270 * This routine is initially called with an empty leaf and may be
2271 * recursively called with single-element internal nodes.
b3deaf57 2272 *
c82af904
MD
2273 * It should also be noted that when removing empty leaves we must be sure
2274 * to test and update mirror_tid because another thread may have deadlocked
5fa5c92f 2275 * against us (or someone) trying to propagate it up and cannot retry once
c82af904
MD
2276 * the node has been deleted.
2277 *
f36a9737
MD
2278 * On return the cursor may end up pointing to an internal node, suitable
2279 * for further iteration but not for an immediate insertion or deletion.
8cd0a023 2280 */
f36a9737 2281static int
46fe7ae1 2282btree_remove(hammer_cursor_t cursor)
8cd0a023
MD
2283{
2284 hammer_node_ondisk_t ondisk;
195c19a1 2285 hammer_btree_elm_t elm;
195c19a1 2286 hammer_node_t node;
8cd0a023 2287 hammer_node_t parent;
fe7678ee 2288 const int esize = sizeof(*elm);
8cd0a023 2289 int error;
8cd0a023 2290
fe7678ee
MD
2291 node = cursor->node;
2292
47197d71
MD
2293 /*
2294 * When deleting the root of the filesystem convert it to
2295 * an empty leaf node. Internal nodes cannot be empty.
2296 */
c82af904
MD
2297 ondisk = node->ondisk;
2298 if (ondisk->parent == 0) {
f36a9737 2299 KKASSERT(cursor->parent == NULL);
36f82b23 2300 hammer_modify_node_all(cursor->trans, node);
c82af904 2301 KKASSERT(ondisk == node->ondisk);
195c19a1
MD
2302 ondisk->type = HAMMER_BTREE_TYPE_LEAF;
2303 ondisk->count = 0;
10a5d1ba 2304 hammer_modify_node_done(node);
b3deaf57 2305 cursor->index = 0;
47197d71 2306 return(0);
8cd0a023
MD
2307 }
2308
c82af904
MD
2309 parent = cursor->parent;
2310
8cd0a023 2311 /*
f36a9737
MD
2312 * Attempt to remove the parent's reference to the child. If the
2313 * parent would become empty we have to recurse. If we fail we
2314 * leave the parent pointing to an empty leaf node.
5c8d05e2
MD
2315 *
2316 * We have to recurse successfully before we can delete the internal
2317 * node as it is illegal to have empty internal nodes. Even though
2318 * the operation may be aborted we must still fixup any unlocked
2319 * cursors as if we had deleted the element prior to recursing
2320 * (by calling hammer_cursor_deleted_element()) so those cursors
2321 * are properly forced up the chain by the recursion.
8cd0a023 2322 */
f36a9737
MD
2323 if (parent->ondisk->count == 1) {
2324 /*
2325 * This special cursor_up_locked() call leaves the original
2326 * node exclusively locked and referenced, leaves the
2327 * original parent locked (as the new node), and locks the
2328 * new parent. It can return EDEADLK.
f3a4893b
MD
2329 *
2330 * We cannot call hammer_cursor_removed_node() until we are
2331 * actually able to remove the node. If we did then tracked
2332 * cursors in the middle of iterations could be repointed
2333 * to a parent node. If this occurs they could end up
2334 * scanning newly inserted records into the node (that could
2335 * not be deleted) when they push down again.
2336 *
2337 * Due to the way the recursion works the final parent is left
2338 * in cursor->parent after the recursion returns. Each
2339 * layer on the way back up is thus able to call
2340 * hammer_cursor_removed_node() and 'jump' the node up to
2341 * the (same) final parent.
2342 *
2343 * NOTE! The local variable 'parent' is invalid after we
2344 * call hammer_cursor_up_locked().
f36a9737
MD
2345 */
2346 error = hammer_cursor_up_locked(cursor);
f3a4893b
MD
2347 parent = NULL;
2348
f36a9737 2349 if (error == 0) {
5c8d05e2 2350 hammer_cursor_deleted_element(cursor->node, 0);
f36a9737
MD
2351 error = btree_remove(cursor);
2352 if (error == 0) {
901ba05c 2353 KKASSERT(node != cursor->node);
f3a4893b 2354 hammer_cursor_removed_node(
901ba05c
MD
2355 node, cursor->node,
2356 cursor->index);
f36a9737
MD
2357 hammer_modify_node_all(cursor->trans, node);
2358 ondisk = node->ondisk;
2359 ondisk->type = HAMMER_BTREE_TYPE_DELETED;
2360 ondisk->count = 0;
2361 hammer_modify_node_done(node);
250aec18 2362 hammer_flush_node(node, 0);
f36a9737
MD
2363 hammer_delete_node(cursor->trans, node);
2364 } else {
3214ade6
MD
2365 /*
2366 * Defer parent removal because we could not
2367 * get the lock, just let the leaf remain
2368 * empty.
2369 */
2370 /**/
f36a9737
MD
2371 }
2372 hammer_unlock(&node->lock);
2373 hammer_rel_node(node);
2374 } else {
3214ade6
MD
2375 /*
2376 * Defer parent removal because we could not
2377 * get the lock, just let the leaf remain
2378 * empty.
2379 */
2380 /**/
f36a9737
MD
2381 }
2382 } else {
2383 KKASSERT(parent->ondisk->count > 1);
6a37e7e4 2384
f36a9737
MD
2385 hammer_modify_node_all(cursor->trans, parent);
2386 ondisk = parent->ondisk;
2387 KKASSERT(ondisk->type == HAMMER_BTREE_TYPE_INTERNAL);
c82af904 2388
f36a9737
MD
2389 elm = &ondisk->elms[cursor->parent_index];
2390 KKASSERT(elm->internal.subtree_offset == node->node_offset);
2391 KKASSERT(ondisk->count > 0);
4c038e17
MD
2392
2393 /*
2394 * We must retain the highest mirror_tid. The deleted
2395 * range is now encompassed by the element to the left.
2396 * If we are already at the left edge the new left edge
2397 * inherits mirror_tid.
2398 *
2399 * Note that bounds of the parent to our parent may create
2400 * a gap to the left of our left-most node or to the right
2401 * of our right-most node. The gap is silently included
2402 * in the mirror_tid's area of effect from the point of view
2403 * of the scan.
2404 */
2405 if (cursor->parent_index) {
2406 if (elm[-1].internal.mirror_tid <
2407 elm[0].internal.mirror_tid) {
2408 elm[-1].internal.mirror_tid =
2409 elm[0].internal.mirror_tid;
2410 }
2411 } else {
2412 if (elm[1].internal.mirror_tid <
2413 elm[0].internal.mirror_tid) {
2414 elm[1].internal.mirror_tid =
2415 elm[0].internal.mirror_tid;
2416 }
2417 }
2418
2419 /*
f3a4893b
MD
2420 * Delete the subtree reference in the parent. Include
2421 * boundary element at end.
4c038e17 2422 */
f36a9737
MD
2423 bcopy(&elm[1], &elm[0],
2424 (ondisk->count - cursor->parent_index) * esize);
2425 --ondisk->count;
10a5d1ba 2426 hammer_modify_node_done(parent);
f3a4893b 2427 hammer_cursor_removed_node(node, parent, cursor->parent_index);
6c1f89f4 2428 hammer_cursor_deleted_element(parent, cursor->parent_index);
250aec18 2429 hammer_flush_node(node, 0);
f36a9737 2430 hammer_delete_node(cursor->trans, node);
6a37e7e4 2431
f36a9737
MD
2432 /*
2433 * cursor->node is invalid, cursor up to make the cursor
ec9b6294
MD
2434 * valid again. We have to flag the condition in case
2435 * another thread wiggles an insertion in during an
2436 * iteration.
f36a9737 2437 */
ec9b6294 2438 cursor->flags |= HAMMER_CURSOR_ITERATE_CHECK;
f36a9737 2439 error = hammer_cursor_up(cursor);
6a37e7e4 2440 }
f36a9737 2441 return (error);
6a37e7e4
MD
2442}
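/*
 * Illustrative recursion (hypothetical shape): when an empty leaf's
 * parent holds only that single element, the cursor is moved up and
 * btree_remove() is re-entered on the parent, which removes the
 * grandparent's reference to it and deletes it; only after that
 * recursion succeeds is the original leaf marked
 * HAMMER_BTREE_TYPE_DELETED and deleted, so an empty internal node is
 * never left on the media.
 */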
2443
602c6cb8
MD
2444/*
2445 * Propagate cursor->trans->tid up the B-Tree starting at the current
2446 * cursor position using pseudofs info gleaned from the passed inode.
2447 *
2448 * The passed inode has no relationship to the cursor position other
2449 * than being in the same pseudofs as the insertion or deletion we
2450 * are propagating the mirror_tid for.
c9ce54d6
MD
2451 *
2452 * WARNING! Because we push and pop the passed cursor, it may be
2453 * modified by other B-Tree operations while it is unlocked
2454 * and things like the node & leaf pointers, and indexes might
2455 * change.
602c6cb8
MD
2456 */
2457void
4c038e17
MD
2458hammer_btree_do_propagation(hammer_cursor_t cursor,
2459 hammer_pseudofs_inmem_t pfsm,
602c6cb8
MD
2460 hammer_btree_leaf_elm_t leaf)
2461{
adf01747
MD
2462 hammer_cursor_t ncursor;
2463 hammer_tid_t mirror_tid;
602c6cb8
MD
2464 int error;
2465
2466 /*
732a1697
MD
2467 * We do not propagate a mirror_tid if the filesystem was mounted
2468 * in no-mirror mode.
602c6cb8 2469 */
732a1697 2470 if (cursor->trans->hmp->master_id < 0)
602c6cb8 2471 return;
602c6cb8 2472
adf01747
MD
2473 /*
2474 * This is a bit of a hack because we cannot deadlock or return
2475 * EDEADLK here. The related operation has already completed and
2476 * we must propagate the mirror_tid now regardless.
2477 *
2478 * Generate a new cursor which inherits the original's locks and
2479 * unlock the original. Use the new cursor to propagate the
2480 * mirror_tid. Then clean up the new cursor and reacquire locks
2481 * on the original.
2482 *
2483 * hammer_dup_cursor() cannot dup locks. The dup inherits the
2484 * original's locks and the original is tracked and must be
2485 * re-locked.
2486 */
2487 mirror_tid = cursor->node->ondisk->mirror_tid;
a56cb012 2488 KKASSERT(mirror_tid != 0);
3f43fb33 2489 ncursor = hammer_push_cursor(cursor);
adf01747
MD
2490 error = hammer_btree_mirror_propagate(ncursor, mirror_tid);
2491 KKASSERT(error == 0);
3f43fb33 2492 hammer_pop_cursor(cursor, ncursor);
c9ce54d6 2493 /* WARNING: cursor's leaf pointer may change after pop */
602c6cb8
MD
2494}
2495
2496
c82af904
MD
2497/*
2498 * Propagate a mirror TID update upwards through the B-Tree to the root.
2499 *
2500 * A locked internal node must be passed in. The node will remain locked
2501 * on return.
2502 *
2503 * This function syncs mirror_tid at the specified internal node's element,
2504 * adjusts the node's aggregation mirror_tid, and then recurses upwards.
2505 */
602c6cb8 2506static int
adf01747 2507hammer_btree_mirror_propagate(hammer_cursor_t cursor, hammer_tid_t mirror_tid)
c82af904
MD
2508{
2509 hammer_btree_internal_elm_t elm;
adf01747 2510 hammer_node_t node;
c82af904
MD
2511 int error;
2512
adf01747
MD
2513 for (;;) {
2514 error = hammer_cursor_up(cursor);
2515 if (error == 0)
2516 error = hammer_cursor_upgrade(cursor);
6dc17446
MD
2517
2518 /*
2519 * We can ignore HAMMER_CURSOR_ITERATE_CHECK, the
2520 * cursor will still be properly positioned for
2521 * mirror propagation, just not for iterations.
2522 */
adf01747
MD
2523 while (error == EDEADLK) {
2524 hammer_recover_cursor(cursor);
2525 error = hammer_cursor_upgrade(cursor);
2526 }
2527 if (error)
2528 break;
95885f20
MD
2529
2530 /*
2531 * If the cursor deadlocked it could end up at a leaf
2532 * after we lost the lock.
2533 */
adf01747 2534 node = cursor->node;
95885f20
MD
2535 if (node->ondisk->type != HAMMER_BTREE_TYPE_INTERNAL)
2536 continue;
c82af904 2537
adf01747
MD
2538 /*
2539 * Adjust the node's element
2540 */
2541 elm = &node->ondisk->elms[cursor->index].internal;
2542 if (elm->mirror_tid >= mirror_tid)
2543 break;
2544 hammer_modify_node(cursor->trans, node, &elm->mirror_tid,
2545 sizeof(elm->mirror_tid));
2546 elm->mirror_tid = mirror_tid;
2547 hammer_modify_node_done(node);
02325004
MD
2548 if (hammer_debug_general & 0x0002) {
2549 kprintf("mirror_propagate: propagate "
2550 "%016llx @%016llx:%d\n",
973c11b9
MD
2551 (long long)mirror_tid,
2552 (long long)node->node_offset,
2553 cursor->index);
02325004
MD
2554 }
2555
c82af904 2556
adf01747
MD
2557 /*
2558 * Adjust the node's mirror_tid aggregator
2559 */
2560 if (node->ondisk->mirror_tid >= mirror_tid)
2561 return(0);
2562 hammer_modify_node_field(cursor->trans, node, mirror_tid);
2563 node->ondisk->mirror_tid = mirror_tid;
2564 hammer_modify_node_done(node);
02325004
MD
2565 if (hammer_debug_general & 0x0002) {
2566 kprintf("mirror_propagate: propagate "
2567 "%016llx @%016llx\n",
973c11b9
MD
2568 (long long)mirror_tid,
2569 (long long)node->node_offset);
02325004 2570 }
c82af904 2571 }
adf01747
MD
2572 if (error == ENOENT)
2573 error = 0;
c82af904
MD
2574 return(error);
2575}
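/*
 * Illustrative walk (hypothetical tid): after an insertion under a
 * transaction with tid 0x100, hammer_btree_do_propagation() pushes a
 * cursor and this routine cursors up one level at a time, raising
 * elms[cursor->index].internal.mirror_tid and then the node's aggregate
 * mirror_tid to at least 0x100 at each internal node.  It stops early as
 * soon as a node already records a mirror_tid >= 0x100, or when the root
 * is passed (the resulting ENOENT is converted to success above).
 */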
2576
2577hammer_node_t
82010f9f
MD
2578hammer_btree_get_parent(hammer_transaction_t trans, hammer_node_t node,
2579 int *parent_indexp, int *errorp, int try_exclusive)
c82af904
MD
2580{
2581 hammer_node_t parent;
2582 hammer_btree_elm_t elm;
2583 int i;
2584
2585 /*
2586 * Get the node
2587 */
82010f9f 2588 parent = hammer_get_node(trans, node->ondisk->parent, 0, errorp);
c82af904
MD
2589 if (*errorp) {
2590 KKASSERT(parent == NULL);
2591 return(NULL);
2592 }
2593 KKASSERT ((parent->flags & HAMMER_NODE_DELETED) == 0);
2594
2595 /*
2596 * Lock the node
2597 */
2598 if (try_exclusive) {
2599 if (hammer_lock_ex_try(&parent->lock)) {
2600 hammer_rel_node(parent);
2601 *errorp = EDEADLK;
2602 return(NULL);
2603 }
2604 } else {
2605 hammer_lock_sh(&parent->lock);
2606 }
2607
2608 /*
2609 * Figure out which element in the parent is pointing to the
2610 * child.
2611 */
2612 if (node->ondisk->count) {
2613 i = hammer_btree_search_node(&node->ondisk->elms[0].base,
2614 parent->ondisk);
2615 } else {
2616 i = 0;
2617 }
2618 while (i < parent->ondisk->count) {
2619 elm = &parent->ondisk->elms[i];
2620 if (elm->internal.subtree_offset == node->node_offset)
2621 break;
2622 ++i;
2623 }
2624 if (i == parent->ondisk->count) {
2625 hammer_unlock(&parent->lock);
2626 panic("Bad B-Tree link: parent %p node %p\n", parent, node);
2627 }
2628 *parent_indexp = i;
2629 KKASSERT(*errorp == 0);
2630 return(parent);
2631}
2632
7f7c1f84 2633/*
fe7678ee
MD
2634 * The element (elm) has been moved to a new internal node (node).
2635 *
2636 * If the element represents a pointer to an internal node that node's
2637 * parent must be adjusted to the element's new location.
2638 *
6a37e7e4 2639 * XXX deadlock potential here with our exclusive locks
7f7c1f84 2640 */
7f7c1f84 2641int
36f82b23
MD
2642btree_set_parent(hammer_transaction_t trans, hammer_node_t node,
2643 hammer_btree_elm_t elm)
7f7c1f84 2644{
7f7c1f84
MD
2645 hammer_node_t child;
2646 int error;
2647
2648 error = 0;
2649
fe7678ee 2650 switch(elm->base.btype) {
7f7c1f84 2651 case HAMMER_BTREE_TYPE_INTERNAL:
fe7678ee 2652 case HAMMER_BTREE_TYPE_LEAF:
82010f9f 2653 child = hammer_get_node(trans, elm->internal.subtree_offset,
19619882 2654 0, &error);
7f7c1f84 2655 if (error == 0) {
c9b9e29d 2656 hammer_modify_node_field(trans, child, parent);
7f7c1f84 2657 child->ondisk->parent = node->node_offset;
10a5d1ba 2658 hammer_modify_node_done(child);
7f7c1f84
MD
2659 hammer_rel_node(child);
2660 }
2661 break;
7f7c1f84 2662 default:
fe7678ee 2663 break;
7f7c1f84
MD
2664 }
2665 return(error);
2666}
2667
1775b6a0
MD
2668/*
2669 * Initialize the root of a recursive B-Tree node lock list structure.
2670 */
2671void
2672hammer_node_lock_init(hammer_node_lock_t parent, hammer_node_t node)
2673{
2674 TAILQ_INIT(&parent->list);
2675 parent->parent = NULL;
2676 parent->node = node;
2677 parent->index = -1;
2678 parent->count = node->ondisk->count;
2679 parent->copy = NULL;
2680 parent->flags = 0;
2681}
2682
24cf83d2
MD
2683/*
2684 * Initialize a cache of hammer_node_lock's including space allocated
2685 * for node copies.
2686 *
2687 * This is used by the rebalancing code to preallocate the copy space
2688 * for ~4096 B-Tree nodes (16MB of data) prior to acquiring any HAMMER
2689 * locks, otherwise we can blow out the pageout daemon's emergency
2690 * reserve and deadlock it.
2691 *
2692 * NOTE: HAMMER_NODE_LOCK_LCACHE is not set on items cached in the lcache.
2693 * The flag is set when the item is pulled off the cache for use.
2694 */
2695void
2696hammer_btree_lcache_init(hammer_mount_t hmp, hammer_node_lock_t lcache,
2697 int depth)
2698{
2699 hammer_node_lock_t item;
2700 int count;
2701
2702 for (count = 1; depth; --depth)
2703 count *= HAMMER_BTREE_LEAF_ELMS;
2704 bzero(lcache, sizeof(*lcache));
2705 TAILQ_INIT(&lcache->list);
2706 while (count) {
2707 item = kmalloc(sizeof(*item), hmp->m_misc, M_WAITOK|M_ZERO);
2708 item->copy = kmalloc(sizeof(*item->copy),
2709 hmp->m_misc, M_WAITOK);
2710 TAILQ_INIT(&item->list);
2711 TAILQ_INSERT_TAIL(&lcache->list, item, entry);
2712 --count;
2713 }
2714}
2715
2716void
2717hammer_btree_lcache_free(hammer_mount_t hmp, hammer_node_lock_t lcache)
2718{
2719 hammer_node_lock_t item;
2720
2721 while ((item = TAILQ_FIRST(&lcache->list)) != NULL) {
2722 TAILQ_REMOVE(&lcache->list, item, entry);
2723 KKASSERT(item->copy);
2724 KKASSERT(TAILQ_EMPTY(&item->list));
2725 kfree(item->copy, hmp->m_misc);
2726 kfree(item, hmp->m_misc);
2727 }
2728 KKASSERT(lcache->copy == NULL);
2729}
2730
b33e2cc0
MD
2731/*
2732 * Exclusively lock all the children of node. This is used by the split
2733 * code to prevent anyone from accessing the children of a cursor node
2734 * while we fix-up its parent offset.
2735 *
2736 * If we don't lock the children we can really mess up cursors which block
2737 * trying to cursor-up into our node.
2738 *
b33e2cc0
MD
2739 * On failure EDEADLK (or some other error) is returned. If a deadlock
2740 * error is returned the cursor is adjusted to block on termination.
1775b6a0
MD
2741 *
2742 * The caller is responsible for managing parent->node, the root's node
2743 * is usually aliased from a cursor.
b33e2cc0
MD
2744 */
2745int
1775b6a0 2746hammer_btree_lock_children(hammer_cursor_t cursor, int depth,
24cf83d2
MD
2747 hammer_node_lock_t parent,
2748 hammer_node_lock_t lcache)
b33e2cc0
MD
2749{
2750 hammer_node_t node;
1775b6a0 2751 hammer_node_lock_t item;
b33e2cc0
MD
2752 hammer_node_ondisk_t ondisk;
2753 hammer_btree_elm_t elm;
b33e2cc0 2754 hammer_node_t child;
bac808fe 2755 struct hammer_mount *hmp;
b33e2cc0
MD
2756 int error;
2757 int i;
2758
1775b6a0 2759 node = parent->node;
b33e2cc0
MD
2760 ondisk = node->ondisk;
2761 error = 0;
bac808fe 2762 hmp = cursor->trans->hmp;
7bc5b8c2
MD
2763
2764 /*
2765 * We really do not want to block on I/O with exclusive locks held,
1775b6a0
MD
2766 * pre-get the children before trying to lock the mess. This is
2767 * only done one-level deep for now.
7bc5b8c2
MD
2768 */
2769 for (i = 0; i < ondisk->count; ++i) {
cb51be26 2770 ++hammer_stats_btree_elements;
7bc5b8c2
MD
2771 elm = &ondisk->elms[i];
2772 if (elm->base.btype != HAMMER_BTREE_TYPE_LEAF &&
2773 elm->base.btype != HAMMER_BTREE_TYPE_INTERNAL) {
2774 continue;
2775 }
82010f9f 2776 child = hammer_get_node(cursor->trans,
7bc5b8c2
MD
2777 elm->internal.subtree_offset,
2778 0, &error);
2779 if (child)
2780 hammer_rel_node(child);
2781 }
2782
2783 /*
2784 * Do it for real
2785 */
b33e2cc0 2786 for (i = 0; error == 0 && i < ondisk->count; ++i) {
cb51be26 2787 ++hammer_stats_btree_elements;
b33e2cc0
MD
2788 elm = &ondisk->elms[i];
2789
b33e2cc0
MD
2790 switch(elm->base.btype) {
2791 case HAMMER_BTREE_TYPE_INTERNAL:
2792 case HAMMER_BTREE_TYPE_LEAF:
f36a9737 2793 KKASSERT(elm->internal.subtree_offset != 0);
82010f9f 2794 child = hammer_get_node(cursor->trans,
b33e2cc0 2795 elm->internal.subtree_offset,
19619882 2796 0, &error);
b33e2cc0 2797 break;
b33e2cc0 2798 default:
47197d71 2799 child = NULL;
b33e2cc0
MD
2800 break;
2801 }
2802 if (child) {
2803 if (hammer_lock_ex_try(&child->lock) != 0) {
2804 if (cursor->deadlk_node == NULL) {
1ff9f58e 2805 cursor->deadlk_node = child;
b33e2cc0
MD
2806 hammer_ref_node(cursor->deadlk_node);
2807 }
2808 error = EDEADLK;
a84a197d 2809 hammer_rel_node(child);
b33e2cc0 2810 } else {
24cf83d2
MD
2811 if (lcache) {
2812 item = TAILQ_FIRST(&lcache->list);
2813 KKASSERT(item != NULL);
2814 item->flags |= HAMMER_NODE_LOCK_LCACHE;
2815 TAILQ_REMOVE(&lcache->list,
2816 item, entry);
2817 } else {
2818 item = kmalloc(sizeof(*item),
2819 hmp->m_misc,
2820 M_WAITOK|M_ZERO);
2821 TAILQ_INIT(&item->list);
2822 }
2823
1775b6a0 2824 TAILQ_INSERT_TAIL(&parent->list, item, entry);
1775b6a0 2825 item->parent = parent;
b33e2cc0 2826 item->node = child;
1775b6a0
MD
2827 item->index = i;
2828 item->count = child->ondisk->count;
2829
2830 /*
2831 * Recurse (used by the rebalancing code)
2832 */
2833 if (depth > 1 && elm->base.btype == HAMMER_BTREE_TYPE_INTERNAL) {
2834 error = hammer_btree_lock_children(
2835 cursor,
2836 depth - 1,
24cf83d2
MD
2837 item,
2838 lcache);
1775b6a0 2839 }
b33e2cc0
MD
2840 }
2841 }
2842 }
2843 if (error)
24cf83d2 2844 hammer_btree_unlock_children(hmp, parent, lcache);
b33e2cc0
MD
2845 return(error);
2846}
2847
1775b6a0
MD
2848/*
2849 * Create an in-memory copy of all B-Tree nodes listed, recursively,
2850 * including the parent.
2851 */
2852void
2853hammer_btree_lock_copy(hammer_cursor_t cursor, hammer_node_lock_t parent)
2854{
2855 hammer_mount_t hmp = cursor->trans->hmp;
2856 hammer_node_lock_t item;
2857
2858 if (parent->copy == NULL) {
24cf83d2
MD
2859 KKASSERT((parent->flags & HAMMER_NODE_LOCK_LCACHE) == 0);
2860 parent->copy = kmalloc(sizeof(*parent->copy),
2861 hmp->m_misc, M_WAITOK);
1775b6a0 2862 }
24cf83d2
MD
2863 KKASSERT((parent->flags & HAMMER_NODE_LOCK_UPDATED) == 0);
2864 *parent->copy = *parent->node->ondisk;
1775b6a0
MD
2865 TAILQ_FOREACH(item, &parent->list, entry) {
2866 hammer_btree_lock_copy(cursor, item);
2867 }
2868}
b33e2cc0
MD
2869
2870/*
1775b6a0 2871 * Recursively sync modified copies to the media.
b33e2cc0 2872 */
7ddc70d1 2873int
1775b6a0 2874hammer_btree_sync_copy(hammer_cursor_t cursor, hammer_node_lock_t parent)
b33e2cc0 2875{
1775b6a0 2876 hammer_node_lock_t item;
7ddc70d1 2877 int count = 0;
1775b6a0
MD
2878
2879 if (parent->flags & HAMMER_NODE_LOCK_UPDATED) {
7ddc70d1 2880 ++count;
1775b6a0
MD
2881 hammer_modify_node_all(cursor->trans, parent->node);
2882 *parent->node->ondisk = *parent->copy;
2883 hammer_modify_node_done(parent->node);
2884 if (parent->copy->type == HAMMER_BTREE_TYPE_DELETED) {
250aec18 2885 hammer_flush_node(parent->node, 0);
1775b6a0
MD
2886 hammer_delete_node(cursor->trans, parent->node);
2887 }
2888 }
2889 TAILQ_FOREACH(item, &parent->list, entry) {
7ddc70d1 2890 count += hammer_btree_sync_copy(cursor, item);
1775b6a0 2891 }
7ddc70d1 2892 return(count);
1775b6a0 2893}
b33e2cc0 2894
1775b6a0
MD
2895/*
2896 * Release previously obtained node locks. The caller is responsible for
2897 * cleaning up parent->node itself (its usually just aliased from a cursor),
2898 * but this function will take care of the copies.
24cf83d2
MD
2899 *
2900 * NOTE: The root node is not placed in the lcache and node->copy is not
2901 * deallocated when lcache != NULL.
1775b6a0
MD
2902 */
2903void
24cf83d2
MD
2904hammer_btree_unlock_children(hammer_mount_t hmp, hammer_node_lock_t parent,
2905 hammer_node_lock_t lcache)
1775b6a0
MD
2906{
2907 hammer_node_lock_t item;
24cf83d2 2908 hammer_node_ondisk_t copy;
1775b6a0 2909
1775b6a0
MD
2910 while ((item = TAILQ_FIRST(&parent->list)) != NULL) {
2911 TAILQ_REMOVE(&parent->list, item, entry);
24cf83d2 2912 hammer_btree_unlock_children(hmp, item, lcache);
b33e2cc0
MD
2913 hammer_unlock(&item->node->lock);
2914 hammer_rel_node(item->node);
24cf83d2
MD
2915 if (lcache) {
2916 /*
2917 * NOTE: When placing the item back in the lcache
2918 * the flag is cleared by the bzero().
2919 * Remaining fields are cleared as a safety
2920 * measure.
2921 */
2922 KKASSERT(item->flags & HAMMER_NODE_LOCK_LCACHE);
2923 KKASSERT(TAILQ_EMPTY(&item->list));
2924 copy = item->copy;
2925 bzero(item, sizeof(*item));
2926 TAILQ_INIT(&item->list);
2927 item->copy = copy;
2928 if (copy)
2929 bzero(copy, sizeof(*copy));
2930 TAILQ_INSERT_TAIL(&lcache->list, item, entry);
2931 } else {
2932 kfree(item, hmp->m_misc);
2933 }
2934 }
2935 if (parent->copy && (parent->flags & HAMMER_NODE_LOCK_LCACHE) == 0) {
2936 kfree(parent->copy, hmp->m_misc);
2937 parent->copy = NULL; /* safety */
b33e2cc0
MD
2938 }
2939}
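/*
 * Sketch of the intended life cycle, assumed from the callers (the
 * rebalancing code); simplified and illustrative, not verbatim:
 *
 *	hammer_node_lock_init(&lockroot, cursor->node);
 *	error = hammer_btree_lock_children(cursor, depth, &lockroot, lcache);
 *	if (error == 0) {
 *		hammer_btree_lock_copy(cursor, &lockroot);
 *		(modify the copies, setting HAMMER_NODE_LOCK_UPDATED on
 *		 every hammer_node_lock whose copy was changed)
 *		hammer_btree_sync_copy(cursor, &lockroot);
 *	}
 *	hammer_btree_unlock_children(cursor->trans->hmp, &lockroot, lcache);
 */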
2940
8cd0a023
MD
2941/************************************************************************
2942 * MISCELLANEOUS SUPPORT *
2943 ************************************************************************/
2944
2945/*
d26d0ae9 2946 * Compare two B-Tree elements, return -N, 0, or +N (e.g. similar to strcmp).
8cd0a023 2947 *
d113fda1 2948 * Note that for this particular function a return value of -1, 0, or +1
9582c7da 2949 * can denote a match if create_tid is otherwise discounted. A create_tid
d5530d22 2950 * of zero is considered to be 'infinity' in comparisons.
d113fda1 2951 *
8cd0a023 2952 * See also hammer_rec_rb_compare() and hammer_rec_cmp() in hammer_object.c.
8cd0a023
MD
2953 */
2954int
2955hammer_btree_cmp(hammer_base_elm_t key1, hammer_base_elm_t key2)
2956{
2f85fa4d
MD
2957 if (key1->localization < key2->localization)
2958 return(-5);
2959 if (key1->localization > key2->localization)
2960 return(5);
2961
d26d0ae9
MD
2962 if (key1->obj_id < key2->obj_id)
2963 return(-4);
2964 if (key1->obj_id > key2->obj_id)
2965 return(4);
8cd0a023 2966
d26d0ae9
MD
2967 if (key1->rec_type < key2->rec_type)
2968 return(-3);
2969 if (key1->rec_type > key2->rec_type)
2970 return(3);
8cd0a023 2971
8cd0a023
MD
2972 if (key1->key < key2->key)
2973 return(-2);
2974 if (key1->key > key2->key)
2975 return(2);
d113fda1 2976
d5530d22 2977 /*
9582c7da
MD
2978 * A create_tid of zero indicates a record which is undeletable
2979 * and must be considered to have a value of positive infinity.
d5530d22 2980 */
9582c7da
MD
2981 if (key1->create_tid == 0) {
2982 if (key2->create_tid == 0)
d5530d22
MD
2983 return(0);
2984 return(1);
2985 }
9582c7da 2986 if (key2->create_tid == 0)
d5530d22 2987 return(-1);
9582c7da 2988 if (key1->create_tid < key2->create_tid)
d113fda1 2989 return(-1);
9582c7da 2990 if (key1->create_tid > key2->create_tid)
d113fda1 2991 return(1);
8cd0a023
MD
2992 return(0);
2993}
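/*
 * Illustrative interpretation: a return of magnitude 2 or greater means
 * the keys differ in localization, obj_id, rec_type or key and can never
 * match; -1, 0 and +1 mean the keys match except possibly for
 * create_tid.  For example (hypothetical values), identical keys with
 * key1->create_tid == 10 and key2->create_tid == 20 return -1.
 */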
2994
c0ade690 2995/*
d5530d22
MD
2996 * Test a timestamp against an element to determine whether the
2997 * element is visible. A timestamp of 0 means 'infinity'.
c0ade690
MD
2998 */
2999int
d5530d22 3000hammer_btree_chkts(hammer_tid_t asof, hammer_base_elm_t base)
c0ade690 3001{
d5530d22
MD
3002 if (asof == 0) {
3003 if (base->delete_tid)
3004 return(1);
3005 return(0);
3006 }
3007 if (asof < base->create_tid)
d26d0ae9 3008 return(-1);
d5530d22 3009 if (base->delete_tid && asof >= base->delete_tid)
d26d0ae9 3010 return(1);
c0ade690
MD
3011 return(0);
3012}
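/*
 * Example (hypothetical tids): for a record with create_tid 10 and
 * delete_tid 20, an as-of timestamp of 15 returns 0 (visible), 5 returns
 * -1 (not yet created), and 25 returns 1 (already deleted).  An asof of
 * 0 means 'infinity' and only tests whether delete_tid is set.
 */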
3013
8cd0a023
MD
3014/*
3015 * Create a separator halfway between key1 and key2. For fields just
d5530d22
MD
3016 * one unit apart, the separator will match key2. key1 is on the left-hand
3017 * side and key2 is on the right-hand side.
8cd0a023 3018 *
9391cded 3019 * key2 must be >= the separator. It is ok for the separator to match key2.
36f82b23 3020 *
9391cded
MD
3021 * NOTE: Even if key1 does not match key2, the separator may wind up matching
3022 * key2.
3023 *
3024 * NOTE: It might be beneficial to just scrap this whole mess and just
3025 * set the separator to key2.
8cd0a023
MD
3026 */
3027#define MAKE_SEPARATOR(key1, key2, dest, field) \
3028 dest->field = key1->field + ((key2->field - key1->field + 1) >> 1);
3029
3030static void
3031hammer_make_separator(hammer_base_elm_t key1, hammer_base_elm_t key2,
3032 hammer_base_elm_t dest)
3033{
3034 bzero(dest, sizeof(*dest));
d5530d22 3035
9391cded
MD
3036 dest->rec_type = key2->rec_type;
3037 dest->key = key2->key;
2f85fa4d 3038 dest->obj_id = key2->obj_id;
9391cded
MD
3039 dest->create_tid = key2->create_tid;
3040
2f85fa4d
MD
3041 MAKE_SEPARATOR(key1, key2, dest, localization);
3042 if (key1->localization == key2->localization) {
3043 MAKE_SEPARATOR(key1, key2, dest, obj_id);
3044 if (key1->obj_id == key2->obj_id) {
3045 MAKE_SEPARATOR(key1, key2, dest, rec_type);
3046 if (key1->rec_type == key2->rec_type) {
3047 MAKE_SEPARATOR(key1, key2, dest, key);
3048 /*
3049 * Don't bother creating a separator for
3050 * create_tid, which also conveniently avoids
3051 * having to handle the create_tid == 0
3052 * (infinity) case. Just leave create_tid
3053 * set to key2.
3054 *
3055 * Worst case, dest matches key2 exactly,
3056 * which is acceptable.
3057 */
3058 }
d5530d22 3059 }
d113fda1 3060 }
8cd0a023
MD
3061}
3062
3063#undef MAKE_SEPARATOR
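/*
 * Worked example (hypothetical keys): if key1 and key2 are identical
 * except for the key field, with key1->key == 0x1000 and
 * key2->key == 0x1006, MAKE_SEPARATOR yields
 * 0x1000 + ((0x1006 - 0x1000 + 1) >> 1) = 0x1003.  When the fields are
 * exactly one unit apart the same formula yields key2's value, which is
 * why a separator equal to key2 must be acceptable to the callers.
 */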
3064
3065/*
3066 * Return whether a generic internal or leaf node is full
3067 */
3068static int
3069btree_node_is_full(hammer_node_ondisk_t node)
3070{
3071 switch(node->type) {
3072 case HAMMER_BTREE_TYPE_INTERNAL:
3073 if (node->count == HAMMER_BTREE_INT_ELMS)
3074 return(1);
3075 break;
3076 case HAMMER_BTREE_TYPE_LEAF:
3077 if (node->count == HAMMER_BTREE_LEAF_ELMS)
3078 return(1);
3079 break;
3080 default:
3081 panic("illegal btree subtype");
3082 }
3083 return(0);
3084}
9944ae54 3085
8cd0a023
MD
3086#if 0
3087static int
3088btree_max_elements(u_int8_t type)
3089{
3090 if (type == HAMMER_BTREE_TYPE_LEAF)
3091 return(HAMMER_BTREE_LEAF_ELMS);
3092 if (type == HAMMER_BTREE_TYPE_INTERNAL)
3093 return(HAMMER_BTREE_INT_ELMS);
3094 panic("btree_max_elements: bad type %d\n", type);
3095}
3096#endif
3097
c0ade690
MD
3098void
3099hammer_print_btree_node(hammer_node_ondisk_t ondisk)
3100{
3101 hammer_btree_elm_t elm;
3102 int i;
3103
47197d71 3104 kprintf("node %p count=%d parent=%016llx type=%c\n",
973c11b9
MD
3105 ondisk, ondisk->count,
3106 (long long)ondisk->parent, ondisk->type);
c0ade690
MD
3107
3108 /*
3109 * Dump both boundary elements if an internal node
3110 */
3111 if (ondisk->type == HAMMER_BTREE_TYPE_INTERNAL) {
3112 for (i = 0; i <= ondisk->count; ++i) {
3113 elm = &ondisk->elms[i];
3114 hammer_print_btree_elm(elm, ondisk->type, i);
3115 }
3116 } else {
3117 for (i = 0; i < ondisk->count; ++i) {
3118 elm = &ondisk->elms[i];
3119 hammer_print_btree_elm(elm, ondisk->type, i);
3120 }
3121 }
3122}
3123
3124void
3125hammer_print_btree_elm(hammer_btree_elm_t elm, u_int8_t type, int i)
3126{
3127 kprintf(" %2d", i);
973c11b9
MD
3128 kprintf("\tobj_id = %016llx\n", (long long)elm->base.obj_id);
3129 kprintf("\tkey = %016llx\n", (long long)elm->base.key);
3130 kprintf("\tcreate_tid = %016llx\n", (long long)elm->base.create_tid);
3131 kprintf("\tdelete_tid = %016llx\n", (long long)elm->base.delete_tid);
c0ade690
MD
3132 kprintf("\trec_type = %04x\n", elm->base.rec_type);
3133 kprintf("\tobj_type = %02x\n", elm->base.obj_type);
fe7678ee
MD
3134 kprintf("\tbtype = %02x (%c)\n",
3135 elm->base.btype,
3136 (elm->base.btype ? elm->base.btype : '?'));
2f85fa4d 3137 kprintf("\tlocalization = %02x\n", elm->base.localization);
fe7678ee
MD
3138
3139 switch(type) {
3140 case HAMMER_BTREE_TYPE_INTERNAL:
47197d71 3141 kprintf("\tsubtree_off = %016llx\n",
973c11b9 3142 (long long)elm->internal.subtree_offset);
fe7678ee 3143 break;
fe7678ee 3144 case HAMMER_BTREE_TYPE_RECORD:
973c11b9
MD
3145 kprintf("\tdata_offset = %016llx\n",
3146 (long long)elm->leaf.data_offset);
c0ade690
MD
3147 kprintf("\tdata_len = %08x\n", elm->leaf.data_len);
3148 kprintf("\tdata_crc = %08x\n", elm->leaf.data_crc);
fe7678ee 3149 break;
c0ade690
MD
3150 }
3151}