/*
 * Copyright (c) 2008-2012 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * HAMMER reblocker - This code frees up fragmented physical space
 *
 * HAMMER only keeps track of free space on a big-block basis.  A big-block
 * containing holes can only be freed by migrating the remaining data in
 * that big-block into a new big-block, then freeing the big-block.
 *
 * The reblocker is invoked from an ioctl or via the hammer support thread.
 */
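
/*
 * Note on free_level (a rough sketch; the exact mapping is done by the
 * userland hammer(8) utility, and the relation below is only implied by
 * the emergency check in hammer_ioc_reblock()):  the ioctl expresses the
 * reblocking threshold as a free byte count per big-block rather than a
 * fill percentage, roughly
 *
 *        free_level = HAMMER_BIGBLOCK_SIZE * (100 - fill_percentage) / 100
 *
 * A big-block is only reblocked if at least free_level bytes of it are
 * free, so with 8MB big-blocks a 90% fill target corresponds to a
 * free_level of roughly 800KB, and free_level == 0 (100% fill) reblocks
 * everything the scan touches.
 */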

#include "hammer.h"

static int hammer_reblock_helper(struct hammer_ioc_reblock *reblock,
                                 hammer_cursor_t cursor,
                                 hammer_btree_elm_t elm);
static int hammer_reblock_data(struct hammer_ioc_reblock *reblock,
                                hammer_cursor_t cursor, hammer_btree_elm_t elm);
static int hammer_reblock_leaf_node(struct hammer_ioc_reblock *reblock,
                                hammer_cursor_t cursor, hammer_btree_elm_t elm);
static int hammer_reblock_int_node(struct hammer_ioc_reblock *reblock,
                                hammer_cursor_t cursor, hammer_btree_elm_t elm);
static void hammer_move_node(hammer_cursor_t cursor, hammer_btree_elm_t elm,
                                hammer_node_t onode, hammer_node_t nnode);

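/*
 * Rough sketch of how userland typically reaches this code; the
 * authoritative setup lives in the hammer(8) utility.  The HAMMERIOC_REBLOCK
 * request code and the HAMMER_MIN_OBJID/HAMMER_MAX_OBJID constants are
 * assumed to match hammer_ioctl.h / hammer_disk.h, and the key_beg/key_end
 * localization setup is elided (hammer(8) spans the full localization
 * range; for allpfs == 0 the kernel ORs in the PFS of the ioctl'd inode):
 *
 *        struct hammer_ioc_reblock reblock;
 *
 *        bzero(&reblock, sizeof(reblock));
 *        reblock.key_beg.obj_id = HAMMER_MIN_OBJID;
 *        reblock.key_end.obj_id = HAMMER_MAX_OBJID;
 *        reblock.head.flags = HAMMER_IOC_DO_BTREE | HAMMER_IOC_DO_INODES |
 *                             HAMMER_IOC_DO_DIRS | HAMMER_IOC_DO_DATA;
 *        reblock.free_level = 0;                 (0 == reblock everything)
 *        reblock.vol_no = -1;                    (scan all volumes)
 *        if (ioctl(fd, HAMMERIOC_REBLOCK, &reblock) < 0)
 *                err(1, "reblock");              (fd: open fd on the fs)
 */
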
int
hammer_ioc_reblock(hammer_transaction_t trans, hammer_inode_t ip,
                   struct hammer_ioc_reblock *reblock)
{
        struct hammer_cursor cursor;
        hammer_btree_elm_t elm;
        int checkspace_count;
        int error;
        int seq;
        int slop;
        uint32_t key_end_localization;

        if ((reblock->key_beg.localization | reblock->key_end.localization) &
            HAMMER_LOCALIZE_PSEUDOFS_MASK) {
                return(EINVAL);
        }
        if (reblock->key_beg.obj_id >= reblock->key_end.obj_id)
                return(EINVAL);
        if (reblock->free_level < 0 ||
            reblock->free_level > HAMMER_BIGBLOCK_SIZE)
                return(EINVAL);

        /*
         * A fill_percentage <= 20% is considered an emergency.  free_level is
         * inverted from fill_percentage.
         */
        if (reblock->free_level >= HAMMER_BIGBLOCK_SIZE * 8 / 10)
                slop = HAMMER_CHKSPC_EMERGENCY;
        else
                slop = HAMMER_CHKSPC_REBLOCK;

        /*
         * The ioctl caller only sets the localization type bits in the
         * reblock structure.  Initialize the cursor key localization from
         * the PFS localization of ip (unless all PFSs were requested).
         */
        reblock->key_cur = reblock->key_beg;
        reblock->key_cur.localization &= HAMMER_LOCALIZE_MASK;
        if (reblock->allpfs == 0)
                reblock->key_cur.localization |= ip->obj_localization;

        key_end_localization = reblock->key_end.localization;
        key_end_localization &= HAMMER_LOCALIZE_MASK;
        if (reblock->allpfs == 0)
                key_end_localization |= ip->obj_localization;
        else
                key_end_localization |= pfs_to_lo(HAMMER_MAX_PFSID);

        checkspace_count = 0;
        seq = trans->hmp->flusher.done;
retry:
        error = hammer_init_cursor(trans, &cursor, NULL, NULL);
        if (error) {
                hammer_done_cursor(&cursor);
                goto failed;
        }
        cursor.key_beg.localization = reblock->key_cur.localization;
        cursor.key_beg.obj_id = reblock->key_cur.obj_id;
        cursor.key_beg.key = HAMMER_MIN_KEY;
        cursor.key_beg.create_tid = 1;
        cursor.key_beg.delete_tid = 0;
        cursor.key_beg.rec_type = HAMMER_MIN_RECTYPE;
        cursor.key_beg.obj_type = 0;

        cursor.key_end.localization = key_end_localization;
        cursor.key_end.obj_id = reblock->key_end.obj_id;
        cursor.key_end.key = HAMMER_MAX_KEY;
        cursor.key_end.create_tid = HAMMER_MAX_TID - 1;
        cursor.key_end.delete_tid = 0;
        cursor.key_end.rec_type = HAMMER_MAX_RECTYPE;
        cursor.key_end.obj_type = 0;

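        /*
         * The scan is inclusive of key_end and runs as a backend cursor.
         * NOSWAPCACHE presumably keeps data we are about to relocate from
         * being cached in the swapcache; see hammer_cursor.h for the
         * definitive flag semantics.
         */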
        cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE;
        cursor.flags |= HAMMER_CURSOR_BACKEND;
        cursor.flags |= HAMMER_CURSOR_NOSWAPCACHE;

        /*
         * This flag allows the btree scan code to return internal nodes,
         * so we can reblock them in addition to the leaves.  Only specify
         * it if we intend to reblock B-Tree nodes.
         */
        if (reblock->head.flags & HAMMER_IOC_DO_BTREE)
                cursor.flags |= HAMMER_CURSOR_REBLOCKING;

        error = hammer_btree_first(&cursor);
        while (error == 0) {
                /*
                 * Internal or Leaf node
                 */
                KKASSERT(cursor.index < cursor.node->ondisk->count);
                elm = &cursor.node->ondisk->elms[cursor.index];
                reblock->key_cur.obj_id = elm->base.obj_id;
                reblock->key_cur.localization = elm->base.localization;

                /*
                 * The filesystem went read-only during the reblocking run
                 */
                if (trans->hmp->ronly) {
                        error = EROFS;
                        break;
                }

                /*
                 * Yield to more important tasks
                 */
                if ((error = hammer_signal_check(trans->hmp)) != 0)
                        break;

                /*
                 * If there is insufficient free space it may be due to
                 * reserved big-blocks, which flushing might fix.
                 *
                 * We must force a retest in case the unlocked cursor is
                 * moved to the end of the leaf, or moved to an internal
                 * node.
                 *
                 * WARNING: See warnings in hammer_unlock_cursor() function.
                 */
                if (hammer_checkspace(trans->hmp, slop)) {
                        if (++checkspace_count == 10) {
                                error = ENOSPC;
                                break;
                        }
                        hammer_unlock_cursor(&cursor);
                        cursor.flags |= HAMMER_CURSOR_RETEST;
                        hammer_flusher_wait(trans->hmp, seq);
                        hammer_lock_cursor(&cursor);
                        seq = hammer_flusher_async(trans->hmp, NULL);
                        goto skip;
                }

                /*
                 * Acquiring the sync_lock prevents the operation from
                 * crossing a synchronization boundary.
                 *
                 * NOTE: cursor.node may have changed on return.
                 *
                 * WARNING: See warnings in hammer_unlock_cursor() function.
                 */
                hammer_sync_lock_sh(trans);
                error = hammer_reblock_helper(reblock, &cursor, elm);
                hammer_sync_unlock(trans);

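                /*
                 * If the flusher's meta-data backlog is at its half-way
                 * limit, or the UNDO FIFO is close to exhaustion, stall
                 * until the flusher catches up before continuing the scan.
                 */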
                while (hammer_flusher_meta_halflimit(trans->hmp) ||
                       hammer_flusher_undo_exhausted(trans, 2)) {
                        hammer_unlock_cursor(&cursor);
                        hammer_flusher_wait(trans->hmp, seq);
                        hammer_lock_cursor(&cursor);
                        seq = hammer_flusher_async_one(trans->hmp);
                }

                /*
                 * Set up for the iteration.  Our cursor flags may be
                 * modified by other threads while we are unlocked.
                 */
                cursor.flags |= HAMMER_CURSOR_ATEDISK;

                /*
                 * We allocate data buffers, which at the moment we do not
                 * track dirty levels for because we allow the kernel to
                 * write them.  But if we allocate too many we can still
                 * deadlock the buffer cache.
                 *
                 * WARNING: See warnings in hammer_unlock_cursor() function.
                 *          (The cursor's node and element may change!)
                 */
                if (bd_heatup()) {
                        hammer_unlock_cursor(&cursor);
                        bwillwrite(HAMMER_XBUFSIZE);
                        hammer_lock_cursor(&cursor);
                }
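                /*
                 * Also give the VM system a chance to catch up; this should
                 * return immediately unless free pages are low.
                 */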
                vm_wait_nominal();
skip:
                if (error == 0) {
                        error = hammer_btree_iterate(&cursor);
                }
        }
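        /*
         * Error disposition: ENOENT just means the iteration ran off the
         * end of the key range and is not an error.  EWOULDBLOCK and
         * EDEADLK restart the scan from key_cur (after syncing the flusher
         * in the EWOULDBLOCK case).  EINTR reports an interrupted, but
         * otherwise successful, operation back to the caller.
         */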
        if (error == ENOENT)
                error = 0;
        hammer_done_cursor(&cursor);
        if (error == EWOULDBLOCK) {
                hammer_flusher_sync(trans->hmp);
                goto retry;
        }
        if (error == EDEADLK)
                goto retry;
        if (error == EINTR) {
                reblock->head.flags |= HAMMER_IOC_HEAD_INTR;
                error = 0;
        }
failed:
        reblock->key_cur.localization &= HAMMER_LOCALIZE_MASK;
        return(error);
}

/*
 * Reblock the B-Tree (leaf) node, record, and/or data if necessary.
 *
 * XXX We have no visibility into internal B-Tree nodes at the moment,
 * only leaf nodes.
 */
static int
hammer_reblock_helper(struct hammer_ioc_reblock *reblock,
                      hammer_cursor_t cursor, hammer_btree_elm_t elm)
{
        hammer_mount_t hmp;
        hammer_off_t tmp_offset;
        hammer_node_ondisk_t ondisk;
        struct hammer_btree_leaf_elm leaf;
        int error;
        int bytes;
        int cur;
        int iocflags;

        error = 0;
        hmp = cursor->trans->hmp;

        /*
         * Reblock data.  Note that data embedded in a record is reblocked
         * by the record reblock code.  Data processing only occurs at leaf
         * nodes and for RECORD element types.
         */
        if (cursor->node->ondisk->type != HAMMER_BTREE_TYPE_LEAF)
                goto skip;
        if (elm->leaf.base.btype != HAMMER_BTREE_TYPE_RECORD)
                return(EINVAL);
        tmp_offset = elm->leaf.data_offset;
        if (tmp_offset == 0)
                goto skip;

        /*
         * If reblock->vol_no is specified we only want to reblock data
         * in that volume and ignore everything else.
         */
        if (reblock->vol_no != -1 &&
            reblock->vol_no != HAMMER_VOL_DECODE(tmp_offset))
                goto skip;

        /*
         * NOTE: Localization restrictions may also have been set up; we
         *       can't just set the match flags willy-nilly here.
         */
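        /*
         * Map the record type to the ioctl selection flag that covers it
         * (inodes, directory/structural records, or data).
         */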
        switch(elm->leaf.base.rec_type) {
        case HAMMER_RECTYPE_INODE:
        case HAMMER_RECTYPE_SNAPSHOT:
        case HAMMER_RECTYPE_CONFIG:
                iocflags = HAMMER_IOC_DO_INODES;
                break;
        case HAMMER_RECTYPE_EXT:
        case HAMMER_RECTYPE_FIX:
        case HAMMER_RECTYPE_PFS:
        case HAMMER_RECTYPE_DIRENTRY:
                iocflags = HAMMER_IOC_DO_DIRS;
                break;
        case HAMMER_RECTYPE_DATA:
        case HAMMER_RECTYPE_DB:
                iocflags = HAMMER_IOC_DO_DATA;
                break;
        default:
                iocflags = 0;
                break;
        }
        if (reblock->head.flags & iocflags) {
                ++reblock->data_count;
                reblock->data_byte_count += elm->leaf.data_len;
                bytes = hammer_blockmap_getfree(hmp, tmp_offset, &cur, &error);
                if (hammer_debug_general & 0x4000)
                        hdkprintf("D %6d/%d\n", bytes, reblock->free_level);
                /*
                 * Reblock the data if:
                 * 1. there is no error,
                 * 2. the data and the allocator offset are not in the same
                 *    big-block, or the free level threshold is 0, and
                 * 3. the free bytes in the data's big-block are at least the
                 *    free level threshold (so a threshold of 0 reblocks
                 *    unconditionally).
                 */
                if (error == 0 && (cur == 0 || reblock->free_level == 0) &&
                    bytes >= reblock->free_level) {
                        /*
                         * This is nasty: the uncache code may have to get
                         * vnode locks and because of that we can't hold
                         * the cursor locked.
                         *
                         * WARNING: See warnings in hammer_unlock_cursor()
                         *          function.
                         */
                        leaf = elm->leaf;
                        hammer_unlock_cursor(cursor);
                        hammer_io_direct_uncache(hmp, &leaf);
                        hammer_lock_cursor(cursor);

                        /*
                         * elm may have become stale or invalid, reload it.
                         * The ondisk variable is temporary only.  Note that
                         * cursor->node, and thus cursor->node->ondisk, may
                         * also have changed.
                         */
                        ondisk = cursor->node->ondisk;
                        elm = &ondisk->elms[cursor->index];
                        if (cursor->flags & HAMMER_CURSOR_RETEST) {
                                hkprintf("debug: retest on reblocker uncache\n");
                                error = EDEADLK;
                        } else if (ondisk->type != HAMMER_BTREE_TYPE_LEAF ||
                                   cursor->index >= ondisk->count) {
                                hkprintf("debug: shifted on reblocker uncache\n");
                                error = EDEADLK;
                        } else if (bcmp(&elm->leaf, &leaf, sizeof(leaf))) {
                                hkprintf("debug: changed on reblocker uncache\n");
                                error = EDEADLK;
                        }
                        if (error == 0)
                                error = hammer_cursor_upgrade(cursor);
                        if (error == 0) {
                                KKASSERT(cursor->index < ondisk->count);
                                error = hammer_reblock_data(reblock,
                                                            cursor, elm);
                        }
                        if (error == 0) {
                                ++reblock->data_moves;
                                reblock->data_byte_moves += elm->leaf.data_len;
                        }
                }
        }

skip:
        /*
         * Reblock a B-Tree internal or leaf node.  A leaf node is reblocked
         * on initial entry only (element 0).  An internal node is reblocked
         * when entered upward from its first leaf node only (also element 0,
         * see hammer_btree_iterate() where cursor moves up and may return).
         * Further revisits of the internal node (index > 0) are ignored.
         */
        tmp_offset = cursor->node->node_offset;

        /*
         * If reblock->vol_no is specified we only want to reblock nodes
         * in that volume and ignore everything else.
         */
        if (reblock->vol_no != -1 &&
            reblock->vol_no != HAMMER_VOL_DECODE(tmp_offset))
                goto end;

        if (cursor->index == 0 &&
            error == 0 && (reblock->head.flags & HAMMER_IOC_DO_BTREE)) {
                ++reblock->btree_count;
                bytes = hammer_blockmap_getfree(hmp, tmp_offset, &cur, &error);
                if (hammer_debug_general & 0x4000)
                        hdkprintf("B %6d/%d\n", bytes, reblock->free_level);
                /*
                 * Reblock the node if:
                 * 1. there is no error,
                 * 2. the node and the allocator offset are not in the same
                 *    big-block, or the free level threshold is 0, and
                 * 3. the free bytes in the node's big-block are at least the
                 *    free level threshold (so a threshold of 0 reblocks
                 *    unconditionally).
                 */
                if (error == 0 && (cur == 0 || reblock->free_level == 0) &&
                    bytes >= reblock->free_level) {
                        error = hammer_cursor_upgrade(cursor);
                        if (error == 0) {
                                if (cursor->parent) {
                                        KKASSERT(cursor->parent_index <
                                                 cursor->parent->ondisk->count);
                                        elm = &cursor->parent->ondisk->elms[cursor->parent_index];
                                } else {
                                        elm = NULL;
                                }
                                switch(cursor->node->ondisk->type) {
                                case HAMMER_BTREE_TYPE_LEAF:
                                        error = hammer_reblock_leaf_node(
                                                        reblock, cursor, elm);
                                        break;
                                case HAMMER_BTREE_TYPE_INTERNAL:
                                        error = hammer_reblock_int_node(
                                                        reblock, cursor, elm);
                                        break;
                                default:
                                        hpanic("Illegal B-Tree node type");
                                }
                        }
                        if (error == 0) {
                                ++reblock->btree_moves;
                        }
                }
        }
end:
        hammer_cursor_downgrade(cursor);
        return(error);
}

/*
 * Reblock a record's data.  Both the B-Tree element and record pointers
 * to the data must be adjusted.
 */
static int
hammer_reblock_data(struct hammer_ioc_reblock *reblock,
                    hammer_cursor_t cursor, hammer_btree_elm_t elm)
{
        hammer_buffer_t data_buffer = NULL;
        hammer_off_t odata_offset;
        hammer_off_t ndata_offset;
        hammer_crc_t ncrc;
        int error;
        void *ndata;

        error = hammer_btree_extract_data(cursor);
        if (error)
                return (error);
        ndata = hammer_alloc_data(cursor->trans, elm->leaf.data_len,
                                  elm->leaf.base.rec_type,
                                  &ndata_offset, &data_buffer,
                                  0, &error);
        if (error)
                goto done;
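        /*
         * The new buffer holds bulk data, not meta-data, so it is not
         * tracked for dirty levels and the kernel is allowed to write it
         * back directly (see the note about data buffers in
         * hammer_ioc_reblock()).
         */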
        hammer_io_notmeta(data_buffer);

        /*
         * Move the data.  Note that we must invalidate any cached
         * data buffer in the cursor before calling blockmap_free.
         * The blockmap_free may free up the entire big-block and
         * will not be able to invalidate it if the cursor is holding
         * a data buffer cached in that big-block.
         *
         * Unconditionally regenerate the CRC.  This is a slight hack
         * to ensure that the crc method is the latest for the filesystem
         * version (e.g. upgrade from v6 to v7).
         */
        hammer_modify_buffer_noundo(cursor->trans, data_buffer);
        bcopy(cursor->data, ndata, elm->leaf.data_len);
        ncrc = hammer_crc_get_leaf(cursor->trans->hmp->version, ndata,
                                   &elm->leaf);
        hammer_modify_buffer_done(data_buffer);
        hammer_cursor_invalidate_cache(cursor);

        hammer_blockmap_free(cursor->trans,
                             elm->leaf.data_offset, elm->leaf.data_len);

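        /*
         * Repoint the B-Tree element at the relocated data and record the
         * regenerated CRC.
         */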
        hammer_modify_node(cursor->trans, cursor->node,
                           &elm->leaf.data_offset, sizeof(hammer_off_t));
        odata_offset = elm->leaf.data_offset;
        elm->leaf.data_offset = ndata_offset;
        elm->leaf.data_crc = ncrc;
        hammer_modify_node_done(cursor->node);

        if (hammer_debug_general & 0x4000) {
                hdkprintf("%08x %016jx -> %016jx\n",
                        (elm ? elm->base.localization : -1),
                        (intmax_t)odata_offset,
                        (intmax_t)ndata_offset);
        }
done:
        if (data_buffer)
                hammer_rel_buffer(data_buffer, 0);
        return (error);
}

/*
 * Reblock a B-Tree leaf node.  The parent must be adjusted to point to
 * the new copy of the leaf node.
 *
 * elm is a pointer to the parent element pointing at cursor.node.
 */
static int
hammer_reblock_leaf_node(struct hammer_ioc_reblock *reblock,
                         hammer_cursor_t cursor, hammer_btree_elm_t elm)
{
        hammer_node_t onode;
        hammer_node_t nnode;
        int error;

        /*
         * Don't supply a hint when allocating the leaf.  Fills are done
         * from the leaf upwards.
         */
        onode = cursor->node;
        nnode = hammer_alloc_btree(cursor->trans, 0, &error);

        if (nnode == NULL)
                return (error);

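        /*
         * Lock the new node and open it for modification.  No UNDO is
         * generated here, presumably because the freshly allocated node is
         * not yet referenced by anything on-disk.
         */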
        hammer_lock_ex(&nnode->lock);
        hammer_modify_node_noundo(cursor->trans, nnode);

        hammer_move_node(cursor, elm, onode, nnode);

        /*
         * Clean up.
         *
         * The new node replaces the current node in the cursor.  The cursor
         * expects it to be locked so leave it locked.  Discard onode.
         */
        hammer_cursor_replaced_node(onode, nnode);
        hammer_delete_node(cursor->trans, onode);

        if (hammer_debug_general & 0x4000) {
                hdkprintf("%08x %016jx -> %016jx\n",
                        (elm ? elm->base.localization : -1),
                        (intmax_t)onode->node_offset,
                        (intmax_t)nnode->node_offset);
        }
        hammer_modify_node_done(nnode);
        cursor->node = nnode;

        hammer_unlock(&onode->lock);
        hammer_rel_node(onode);

        return (error);
}

/*
 * Reblock a B-Tree internal node.  The parent must be adjusted to point to
 * the new copy of the internal node, and the node's children's parent
 * pointers must also be adjusted to point to the new copy.
 *
 * elm is a pointer to the parent element pointing at cursor.node.
 */
static int
hammer_reblock_int_node(struct hammer_ioc_reblock *reblock,
                         hammer_cursor_t cursor, hammer_btree_elm_t elm)
{
        struct hammer_node_lock lockroot;
        hammer_node_t onode;
        hammer_node_t nnode;
        int error;

        hammer_node_lock_init(&lockroot, cursor->node);
        error = hammer_btree_lock_children(cursor, 1, &lockroot, NULL);
        if (error)
                goto done;

        /*
         * Don't supply a hint when allocating the replacement node.  Fills
         * are done from the leaf upwards.
         */
        onode = cursor->node;
        nnode = hammer_alloc_btree(cursor->trans, 0, &error);

        if (nnode == NULL)
                goto done;

        hammer_lock_ex(&nnode->lock);
        hammer_modify_node_noundo(cursor->trans, nnode);

        hammer_move_node(cursor, elm, onode, nnode);

        /*
         * Clean up.
         *
         * The new node replaces the current node in the cursor.  The cursor
         * expects it to be locked so leave it locked.  Discard onode.
         */
        hammer_cursor_replaced_node(onode, nnode);
        hammer_delete_node(cursor->trans, onode);

        if (hammer_debug_general & 0x4000) {
                hdkprintf("%08x %016jx -> %016jx\n",
                        (elm ? elm->base.localization : -1),
                        (intmax_t)onode->node_offset,
                        (intmax_t)nnode->node_offset);
        }
        hammer_modify_node_done(nnode);
        cursor->node = nnode;

        hammer_unlock(&onode->lock);
        hammer_rel_node(onode);

done:
        hammer_btree_unlock_children(cursor->trans->hmp, &lockroot, NULL);
        return (error);
}

/*
 * nnode is a newly allocated replacement for onode.  elm is the element in
 * the parent node that points at onode and is repointed at nnode, or nnode
 * becomes the new root node if elm does not exist.
 */
static void
hammer_move_node(hammer_cursor_t cursor, hammer_btree_elm_t elm,
                 hammer_node_t onode, hammer_node_t nnode)
{
        int error, i;

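        /*
         * Copy the entire on-disk node image to the new location, then fix
         * up the parent and child linkages below to make the copy live.
         */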
        bcopy(onode->ondisk, nnode->ondisk, sizeof(*nnode->ondisk));

        /*
         * Adjust the parent's pointer to us first.
         */
        if (elm) {
                /*
                 * We are not the root of the B-Tree
                 */
                KKASSERT(hammer_is_internal_node_elm(elm));
                hammer_modify_node(cursor->trans, cursor->parent,
                                   &elm->internal.subtree_offset,
                                   sizeof(elm->internal.subtree_offset));
                elm->internal.subtree_offset = nnode->node_offset;
                hammer_modify_node_done(cursor->parent);
        } else {
                /*
                 * We are the root of the B-Tree
                 */
                hammer_volume_t volume;
                volume = hammer_get_root_volume(cursor->trans->hmp, &error);
                KKASSERT(error == 0);

                hammer_modify_volume_field(cursor->trans, volume,
                                           vol0_btree_root);
                volume->ondisk->vol0_btree_root = nnode->node_offset;
                hammer_modify_volume_done(volume);
                hammer_rel_volume(volume, 0);
        }

        /*
         * Now adjust our children's pointers to us
         * if we are an internal node.
         */
        if (nnode->ondisk->type == HAMMER_BTREE_TYPE_INTERNAL) {
                for (i = 0; i < nnode->ondisk->count; ++i) {
                        error = btree_set_parent_of_child(cursor->trans, nnode,
                                        &nnode->ondisk->elms[i]);
                        if (error)
                                hpanic("reblock internal node: fixup problem");
                }
        }
}