HAMMER VFS - Fix degenerate stall condition in flusher during unmount
sys/vfs/hammer/hammer_reblock.c
/*
 * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_reblock.c,v 1.34 2008/11/13 02:18:43 dillon Exp $
 */
/*
 * HAMMER reblocker - This code frees up fragmented physical space
 *
 * HAMMER only keeps track of free space on a big-block basis. A big-block
 * containing holes can only be freed by migrating the remaining data in
 * that big-block into a new big-block, then freeing the big-block.
 *
 * This function is called from an ioctl or via the hammer support thread.
 */

#include "hammer.h"

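/*
 * Illustration only (not part of the original file): a minimal sketch of
 * how a userland caller might drive the reblocker through the reblock
 * ioctl handled below.  The header paths, the HAMMERIOC_REBLOCK ioctl
 * name, HAMMER_MIN_OBJID/HAMMER_MAX_OBJID and the resume-from-key_cur
 * convention are assumptions inferred from the fields used in this file;
 * hammer(8) is the authoritative caller.
 */
#if 0
#include <sys/types.h>
#include <sys/ioctl.h>
#include <vfs/hammer/hammer_disk.h>
#include <vfs/hammer/hammer_ioctl.h>
#include <err.h>
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static void
reblock_example(const char *path)
{
        struct hammer_ioc_reblock reblock;
        int fd;

        if ((fd = open(path, O_RDONLY)) < 0)
                err(1, "%s", path);

        memset(&reblock, 0, sizeof(reblock));
        reblock.key_beg.obj_id = HAMMER_MIN_OBJID;
        reblock.key_end.obj_id = HAMMER_MAX_OBJID;

        /* which classes of records and B-Tree nodes to move (see below) */
        reblock.head.flags = HAMMER_IOC_DO_INODES | HAMMER_IOC_DO_DIRS |
                             HAMMER_IOC_DO_DATA | HAMMER_IOC_DO_BTREE;

        /*
         * Free space (in bytes) required in a big-block before it is
         * considered worth reblocking; 25% of a big-block is an
         * arbitrary example value.
         */
        reblock.free_level = HAMMER_LARGEBLOCK_SIZE / 4;

        for (;;) {
                if (ioctl(fd, HAMMERIOC_REBLOCK, &reblock) < 0)
                        err(1, "HAMMERIOC_REBLOCK");
                if ((reblock.head.flags & HAMMER_IOC_HEAD_INTR) == 0)
                        break;
                /* interrupted by a signal; resume where the scan stopped */
                reblock.head.flags &= ~HAMMER_IOC_HEAD_INTR;
                reblock.key_beg = reblock.key_cur;
        }
        close(fd);
}
#endif
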
static int hammer_reblock_helper(struct hammer_ioc_reblock *reblock,
                                hammer_cursor_t cursor,
                                hammer_btree_elm_t elm);
static int hammer_reblock_data(struct hammer_ioc_reblock *reblock,
                                hammer_cursor_t cursor, hammer_btree_elm_t elm);
static int hammer_reblock_leaf_node(struct hammer_ioc_reblock *reblock,
                                hammer_cursor_t cursor, hammer_btree_elm_t elm);
static int hammer_reblock_int_node(struct hammer_ioc_reblock *reblock,
                                hammer_cursor_t cursor, hammer_btree_elm_t elm);

int
hammer_ioc_reblock(hammer_transaction_t trans, hammer_inode_t ip,
                   struct hammer_ioc_reblock *reblock)
{
        struct hammer_cursor cursor;
        hammer_btree_elm_t elm;
        int checkspace_count;
        int error;
        int seq;
        int slop;

        /*
         * A fill level <= 20% is considered an emergency.  free_level is
         * inverted from fill_level.
         */
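        /*
         * Example (illustrative, assuming the usual 8MB big-block size):
         * asking for at least HAMMER_LARGEBLOCK_SIZE * 8 / 10 (~6.4MB) of
         * free space per big-block corresponds to a fill level of 20% or
         * less, so the stricter emergency space reserve is used.
         */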
        if (reblock->free_level >= HAMMER_LARGEBLOCK_SIZE * 8 / 10)
                slop = HAMMER_CHKSPC_EMERGENCY;
        else
                slop = HAMMER_CHKSPC_REBLOCK;

        if ((reblock->key_beg.localization | reblock->key_end.localization) &
            HAMMER_LOCALIZE_PSEUDOFS_MASK) {
                return(EINVAL);
        }
        if (reblock->key_beg.obj_id >= reblock->key_end.obj_id)
                return(EINVAL);
        if (reblock->free_level < 0)
                return(EINVAL);

        reblock->key_cur = reblock->key_beg;
        reblock->key_cur.localization &= HAMMER_LOCALIZE_MASK;
        reblock->key_cur.localization += ip->obj_localization;

        checkspace_count = 0;
        seq = trans->hmp->flusher.done;
retry:
        error = hammer_init_cursor(trans, &cursor, NULL, NULL);
        if (error) {
                hammer_done_cursor(&cursor);
                goto failed;
        }
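        /*
         * Scan the B-Tree over the full key and record-type range between
         * the requested object ids, restricted to the localization domain
         * (object space / PFS) of the passed inode.
         */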
        cursor.key_beg.localization = reblock->key_cur.localization;
        cursor.key_beg.obj_id = reblock->key_cur.obj_id;
        cursor.key_beg.key = HAMMER_MIN_KEY;
        cursor.key_beg.create_tid = 1;
        cursor.key_beg.delete_tid = 0;
        cursor.key_beg.rec_type = HAMMER_MIN_RECTYPE;
        cursor.key_beg.obj_type = 0;

        cursor.key_end.localization = (reblock->key_end.localization &
                                        HAMMER_LOCALIZE_MASK) +
                                      ip->obj_localization;
        cursor.key_end.obj_id = reblock->key_end.obj_id;
        cursor.key_end.key = HAMMER_MAX_KEY;
        cursor.key_end.create_tid = HAMMER_MAX_TID - 1;
        cursor.key_end.delete_tid = 0;
        cursor.key_end.rec_type = HAMMER_MAX_RECTYPE;
        cursor.key_end.obj_type = 0;

        cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE;
        cursor.flags |= HAMMER_CURSOR_BACKEND;
        cursor.flags |= HAMMER_CURSOR_NOSWAPCACHE;

        /*
         * This flag allows the btree scan code to return internal nodes,
         * so we can reblock them in addition to the leafs.  Only specify it
         * if we intend to reblock B-Tree nodes.
         */
        if (reblock->head.flags & HAMMER_IOC_DO_BTREE)
                cursor.flags |= HAMMER_CURSOR_REBLOCKING;

        error = hammer_btree_first(&cursor);
        while (error == 0) {
                /*
                 * Internal or Leaf node
                 */
                KKASSERT(cursor.index < cursor.node->ondisk->count);
                elm = &cursor.node->ondisk->elms[cursor.index];
                reblock->key_cur.obj_id = elm->base.obj_id;
                reblock->key_cur.localization = elm->base.localization;

                /*
                 * Yield to more important tasks
                 */
                if ((error = hammer_signal_check(trans->hmp)) != 0)
                        break;

                /*
                 * If there is insufficient free space it may be due to
                 * reserved bigblocks, which flushing might fix.
                 *
                 * We must force a retest in case the unlocked cursor is
                 * moved to the end of the leaf, or moved to an internal
                 * node.
                 *
                 * WARNING: See warnings in hammer_unlock_cursor() function.
                 */
                if (hammer_checkspace(trans->hmp, slop)) {
                        if (++checkspace_count == 10) {
                                error = ENOSPC;
                                break;
                        }
                        hammer_unlock_cursor(&cursor);
                        cursor.flags |= HAMMER_CURSOR_RETEST;
                        hammer_flusher_wait(trans->hmp, seq);
                        hammer_lock_cursor(&cursor);
                        seq = hammer_flusher_async(trans->hmp, NULL);
                        goto skip;
                }

                /*
                 * Acquiring the sync_lock prevents the operation from
                 * crossing a synchronization boundary.
                 *
                 * NOTE: cursor.node may have changed on return.
                 *
                 * WARNING: See warnings in hammer_unlock_cursor() function.
                 */
                hammer_sync_lock_sh(trans);
                error = hammer_reblock_helper(reblock, &cursor, elm);
                hammer_sync_unlock(trans);

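                /*
                 * Pace the reblocker against the flusher.  If the flusher
                 * is holding too much dirty meta-data or the UNDO FIFO is
                 * close to exhaustion, release the cursor, wait for the
                 * pending flush sequence to complete, then start another
                 * flush cycle.  The cursor cannot stay locked across the
                 * wait (see the hammer_unlock_cursor() warnings above).
                 */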
                while (hammer_flusher_meta_halflimit(trans->hmp) ||
                       hammer_flusher_undo_exhausted(trans, 2)) {
                        hammer_unlock_cursor(&cursor);
                        hammer_flusher_wait(trans->hmp, seq);
                        hammer_lock_cursor(&cursor);
                        seq = hammer_flusher_async_one(trans->hmp);
                }

                /*
                 * Setup for iteration, our cursor flags may be modified by
                 * other threads while we are unlocked.
                 */
                cursor.flags |= HAMMER_CURSOR_ATEDISK;

                /*
                 * We allocate data buffers, which atm we don't track
                 * dirty levels for because we allow the kernel to write
                 * them.  But if we allocate too many we can still deadlock
                 * the buffer cache.
                 *
                 * WARNING: See warnings in hammer_unlock_cursor() function.
                 *          (The cursor's node and element may change!)
                 */
                if (bd_heatup()) {
                        hammer_unlock_cursor(&cursor);
                        bwillwrite(HAMMER_XBUFSIZE);
                        hammer_lock_cursor(&cursor);
                }
skip:
                if (error == 0) {
                        error = hammer_btree_iterate(&cursor);
                }
        }
        if (error == ENOENT)
                error = 0;
        hammer_done_cursor(&cursor);
        if (error == EWOULDBLOCK) {
                hammer_flusher_sync(trans->hmp);
                goto retry;
        }
        if (error == EDEADLK)
                goto retry;
        if (error == EINTR) {
                reblock->head.flags |= HAMMER_IOC_HEAD_INTR;
                error = 0;
        }
failed:
        reblock->key_cur.localization &= HAMMER_LOCALIZE_MASK;
        return(error);
}

/*
 * Reblock the B-Tree (leaf) node, record, and/or data if necessary.
 *
 * XXX We have no visibility into internal B-Tree nodes at the moment,
 *     only leaf nodes.
 */
static int
hammer_reblock_helper(struct hammer_ioc_reblock *reblock,
                      hammer_cursor_t cursor, hammer_btree_elm_t elm)
{
        hammer_mount_t hmp;
        hammer_off_t tmp_offset;
        hammer_node_ondisk_t ondisk;
        struct hammer_btree_leaf_elm leaf;
        int error;
        int bytes;
        int cur;
        int iocflags;

        error = 0;
        hmp = cursor->trans->hmp;

        /*
         * Reblock data.  Note that data embedded in a record is reblocked
         * by the record reblock code.  Data processing only occurs at leaf
         * nodes and for RECORD element types.
         */
        if (cursor->node->ondisk->type != HAMMER_BTREE_TYPE_LEAF)
                goto skip;
        if (elm->leaf.base.btype != HAMMER_BTREE_TYPE_RECORD)
                return(0);
        tmp_offset = elm->leaf.data_offset;
        if (tmp_offset == 0)
                goto skip;
        if (error)
                goto skip;

        /*
         * NOTE: Localization restrictions may also have been set-up, we can't
         *       just set the match flags willy-nilly here.
         */
        switch(elm->leaf.base.rec_type) {
        case HAMMER_RECTYPE_INODE:
        case HAMMER_RECTYPE_SNAPSHOT:
        case HAMMER_RECTYPE_CONFIG:
                iocflags = HAMMER_IOC_DO_INODES;
                break;
        case HAMMER_RECTYPE_EXT:
        case HAMMER_RECTYPE_FIX:
        case HAMMER_RECTYPE_PFS:
        case HAMMER_RECTYPE_DIRENTRY:
                iocflags = HAMMER_IOC_DO_DIRS;
                break;
        case HAMMER_RECTYPE_DATA:
        case HAMMER_RECTYPE_DB:
                iocflags = HAMMER_IOC_DO_DATA;
                break;
        default:
                iocflags = 0;
                break;
        }
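        /*
         * Only records whose class was requested by the caller (inodes,
         * directory/structural records, or file data) are considered for
         * reblocking below.
         */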
        if (reblock->head.flags & iocflags) {
                ++reblock->data_count;
                reblock->data_byte_count += elm->leaf.data_len;
                bytes = hammer_blockmap_getfree(hmp, tmp_offset, &cur, &error);
                if (hammer_debug_general & 0x4000)
                        kprintf("D %6d/%d\n", bytes, reblock->free_level);
                if (error == 0 && (cur == 0 || reblock->free_level == 0) &&
                    bytes >= reblock->free_level) {
                        /*
                         * This is nasty, the uncache code may have to get
                         * vnode locks and because of that we can't hold
                         * the cursor locked.
                         *
                         * WARNING: See warnings in hammer_unlock_cursor()
                         *          function.
                         */
                        leaf = elm->leaf;
                        hammer_unlock_cursor(cursor);
                        hammer_io_direct_uncache(hmp, &leaf);
                        hammer_lock_cursor(cursor);

                        /*
                         * elm may have become stale or invalid, reload it.
                         * ondisk variable is temporary only.  Note that
                         * cursor->node and thus cursor->node->ondisk may
                         * also have changed.
                         */
                        ondisk = cursor->node->ondisk;
                        elm = &ondisk->elms[cursor->index];
                        if (cursor->flags & HAMMER_CURSOR_RETEST) {
                                kprintf("hammer: debug: retest on "
                                        "reblocker uncache\n");
                                error = EDEADLK;
                        } else if (ondisk->type != HAMMER_BTREE_TYPE_LEAF ||
                                   cursor->index >= ondisk->count) {
                                kprintf("hammer: debug: shifted on "
                                        "reblocker uncache\n");
                                error = EDEADLK;
                        } else if (bcmp(&elm->leaf, &leaf, sizeof(leaf))) {
                                kprintf("hammer: debug: changed on "
                                        "reblocker uncache\n");
                                error = EDEADLK;
                        }
                        if (error == 0)
                                error = hammer_cursor_upgrade(cursor);
                        if (error == 0) {
                                KKASSERT(cursor->index < ondisk->count);
                                error = hammer_reblock_data(reblock,
                                                            cursor, elm);
                        }
                        if (error == 0) {
                                ++reblock->data_moves;
                                reblock->data_byte_moves += elm->leaf.data_len;
                        }
                }
        }

skip:
        /*
         * Reblock a B-Tree internal or leaf node.  A leaf node is reblocked
         * on initial entry only (element 0).  An internal node is reblocked
         * when entered upward from its first leaf node only (also element 0).
         * Further revisits of the internal node (index > 0) are ignored.
         */
        tmp_offset = cursor->node->node_offset;
        if (cursor->index == 0 &&
            error == 0 && (reblock->head.flags & HAMMER_IOC_DO_BTREE)) {
                ++reblock->btree_count;
                bytes = hammer_blockmap_getfree(hmp, tmp_offset, &cur, &error);
                if (hammer_debug_general & 0x4000)
                        kprintf("B %6d/%d\n", bytes, reblock->free_level);
                if (error == 0 && (cur == 0 || reblock->free_level == 0) &&
                    bytes >= reblock->free_level) {
                        error = hammer_cursor_upgrade(cursor);
                        if (error == 0) {
                                if (cursor->parent) {
                                        KKASSERT(cursor->parent_index <
                                                 cursor->parent->ondisk->count);
                                        elm = &cursor->parent->ondisk->elms[cursor->parent_index];
                                } else {
                                        elm = NULL;
                                }
                                switch(cursor->node->ondisk->type) {
                                case HAMMER_BTREE_TYPE_LEAF:
                                        error = hammer_reblock_leaf_node(
                                                        reblock, cursor, elm);
                                        break;
                                case HAMMER_BTREE_TYPE_INTERNAL:
                                        error = hammer_reblock_int_node(
                                                        reblock, cursor, elm);
                                        break;
                                default:
                                        panic("Illegal B-Tree node type");
                                }
                        }
                        if (error == 0) {
                                ++reblock->btree_moves;
                        }
                }
        }

        hammer_cursor_downgrade(cursor);
        return(error);
}

/*
 * Reblock a record's data.  Both the B-Tree element and record pointers
 * to the data must be adjusted.
 */
static int
hammer_reblock_data(struct hammer_ioc_reblock *reblock,
                    hammer_cursor_t cursor, hammer_btree_elm_t elm)
{
        struct hammer_buffer *data_buffer = NULL;
        hammer_off_t ndata_offset;
        int error;
        void *ndata;

        error = hammer_btree_extract(cursor, HAMMER_CURSOR_GET_DATA |
                                             HAMMER_CURSOR_GET_LEAF);
        if (error)
                return (error);
        ndata = hammer_alloc_data(cursor->trans, elm->leaf.data_len,
                                  elm->leaf.base.rec_type,
                                  &ndata_offset, &data_buffer,
                                  0, &error);
        if (error)
                goto done;
        hammer_io_notmeta(data_buffer);

        /*
         * Move the data.  Note that we must invalidate any cached
         * data buffer in the cursor before calling blockmap_free.
         * The blockmap_free may free up the entire large-block and
         * will not be able to invalidate it if the cursor is holding
         * a data buffer cached in that large block.
         */
        hammer_modify_buffer(cursor->trans, data_buffer, NULL, 0);
        bcopy(cursor->data, ndata, elm->leaf.data_len);
        hammer_modify_buffer_done(data_buffer);
        hammer_cursor_invalidate_cache(cursor);

        hammer_blockmap_free(cursor->trans,
                             elm->leaf.data_offset, elm->leaf.data_len);

        hammer_modify_node(cursor->trans, cursor->node,
                           &elm->leaf.data_offset, sizeof(hammer_off_t));
        elm->leaf.data_offset = ndata_offset;
        hammer_modify_node_done(cursor->node);

done:
        if (data_buffer)
                hammer_rel_buffer(data_buffer, 0);
        return (error);
}

/*
 * Reblock a B-Tree leaf node.  The parent must be adjusted to point to
 * the new copy of the leaf node.
 *
 * elm is a pointer to the parent element pointing at cursor.node.
 */
static int
hammer_reblock_leaf_node(struct hammer_ioc_reblock *reblock,
                         hammer_cursor_t cursor, hammer_btree_elm_t elm)
{
        hammer_node_t onode;
        hammer_node_t nnode;
        int error;

        /*
         * Don't supply a hint when allocating the leaf.  Fills are done
         * from the leaf upwards.
         */
        onode = cursor->node;
        nnode = hammer_alloc_btree(cursor->trans, 0, &error);

        if (nnode == NULL)
                return (error);

        /*
         * Move the node
         */
        hammer_lock_ex(&nnode->lock);
        hammer_modify_node_noundo(cursor->trans, nnode);
        bcopy(onode->ondisk, nnode->ondisk, sizeof(*nnode->ondisk));

        if (elm) {
                /*
                 * We are not the root of the B-Tree
                 */
                hammer_modify_node(cursor->trans, cursor->parent,
                                   &elm->internal.subtree_offset,
                                   sizeof(elm->internal.subtree_offset));
                elm->internal.subtree_offset = nnode->node_offset;
                hammer_modify_node_done(cursor->parent);
        } else {
                /*
                 * We are the root of the B-Tree
                 */
                hammer_volume_t volume;

                volume = hammer_get_root_volume(cursor->trans->hmp, &error);
                KKASSERT(error == 0);

                hammer_modify_volume_field(cursor->trans, volume,
                                           vol0_btree_root);
                volume->ondisk->vol0_btree_root = nnode->node_offset;
                hammer_modify_volume_done(volume);
                hammer_rel_volume(volume, 0);
        }

        hammer_cursor_replaced_node(onode, nnode);
        hammer_delete_node(cursor->trans, onode);

        if (hammer_debug_general & 0x4000) {
                kprintf("REBLOCK LNODE %016llx -> %016llx\n",
                        (long long)onode->node_offset,
                        (long long)nnode->node_offset);
        }
        hammer_modify_node_done(nnode);
        cursor->node = nnode;

        hammer_unlock(&onode->lock);
        hammer_rel_node(onode);

        return (error);
}

/*
 * Reblock a B-Tree internal node.  The parent must be adjusted to point to
 * the new copy of the internal node, and the node's children's parent
 * pointers must also be adjusted to point to the new copy.
 *
 * elm is a pointer to the parent element pointing at cursor.node.
 */
static int
hammer_reblock_int_node(struct hammer_ioc_reblock *reblock,
                        hammer_cursor_t cursor, hammer_btree_elm_t elm)
{
        struct hammer_node_lock lockroot;
        hammer_node_t onode;
        hammer_node_t nnode;
        hammer_off_t hint;
        int error;
        int i;

        hammer_node_lock_init(&lockroot, cursor->node);
        error = hammer_btree_lock_children(cursor, 1, &lockroot, NULL);
        if (error)
                goto done;

        /*
         * The internal node is visited after recursing through its
         * first element.  Use the subtree offset allocated for that
         * element as a hint for allocating the internal node.
         */
        onode = cursor->node;
        if (onode->ondisk->count)
                hint = onode->ondisk->elms[0].internal.subtree_offset;
        else
                hint = 0;
        nnode = hammer_alloc_btree(cursor->trans, hint, &error);

        if (nnode == NULL)
                goto done;

        /*
         * Move the node.  Adjust the parent's pointer to us first.
         */
        hammer_lock_ex(&nnode->lock);
        hammer_modify_node_noundo(cursor->trans, nnode);
        bcopy(onode->ondisk, nnode->ondisk, sizeof(*nnode->ondisk));

        if (elm) {
                /*
                 * We are not the root of the B-Tree
                 */
                hammer_modify_node(cursor->trans, cursor->parent,
                                   &elm->internal.subtree_offset,
                                   sizeof(elm->internal.subtree_offset));
                elm->internal.subtree_offset = nnode->node_offset;
                hammer_modify_node_done(cursor->parent);
        } else {
                /*
                 * We are the root of the B-Tree
                 */
                hammer_volume_t volume;

                volume = hammer_get_root_volume(cursor->trans->hmp, &error);
                KKASSERT(error == 0);

                hammer_modify_volume_field(cursor->trans, volume,
                                           vol0_btree_root);
                volume->ondisk->vol0_btree_root = nnode->node_offset;
                hammer_modify_volume_done(volume);
                hammer_rel_volume(volume, 0);
        }

        /*
         * Now adjust our children's pointers to us.
         */
        for (i = 0; i < nnode->ondisk->count; ++i) {
                elm = &nnode->ondisk->elms[i];
                error = btree_set_parent(cursor->trans, nnode, elm);
                if (error)
                        panic("reblock internal node: fixup problem");
        }

        /*
         * Clean up.
         *
         * The new node replaces the current node in the cursor.  The cursor
         * expects it to be locked so leave it locked.  Discard onode.
         */
        hammer_cursor_replaced_node(onode, nnode);
        hammer_delete_node(cursor->trans, onode);

        if (hammer_debug_general & 0x4000) {
                kprintf("REBLOCK INODE %016llx -> %016llx\n",
                        (long long)onode->node_offset,
                        (long long)nnode->node_offset);
        }
        hammer_modify_node_done(nnode);
        cursor->node = nnode;

        hammer_unlock(&onode->lock);
        hammer_rel_node(onode);

done:
        hammer_btree_unlock_children(cursor->trans->hmp, &lockroot, NULL);
        return (error);
}