sys/vfs/hammer: Fix comment
sys/vfs/hammer/hammer_reblock.c
/*
 * Copyright (c) 2008-2012 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * HAMMER reblocker - This code frees up fragmented physical space
 *
 * HAMMER only keeps track of free space on a big-block basis.  A big-block
 * containing holes can only be freed by migrating the remaining data in
 * that big-block into a new big-block, then freeing the big-block.
 *
 * This function is called from an ioctl or via the hammer support thread.
 */
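
/*
 * Userland reaches this code through the HAMMERIOC_REBLOCK ioctl; the
 * hammer(8) utility's "reblock" directives are the canonical driver.
 * A minimal sketch of such a caller follows.  The initialization
 * conventions shown (the obj_id bounds, deriving free_level directly,
 * fd being an open descriptor on the filesystem root) are illustrative
 * assumptions, not a copy of hammer(8):
 *
 *        struct hammer_ioc_reblock reblock;
 *
 *        bzero(&reblock, sizeof(reblock));
 *        reblock.key_beg.obj_id = HAMMER_MIN_OBJID;
 *        reblock.key_end.obj_id = HAMMER_MAX_OBJID;
 *        reblock.free_level = HAMMER_BIGBLOCK_SIZE / 2;
 *        reblock.head.flags = HAMMER_IOC_DO_DATA | HAMMER_IOC_DO_BTREE |
 *                             HAMMER_IOC_DO_INODES | HAMMER_IOC_DO_DIRS;
 *        if (ioctl(fd, HAMMERIOC_REBLOCK, &reblock) < 0)
 *                err(1, "HAMMERIOC_REBLOCK");
 *
 * With free_level set to HAMMER_BIGBLOCK_SIZE / 2, only big-blocks that
 * are at least half free are migrated.
 */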

#include "hammer.h"

static int hammer_reblock_helper(struct hammer_ioc_reblock *reblock,
                                 hammer_cursor_t cursor,
                                 hammer_btree_elm_t elm);
static int hammer_reblock_data(struct hammer_ioc_reblock *reblock,
                                hammer_cursor_t cursor, hammer_btree_elm_t elm);
static int hammer_reblock_leaf_node(struct hammer_ioc_reblock *reblock,
                                hammer_cursor_t cursor, hammer_btree_elm_t elm);
static int hammer_reblock_int_node(struct hammer_ioc_reblock *reblock,
                                hammer_cursor_t cursor, hammer_btree_elm_t elm);

int
hammer_ioc_reblock(hammer_transaction_t trans, hammer_inode_t ip,
                   struct hammer_ioc_reblock *reblock)
{
        struct hammer_cursor cursor;
        hammer_btree_elm_t elm;
        int checkspace_count;
        int error;
        int seq;
        int slop;

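        /*
         * Sanity-check the arguments.  The localization fields may not
         * specify pseudo-fs bits (the PFS is taken from the inode the
         * ioctl was issued against), the object id range must be
         * ascending, and free_level must be a byte count between 0 and
         * HAMMER_BIGBLOCK_SIZE.
         */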
        if ((reblock->key_beg.localization | reblock->key_end.localization) &
            HAMMER_LOCALIZE_PSEUDOFS_MASK) {
                return(EINVAL);
        }
        if (reblock->key_beg.obj_id >= reblock->key_end.obj_id)
                return(EINVAL);
        if (reblock->free_level < 0 ||
            reblock->free_level > HAMMER_BIGBLOCK_SIZE)
                return(EINVAL);

        /*
         * A fill_percentage <= 20% is considered an emergency.  free_level
         * is the inverse of fill_percentage, expressed as bytes of free
         * space per big-block.
         */
        if (reblock->free_level >= HAMMER_BIGBLOCK_SIZE * 8 / 10)
                slop = HAMMER_CHKSPC_EMERGENCY;
        else
                slop = HAMMER_CHKSPC_REBLOCK;

        reblock->key_cur = reblock->key_beg;
        reblock->key_cur.localization &= HAMMER_LOCALIZE_MASK;
        reblock->key_cur.localization += ip->obj_localization;

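        /*
         * seq tracks the flusher sequence we have waited on or kicked
         * off; when free space runs low we wait for it and then start
         * another flush cycle.  checkspace_count limits how many such
         * retries we make before giving up with ENOSPC.
         */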
        checkspace_count = 0;
        seq = trans->hmp->flusher.done;
retry:
        error = hammer_init_cursor(trans, &cursor, NULL, NULL);
        if (error) {
                hammer_done_cursor(&cursor);
                goto failed;
        }
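
        /*
         * Set up a B-Tree scan from key_cur (where we resume after a
         * retry) through key_end, covering the full key, rec_type and
         * create_tid space for each object.
         */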
        cursor.key_beg.localization = reblock->key_cur.localization;
        cursor.key_beg.obj_id = reblock->key_cur.obj_id;
        cursor.key_beg.key = HAMMER_MIN_KEY;
        cursor.key_beg.create_tid = 1;
        cursor.key_beg.delete_tid = 0;
        cursor.key_beg.rec_type = HAMMER_MIN_RECTYPE;
        cursor.key_beg.obj_type = 0;

        cursor.key_end.localization = (reblock->key_end.localization &
                                        HAMMER_LOCALIZE_MASK) +
                                      ip->obj_localization;
        cursor.key_end.obj_id = reblock->key_end.obj_id;
        cursor.key_end.key = HAMMER_MAX_KEY;
        cursor.key_end.create_tid = HAMMER_MAX_TID - 1;
        cursor.key_end.delete_tid = 0;
        cursor.key_end.rec_type = HAMMER_MAX_RECTYPE;
        cursor.key_end.obj_type = 0;

        cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE;
        cursor.flags |= HAMMER_CURSOR_BACKEND;
        cursor.flags |= HAMMER_CURSOR_NOSWAPCACHE;

        /*
         * This flag allows the B-Tree scan code to return internal nodes,
         * so we can reblock them in addition to the leaves.  Only specify
         * it if we intend to reblock B-Tree nodes.
         */
        if (reblock->head.flags & HAMMER_IOC_DO_BTREE)
                cursor.flags |= HAMMER_CURSOR_REBLOCKING;

        error = hammer_btree_first(&cursor);
        while (error == 0) {
                /*
                 * Internal or Leaf node
                 */
                KKASSERT(cursor.index < cursor.node->ondisk->count);
                elm = &cursor.node->ondisk->elms[cursor.index];
                reblock->key_cur.obj_id = elm->base.obj_id;
                reblock->key_cur.localization = elm->base.localization;

                /*
                 * Yield to more important tasks
                 */
                if ((error = hammer_signal_check(trans->hmp)) != 0)
                        break;

                /*
                 * If there is insufficient free space, it may be due to
                 * reserved big-blocks, which flushing might fix.
                 *
                 * We must force a retest in case the unlocked cursor is
                 * moved to the end of the leaf, or moved to an internal
                 * node.
                 *
                 * WARNING: See warnings in hammer_unlock_cursor() function.
                 */
                if (hammer_checkspace(trans->hmp, slop)) {
                        if (++checkspace_count == 10) {
                                error = ENOSPC;
                                break;
                        }
                        hammer_unlock_cursor(&cursor);
                        cursor.flags |= HAMMER_CURSOR_RETEST;
                        hammer_flusher_wait(trans->hmp, seq);
                        hammer_lock_cursor(&cursor);
                        seq = hammer_flusher_async(trans->hmp, NULL);
                        goto skip;
                }

                /*
                 * Acquiring the sync_lock prevents the operation from
                 * crossing a synchronization boundary.
                 *
                 * NOTE: cursor.node may have changed on return.
                 *
                 * WARNING: See warnings in hammer_unlock_cursor() function.
                 */
                hammer_sync_lock_sh(trans);
                error = hammer_reblock_helper(reblock, &cursor, elm);
                hammer_sync_unlock(trans);

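                /*
                 * If the flusher is falling behind (meta-data buffers past
                 * the half-way mark, or the UNDO FIFO close to exhaustion)
                 * stall until it catches up before generating more work.
                 */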
                while (hammer_flusher_meta_halflimit(trans->hmp) ||
                       hammer_flusher_undo_exhausted(trans, 2)) {
                        hammer_unlock_cursor(&cursor);
                        hammer_flusher_wait(trans->hmp, seq);
                        hammer_lock_cursor(&cursor);
                        seq = hammer_flusher_async_one(trans->hmp);
                }

                /*
                 * Set up for iteration; our cursor flags may be modified by
                 * other threads while we are unlocked.
                 */
                cursor.flags |= HAMMER_CURSOR_ATEDISK;

                /*
                 * We allocate data buffers, which at the moment we do not
                 * track dirty levels for because we allow the kernel to
                 * write them.  But if we allocate too many we can still
                 * deadlock the buffer cache.
                 *
                 * WARNING: See warnings in hammer_unlock_cursor() function.
                 *          (The cursor's node and element may change!)
                 */
                if (bd_heatup()) {
                        hammer_unlock_cursor(&cursor);
                        bwillwrite(HAMMER_XBUFSIZE);
                        hammer_lock_cursor(&cursor);
                }
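
                /*
                 * Give the VM system a chance to catch up if free pages
                 * are running low.
                 */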
                vm_wait_nominal();
skip:
                if (error == 0) {
                        error = hammer_btree_iterate(&cursor);
                }
        }
        if (error == ENOENT)
                error = 0;
        hammer_done_cursor(&cursor);
        if (error == EWOULDBLOCK) {
                hammer_flusher_sync(trans->hmp);
                goto retry;
        }
        if (error == EDEADLK)
                goto retry;
        if (error == EINTR) {
                reblock->head.flags |= HAMMER_IOC_HEAD_INTR;
                error = 0;
        }
failed:
        reblock->key_cur.localization &= HAMMER_LOCALIZE_MASK;
        return(error);
}

/*
 * Reblock the B-Tree (leaf) node, record, and/or data if necessary.
 *
 * XXX We have no visibility into internal B-Tree nodes at the moment,
 * only leaf nodes.
 */
static int
hammer_reblock_helper(struct hammer_ioc_reblock *reblock,
                      hammer_cursor_t cursor, hammer_btree_elm_t elm)
{
        hammer_mount_t hmp;
        hammer_off_t tmp_offset;
        hammer_node_ondisk_t ondisk;
        struct hammer_btree_leaf_elm leaf;
        int error;
        int bytes;
        int cur;
        int iocflags;

        error = 0;
        hmp = cursor->trans->hmp;

        /*
         * Reblock data.  Note that data embedded in a record is reblocked
         * by the record reblock code.  Data processing only occurs at leaf
         * nodes and for RECORD element types.
         */
        if (cursor->node->ondisk->type != HAMMER_BTREE_TYPE_LEAF)
                goto skip;
        if (elm->leaf.base.btype != HAMMER_BTREE_TYPE_RECORD)
                return(0);
        tmp_offset = elm->leaf.data_offset;
        if (tmp_offset == 0)
                goto skip;
        if (error)
                goto skip;

        /*
         * NOTE: Localization restrictions may also have been set up; we
         *       can't just set the match flags willy-nilly here.
         */
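        /*
         * Map the record type to the ioctl flag which controls whether
         * this class of record is reblocked: inode, snapshot and config
         * meta-data fall under DO_INODES; directory entries and other
         * fixed/PFS records under DO_DIRS; file data and database
         * records under DO_DATA.
         */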
        switch(elm->leaf.base.rec_type) {
        case HAMMER_RECTYPE_INODE:
        case HAMMER_RECTYPE_SNAPSHOT:
        case HAMMER_RECTYPE_CONFIG:
                iocflags = HAMMER_IOC_DO_INODES;
                break;
        case HAMMER_RECTYPE_EXT:
        case HAMMER_RECTYPE_FIX:
        case HAMMER_RECTYPE_PFS:
        case HAMMER_RECTYPE_DIRENTRY:
                iocflags = HAMMER_IOC_DO_DIRS;
                break;
        case HAMMER_RECTYPE_DATA:
        case HAMMER_RECTYPE_DB:
                iocflags = HAMMER_IOC_DO_DATA;
                break;
        default:
                iocflags = 0;
                break;
        }
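        /*
         * Only consider the record if the ioctl asked for this class and
         * the big-block holding the data has at least free_level bytes
         * free.  The big-block the allocator reports as its current
         * big-block (cur != 0) is skipped unless free_level is 0.
         */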
        if (reblock->head.flags & iocflags) {
                ++reblock->data_count;
                reblock->data_byte_count += elm->leaf.data_len;
                bytes = hammer_blockmap_getfree(hmp, tmp_offset, &cur, &error);
                if (hammer_debug_general & 0x4000)
                        kprintf("D %6d/%d\n", bytes, reblock->free_level);
                if (error == 0 && (cur == 0 || reblock->free_level == 0) &&
                    bytes >= reblock->free_level) {
                        /*
                         * This is nasty: the uncache code may have to
                         * acquire vnode locks, and because of that we
                         * cannot hold the cursor locked.
                         *
                         * WARNING: See warnings in hammer_unlock_cursor()
                         *          function.
                         */
                        leaf = elm->leaf;
                        hammer_unlock_cursor(cursor);
                        hammer_io_direct_uncache(hmp, &leaf);
                        hammer_lock_cursor(cursor);

                        /*
                         * elm may have become stale or invalid; reload it.
                         * The ondisk variable is temporary only.  Note that
                         * cursor->node, and thus cursor->node->ondisk, may
                         * also have changed.
                         */
                        ondisk = cursor->node->ondisk;
                        elm = &ondisk->elms[cursor->index];
                        if (cursor->flags & HAMMER_CURSOR_RETEST) {
                                kprintf("hammer: debug: retest on "
                                        "reblocker uncache\n");
                                error = EDEADLK;
                        } else if (ondisk->type != HAMMER_BTREE_TYPE_LEAF ||
                                   cursor->index >= ondisk->count) {
                                kprintf("hammer: debug: shifted on "
                                        "reblocker uncache\n");
                                error = EDEADLK;
                        } else if (bcmp(&elm->leaf, &leaf, sizeof(leaf))) {
                                kprintf("hammer: debug: changed on "
                                        "reblocker uncache\n");
                                error = EDEADLK;
                        }
                        if (error == 0)
                                error = hammer_cursor_upgrade(cursor);
                        if (error == 0) {
                                KKASSERT(cursor->index < ondisk->count);
                                error = hammer_reblock_data(reblock,
                                                            cursor, elm);
                        }
                        if (error == 0) {
                                ++reblock->data_moves;
                                reblock->data_byte_moves += elm->leaf.data_len;
                        }
                }
        }

skip:
        /*
         * Reblock a B-Tree internal or leaf node.  A leaf node is reblocked
         * on initial entry only (element 0).  An internal node is reblocked
         * when entered upward from its first leaf node only (also element 0).
         * Further revisits of the internal node (index > 0) are ignored.
         */
        tmp_offset = cursor->node->node_offset;
        if (cursor->index == 0 &&
            error == 0 && (reblock->head.flags & HAMMER_IOC_DO_BTREE)) {
                ++reblock->btree_count;
                bytes = hammer_blockmap_getfree(hmp, tmp_offset, &cur, &error);
                if (hammer_debug_general & 0x4000)
                        kprintf("B %6d/%d\n", bytes, reblock->free_level);
                if (error == 0 && (cur == 0 || reblock->free_level == 0) &&
                    bytes >= reblock->free_level) {
                        error = hammer_cursor_upgrade(cursor);
                        if (error == 0) {
                                if (cursor->parent) {
                                        KKASSERT(cursor->parent_index <
                                                 cursor->parent->ondisk->count);
                                        elm = &cursor->parent->ondisk->elms[cursor->parent_index];
                                } else {
                                        elm = NULL;
                                }
                                switch(cursor->node->ondisk->type) {
                                case HAMMER_BTREE_TYPE_LEAF:
                                        error = hammer_reblock_leaf_node(
                                                        reblock, cursor, elm);
                                        break;
                                case HAMMER_BTREE_TYPE_INTERNAL:
                                        error = hammer_reblock_int_node(
                                                        reblock, cursor, elm);
                                        break;
                                default:
                                        panic("Illegal B-Tree node type");
                                }
                        }
                        if (error == 0) {
                                ++reblock->btree_moves;
                        }
                }
        }

        hammer_cursor_downgrade(cursor);
        return(error);
}

/*
 * Reblock a record's data.  Both the B-Tree element and record pointers
 * to the data must be adjusted.
 */
static int
hammer_reblock_data(struct hammer_ioc_reblock *reblock,
                    hammer_cursor_t cursor, hammer_btree_elm_t elm)
{
        struct hammer_buffer *data_buffer = NULL;
        hammer_off_t ndata_offset;
        int error;
        void *ndata;

        error = hammer_btree_extract(cursor, HAMMER_CURSOR_GET_DATA |
                                             HAMMER_CURSOR_GET_LEAF);
        if (error)
                return (error);
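
        /*
         * Allocate a fresh copy of the data.  New allocations are packed
         * together by the allocator, which is what actually defragments
         * the storage; the old copy is freed further below.
         */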
        ndata = hammer_alloc_data(cursor->trans, elm->leaf.data_len,
                                  elm->leaf.base.rec_type,
                                  &ndata_offset, &data_buffer,
                                  0, &error);
        if (error)
                goto done;
        hammer_io_notmeta(data_buffer);

        /*
         * Move the data.  Note that we must invalidate any cached
         * data buffer in the cursor before calling blockmap_free.
         * The blockmap_free may free up the entire big-block and
         * will not be able to invalidate it if the cursor is holding
         * a data buffer cached in that big-block.
         */
        hammer_modify_buffer(cursor->trans, data_buffer, NULL, 0);
        bcopy(cursor->data, ndata, elm->leaf.data_len);
        hammer_modify_buffer_done(data_buffer);
        hammer_cursor_invalidate_cache(cursor);

        hammer_blockmap_free(cursor->trans,
                             elm->leaf.data_offset, elm->leaf.data_len);

        hammer_modify_node(cursor->trans, cursor->node,
                           &elm->leaf.data_offset, sizeof(hammer_off_t));
        elm->leaf.data_offset = ndata_offset;
        hammer_modify_node_done(cursor->node);

done:
        if (data_buffer)
                hammer_rel_buffer(data_buffer, 0);
        return (error);
}

/*
 * Reblock a B-Tree leaf node.  The parent must be adjusted to point to
 * the new copy of the leaf node.
 *
 * elm is a pointer to the parent element pointing at cursor->node.
 */
static int
hammer_reblock_leaf_node(struct hammer_ioc_reblock *reblock,
                         hammer_cursor_t cursor, hammer_btree_elm_t elm)
{
        hammer_node_t onode;
        hammer_node_t nnode;
        int error;

        /*
         * Don't supply a hint when allocating the leaf.  Fills are done
         * from the leaf upwards.
         */
        onode = cursor->node;
        nnode = hammer_alloc_btree(cursor->trans, 0, &error);

        if (nnode == NULL)
                return (error);

        /*
         * Move the node
         */
        hammer_lock_ex(&nnode->lock);
        hammer_modify_node_noundo(cursor->trans, nnode);
        bcopy(onode->ondisk, nnode->ondisk, sizeof(*nnode->ondisk));

        if (elm) {
                /*
                 * We are not the root of the B-Tree
                 */
                hammer_modify_node(cursor->trans, cursor->parent,
                                   &elm->internal.subtree_offset,
                                   sizeof(elm->internal.subtree_offset));
                elm->internal.subtree_offset = nnode->node_offset;
                hammer_modify_node_done(cursor->parent);
        } else {
                /*
                 * We are the root of the B-Tree
                 */
                hammer_volume_t volume;

                volume = hammer_get_root_volume(cursor->trans->hmp, &error);
                KKASSERT(error == 0);

                hammer_modify_volume_field(cursor->trans, volume,
                                           vol0_btree_root);
                volume->ondisk->vol0_btree_root = nnode->node_offset;
                hammer_modify_volume_done(volume);
                hammer_rel_volume(volume, 0);
        }

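        /*
         * Existing cursors referencing onode are repointed to nnode,
         * then the old node is deleted.
         */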
        hammer_cursor_replaced_node(onode, nnode);
        hammer_delete_node(cursor->trans, onode);

        if (hammer_debug_general & 0x4000) {
                kprintf("REBLOCK LNODE %016llx -> %016llx\n",
                        (long long)onode->node_offset,
                        (long long)nnode->node_offset);
        }
        hammer_modify_node_done(nnode);
        cursor->node = nnode;

        hammer_unlock(&onode->lock);
        hammer_rel_node(onode);

        return (error);
}

/*
 * Reblock a B-Tree internal node.  The parent must be adjusted to point to
 * the new copy of the internal node, and the node's children's parent
 * pointers must also be adjusted to point to the new copy.
 *
 * elm is a pointer to the parent element pointing at cursor->node.
 */
static int
hammer_reblock_int_node(struct hammer_ioc_reblock *reblock,
                         hammer_cursor_t cursor, hammer_btree_elm_t elm)
{
        struct hammer_node_lock lockroot;
        hammer_node_t onode;
        hammer_node_t nnode;
        int error;
        int i;

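        /*
         * Lock all of this node's children up front.  Their parent
         * pointers are repointed to the new copy of the node below, so
         * none of them may be allowed to shift underneath us.
         */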
        hammer_node_lock_init(&lockroot, cursor->node);
        error = hammer_btree_lock_children(cursor, 1, &lockroot, NULL);
        if (error)
                goto done;

        onode = cursor->node;
        nnode = hammer_alloc_btree(cursor->trans, 0, &error);

        if (nnode == NULL)
                goto done;

        /*
         * Move the node.  Adjust the parent's pointer to us first.
         */
        hammer_lock_ex(&nnode->lock);
        hammer_modify_node_noundo(cursor->trans, nnode);
        bcopy(onode->ondisk, nnode->ondisk, sizeof(*nnode->ondisk));

        if (elm) {
                /*
                 * We are not the root of the B-Tree
                 */
                hammer_modify_node(cursor->trans, cursor->parent,
                                   &elm->internal.subtree_offset,
                                   sizeof(elm->internal.subtree_offset));
                elm->internal.subtree_offset = nnode->node_offset;
                hammer_modify_node_done(cursor->parent);
        } else {
                /*
                 * We are the root of the B-Tree
                 */
                hammer_volume_t volume;

                volume = hammer_get_root_volume(cursor->trans->hmp, &error);
                KKASSERT(error == 0);

                hammer_modify_volume_field(cursor->trans, volume,
                                           vol0_btree_root);
                volume->ondisk->vol0_btree_root = nnode->node_offset;
                hammer_modify_volume_done(volume);
                hammer_rel_volume(volume, 0);
        }

        /*
         * Now adjust our children's pointers to us.
         */
        for (i = 0; i < nnode->ondisk->count; ++i) {
                elm = &nnode->ondisk->elms[i];
                error = btree_set_parent(cursor->trans, nnode, elm);
                if (error)
                        panic("reblock internal node: fixup problem");
        }

        /*
         * Clean up.
         *
         * The new node replaces the current node in the cursor.  The cursor
         * expects it to be locked so leave it locked.  Discard onode.
         */
        hammer_cursor_replaced_node(onode, nnode);
        hammer_delete_node(cursor->trans, onode);

        if (hammer_debug_general & 0x4000) {
                kprintf("REBLOCK INODE %016llx -> %016llx\n",
                        (long long)onode->node_offset,
                        (long long)nnode->node_offset);
        }
        hammer_modify_node_done(nnode);
        cursor->node = nnode;

        hammer_unlock(&onode->lock);
        hammer_rel_node(onode);

done:
        hammer_btree_unlock_children(cursor->trans->hmp, &lockroot, NULL);
        return (error);
}