/*
 * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_reblock.c,v 1.31 2008/07/13 09:32:48 dillon Exp $
 */
/*
 * HAMMER reblocker - This code frees up fragmented physical space
 *
 * HAMMER only keeps track of free space on a big-block basis.  A big-block
 * containing holes can only be freed by migrating the remaining data in
 * that big-block into a new big-block, then freeing the big-block.
 *
 * The reblocker is invoked from an ioctl or via the hammer support thread.
 */
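
/*
 * Illustrative sketch (not part of the original file): userland normally
 * drives this code through the HAMMERIOC_REBLOCK ioctl, which is what the
 * hammer(8) reblock directives issue.  The constants and threshold chosen
 * below are assumptions for illustration only; see hammer_ioctl.h and
 * hammer(8) for the authoritative interface.
 *
 *	struct hammer_ioc_reblock reblock;
 *
 *	bzero(&reblock, sizeof(reblock));
 *	reblock.key_beg.obj_id = HAMMER_MIN_OBJID;
 *	reblock.key_end.obj_id = HAMMER_MAX_OBJID;
 *	reblock.free_level = HAMMER_LARGEBLOCK_SIZE / 2;
 *	reblock.head.flags = HAMMER_IOC_DO_FLAGS;
 *	ioctl(fd, HAMMERIOC_REBLOCK, &reblock);
 */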

#include "hammer.h"

static int hammer_reblock_helper(struct hammer_ioc_reblock *reblock,
                                 hammer_cursor_t cursor,
                                 hammer_btree_elm_t elm);
static int hammer_reblock_data(struct hammer_ioc_reblock *reblock,
                                hammer_cursor_t cursor, hammer_btree_elm_t elm);
static int hammer_reblock_leaf_node(struct hammer_ioc_reblock *reblock,
                                hammer_cursor_t cursor, hammer_btree_elm_t elm);
static int hammer_reblock_int_node(struct hammer_ioc_reblock *reblock,
                                hammer_cursor_t cursor, hammer_btree_elm_t elm);

int
hammer_ioc_reblock(hammer_transaction_t trans, hammer_inode_t ip,
               struct hammer_ioc_reblock *reblock)
{
        struct hammer_cursor cursor;
        hammer_btree_elm_t elm;
        int checkspace_count;
        int error;
        int seq;

        if ((reblock->key_beg.localization | reblock->key_end.localization) &
            HAMMER_LOCALIZE_PSEUDOFS_MASK) {
                return(EINVAL);
        }
        if (reblock->key_beg.obj_id >= reblock->key_end.obj_id)
                return(EINVAL);
        if (reblock->free_level < 0)
                return(EINVAL);

        reblock->key_cur = reblock->key_beg;
        reblock->key_cur.localization &= HAMMER_LOCALIZE_MASK;
        reblock->key_cur.localization += ip->obj_localization;

        checkspace_count = 0;
        seq = trans->hmp->flusher.act;
retry:
        error = hammer_init_cursor(trans, &cursor, NULL, NULL);
        if (error) {
                hammer_done_cursor(&cursor);
                goto failed;
        }
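
        /*
         * Scan the requested object id range within the PFS the ioctl
         * was issued on.  key_cur tracks progress so the scan can be
         * resumed after a retry.
         */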
        cursor.key_beg.localization = reblock->key_cur.localization;
        cursor.key_beg.obj_id = reblock->key_cur.obj_id;
        cursor.key_beg.key = HAMMER_MIN_KEY;
        cursor.key_beg.create_tid = 1;
        cursor.key_beg.delete_tid = 0;
        cursor.key_beg.rec_type = HAMMER_MIN_RECTYPE;
        cursor.key_beg.obj_type = 0;

        cursor.key_end.localization = (reblock->key_end.localization &
                                        HAMMER_LOCALIZE_MASK) +
                                      ip->obj_localization;
        cursor.key_end.obj_id = reblock->key_end.obj_id;
        cursor.key_end.key = HAMMER_MAX_KEY;
        cursor.key_end.create_tid = HAMMER_MAX_TID - 1;
        cursor.key_end.delete_tid = 0;
        cursor.key_end.rec_type = HAMMER_MAX_RECTYPE;
        cursor.key_end.obj_type = 0;

        cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE;
        cursor.flags |= HAMMER_CURSOR_BACKEND;

        /*
         * This flag allows the B-Tree scan code to return internal nodes,
         * so we can reblock them in addition to the leaves.  Only specify it
         * if we intend to reblock B-Tree nodes.
         */
        if (reblock->head.flags & HAMMER_IOC_DO_BTREE)
                cursor.flags |= HAMMER_CURSOR_REBLOCKING;

        error = hammer_btree_first(&cursor);
        while (error == 0) {
                /*
                 * Internal or Leaf node
                 */
                elm = &cursor.node->ondisk->elms[cursor.index];
                reblock->key_cur.obj_id = elm->base.obj_id;
                reblock->key_cur.localization = elm->base.localization;

                /*
                 * Yield to more important tasks
                 */
                if ((error = hammer_signal_check(trans->hmp)) != 0)
                        break;

                /*
                 * If there is insufficient free space it may be due to
                 * reserved big-blocks, which flushing might fix.
                 */
                if (hammer_checkspace(trans->hmp, HAMMER_CHKSPC_REBLOCK)) {
                        if (++checkspace_count == 10) {
                                error = ENOSPC;
                                break;
                        }
                        hammer_unlock_cursor(&cursor, 0);
                        hammer_flusher_wait(trans->hmp, seq);
                        hammer_lock_cursor(&cursor, 0);
                        seq = hammer_flusher_async(trans->hmp, NULL);
                }

                /*
                 * We allocate data buffers, which we currently do not
                 * track at a dirty level because we allow the kernel to
                 * write them out.  But if we allocate too many we can
                 * still deadlock the buffer cache.
                 */
                if (bd_heatup()) {
                        hammer_unlock_cursor(&cursor, 0);
                        bwillwrite(HAMMER_BUFSIZE);
                        hammer_lock_cursor(&cursor, 0);
                }

                /*
                 * Acquiring the sync_lock prevents the operation from
                 * crossing a synchronization boundary.
                 *
                 * NOTE: cursor.node may have changed on return.
                 */
                hammer_sync_lock_sh(trans);
                error = hammer_reblock_helper(reblock, &cursor, elm);
                hammer_sync_unlock(trans);

                while (hammer_flusher_meta_halflimit(trans->hmp) ||
                       hammer_flusher_undo_exhausted(trans, 2)) {
                        hammer_unlock_cursor(&cursor, 0);
                        hammer_flusher_wait(trans->hmp, seq);
                        hammer_lock_cursor(&cursor, 0);
                        seq = hammer_flusher_async_one(trans->hmp);
                }
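
                /*
                 * Setting ATEDISK marks the current on-disk element as
                 * consumed so the iteration below advances to the next
                 * element instead of returning the same one again.
                 */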
                if (error == 0) {
                        cursor.flags |= HAMMER_CURSOR_ATEDISK;
                        error = hammer_btree_iterate(&cursor);
                }

        }
        if (error == ENOENT)
                error = 0;
        hammer_done_cursor(&cursor);
        if (error == EWOULDBLOCK) {
                hammer_flusher_sync(trans->hmp);
                goto retry;
        }
        if (error == EDEADLK)
                goto retry;
        if (error == EINTR) {
                reblock->head.flags |= HAMMER_IOC_HEAD_INTR;
                error = 0;
        }
failed:
        reblock->key_cur.localization &= HAMMER_LOCALIZE_MASK;
        return(error);
}

/*
 * Reblock the B-Tree (leaf) node, record, and/or data if necessary.
 *
 * XXX We have no visibility into internal B-Tree nodes at the moment,
 * only leaf nodes.
 */
static int
hammer_reblock_helper(struct hammer_ioc_reblock *reblock,
                      hammer_cursor_t cursor, hammer_btree_elm_t elm)
{
        hammer_mount_t hmp;
        hammer_off_t tmp_offset;
        int error;
        int bytes;
        int cur;
        int iocflags;

        error = 0;
        hmp = cursor->trans->hmp;

        /*
         * Reblock data.  Note that data embedded in a record is reblocked
         * by the record reblock code.  Data processing only occurs at leaf
         * nodes and for RECORD element types.
         */
        if (cursor->node->ondisk->type != HAMMER_BTREE_TYPE_LEAF)
                goto skip;
        if (elm->leaf.base.btype != HAMMER_BTREE_TYPE_RECORD)
                return(0);
        tmp_offset = elm->leaf.data_offset;
        if (tmp_offset == 0)
                goto skip;
        if (error)
                goto skip;

        /*
         * NOTE: Localization restrictions may also have been set up; we
         * can't just set the match flags willy-nilly here.
         */
        switch(elm->leaf.base.rec_type) {
        case HAMMER_RECTYPE_INODE:
                iocflags = HAMMER_IOC_DO_INODES;
                break;
        case HAMMER_RECTYPE_EXT:
        case HAMMER_RECTYPE_FIX:
        case HAMMER_RECTYPE_PFS:
        case HAMMER_RECTYPE_DIRENTRY:
                iocflags = HAMMER_IOC_DO_DIRS;
                break;
        case HAMMER_RECTYPE_DATA:
        case HAMMER_RECTYPE_DB:
                iocflags = HAMMER_IOC_DO_DATA;
                break;
        default:
                iocflags = 0;
                break;
        }
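
        /*
         * Relocate the data only if the ioctl asked for this record type
         * and the big-block holding it is sufficiently empty.
         * hammer_blockmap_getfree() returns the number of free bytes in
         * the big-block and sets cur if it is the big-block currently
         * being allocated out of, which we leave alone unless free_level
         * is 0.
         */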
        if (reblock->head.flags & iocflags) {
                ++reblock->data_count;
                reblock->data_byte_count += elm->leaf.data_len;
                bytes = hammer_blockmap_getfree(hmp, tmp_offset, &cur, &error);
                if (hammer_debug_general & 0x4000)
                        kprintf("D %6d/%d\n", bytes, reblock->free_level);
                if (error == 0 && (cur == 0 || reblock->free_level == 0) &&
                    bytes >= reblock->free_level) {
                        hammer_io_direct_uncache(hmp, &elm->leaf);
                        error = hammer_cursor_upgrade(cursor);
                        if (error == 0) {
                                error = hammer_reblock_data(reblock,
                                                            cursor, elm);
                        }
                        if (error == 0) {
                                ++reblock->data_moves;
                                reblock->data_byte_moves += elm->leaf.data_len;
                        }
                }
        }

skip:
        /*
         * Reblock a B-Tree internal or leaf node.
         */
        tmp_offset = cursor->node->node_offset;
        if (cursor->index == 0 &&
            error == 0 && (reblock->head.flags & HAMMER_IOC_DO_BTREE)) {
                ++reblock->btree_count;
                bytes = hammer_blockmap_getfree(hmp, tmp_offset, &cur, &error);
                if (hammer_debug_general & 0x4000)
                        kprintf("B %6d/%d\n", bytes, reblock->free_level);
                if (error == 0 && (cur == 0 || reblock->free_level == 0) &&
                    bytes >= reblock->free_level) {
                        error = hammer_cursor_upgrade(cursor);
                        if (error == 0) {
                                if (cursor->parent)
                                        elm = &cursor->parent->ondisk->elms[cursor->parent_index];
                                else
                                        elm = NULL;
                                switch(cursor->node->ondisk->type) {
                                case HAMMER_BTREE_TYPE_LEAF:
                                        error = hammer_reblock_leaf_node(
                                                        reblock, cursor, elm);
                                        break;
                                case HAMMER_BTREE_TYPE_INTERNAL:
                                        error = hammer_reblock_int_node(
                                                        reblock, cursor, elm);
                                        break;
                                default:
                                        panic("Illegal B-Tree node type");
                                }
                        }
                        if (error == 0) {
                                ++reblock->btree_moves;
                        }
                }
        }

        hammer_cursor_downgrade(cursor);
        return(error);
}

/*
 * Reblock a record's data.  Both the B-Tree element and record pointers
 * to the data must be adjusted.
 */
static int
hammer_reblock_data(struct hammer_ioc_reblock *reblock,
                    hammer_cursor_t cursor, hammer_btree_elm_t elm)
{
        struct hammer_buffer *data_buffer = NULL;
        hammer_off_t ndata_offset;
        int error;
        void *ndata;

        error = hammer_btree_extract(cursor, HAMMER_CURSOR_GET_DATA |
                                             HAMMER_CURSOR_GET_LEAF);
        if (error)
                return (error);
        ndata = hammer_alloc_data(cursor->trans, elm->leaf.data_len,
                                  elm->leaf.base.rec_type,
                                  &ndata_offset, &data_buffer, &error);
        if (error)
                goto done;

        /*
         * Move the data
         */
        hammer_modify_buffer(cursor->trans, data_buffer, NULL, 0);
        bcopy(cursor->data, ndata, elm->leaf.data_len);
        hammer_modify_buffer_done(data_buffer);

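        /*
         * Free the old data storage.  Releasing it is what eventually
         * empties the old big-block so it can be returned to the freemap.
         */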
        hammer_blockmap_free(cursor->trans,
                             elm->leaf.data_offset, elm->leaf.data_len);

        hammer_modify_node(cursor->trans, cursor->node,
                           &elm->leaf.data_offset, sizeof(hammer_off_t));
        elm->leaf.data_offset = ndata_offset;
        hammer_modify_node_done(cursor->node);

done:
        if (data_buffer)
                hammer_rel_buffer(data_buffer, 0);
        return (error);
}

/*
 * Reblock a B-Tree leaf node.  The parent must be adjusted to point to
 * the new copy of the leaf node.
 *
 * elm is a pointer to the parent element pointing at cursor.node.
 */
static int
hammer_reblock_leaf_node(struct hammer_ioc_reblock *reblock,
                         hammer_cursor_t cursor, hammer_btree_elm_t elm)
{
        hammer_node_t onode;
        hammer_node_t nnode;
        int error;

        onode = cursor->node;
        nnode = hammer_alloc_btree(cursor->trans, &error);

        if (nnode == NULL)
                return (error);

        /*
         * Move the node
         */
        hammer_lock_ex(&nnode->lock);
        hammer_modify_node_noundo(cursor->trans, nnode);
        bcopy(onode->ondisk, nnode->ondisk, sizeof(*nnode->ondisk));

        if (elm) {
                /*
                 * We are not the root of the B-Tree
                 */
                hammer_modify_node(cursor->trans, cursor->parent,
                                   &elm->internal.subtree_offset,
                                   sizeof(elm->internal.subtree_offset));
                elm->internal.subtree_offset = nnode->node_offset;
                hammer_modify_node_done(cursor->parent);
        } else {
                /*
                 * We are the root of the B-Tree
                 */
                hammer_volume_t volume;

                volume = hammer_get_root_volume(cursor->trans->hmp, &error);
                KKASSERT(error == 0);

                hammer_modify_volume_field(cursor->trans, volume,
                                           vol0_btree_root);
                volume->ondisk->vol0_btree_root = nnode->node_offset;
                hammer_modify_volume_done(volume);
                hammer_rel_volume(volume, 0);
        }

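        /*
         * The new copy replaces onode in any cursors referencing it, and
         * deleting the old node releases its storage back to the
         * big-block it came from.
         */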
        hammer_cursor_replaced_node(onode, nnode);
        hammer_delete_node(cursor->trans, onode);

        if (hammer_debug_general & 0x4000) {
                kprintf("REBLOCK LNODE %016llx -> %016llx\n",
                        onode->node_offset, nnode->node_offset);
        }
        hammer_modify_node_done(nnode);
        cursor->node = nnode;

        hammer_unlock(&onode->lock);
        hammer_rel_node(onode);

        return (error);
}

/*
 * Reblock a B-Tree internal node.  The parent must be adjusted to point to
 * the new copy of the internal node, and the node's children's parent
 * pointers must also be adjusted to point to the new copy.
 *
 * elm is a pointer to the parent element pointing at cursor.node.
 */
static int
hammer_reblock_int_node(struct hammer_ioc_reblock *reblock,
                         hammer_cursor_t cursor, hammer_btree_elm_t elm)
{
        hammer_node_locklist_t locklist = NULL;
        hammer_node_t onode;
        hammer_node_t nnode;
        int error;
        int i;

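        /*
         * Lock all of the children up-front.  Their parent linkages are
         * repointed to the new copy below via btree_set_parent() and must
         * not change out from under us while we do it.
         */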
        error = hammer_btree_lock_children(cursor, &locklist);
        if (error)
                goto done;

        onode = cursor->node;
        nnode = hammer_alloc_btree(cursor->trans, &error);

        if (nnode == NULL)
                goto done;

        /*
         * Move the node.  Adjust the parent's pointer to us first.
         */
        hammer_lock_ex(&nnode->lock);
        hammer_modify_node_noundo(cursor->trans, nnode);
        bcopy(onode->ondisk, nnode->ondisk, sizeof(*nnode->ondisk));

        if (elm) {
                /*
                 * We are not the root of the B-Tree
                 */
                hammer_modify_node(cursor->trans, cursor->parent,
                                   &elm->internal.subtree_offset,
                                   sizeof(elm->internal.subtree_offset));
                elm->internal.subtree_offset = nnode->node_offset;
                hammer_modify_node_done(cursor->parent);
        } else {
                /*
                 * We are the root of the B-Tree
                 */
                hammer_volume_t volume;

                volume = hammer_get_root_volume(cursor->trans->hmp, &error);
                KKASSERT(error == 0);

                hammer_modify_volume_field(cursor->trans, volume,
                                           vol0_btree_root);
                volume->ondisk->vol0_btree_root = nnode->node_offset;
                hammer_modify_volume_done(volume);
                hammer_rel_volume(volume, 0);
        }

        /*
         * Now adjust our children's parent pointers to point at the
         * new copy.
         */
        for (i = 0; i < nnode->ondisk->count; ++i) {
                elm = &nnode->ondisk->elms[i];
                error = btree_set_parent(cursor->trans, nnode, elm);
                if (error)
                        panic("reblock internal node: fixup problem");
        }

        /*
         * Clean up.
         *
         * The new node replaces the current node in the cursor.  The cursor
         * expects it to be locked so leave it locked.  Discard onode.
         */
        hammer_cursor_replaced_node(onode, nnode);
        hammer_delete_node(cursor->trans, onode);

        if (hammer_debug_general & 0x4000) {
                kprintf("REBLOCK INODE %016llx -> %016llx\n",
                        onode->node_offset, nnode->node_offset);
        }
        hammer_modify_node_done(nnode);
        cursor->node = nnode;

        hammer_unlock(&onode->lock);
        hammer_rel_node(onode);

done:
        hammer_btree_unlock_children(&locklist);
        return (error);
}