sys/vfs/hammer/hammer_reblock.c
/*
 * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_reblock.c,v 1.34 2008/11/13 02:18:43 dillon Exp $
 */
/*
 * HAMMER reblocker - This code frees up fragmented physical space
 *
 * HAMMER only keeps track of free space on a big-block basis.  A big-block
 * containing holes can only be freed by migrating the remaining data in
 * that big-block into a new big-block, then freeing the big-block.
 *
 * This function is called from an ioctl or via the hammer support thread.
 */
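/*
 * Illustrative sketch (not part of the original source): userland normally
 * reaches hammer_ioc_reblock() through the HAMMERIOC_REBLOCK ioctl, roughly
 * the way the hammer(8) utility drives it.  The structure fields follow the
 * struct hammer_ioc_reblock usage in this file; the descriptor "fd", the
 * free_level target (here "at least 10% of a big-block free") and the flag
 * combination are hypothetical examples.
 *
 *	struct hammer_ioc_reblock reblock;
 *
 *	bzero(&reblock, sizeof(reblock));
 *	reblock.key_beg.obj_id = HAMMER_MIN_OBJID;
 *	reblock.key_end.obj_id = HAMMER_MAX_OBJID;
 *	reblock.free_level = HAMMER_LARGEBLOCK_SIZE / 10;
 *	reblock.head.flags = HAMMER_IOC_DO_INODES | HAMMER_IOC_DO_DIRS |
 *			     HAMMER_IOC_DO_DATA | HAMMER_IOC_DO_BTREE;
 *	if (ioctl(fd, HAMMERIOC_REBLOCK, &reblock) < 0)
 *		err(1, "HAMMERIOC_REBLOCK");
 */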

#include "hammer.h"

static int hammer_reblock_helper(struct hammer_ioc_reblock *reblock,
                                 hammer_cursor_t cursor,
                                 hammer_btree_elm_t elm);
static int hammer_reblock_data(struct hammer_ioc_reblock *reblock,
                                hammer_cursor_t cursor, hammer_btree_elm_t elm);
static int hammer_reblock_leaf_node(struct hammer_ioc_reblock *reblock,
                                hammer_cursor_t cursor, hammer_btree_elm_t elm);
static int hammer_reblock_int_node(struct hammer_ioc_reblock *reblock,
                                hammer_cursor_t cursor, hammer_btree_elm_t elm);

int
hammer_ioc_reblock(hammer_transaction_t trans, hammer_inode_t ip,
               struct hammer_ioc_reblock *reblock)
{
        struct hammer_cursor cursor;
        hammer_btree_elm_t elm;
        int checkspace_count;
        int error;
        int seq;
        int slop;

        /*
         * A fill level <= 20% is considered an emergency.  free_level is
         * inverted from fill_level.
         */
        if (reblock->free_level >= HAMMER_LARGEBLOCK_SIZE * 8 / 10)
                slop = HAMMER_CHKSPC_EMERGENCY;
        else
                slop = HAMMER_CHKSPC_REBLOCK;

        if ((reblock->key_beg.localization | reblock->key_end.localization) &
            HAMMER_LOCALIZE_PSEUDOFS_MASK) {
                return(EINVAL);
        }
        if (reblock->key_beg.obj_id >= reblock->key_end.obj_id)
                return(EINVAL);
        if (reblock->free_level < 0)
                return(EINVAL);

        reblock->key_cur = reblock->key_beg;
        reblock->key_cur.localization &= HAMMER_LOCALIZE_MASK;
        reblock->key_cur.localization += ip->obj_localization;

        checkspace_count = 0;
        seq = trans->hmp->flusher.act;
retry:
        error = hammer_init_cursor(trans, &cursor, NULL, NULL);
        if (error) {
                hammer_done_cursor(&cursor);
                goto failed;
        }
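        /*
         * Scan the requested object range.  key_cur tracks our progress so
         * that a retry after EWOULDBLOCK/EDEADLK resumes roughly where the
         * previous pass left off.
         */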
        cursor.key_beg.localization = reblock->key_cur.localization;
        cursor.key_beg.obj_id = reblock->key_cur.obj_id;
        cursor.key_beg.key = HAMMER_MIN_KEY;
        cursor.key_beg.create_tid = 1;
        cursor.key_beg.delete_tid = 0;
        cursor.key_beg.rec_type = HAMMER_MIN_RECTYPE;
        cursor.key_beg.obj_type = 0;

        cursor.key_end.localization = (reblock->key_end.localization &
                                        HAMMER_LOCALIZE_MASK) +
                                      ip->obj_localization;
        cursor.key_end.obj_id = reblock->key_end.obj_id;
        cursor.key_end.key = HAMMER_MAX_KEY;
        cursor.key_end.create_tid = HAMMER_MAX_TID - 1;
        cursor.key_end.delete_tid = 0;
        cursor.key_end.rec_type = HAMMER_MAX_RECTYPE;
        cursor.key_end.obj_type = 0;

        cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE;
        cursor.flags |= HAMMER_CURSOR_BACKEND;

        /*
         * This flag allows the B-Tree scan code to return internal nodes,
         * so we can reblock them in addition to the leaves.  Only specify it
         * if we intend to reblock B-Tree nodes.
         */
        if (reblock->head.flags & HAMMER_IOC_DO_BTREE)
                cursor.flags |= HAMMER_CURSOR_REBLOCKING;

        error = hammer_btree_first(&cursor);
        while (error == 0) {
                /*
                 * Internal or Leaf node
                 */
                elm = &cursor.node->ondisk->elms[cursor.index];
                reblock->key_cur.obj_id = elm->base.obj_id;
                reblock->key_cur.localization = elm->base.localization;

                /*
                 * Stop if the user has signalled us (e.g. ^C).  The partial
                 * completion is reported to the caller via the EINTR
                 * handling below.
                 */
                if ((error = hammer_signal_check(trans->hmp)) != 0)
                        break;

                /*
                 * If there is insufficient free space it may be due to
                 * reserved bigblocks, which flushing might fix.
                 */
                if (hammer_checkspace(trans->hmp, slop)) {
                        if (++checkspace_count == 10) {
                                error = ENOSPC;
                                break;
                        }
                        hammer_unlock_cursor(&cursor, 0);
                        hammer_flusher_wait(trans->hmp, seq);
                        hammer_lock_cursor(&cursor, 0);
                        seq = hammer_flusher_async(trans->hmp, NULL);
                        continue;
                }

                /*
                 * Acquiring the sync_lock prevents the operation from
                 * crossing a synchronization boundary.
                 *
                 * NOTE: cursor.node may have changed on return.
                 */
                hammer_sync_lock_sh(trans);
                error = hammer_reblock_helper(reblock, &cursor, elm);
                hammer_sync_unlock(trans);

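                /*
                 * Throttle: if the flusher has built up too much dirty
                 * meta-data, or the UNDO FIFO is close to exhaustion, wait
                 * for flush cycles to catch up before generating more
                 * reblocking meta-data.
                 */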
                while (hammer_flusher_meta_halflimit(trans->hmp) ||
                       hammer_flusher_undo_exhausted(trans, 2)) {
                        hammer_unlock_cursor(&cursor, 0);
                        hammer_flusher_wait(trans->hmp, seq);
                        hammer_lock_cursor(&cursor, 0);
                        seq = hammer_flusher_async_one(trans->hmp);
                }

                /*
                 * Set up for the iteration.  Our cursor flags may have been
                 * modified by other threads while we were unlocked.
                 */
                cursor.flags |= HAMMER_CURSOR_ATEDISK;

                /*
                 * We allocate data buffers, whose dirty levels we do not
                 * currently track because we allow the kernel to write
                 * them out.  But if we allocate too many we can still
                 * deadlock the buffer cache.
                 *
                 * (The cursor's node and element may change!)
                 */
                if (bd_heatup()) {
                        hammer_unlock_cursor(&cursor, 0);
                        bwillwrite(HAMMER_XBUFSIZE);
                        hammer_lock_cursor(&cursor, 0);
                }

                if (error == 0) {
                        error = hammer_btree_iterate(&cursor);
                }

        }
        if (error == ENOENT)
                error = 0;
        hammer_done_cursor(&cursor);
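        /*
         * EWOULDBLOCK and EDEADLK are transient; sync the flusher if
         * necessary and retry from key_cur.  EINTR reports a partial
         * completion back to the caller instead of an error.
         */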
        if (error == EWOULDBLOCK) {
                hammer_flusher_sync(trans->hmp);
                goto retry;
        }
        if (error == EDEADLK)
                goto retry;
        if (error == EINTR) {
                reblock->head.flags |= HAMMER_IOC_HEAD_INTR;
                error = 0;
        }
failed:
        reblock->key_cur.localization &= HAMMER_LOCALIZE_MASK;
        return(error);
}

/*
 * Reblock the B-Tree (leaf) node, record, and/or data if necessary.
 *
 * XXX We have no visibility into internal B-Tree nodes at the moment,
 * only leaf nodes.
 */
static int
hammer_reblock_helper(struct hammer_ioc_reblock *reblock,
                      hammer_cursor_t cursor, hammer_btree_elm_t elm)
{
        hammer_mount_t hmp;
        hammer_off_t tmp_offset;
        struct hammer_btree_leaf_elm leaf;
        int error;
        int bytes;
        int cur;
        int iocflags;

        error = 0;
        hmp = cursor->trans->hmp;

        /*
         * Reblock data.  Note that data embedded in a record is reblocked
         * by the record reblock code.  Data processing only occurs at leaf
         * nodes and for RECORD element types.
         */
        if (cursor->node->ondisk->type != HAMMER_BTREE_TYPE_LEAF)
                goto skip;
        if (elm->leaf.base.btype != HAMMER_BTREE_TYPE_RECORD)
                return(0);
        tmp_offset = elm->leaf.data_offset;
        if (tmp_offset == 0)
                goto skip;
        if (error)
                goto skip;

        /*
         * NOTE: Localization restrictions may also have been set up; we
         * can't just set the match flags willy-nilly here.
         */
        switch(elm->leaf.base.rec_type) {
        case HAMMER_RECTYPE_INODE:
                iocflags = HAMMER_IOC_DO_INODES;
                break;
        case HAMMER_RECTYPE_EXT:
        case HAMMER_RECTYPE_FIX:
        case HAMMER_RECTYPE_PFS:
        case HAMMER_RECTYPE_DIRENTRY:
                iocflags = HAMMER_IOC_DO_DIRS;
                break;
        case HAMMER_RECTYPE_DATA:
        case HAMMER_RECTYPE_DB:
                iocflags = HAMMER_IOC_DO_DATA;
                break;
        default:
                iocflags = 0;
                break;
        }
        if (reblock->head.flags & iocflags) {
                ++reblock->data_count;
                reblock->data_byte_count += elm->leaf.data_len;
                bytes = hammer_blockmap_getfree(hmp, tmp_offset, &cur, &error);
                if (hammer_debug_general & 0x4000)
                        kprintf("D %6d/%d\n", bytes, reblock->free_level);
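                /*
                 * Reblock the data only if its big-block has at least
                 * free_level bytes free and is not the blockmap's currently
                 * allocating big-block (cur != 0); a free_level of 0 forces
                 * everything to be relocated.
                 */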
                if (error == 0 && (cur == 0 || reblock->free_level == 0) &&
                    bytes >= reblock->free_level) {
                        /*
                         * This is nasty: the uncache code may have to
                         * acquire vnode locks, so we cannot keep the
                         * cursor locked across the call.
                         */
                        leaf = elm->leaf;
                        hammer_unlock_cursor(cursor, 0);
                        hammer_io_direct_uncache(hmp, &leaf);
                        hammer_lock_cursor(cursor, 0);
                        if (cursor->flags & HAMMER_CURSOR_RETEST) {
                                kprintf("hammer: retest after uncache\n");
                                error = EDEADLK;
                        } else {
                                KKASSERT(bcmp(&elm->leaf, &leaf, sizeof(leaf)) == 0);
                        }
                        if (error == 0)
                                error = hammer_cursor_upgrade(cursor);
                        if (error == 0) {
                                error = hammer_reblock_data(reblock,
                                                            cursor, elm);
                        }
                        if (error == 0) {
                                ++reblock->data_moves;
                                reblock->data_byte_moves += elm->leaf.data_len;
                        }
                }
        }

skip:
        /*
         * Reblock a B-Tree internal or leaf node.
         */
        tmp_offset = cursor->node->node_offset;
        if (cursor->index == 0 &&
            error == 0 && (reblock->head.flags & HAMMER_IOC_DO_BTREE)) {
                ++reblock->btree_count;
                bytes = hammer_blockmap_getfree(hmp, tmp_offset, &cur, &error);
                if (hammer_debug_general & 0x4000)
                        kprintf("B %6d/%d\n", bytes, reblock->free_level);
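                /*
                 * Same free-space test as for data above: only move the
                 * node if its big-block qualifies for reblocking.
                 */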
                if (error == 0 && (cur == 0 || reblock->free_level == 0) &&
                    bytes >= reblock->free_level) {
                        error = hammer_cursor_upgrade(cursor);
                        if (error == 0) {
                                if (cursor->parent)
                                        elm = &cursor->parent->ondisk->elms[cursor->parent_index];
                                else
                                        elm = NULL;
                                switch(cursor->node->ondisk->type) {
                                case HAMMER_BTREE_TYPE_LEAF:
                                        error = hammer_reblock_leaf_node(
                                                        reblock, cursor, elm);
                                        break;
                                case HAMMER_BTREE_TYPE_INTERNAL:
                                        error = hammer_reblock_int_node(
                                                        reblock, cursor, elm);
                                        break;
                                default:
                                        panic("Illegal B-Tree node type");
                                }
                        }
                        if (error == 0) {
                                ++reblock->btree_moves;
                        }
                }
        }

        hammer_cursor_downgrade(cursor);
        return(error);
}

/*
 * Reblock a record's data.  Both the B-Tree element and record pointers
 * to the data must be adjusted.
 */
static int
hammer_reblock_data(struct hammer_ioc_reblock *reblock,
                    hammer_cursor_t cursor, hammer_btree_elm_t elm)
{
        struct hammer_buffer *data_buffer = NULL;
        hammer_off_t ndata_offset;
        int error;
        void *ndata;

        error = hammer_btree_extract(cursor, HAMMER_CURSOR_GET_DATA |
                                             HAMMER_CURSOR_GET_LEAF);
        if (error)
                return (error);
        ndata = hammer_alloc_data(cursor->trans, elm->leaf.data_len,
                                  elm->leaf.base.rec_type,
                                  &ndata_offset, &data_buffer, &error);
        if (error)
                goto done;

        /*
         * Move the data
         */
        hammer_modify_buffer(cursor->trans, data_buffer, NULL, 0);
        bcopy(cursor->data, ndata, elm->leaf.data_len);
        hammer_modify_buffer_done(data_buffer);

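        /*
         * Free the old data allocation and repoint the B-Tree element at
         * the new copy.  The data itself is unchanged, so the recorded
         * data CRC remains valid; only the offset moves.
         */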
        hammer_blockmap_free(cursor->trans,
                             elm->leaf.data_offset, elm->leaf.data_len);

        hammer_modify_node(cursor->trans, cursor->node,
                           &elm->leaf.data_offset, sizeof(hammer_off_t));
        elm->leaf.data_offset = ndata_offset;
        hammer_modify_node_done(cursor->node);

done:
        if (data_buffer)
                hammer_rel_buffer(data_buffer, 0);
        return (error);
}

/*
 * Reblock a B-Tree leaf node.  The parent must be adjusted to point to
 * the new copy of the leaf node.
 *
 * elm is a pointer to the parent element pointing at cursor.node.
 */
static int
hammer_reblock_leaf_node(struct hammer_ioc_reblock *reblock,
                         hammer_cursor_t cursor, hammer_btree_elm_t elm)
{
        hammer_node_t onode;
        hammer_node_t nnode;
        int error;

        onode = cursor->node;
        nnode = hammer_alloc_btree(cursor->trans, &error);

        if (nnode == NULL)
                return (error);

        /*
         * Move the node
         */
        hammer_lock_ex(&nnode->lock);
        hammer_modify_node_noundo(cursor->trans, nnode);
        bcopy(onode->ondisk, nnode->ondisk, sizeof(*nnode->ondisk));

        if (elm) {
                /*
                 * We are not the root of the B-Tree
                 */
                hammer_modify_node(cursor->trans, cursor->parent,
                                   &elm->internal.subtree_offset,
                                   sizeof(elm->internal.subtree_offset));
                elm->internal.subtree_offset = nnode->node_offset;
                hammer_modify_node_done(cursor->parent);
        } else {
                /*
                 * We are the root of the B-Tree
                 */
                hammer_volume_t volume;

                volume = hammer_get_root_volume(cursor->trans->hmp, &error);
                KKASSERT(error == 0);

                hammer_modify_volume_field(cursor->trans, volume,
                                           vol0_btree_root);
                volume->ondisk->vol0_btree_root = nnode->node_offset;
                hammer_modify_volume_done(volume);
                hammer_rel_volume(volume, 0);
        }

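        /*
         * The new node replaces the old one in any cursors that reference
         * it, then the old node is deleted and released.
         */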
        hammer_cursor_replaced_node(onode, nnode);
        hammer_delete_node(cursor->trans, onode);

        if (hammer_debug_general & 0x4000) {
                kprintf("REBLOCK LNODE %016llx -> %016llx\n",
                        onode->node_offset, nnode->node_offset);
        }
        hammer_modify_node_done(nnode);
        cursor->node = nnode;

        hammer_unlock(&onode->lock);
        hammer_rel_node(onode);

        return (error);
}

/*
 * Reblock a B-Tree internal node.  The parent must be adjusted to point to
 * the new copy of the internal node, and the node's children's parent
 * pointers must also be adjusted to point to the new copy.
 *
 * elm is a pointer to the parent element pointing at cursor.node.
 */
static int
hammer_reblock_int_node(struct hammer_ioc_reblock *reblock,
                         hammer_cursor_t cursor, hammer_btree_elm_t elm)
{
        hammer_node_locklist_t locklist = NULL;
        hammer_node_t onode;
        hammer_node_t nnode;
        int error;
        int i;

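        /*
         * Lock all of this node's children up front so their parent
         * pointers can be repointed at the new copy without racing other
         * B-Tree operations; they are unlocked at 'done'.
         */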
        error = hammer_btree_lock_children(cursor, &locklist);
        if (error)
                goto done;

        onode = cursor->node;
        nnode = hammer_alloc_btree(cursor->trans, &error);

        if (nnode == NULL)
                goto done;

        /*
         * Move the node.  Adjust the parent's pointer to us first.
         */
        hammer_lock_ex(&nnode->lock);
        hammer_modify_node_noundo(cursor->trans, nnode);
        bcopy(onode->ondisk, nnode->ondisk, sizeof(*nnode->ondisk));

        if (elm) {
                /*
                 * We are not the root of the B-Tree
                 */
                hammer_modify_node(cursor->trans, cursor->parent,
                                   &elm->internal.subtree_offset,
                                   sizeof(elm->internal.subtree_offset));
                elm->internal.subtree_offset = nnode->node_offset;
                hammer_modify_node_done(cursor->parent);
        } else {
                /*
                 * We are the root of the B-Tree
                 */
                hammer_volume_t volume;

                volume = hammer_get_root_volume(cursor->trans->hmp, &error);
                KKASSERT(error == 0);

                hammer_modify_volume_field(cursor->trans, volume,
                                           vol0_btree_root);
                volume->ondisk->vol0_btree_root = nnode->node_offset;
                hammer_modify_volume_done(volume);
                hammer_rel_volume(volume, 0);
        }

        /*
         * Now adjust our children's pointers to us.
         */
        for (i = 0; i < nnode->ondisk->count; ++i) {
                elm = &nnode->ondisk->elms[i];
                error = btree_set_parent(cursor->trans, nnode, elm);
                if (error)
                        panic("reblock internal node: fixup problem");
        }

        /*
         * Clean up.
         *
         * The new node replaces the current node in the cursor.  The cursor
         * expects it to be locked so leave it locked.  Discard onode.
         */
        hammer_cursor_replaced_node(onode, nnode);
        hammer_delete_node(cursor->trans, onode);

        if (hammer_debug_general & 0x4000) {
                kprintf("REBLOCK INODE %016llx -> %016llx\n",
                        onode->node_offset, nnode->node_offset);
        }
        hammer_modify_node_done(nnode);
        cursor->node = nnode;

        hammer_unlock(&onode->lock);
        hammer_rel_node(onode);

done:
        hammer_btree_unlock_children(cursor, &locklist);
        return (error);
}