/*
 * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_prune.c,v 1.13 2008/07/11 01:22:29 dillon Exp $
 */

#include "hammer.h"

/*
 * Iterate through the specified range of object ids and remove any
 * deleted records that fall entirely within a prune modulo.
 *
 * A reverse iteration is used to prevent overlapping records from being
 * created during the iteration due to alignments.  This also allows us
 * to adjust alignments without blowing up the B-Tree.
 */
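
/*
 * Illustrative userland usage (a rough sketch, not part of this module):
 * a utility such as hammer(8) would fill in a struct hammer_ioc_prune and
 * issue the HAMMERIOC_PRUNE ioctl on a descriptor inside the filesystem.
 * The values below are made-up placeholders; only the field names follow
 * the structure used by the code in this file.
 *
 *	struct hammer_ioc_prune prune;
 *
 *	bzero(&prune, sizeof(prune));
 *	prune.key_beg.localization = lo_min;   (localization range to scan)
 *	prune.key_beg.obj_id = obj_min;        (object id range to scan)
 *	prune.key_end.localization = lo_max;
 *	prune.key_end.obj_id = obj_max;
 *	prune.nelms = nelms;           (element array sorted in descending
 *	prune.elms = elm_array;         order, each with beg/end/mod tids)
 *	if (ioctl(fd, HAMMERIOC_PRUNE, &prune) < 0)
 *		err(1, "HAMMERIOC_PRUNE");
 */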
static int prune_should_delete(struct hammer_ioc_prune *prune,
                               hammer_btree_leaf_elm_t elm);
static void prune_check_nlinks(hammer_cursor_t cursor,
                               hammer_btree_leaf_elm_t elm);

int
hammer_ioc_prune(hammer_transaction_t trans, hammer_inode_t ip,
                 struct hammer_ioc_prune *prune)
{
        struct hammer_cursor cursor;
        hammer_btree_leaf_elm_t elm;
        struct hammer_ioc_prune_elm *copy_elms;
        struct hammer_ioc_prune_elm *user_elms;
        int error;
        int isdir;
        int elm_array_size;

        if (prune->nelms < 0 || prune->nelms > HAMMER_MAX_PRUNE_ELMS)
                return(EINVAL);
        if ((prune->key_beg.localization | prune->key_end.localization) &
            HAMMER_LOCALIZE_PSEUDOFS_MASK) {
                return(EINVAL);
        }
        if (prune->key_beg.localization > prune->key_end.localization)
                return(EINVAL);
        if (prune->key_beg.localization == prune->key_end.localization) {
                if (prune->key_beg.obj_id > prune->key_end.obj_id)
                        return(EINVAL);
                /* key-space limitations - no check needed */
        }
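        /*
         * PRUNE_ALL mode does not use the element array at all, so a
         * non-zero element count in that mode is rejected.
         */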
        if ((prune->head.flags & HAMMER_IOC_PRUNE_ALL) && prune->nelms)
                return(EINVAL);

        prune->key_cur.localization = prune->key_end.localization +
                                      ip->obj_localization;
        prune->key_cur.obj_id = prune->key_end.obj_id;
        prune->key_cur.key = HAMMER_MAX_KEY;

        /*
         * Copy element array from userland
         */
        elm_array_size = sizeof(*copy_elms) * prune->nelms;
        user_elms = prune->elms;
        copy_elms = kmalloc(elm_array_size, M_TEMP, M_WAITOK);
        if ((error = copyin(user_elms, copy_elms, elm_array_size)) != 0)
                goto failed;
        prune->elms = copy_elms;
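        /*
         * NOTE: prune->elms now points at the kernel copy.  The original
         *       userland pointer is restored at 'failed:' before returning.
         */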

        /*
         * Scan backwards.  Retries typically occur if a deadlock is detected.
         */
retry:
        error = hammer_init_cursor(trans, &cursor, NULL, NULL);
        if (error) {
                hammer_done_cursor(&cursor);
                goto failed;
        }
        cursor.key_beg.localization = prune->key_beg.localization +
                                      ip->obj_localization;
        cursor.key_beg.obj_id = prune->key_beg.obj_id;
        cursor.key_beg.key = HAMMER_MIN_KEY;
        cursor.key_beg.create_tid = 1;
        cursor.key_beg.delete_tid = 0;
        cursor.key_beg.rec_type = HAMMER_MIN_RECTYPE;
        cursor.key_beg.obj_type = 0;

        cursor.key_end.localization = prune->key_cur.localization;
        cursor.key_end.obj_id = prune->key_cur.obj_id;
        cursor.key_end.key = prune->key_cur.key;
        cursor.key_end.create_tid = HAMMER_MAX_TID - 1;
        cursor.key_end.delete_tid = 0;
        cursor.key_end.rec_type = HAMMER_MAX_RECTYPE;
        cursor.key_end.obj_type = 0;

        cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE;
        cursor.flags |= HAMMER_CURSOR_BACKEND;

        /*
         * This flag allows the B-Tree code to clean up loose ends.
         */
        cursor.flags |= HAMMER_CURSOR_PRUNING;

        error = hammer_btree_last(&cursor);

        while (error == 0) {
                /*
                 * Check for work
                 */
                elm = &cursor.node->ondisk->elms[cursor.index].leaf;
                prune->key_cur = elm->base;

                /*
                 * Yield to more important tasks
                 */
                if ((error = hammer_signal_check(trans->hmp)) != 0)
                        break;
                if (hammer_flusher_meta_limit(trans->hmp) ||
                    hammer_flusher_undo_exhausted(trans, 2)) {
                        error = EWOULDBLOCK;
                        break;
                }

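                /*
                 * Track the oldest create_tid encountered so it can be
                 * reported back to the caller via stat_oldest_tid.
                 */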
                if (prune->stat_oldest_tid > elm->base.create_tid)
                        prune->stat_oldest_tid = elm->base.create_tid;

                if (hammer_debug_general & 0x0200) {
                        kprintf("check %016llx %016llx cre=%016llx del=%016llx\n",
                                        elm->base.obj_id,
                                        elm->base.key,
                                        elm->base.create_tid,
                                        elm->base.delete_tid);
                }

                if (prune_should_delete(prune, elm)) {
                        if (hammer_debug_general & 0x0200) {
                                kprintf("check %016llx %016llx: DELETE\n",
                                        elm->base.obj_id, elm->base.key);
                        }

                        /*
                         * NOTE: This can return EDEADLK
                         *
                         * Acquiring the sync lock guarantees that the
                         * operation will not cross a synchronization
                         * boundary (see the flusher).
                         */
                        isdir = (elm->base.rec_type == HAMMER_RECTYPE_DIRENTRY);

                        hammer_sync_lock_sh(trans);
                        error = hammer_delete_at_cursor(&cursor,
                                                        HAMMER_DELETE_DESTROY,
                                                        &prune->stat_bytes);
                        hammer_sync_unlock(trans);
                        if (error)
                                break;

                        if (isdir)
                                ++prune->stat_dirrecords;
                        else
                                ++prune->stat_rawrecords;

                        /*
                         * The current record might now be the one after
                         * the one we deleted; set ATEDISK to force us to
                         * skip it (since we are iterating backwards).
                         */
                        cursor.flags |= HAMMER_CURSOR_ATEDISK;
                } else {
                        /*
                         * Nothing to delete, but we may have to check other
                         * things.
                         */
                        prune_check_nlinks(&cursor, elm);
                        cursor.flags |= HAMMER_CURSOR_ATEDISK;
                        if (hammer_debug_general & 0x0100) {
                                kprintf("check %016llx %016llx: SKIP\n",
                                        elm->base.obj_id, elm->base.key);
                        }
                }
                ++prune->stat_scanrecords;
                error = hammer_btree_iterate_reverse(&cursor);
        }
        if (error == ENOENT)
                error = 0;
        hammer_done_cursor(&cursor);
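        /*
         * NOTE: prune->key_cur was updated as each element was visited and
         *       the retry path re-seeds cursor.key_end from it, so a retry
         *       resumes the reverse scan where it left off rather than
         *       restarting from key_end.
         */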
        if (error == EWOULDBLOCK) {
                hammer_flusher_sync(trans->hmp);
                goto retry;
        }
        if (error == EDEADLK)
                goto retry;
        if (error == EINTR) {
                prune->head.flags |= HAMMER_IOC_HEAD_INTR;
                error = 0;
        }
failed:
        prune->key_cur.localization &= HAMMER_LOCALIZE_MASK;
        prune->elms = user_elms;
        kfree(copy_elms, M_TEMP);
        return(error);
}

/*
 * Check pruning list.  The list must be sorted in descending order.
 *
 * Return non-zero if the record should be deleted.
 */
static int
prune_should_delete(struct hammer_ioc_prune *prune, hammer_btree_leaf_elm_t elm)
{
        struct hammer_ioc_prune_elm *scan;
        int i;

        /*
         * If pruning everything remove all records with a non-zero
         * delete_tid.
         */
        if (prune->head.flags & HAMMER_IOC_PRUNE_ALL) {
                if (elm->base.delete_tid != 0)
                        return(1);
                return(0);
        }

        for (i = 0; i < prune->nelms; ++i) {
                scan = &prune->elms[i];

                /*
                 * Check for loop termination.  The element array is sorted
                 * in descending order, so once this record's tids lie
                 * beyond the current element's end_tid no later (smaller)
                 * element can cover it either.
                 */
                if (elm->base.create_tid >= scan->end_tid ||
                    elm->base.delete_tid > scan->end_tid) {
                        break;
                }

                /*
                 * Determine if we can delete the record.  The record must
                 * have been both created and deleted within this element's
                 * [beg_tid, end_tid] range, and both tids must fall in the
                 * same mod_tid-aligned slot relative to beg_tid, i.e. no
                 * retained pruning boundary separates the record's creation
                 * from its deletion.
                 */
                if (elm->base.delete_tid &&
                    elm->base.create_tid >= scan->beg_tid &&
                    elm->base.delete_tid <= scan->end_tid &&
                    (elm->base.create_tid - scan->beg_tid) / scan->mod_tid ==
                    (elm->base.delete_tid - scan->beg_tid) / scan->mod_tid) {
                        return(1);
                }
        }
        return(0);
}
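
/*
 * Worked example of the modulo test above (illustrative numbers only):
 * with beg_tid 0 and mod_tid 1000, a record created at tid 1500 and
 * deleted at tid 1800 has both tids in slot 1 (1500/1000 == 1800/1000),
 * so no retained pruning boundary falls inside its lifetime and it can
 * be destroyed.  A record created at 1500 but deleted at 2300 spans
 * slots 1 and 2; a snapshot as-of tid 2000 would still need it, so it
 * is retained.
 */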

/*
 * Dangling inodes can occur if processes were holding open descriptors on
 * deleted files at the time the machine crashed.  When we find one, simply
 * acquire the inode and release it.  The inode handling code will then
 * do the right thing.
 */
static
void
prune_check_nlinks(hammer_cursor_t cursor, hammer_btree_leaf_elm_t elm)
{
        hammer_inode_t ip;
        int error;

        if (elm->base.rec_type != HAMMER_RECTYPE_INODE)
                return;
        if (elm->base.delete_tid != 0)
                return;
        if (hammer_btree_extract(cursor, HAMMER_CURSOR_GET_DATA))
                return;
        if (cursor->data->inode.nlinks)
                return;
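        /*
         * We have a live inode record (delete_tid == 0) whose link count
         * is zero: a file that was unlinked while still open when the
         * machine went down.  Instantiate the in-memory inode and release
         * it so the inode handling code can finish it off.
         */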
        hammer_cursor_downgrade(cursor);
        ip = hammer_get_inode(cursor->trans, NULL, elm->base.obj_id,
                      HAMMER_MAX_TID,
                      elm->base.localization & HAMMER_LOCALIZE_PSEUDOFS_MASK,
                      0, &error);
        if (ip) {
                kprintf("pruning disconnected inode %016llx\n",
                        elm->base.obj_id);
                hammer_rel_inode(ip, 0);
        } else {
                kprintf("unable to prune disconnected inode %016llx\n",
                        elm->base.obj_id);
        }
}

#if 0

/*
 * NOTE: THIS CODE HAS BEEN REMOVED!  Pruning no longer attempts to realign
 *       adjacent records because it seriously interferes with every
 *       mirroring algorithm I could come up with.
 *
 *       This means that historical accesses beyond the first snapshot
 *       softlink should be on snapshot boundaries only.  Historical
 *       accesses from "now" to the first snapshot softlink continue to
 *       be fine-grained.
 *
 * NOTE: It also looks like there's a bug in the removed code.  It is believed
 *       that create_tid can sometimes get set to 0xffffffffffffffff.  Just as
 *       well we no longer try to do this fancy shit.  Probably the attempt to
 *       correct the rhb is blowing up the cursor's indexing or addressing mapping.
 *
 * Align the record to cover any gaps created through the deletion of
 * records within the pruning space.  If we were to just delete the records
 * there would be gaps which in turn would cause a snapshot that is NOT on
 * a pruning boundary to appear corrupt to the user.  Forcing alignment
 * of the create_tid and delete_tid for retained records 'reconnects'
 * the previously contiguous space, making it contiguous again after the
 * deletions.
 *
 * The use of a reverse iteration allows us to safely align the records and
 * related elements without creating temporary overlaps.  XXX we should
 * add ordering dependencies for record buffers to guarantee consistency
 * during recovery.
 */
static int
realign_prune(struct hammer_ioc_prune *prune,
              hammer_cursor_t cursor, int realign_cre, int realign_del)
{
        struct hammer_ioc_prune_elm *scan;
        hammer_btree_elm_t elm;
        hammer_tid_t delta;
        hammer_tid_t tid;
        int error;

        hammer_cursor_downgrade(cursor);

        elm = &cursor->node->ondisk->elms[cursor->index];
        ++prune->stat_realignments;

        /*
         * Align the create_tid.  By doing a reverse iteration we guarantee
         * that all records after our current record have already been
         * aligned, allowing us to safely correct the right-hand-boundary
         * (because no otherwise exactly matching record to our right
         * will have a create_tid to the left of our aligned create_tid).
         */
        error = 0;
        if (realign_cre >= 0) {
                scan = &prune->elms[realign_cre];

                delta = (elm->leaf.base.create_tid - scan->beg_tid) %
                        scan->mod_tid;
                if (delta) {
                        tid = elm->leaf.base.create_tid - delta + scan->mod_tid;

                        /* can EDEADLK */
                        error = hammer_btree_correct_rhb(cursor, tid + 1);
                        if (error == 0) {
                                error = hammer_btree_extract(cursor,
                                                     HAMMER_CURSOR_GET_LEAF);
                        }
                        if (error == 0) {
                                /* can EDEADLK */
                                error = hammer_cursor_upgrade(cursor);
                        }
                        if (error == 0) {
                                hammer_modify_node(cursor->trans, cursor->node,
                                            &elm->leaf.base.create_tid,
                                            sizeof(elm->leaf.base.create_tid));
                                elm->leaf.base.create_tid = tid;
                                hammer_modify_node_done(cursor->node);
                        }
                }
        }

        /*
         * Align the delete_tid.  This only occurs if the record is
         * historical, i.e. was deleted at some point.  Realigning the
         * delete_tid does not move the record within the B-Tree but may
         * cause it to temporarily overlap a record that has not yet been
         * pruned.
         */
        if (error == 0 && realign_del >= 0) {
                scan = &prune->elms[realign_del];

                delta = (elm->leaf.base.delete_tid - scan->beg_tid) %
                        scan->mod_tid;
                if (delta) {
                        error = hammer_btree_extract(cursor,
                                                     HAMMER_CURSOR_GET_LEAF);
                        if (error == 0) {
                                hammer_modify_node(cursor->trans, cursor->node,
                                            &elm->leaf.base.delete_tid,
                                            sizeof(elm->leaf.base.delete_tid));
                                elm->leaf.base.delete_tid =
                                            elm->leaf.base.delete_tid -
                                            delta + scan->mod_tid;
                                hammer_modify_node_done(cursor->node);
                        }
                }
        }
        return (error);
}

#endif