HAMMER 46/Many: Performance pass, media changes, bug fixes.
[dragonfly.git] / sys / vfs / hammer / hammer_prune.c
1 /*
2  * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
3  * 
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@backplane.com>
6  * 
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in
15  *    the documentation and/or other materials provided with the
16  *    distribution.
17  * 3. Neither the name of The DragonFly Project nor the names of its
18  *    contributors may be used to endorse or promote products derived
19  *    from this software without specific, prior written permission.
20  * 
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
25  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  * 
34  * $DragonFly: src/sys/vfs/hammer/hammer_prune.c,v 1.2 2008/05/18 01:48:50 dillon Exp $
35  */
36
37 #include "hammer.h"
38
39 /*
40  * Iterate through the specified range of object ids and remove any
41  * deleted records that fall entirely within a prune modulo.
42  *
43  * A reverse iteration is used to prevent overlapping records from being
44  * created during the iteration due to alignments.  This also allows us
45  * to adjust alignments without blowing up the B-Tree.
46  */
47 static int check_prune(struct hammer_ioc_prune *prune, hammer_btree_elm_t elm,
48                         int *realign_cre, int *realign_del);
49 static int realign_prune(struct hammer_ioc_prune *prune, hammer_cursor_t cursor,
50                         int realign_cre, int realign_del);
51
/*
 * Implement the HAMMERIOC_PRUNE ioctl: scan the B-Tree in the reverse
 * direction over the object-id range given in *prune and, for each leaf
 * element, either destroy the record outright (check_prune() == 0) or
 * realign its create/delete TIDs onto a prune boundary (realign_prune()).
 *
 * Progress is recorded in prune->cur_* as the scan proceeds so that an
 * EDEADLK retry (see the 'retry:' label) resumes from the last element
 * visited rather than restarting from end_obj_id.  Statistics accumulate
 * in the prune->stat_* fields.
 *
 * Returns 0 on success.  A signal interrupts the scan cleanly: the
 * HAMMER_IOC_HEAD_INTR flag is set and 0 is returned so userland can
 * observe the partial progress.  Other errnos are returned as-is.
 */
int
hammer_ioc_prune(hammer_transaction_t trans, hammer_inode_t ip,
                 struct hammer_ioc_prune *prune)
{
        struct hammer_cursor cursor;
        hammer_btree_elm_t elm;
        int error;
        int isdir;
        int realign_cre;
        int realign_del;

        /*
         * Validate the ioctl arguments.  PRUNE_ALL mode supplies no
         * element list, so it is mutually exclusive with nelms != 0.
         */
        if (prune->nelms < 0 || prune->nelms > HAMMER_MAX_PRUNE_ELMS)
                return(EINVAL);
        if (prune->beg_obj_id >= prune->end_obj_id)
                return(EINVAL);
        if ((prune->head.flags & HAMMER_IOC_PRUNE_ALL) && prune->nelms)
                return(EINVAL);

        /*
         * Seed the scan position at the top of the range; the reverse
         * iteration walks cur_* back down toward beg_obj_id.
         */
        prune->cur_localization = prune->end_localization;
        prune->cur_obj_id = prune->end_obj_id;
        prune->cur_key = HAMMER_MAX_KEY;

retry:
        error = hammer_init_cursor(trans, &cursor, NULL, NULL);
        if (error) {
                hammer_done_cursor(&cursor);
                return(error);
        }
        cursor.key_beg.localization = prune->beg_localization;
        cursor.key_beg.obj_id = prune->beg_obj_id;
        cursor.key_beg.key = HAMMER_MIN_KEY;
        cursor.key_beg.create_tid = 1;
        cursor.key_beg.delete_tid = 0;
        cursor.key_beg.rec_type = HAMMER_MIN_RECTYPE;
        cursor.key_beg.obj_type = 0;

        /*
         * key_end starts from cur_* (not end_*) so a retry resumes where
         * the previous pass left off.
         */
        cursor.key_end.localization = prune->cur_localization;
        cursor.key_end.obj_id = prune->cur_obj_id;
        cursor.key_end.key = prune->cur_key;
        cursor.key_end.create_tid = HAMMER_MAX_TID - 1;
        cursor.key_end.delete_tid = 0;
        cursor.key_end.rec_type = HAMMER_MAX_RECTYPE;
        cursor.key_end.obj_type = 0;

        cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE;
        cursor.flags |= HAMMER_CURSOR_BACKEND;

        /*
         * This flag allows the B-Tree code to clean up loose ends.
         */
        cursor.flags |= HAMMER_CURSOR_PRUNING;

        hammer_sync_lock_sh(trans);
        error = hammer_btree_last(&cursor);
        while (error == 0) {
                /*
                 * Yield to more important tasks (e.g. an exclusive waiter
                 * on the sync lock, such as the flusher).
                 */
                if (trans->hmp->sync_lock.wanted) {
                        hammer_sync_unlock(trans);
                        tsleep(trans, 0, "hmrslo", hz / 10);
                        hammer_sync_lock_sh(trans);
                }

                /*
                 * Check for work.  Record our position first so a retry
                 * after EDEADLK resumes at this element.
                 */
                elm = &cursor.node->ondisk->elms[cursor.index];
                prune->cur_localization = elm->base.localization;
                prune->cur_obj_id = elm->base.obj_id;
                prune->cur_key = elm->base.key;

                /* track the oldest create_tid seen during the scan */
                if (prune->stat_oldest_tid > elm->leaf.base.create_tid)
                        prune->stat_oldest_tid = elm->leaf.base.create_tid;

                if (check_prune(prune, elm, &realign_cre, &realign_del) == 0) {
                        if (hammer_debug_general & 0x0200) {
                                kprintf("check %016llx %016llx: DELETE\n",
                                        elm->base.obj_id, elm->base.key);
                        }

                        /*
                         * NOTE: This can return EDEADLK
                         *
                         * Acquiring the sync lock guarantees that the
                         * operation will not cross a synchronization
                         * boundary (see the flusher).
                         */
                        isdir = (elm->base.rec_type == HAMMER_RECTYPE_DIRENTRY);

                        error = hammer_delete_at_cursor(&cursor,
                                                        &prune->stat_bytes);
                        if (error)
                                break;

                        if (isdir)
                                ++prune->stat_dirrecords;
                        else
                                ++prune->stat_rawrecords;

                        /*
                         * The current record might now be the one after
                         * the one we deleted, set ATEDISK to force us
                         * to skip it (since we are iterating backwards).
                         */
                        cursor.flags |= HAMMER_CURSOR_ATEDISK;
                } else if (realign_cre >= 0 || realign_del >= 0) {
                        /* retained record: snap its TIDs to prune boundaries */
                        error = realign_prune(prune, &cursor,
                                              realign_cre, realign_del);
                        if (error == 0) {
                                cursor.flags |= HAMMER_CURSOR_ATEDISK;
                                if (hammer_debug_general & 0x0200) {
                                        kprintf("check %016llx %016llx: "
                                                "REALIGN\n",
                                                elm->base.obj_id,
                                                elm->base.key);
                                }
                        }
                } else {
                        cursor.flags |= HAMMER_CURSOR_ATEDISK;
                        if (hammer_debug_general & 0x0100) {
                                kprintf("check %016llx %016llx: SKIP\n",
                                        elm->base.obj_id, elm->base.key);
                        }
                }
                ++prune->stat_scanrecords;

                /*
                 * Bad hack for now, don't blow out the kernel's buffer
                 * cache.  NOTE: We still hold locks on the cursor, we
                 * cannot call the flusher synchronously.
                 */
                if (trans->hmp->locked_dirty_count +
                    trans->hmp->io_running_count > hammer_limit_dirtybufs) {
                        hammer_flusher_async(trans->hmp);
                        tsleep(trans, 0, "hmrslo", hz / 10);
                }
                error = hammer_signal_check(trans->hmp);
                if (error == 0)
                        error = hammer_btree_iterate_reverse(&cursor);
        }
        hammer_sync_unlock(trans);
        /* ENOENT from the iterator just means the scan is complete */
        if (error == ENOENT)
                error = 0;
        hammer_done_cursor(&cursor);
        /* deadlock avoidance: drop everything and restart from cur_* */
        if (error == EDEADLK)
                goto retry;
        if (error == EINTR) {
                prune->head.flags |= HAMMER_IOC_HEAD_INTR;
                error = 0;
        }
        return(error);
}
205
206 /*
207  * Check pruning list.  The list must be sorted in descending order.
208  */
209 static int
210 check_prune(struct hammer_ioc_prune *prune, hammer_btree_elm_t elm,
211             int *realign_cre, int *realign_del)
212 {
213         struct hammer_ioc_prune_elm *scan;
214         int i;
215
216         *realign_cre = -1;
217         *realign_del = -1;
218
219         /*
220          * If pruning everything remove all records with a non-zero
221          * delete_tid.
222          */
223         if (prune->head.flags & HAMMER_IOC_PRUNE_ALL) {
224                 if (elm->base.delete_tid != 0)
225                         return(0);
226                 return(-1);
227         }
228
229         for (i = 0; i < prune->nelms; ++i) {
230                 scan = &prune->elms[i];
231
232                 /*
233                  * Locate the scan index covering the create and delete TIDs.
234                  */
235                 if (*realign_cre < 0 &&
236                     elm->base.create_tid >= scan->beg_tid &&
237                     elm->base.create_tid < scan->end_tid) {
238                         *realign_cre = i;
239                 }
240                 if (*realign_del < 0 && elm->base.delete_tid &&
241                     elm->base.delete_tid > scan->beg_tid &&
242                     elm->base.delete_tid <= scan->end_tid) {
243                         *realign_del = i;
244                 }
245
246                 /*
247                  * Now check for loop termination.
248                  */
249                 if (elm->base.create_tid >= scan->end_tid ||
250                     elm->base.delete_tid > scan->end_tid) {
251                         break;
252                 }
253
254                 /*
255                  * Now determine if we can delete the record.
256                  */
257                 if (elm->base.delete_tid &&
258                     elm->base.create_tid >= scan->beg_tid &&
259                     elm->base.delete_tid <= scan->end_tid &&
260                     elm->base.create_tid / scan->mod_tid ==
261                     elm->base.delete_tid / scan->mod_tid) {
262                         return(0);
263                 }
264         }
265         return(-1);
266 }
267
/*
 * Align the record to cover any gaps created through the deletion of
 * records within the pruning space.  If we were to just delete the records
 * there would be gaps which in turn would cause a snapshot that is NOT on
 * a pruning boundary to appear corrupt to the user.  Forcing alignment
 * of the create_tid and delete_tid for retained records 'reconnects'
 * the previously contiguous space, making it contiguous again after the
 * deletions.
 *
 * The use of a reverse iteration allows us to safely align the records and
 * related elements without creating temporary overlaps.  XXX we should
 * add ordering dependencies for record buffers to guarantee consistency
 * during recovery.
 *
 * realign_cre / realign_del index prune->elms[] (-1 means "do not touch
 * that TID") and select the mod_tid used to round the create_tid /
 * delete_tid up to the next prune boundary.  Returns 0 on success or an
 * errno; EDEADLK is possible and is handled by the caller's retry loop.
 */
static int
realign_prune(struct hammer_ioc_prune *prune,
              hammer_cursor_t cursor, int realign_cre, int realign_del)
{
        hammer_btree_elm_t elm;
        hammer_tid_t delta;
        hammer_tid_t mod;
        hammer_tid_t tid;
        int error;

        /* only a shared lock is needed until we actually modify the node */
        hammer_cursor_downgrade(cursor);

        elm = &cursor->node->ondisk->elms[cursor->index];
        ++prune->stat_realignments;

        /*
         * Align the create_tid.  By doing a reverse iteration we guarantee
         * that all records after our current record have already been
         * aligned, allowing us to safely correct the right-hand-boundary
         * (because no record to our right if otherwise exactly matching
         * will have a create_tid to the left of our aligned create_tid).
         *
         * Ordering is important here XXX but disk write ordering for
         * inter-cluster corrections is not currently guaranteed.
         */
        error = 0;
        if (realign_cre >= 0) {
                mod = prune->elms[realign_cre].mod_tid;
                delta = elm->leaf.base.create_tid % mod;
                if (delta) {
                        /* round create_tid up to the next mod_tid boundary */
                        tid = elm->leaf.base.create_tid - delta + mod;

                        /* can EDEADLK */
                        error = hammer_btree_correct_rhb(cursor, tid + 1);
                        if (error == 0) {
                                error = hammer_btree_extract(cursor,
                                                     HAMMER_CURSOR_GET_LEAF);
                        }
                        if (error == 0) {
                                /* can EDEADLK */
                                error = hammer_cursor_upgrade(cursor);
                        }
                        if (error == 0) {
                                hammer_modify_node(cursor->trans, cursor->node,
                                            &elm->leaf.base.create_tid,
                                            sizeof(elm->leaf.base.create_tid));
                                elm->leaf.base.create_tid = tid;
                                hammer_modify_node_done(cursor->node);
                        }
                }
        }

        /*
         * Align the delete_tid.  This only occurs if the record is historical
         * was deleted at some point.  Realigning the delete_tid does not
         * move the record within the B-Tree but may cause it to temporarily
         * overlap a record that has not yet been pruned.
         */
        if (error == 0 && realign_del >= 0) {
                mod = prune->elms[realign_del].mod_tid;
                delta = elm->leaf.base.delete_tid % mod;
                if (delta) {
                        error = hammer_btree_extract(cursor,
                                                     HAMMER_CURSOR_GET_LEAF);
                        if (error == 0) {
                                /* round delete_tid up to the next boundary */
                                hammer_modify_node(cursor->trans, cursor->node,
                                            &elm->leaf.base.delete_tid,
                                            sizeof(elm->leaf.base.delete_tid));
                                elm->leaf.base.delete_tid =
                                            elm->leaf.base.delete_tid -
                                            delta + mod;
                                hammer_modify_node_done(cursor->node);
                        }
                }
        }
        return (error);
}
359