/*
 * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_prune.c,v 1.2 2008/05/18 01:48:50 dillon Exp $
 */
/*
 * Iterate through the specified range of object ids and remove any
 * deleted records that fall entirely within a prune modulo.
 *
 * A reverse iteration is used to prevent overlapping records from being
 * created during the iteration due to alignments.  This also allows us
 * to adjust alignments without blowing up the B-Tree.
 */
47 static int check_prune(struct hammer_ioc_prune *prune, hammer_btree_elm_t elm,
48 int *realign_cre, int *realign_del);
49 static int realign_prune(struct hammer_ioc_prune *prune, hammer_cursor_t cursor,
50 int realign_cre, int realign_del);
53 hammer_ioc_prune(hammer_transaction_t trans, hammer_inode_t ip,
54 struct hammer_ioc_prune *prune)
56 struct hammer_cursor cursor;
57 hammer_btree_elm_t elm;
63 if (prune->nelms < 0 || prune->nelms > HAMMER_MAX_PRUNE_ELMS)
65 if (prune->beg_obj_id >= prune->end_obj_id)
67 if ((prune->head.flags & HAMMER_IOC_PRUNE_ALL) && prune->nelms)
70 prune->cur_localization = prune->end_localization;
71 prune->cur_obj_id = prune->end_obj_id;
72 prune->cur_key = HAMMER_MAX_KEY;
75 error = hammer_init_cursor(trans, &cursor, NULL, NULL);
77 hammer_done_cursor(&cursor);
80 cursor.key_beg.localization = prune->beg_localization;
81 cursor.key_beg.obj_id = prune->beg_obj_id;
82 cursor.key_beg.key = HAMMER_MIN_KEY;
83 cursor.key_beg.create_tid = 1;
84 cursor.key_beg.delete_tid = 0;
85 cursor.key_beg.rec_type = HAMMER_MIN_RECTYPE;
86 cursor.key_beg.obj_type = 0;
88 cursor.key_end.localization = prune->cur_localization;
89 cursor.key_end.obj_id = prune->cur_obj_id;
90 cursor.key_end.key = prune->cur_key;
91 cursor.key_end.create_tid = HAMMER_MAX_TID - 1;
92 cursor.key_end.delete_tid = 0;
93 cursor.key_end.rec_type = HAMMER_MAX_RECTYPE;
94 cursor.key_end.obj_type = 0;
96 cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE;
97 cursor.flags |= HAMMER_CURSOR_BACKEND;
100 * This flag allows the B-Tree code to clean up loose ends.
102 cursor.flags |= HAMMER_CURSOR_PRUNING;
104 hammer_sync_lock_sh(trans);
105 error = hammer_btree_last(&cursor);
108 * Yield to more important tasks
110 if (trans->hmp->sync_lock.wanted) {
111 hammer_sync_unlock(trans);
112 tsleep(trans, 0, "hmrslo", hz / 10);
113 hammer_sync_lock_sh(trans);
119 elm = &cursor.node->ondisk->elms[cursor.index];
120 prune->cur_localization = elm->base.localization;
121 prune->cur_obj_id = elm->base.obj_id;
122 prune->cur_key = elm->base.key;
124 if (prune->stat_oldest_tid > elm->leaf.base.create_tid)
125 prune->stat_oldest_tid = elm->leaf.base.create_tid;
127 if (check_prune(prune, elm, &realign_cre, &realign_del) == 0) {
128 if (hammer_debug_general & 0x0200) {
129 kprintf("check %016llx %016llx: DELETE\n",
130 elm->base.obj_id, elm->base.key);
134 * NOTE: This can return EDEADLK
136 * Acquiring the sync lock guarantees that the
137 * operation will not cross a synchronization
138 * boundary (see the flusher).
140 isdir = (elm->base.rec_type == HAMMER_RECTYPE_DIRENTRY);
142 error = hammer_delete_at_cursor(&cursor,
148 ++prune->stat_dirrecords;
150 ++prune->stat_rawrecords;
153 * The current record might now be the one after
154 * the one we deleted, set ATEDISK to force us
155 * to skip it (since we are iterating backwards).
157 cursor.flags |= HAMMER_CURSOR_ATEDISK;
158 } else if (realign_cre >= 0 || realign_del >= 0) {
159 error = realign_prune(prune, &cursor,
160 realign_cre, realign_del);
162 cursor.flags |= HAMMER_CURSOR_ATEDISK;
163 if (hammer_debug_general & 0x0200) {
164 kprintf("check %016llx %016llx: "
171 cursor.flags |= HAMMER_CURSOR_ATEDISK;
172 if (hammer_debug_general & 0x0100) {
173 kprintf("check %016llx %016llx: SKIP\n",
174 elm->base.obj_id, elm->base.key);
177 ++prune->stat_scanrecords;
180 * Bad hack for now, don't blow out the kernel's buffer
181 * cache. NOTE: We still hold locks on the cursor, we
182 * cannot call the flusher synchronously.
184 if (trans->hmp->locked_dirty_count +
185 trans->hmp->io_running_count > hammer_limit_dirtybufs) {
186 hammer_flusher_async(trans->hmp);
187 tsleep(trans, 0, "hmrslo", hz / 10);
189 error = hammer_signal_check(trans->hmp);
191 error = hammer_btree_iterate_reverse(&cursor);
193 hammer_sync_unlock(trans);
196 hammer_done_cursor(&cursor);
197 if (error == EDEADLK)
199 if (error == EINTR) {
200 prune->head.flags |= HAMMER_IOC_HEAD_INTR;
/*
 * Check pruning list.  The list must be sorted in descending order.
 */
210 check_prune(struct hammer_ioc_prune *prune, hammer_btree_elm_t elm,
211 int *realign_cre, int *realign_del)
213 struct hammer_ioc_prune_elm *scan;
220 * If pruning everything remove all records with a non-zero
223 if (prune->head.flags & HAMMER_IOC_PRUNE_ALL) {
224 if (elm->base.delete_tid != 0)
229 for (i = 0; i < prune->nelms; ++i) {
230 scan = &prune->elms[i];
233 * Locate the scan index covering the create and delete TIDs.
235 if (*realign_cre < 0 &&
236 elm->base.create_tid >= scan->beg_tid &&
237 elm->base.create_tid < scan->end_tid) {
240 if (*realign_del < 0 && elm->base.delete_tid &&
241 elm->base.delete_tid > scan->beg_tid &&
242 elm->base.delete_tid <= scan->end_tid) {
247 * Now check for loop termination.
249 if (elm->base.create_tid >= scan->end_tid ||
250 elm->base.delete_tid > scan->end_tid) {
255 * Now determine if we can delete the record.
257 if (elm->base.delete_tid &&
258 elm->base.create_tid >= scan->beg_tid &&
259 elm->base.delete_tid <= scan->end_tid &&
260 elm->base.create_tid / scan->mod_tid ==
261 elm->base.delete_tid / scan->mod_tid) {
/*
 * Align the record to cover any gaps created through the deletion of
 * records within the pruning space.  If we were to just delete the records
 * there would be gaps which in turn would cause a snapshot that is NOT on
 * a pruning boundary to appear corrupt to the user.  Forcing alignment
 * of the create_tid and delete_tid for retained records 'reconnects'
 * the previously contiguous space, making it contiguous again after the
 * deletions.
 *
 * The use of a reverse iteration allows us to safely align the records and
 * related elements without creating temporary overlaps.  XXX we should
 * add ordering dependancies for record buffers to guarantee consistency
 * during recovery.
 */
283 realign_prune(struct hammer_ioc_prune *prune,
284 hammer_cursor_t cursor, int realign_cre, int realign_del)
286 hammer_btree_elm_t elm;
292 hammer_cursor_downgrade(cursor);
294 elm = &cursor->node->ondisk->elms[cursor->index];
295 ++prune->stat_realignments;
298 * Align the create_tid. By doing a reverse iteration we guarantee
299 * that all records after our current record have already been
300 * aligned, allowing us to safely correct the right-hand-boundary
301 * (because no record to our right if otherwise exactly matching
302 * will have a create_tid to the left of our aligned create_tid).
304 * Ordering is important here XXX but disk write ordering for
305 * inter-cluster corrections is not currently guaranteed.
308 if (realign_cre >= 0) {
309 mod = prune->elms[realign_cre].mod_tid;
310 delta = elm->leaf.base.create_tid % mod;
312 tid = elm->leaf.base.create_tid - delta + mod;
315 error = hammer_btree_correct_rhb(cursor, tid + 1);
317 error = hammer_btree_extract(cursor,
318 HAMMER_CURSOR_GET_LEAF);
322 error = hammer_cursor_upgrade(cursor);
325 hammer_modify_node(cursor->trans, cursor->node,
326 &elm->leaf.base.create_tid,
327 sizeof(elm->leaf.base.create_tid));
328 elm->leaf.base.create_tid = tid;
329 hammer_modify_node_done(cursor->node);
335 * Align the delete_tid. This only occurs if the record is historical
336 * was deleted at some point. Realigning the delete_tid does not
337 * move the record within the B-Tree but may cause it to temporarily
338 * overlap a record that has not yet been pruned.
340 if (error == 0 && realign_del >= 0) {
341 mod = prune->elms[realign_del].mod_tid;
342 delta = elm->leaf.base.delete_tid % mod;
344 error = hammer_btree_extract(cursor,
345 HAMMER_CURSOR_GET_LEAF);
347 hammer_modify_node(cursor->trans, cursor->node,
348 &elm->leaf.base.delete_tid,
349 sizeof(elm->leaf.base.delete_tid));
350 elm->leaf.base.delete_tid =
351 elm->leaf.base.delete_tid -
353 hammer_modify_node_done(cursor->node);