/*
 * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_undo.c,v 1.10 2008/05/03 20:21:20 dillon Exp $
 */

/*
 * HAMMER undo - undo buffer/FIFO management.
 */

#include "hammer.h"

static int hammer_und_rb_compare(hammer_undo_t node1, hammer_undo_t node2);

RB_GENERATE2(hammer_und_rb_tree, hammer_undo, rb_node,
             hammer_und_rb_compare, hammer_off_t, offset);
/*
 * Convert a zone-3 undo offset into a zone-2 buffer offset.
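 *
 * The undo zone is addressed through vol0_undo_array[] in the root
 * volume header: the short offset selects a large-block index, and the
 * offset within that large block is added to the large block's zone-2
 * physical offset.  As an illustrative example, assuming 8MB large
 * blocks (HAMMER_LARGEBLOCK_SIZE), zone-3 byte 0x800014 resolves to
 * vol0_undo_array[1].u.phys_offset + 0x14.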
 */
hammer_off_t
hammer_undo_lookup(hammer_mount_t hmp, hammer_off_t zone3_off, int *errorp)
{
        hammer_volume_t root_volume;
        hammer_blockmap_t undomap;
        struct hammer_blockmap_layer2 *layer2;
        hammer_off_t result_offset;
        int i;

        KKASSERT((zone3_off & HAMMER_OFF_ZONE_MASK) == HAMMER_ZONE_UNDO);
        root_volume = hammer_get_root_volume(hmp, errorp);
        if (*errorp)
                return(0);
        undomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
        KKASSERT(HAMMER_ZONE_DECODE(undomap->alloc_offset) == HAMMER_ZONE_UNDO_INDEX);
        KKASSERT(zone3_off < undomap->alloc_offset);

        i = (zone3_off & HAMMER_OFF_SHORT_MASK) / HAMMER_LARGEBLOCK_SIZE;
        layer2 = &root_volume->ondisk->vol0_undo_array[i];
        result_offset = layer2->u.phys_offset +
                        (zone3_off & HAMMER_LARGEBLOCK_MASK64);

        hammer_rel_volume(root_volume, 0);
        return(result_offset);
}

/*
 * Generate an UNDO record for the block of data at the specified zone1
 * or zone2 offset.
 *
 * The recovery code will execute UNDOs in reverse order, allowing overlaps.
 * All the UNDOs are executed together so if we already laid one down we
 * do not have to lay another one down for the same range.
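 *
 * Callers normally reach this through the hammer_modify_*() API, which
 * records the pre-modification image before the caller changes the media.
 * On the media each record is a hammer_fifo_undo head, the saved data
 * image, alignment padding, and a hammer_fifo_tail; head.hdr_size spans
 * the whole record (layout sketch, not to scale):
 *
 *      head | data (len bytes) | pad | tail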
 */
int
hammer_generate_undo(hammer_transaction_t trans, hammer_io_t io,
                     hammer_off_t zone_off, void *base, int len)
{
        hammer_volume_t root_volume;
        hammer_volume_ondisk_t ondisk;
        hammer_blockmap_t undomap;
        hammer_buffer_t buffer = NULL;
        struct hammer_blockmap_layer2 *layer2;
        hammer_fifo_undo_t undo;
        hammer_fifo_tail_t tail;
        hammer_off_t next_offset;
        hammer_off_t result_offset;
        int i;
        int error;
        int bytes;

        /*
         * Enter the offset into our undo history.  If there is an existing
         * undo we do not have to generate a new one.
         */
        if (hammer_enter_undo_history(trans->hmp, zone_off, len) == EALREADY)
                return(0);

        root_volume = trans->rootvol;
        ondisk = root_volume->ondisk;
        undomap = &trans->hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];

        /*
         * No undo recursion: pass a NULL transaction so this modify does
         * not itself generate an undo record.
         */
        hammer_modify_volume(NULL, root_volume, NULL, 0);

again:
        /*
         * Allocate space in the FIFO
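         * (the payload is aligned up and bracketed by a fifo head and
         * tail; the space check below also insists on a two-buffer safety
         * margin so the FIFO can never completely fill).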
         */
        bytes = ((len + HAMMER_HEAD_ALIGN_MASK) & ~HAMMER_HEAD_ALIGN_MASK) +
                sizeof(struct hammer_fifo_undo) +
                sizeof(struct hammer_fifo_tail);
        if (hammer_undo_space(trans->hmp) < bytes + HAMMER_BUFSIZE*2)
                panic("hammer: insufficient undo FIFO space!");

        next_offset = undomap->next_offset;

        /*
         * Wrap next_offset back to the start of the undo zone when it
         * reaches the end of the allocated undo space.
         */
        if (undomap->next_offset == undomap->alloc_offset) {
                next_offset = HAMMER_ZONE_ENCODE(HAMMER_ZONE_UNDO_INDEX, 0);
                undomap->next_offset = next_offset;
                kprintf("undo zone's next_offset wrapped\n");
        }

        i = (next_offset & HAMMER_OFF_SHORT_MASK) / HAMMER_LARGEBLOCK_SIZE;
        layer2 = &root_volume->ondisk->vol0_undo_array[i];
        result_offset = layer2->u.phys_offset +
                        (next_offset & HAMMER_LARGEBLOCK_MASK64);

        undo = hammer_bread(trans->hmp, result_offset, &error, &buffer);

        /*
         * If another thread raced us and already advanced next_offset,
         * start over.
         */
        if (undomap->next_offset != next_offset)
                goto again;

        hammer_modify_buffer(NULL, buffer, NULL, 0);

        /*
         * If the FIFO entry would cross a buffer boundary, PAD out to the
         * end of the buffer and try again.  Due to our data alignment the
         * worst case (smallest) PAD record is 8 bytes.  PAD records only
         * populate the first 8 bytes of hammer_fifo_head and the tail may
         * be at the same offset as the head.
         */
        if ((result_offset ^ (result_offset + bytes)) & ~HAMMER_BUFMASK64) {
                bytes = HAMMER_BUFSIZE - ((int)next_offset & HAMMER_BUFMASK);
                tail = (void *)((char *)undo + bytes - sizeof(*tail));
                if ((void *)undo != (void *)tail) {
                        tail->tail_signature = HAMMER_TAIL_SIGNATURE;
                        tail->tail_type = HAMMER_HEAD_TYPE_PAD;
                        tail->tail_size = bytes;
                }
                undo->head.hdr_signature = HAMMER_HEAD_SIGNATURE;
                undo->head.hdr_type = HAMMER_HEAD_TYPE_PAD;
                undo->head.hdr_size = bytes;
                undomap->next_offset += bytes;
                hammer_modify_buffer_done(buffer);
                goto again;
        }
        if (hammer_debug_general & 0x0080)
                kprintf("undo %016llx %d %d\n",
                        (long long)result_offset, bytes, len);

        /*
         * We're good, create the entry.
         */
        undo->head.hdr_signature = HAMMER_HEAD_SIGNATURE;
        undo->head.hdr_type = HAMMER_HEAD_TYPE_UNDO;
        undo->head.hdr_size = bytes;
        undo->head.reserved01 = 0;
        undo->head.hdr_crc = 0;
        undo->undo_offset = zone_off;
        undo->undo_data_bytes = len;
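        /* the pre-modification data image follows the undo header directly */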
        bcopy(base, undo + 1, len);

        tail = (void *)((char *)undo + bytes - sizeof(*tail));
        tail->tail_signature = HAMMER_TAIL_SIGNATURE;
        tail->tail_type = HAMMER_HEAD_TYPE_UNDO;
        tail->tail_size = bytes;

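        /*
         * CRC the entire record.  hdr_crc was zeroed above, so the stored
         * CRC deterministically covers the head, data image, and tail.
         */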
        undo->head.hdr_crc = crc32(undo, bytes);
        hammer_modify_buffer_done(buffer);

        /*
         * Update the undo offset space in the IO XXX
         */

        undomap->next_offset += bytes;
        hammer_modify_volume_done(root_volume);

        if (buffer)
                hammer_rel_buffer(buffer, 0);
        return(error);
}

/*
 * UNDO HISTORY API
 *
 * It is not necessary to lay out an undo record for the same address space
 * multiple times, so maintain a cache of recent undos.
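 *
 * For example, once an undo for (offset, 64) is cached, a request for
 * (offset, 32) is reported as already covered (EALREADY), while a request
 * for (offset, 128) grows the cached range and must still be laid down.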
 */

/*
 * Enter an undo into the history.  Return EALREADY if the request is
 * completely covered by a previous request.
 */
int
hammer_enter_undo_history(hammer_mount_t hmp, hammer_off_t offset, int bytes)
{
        hammer_undo_t node;
        hammer_undo_t onode;

        node = RB_LOOKUP(hammer_und_rb_tree, &hmp->rb_undo_root, offset);
        if (node) {
                TAILQ_REMOVE(&hmp->undo_lru_list, node, lru_entry);
                TAILQ_INSERT_TAIL(&hmp->undo_lru_list, node, lru_entry);
                if (bytes <= node->bytes)
                        return(EALREADY);
                node->bytes = bytes;
                return(0);
        }
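        /*
         * Allocate a tracking structure from the fixed pool, recycling
         * the least recently used entry once the pool is exhausted.
         */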
        if (hmp->undo_alloc != HAMMER_MAX_UNDOS) {
                node = &hmp->undos[hmp->undo_alloc++];
        } else {
                node = TAILQ_FIRST(&hmp->undo_lru_list);
                TAILQ_REMOVE(&hmp->undo_lru_list, node, lru_entry);
                RB_REMOVE(hammer_und_rb_tree, &hmp->rb_undo_root, node);
        }
        node->offset = offset;
        node->bytes = bytes;
        TAILQ_INSERT_TAIL(&hmp->undo_lru_list, node, lru_entry);
        onode = RB_INSERT(hammer_und_rb_tree, &hmp->rb_undo_root, node);
        KKASSERT(onode == NULL);
        return(0);
}

void
hammer_clear_undo_history(hammer_mount_t hmp)
{
        RB_INIT(&hmp->rb_undo_root);
        TAILQ_INIT(&hmp->undo_lru_list);
        hmp->undo_alloc = 0;
}

/*
 * Misc helper routines.  hammer_undo_space() returns the space still
 * available, hammer_undo_max() the total undo space.
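 *
 * The undo FIFO is circular: when next_offset has wrapped below
 * first_offset, the used space is the tail segment from first_offset
 * to alloc_offset plus the head segment from the start of the zone
 * to next_offset.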
 */
int64_t
hammer_undo_space(hammer_mount_t hmp)
{
        hammer_blockmap_t rootmap;
        int64_t bytes;
        int64_t max_bytes;

        rootmap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];

        if (rootmap->first_offset <= rootmap->next_offset) {
                bytes = rootmap->next_offset - rootmap->first_offset;
        } else {
                bytes = rootmap->alloc_offset - rootmap->first_offset +
                        (rootmap->next_offset & HAMMER_OFF_SHORT_MASK);
        }
        max_bytes = rootmap->alloc_offset & HAMMER_OFF_SHORT_MASK;
        return(max_bytes - bytes);
}


int64_t
hammer_undo_max(hammer_mount_t hmp)
{
        hammer_blockmap_t rootmap;
        int64_t max_bytes;

        rootmap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
        max_bytes = rootmap->alloc_offset & HAMMER_OFF_SHORT_MASK;

        return(max_bytes);
}

static int
hammer_und_rb_compare(hammer_undo_t node1, hammer_undo_t node2)
{
        if (node1->offset < node2->offset)
                return(-1);
        if (node1->offset > node2->offset)
                return(1);
        return(0);
}
