/*
 * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_undo.c,v 1.19 2008/07/16 18:30:59 dillon Exp $
 */

/*
 * HAMMER undo - undo buffer/FIFO management.
 */

#include "hammer.h"

static int hammer_und_rb_compare(hammer_undo_t node1, hammer_undo_t node2);

RB_GENERATE2(hammer_und_rb_tree, hammer_undo, rb_node,
             hammer_und_rb_compare, hammer_off_t, offset);

/*
 * Convert a zone-3 undo offset into a zone-2 buffer offset.
 */
hammer_off_t
hammer_undo_lookup(hammer_mount_t hmp, hammer_off_t zone3_off, int *errorp)
{
        hammer_volume_t root_volume;
        hammer_blockmap_t undomap;
        hammer_off_t result_offset;
        int i;

        KKASSERT((zone3_off & HAMMER_OFF_ZONE_MASK) == HAMMER_ZONE_UNDO);
        root_volume = hammer_get_root_volume(hmp, errorp);
        if (*errorp)
                return(0);
        undomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
        KKASSERT(HAMMER_ZONE_DECODE(undomap->alloc_offset) == HAMMER_ZONE_UNDO_INDEX);
        KKASSERT(zone3_off < undomap->alloc_offset);

        i = (zone3_off & HAMMER_OFF_SHORT_MASK) / HAMMER_LARGEBLOCK_SIZE;
        result_offset = root_volume->ondisk->vol0_undo_array[i] +
                        (zone3_off & HAMMER_LARGEBLOCK_MASK64);

        hammer_rel_volume(root_volume, 0);
        return(result_offset);
}
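
/*
 * Worked example (illustrative; the 8MB HAMMER_LARGEBLOCK_SIZE, 0x800000,
 * is an assumption of this sketch): a zone-3 offset whose short-masked
 * value is 0x01800042 resolves as
 *
 *      i             = 0x01800042 / 0x800000 = 3
 *      result_offset = vol0_undo_array[3] + (0x01800042 & 0x7FFFFF)
 *                    = vol0_undo_array[3] + 0x42
 *
 * The large-block index selects the zone-2 base of the backing large-block
 * and the low bits supply the byte offset within it.
 */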

/*
 * Generate an UNDO record for the block of data at the specified zone-1
 * or zone-2 offset.
 *
 * The recovery code will execute UNDOs in reverse order, allowing overlaps.
 * All the UNDOs are executed together, so if we have already laid one down
 * we do not have to lay another down for the same range.
 */
int
hammer_generate_undo(hammer_transaction_t trans, hammer_io_t io,
                     hammer_off_t zone_off, void *base, int len)
{
        hammer_mount_t hmp;
        hammer_volume_t root_volume;
        hammer_blockmap_t undomap;
        hammer_buffer_t buffer = NULL;
        hammer_fifo_undo_t undo;
        hammer_fifo_tail_t tail;
        hammer_off_t next_offset;
        int error;
        int bytes;

        hmp = trans->hmp;

        /*
         * Enter the offset into our undo history.  If there is an existing
         * undo we do not have to generate a new one.
         */
        if (hammer_enter_undo_history(hmp, zone_off, len) == EALREADY)
                return(0);

        root_volume = trans->rootvol;
        undomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];

        /* no undo recursion */
        hammer_modify_volume(NULL, root_volume, NULL, 0);

        hammer_lock_ex(&hmp->undo_lock);
again:
        /*
         * Allocate space in the FIFO
         */
        bytes = ((len + HAMMER_HEAD_ALIGN_MASK) & ~HAMMER_HEAD_ALIGN_MASK) +
                sizeof(struct hammer_fifo_undo) +
                sizeof(struct hammer_fifo_tail);
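
        /*
         * Sketch of the record being sized above (comment added for
         * clarity; the PAD comment further down implies 8-byte head
         * alignment):
         *
         *      next_offset -> +-----------------------------+
         *                     | struct hammer_fifo_undo     |
         *                     +-----------------------------+
         *                     | len payload bytes, padded   |
         *                     | up to HAMMER_HEAD_ALIGN     |
         *                     +-----------------------------+
         *                     | struct hammer_fifo_tail     |
         *                     +-----------------------------+
         *                                  <- next_offset + bytes
         *
         * The tail lets the recovery code scan the FIFO backwards when
         * executing UNDOs in reverse order.
         */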
        if (hammer_undo_space(trans) < bytes + HAMMER_BUFSIZE*2)
                panic("hammer: insufficient undo FIFO space!");

        next_offset = undomap->next_offset;

        /*
         * Wrap next_offset
         */
        if (undomap->next_offset == undomap->alloc_offset) {
                next_offset = HAMMER_ZONE_ENCODE(HAMMER_ZONE_UNDO_INDEX, 0);
                undomap->next_offset = next_offset;
        }

        /*
         * This is a tail-chasing FIFO; when we hit the start of a new
         * buffer we do not have to read it in.
         */
        if ((next_offset & HAMMER_BUFMASK) == 0)
                undo = hammer_bnew(hmp, next_offset, &error, &buffer);
        else
                undo = hammer_bread(hmp, next_offset, &error, &buffer);
        hammer_modify_buffer(NULL, buffer, NULL, 0);

        KKASSERT(undomap->next_offset == next_offset);

        /*
         * If the FIFO entry would cross a buffer boundary, PAD to the end
         * of the buffer and try again.  Due to our data alignment the
         * worst-case (smallest) PAD record is 8 bytes.  PAD records only
         * populate the first 8 bytes of hammer_fifo_head, and the tail may
         * be at the same offset as the head.
         */
        if ((next_offset ^ (next_offset + bytes)) & ~HAMMER_BUFMASK64) {
                bytes = HAMMER_BUFSIZE - ((int)next_offset & HAMMER_BUFMASK);
                tail = (void *)((char *)undo + bytes - sizeof(*tail));
                if ((void *)undo != (void *)tail) {
                        tail->tail_signature = HAMMER_TAIL_SIGNATURE;
                        tail->tail_type = HAMMER_HEAD_TYPE_PAD;
                        tail->tail_size = bytes;
                }
                undo->head.hdr_signature = HAMMER_HEAD_SIGNATURE;
                undo->head.hdr_type = HAMMER_HEAD_TYPE_PAD;
                undo->head.hdr_size = bytes;
                /* NO CRC */
                undomap->next_offset += bytes;
                hammer_modify_buffer_done(buffer);
                goto again;
        }
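
        /*
         * Note (added for clarity): hammer_fifo_tail is 8 bytes and its
         * fields line up with the first 8 bytes of hammer_fifo_head
         * (signature, type, size), so the minimum PAD record is a single
         * 8-byte structure serving as both head and tail.  That is why
         * the tail is only written separately above when the PAD is
         * larger than 8 bytes.
         */
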
        if (hammer_debug_general & 0x0080)
                kprintf("undo %016llx %d %d\n", (long long)next_offset,
                        bytes, len);

        /*
         * We're good, create the entry.
         */
        undo->head.hdr_signature = HAMMER_HEAD_SIGNATURE;
        undo->head.hdr_type = HAMMER_HEAD_TYPE_UNDO;
        undo->head.hdr_size = bytes;
        undo->head.reserved01 = 0;
        undo->head.hdr_crc = 0;
        undo->undo_offset = zone_off;
        undo->undo_data_bytes = len;
        bcopy(base, undo + 1, len);

        tail = (void *)((char *)undo + bytes - sizeof(*tail));
        tail->tail_signature = HAMMER_TAIL_SIGNATURE;
        tail->tail_type = HAMMER_HEAD_TYPE_UNDO;
        tail->tail_size = bytes;

        KKASSERT(bytes >= sizeof(undo->head));
        undo->head.hdr_crc = crc32(undo, HAMMER_FIFO_HEAD_CRCOFF) ^
                             crc32(&undo->head + 1, bytes - sizeof(undo->head));
        undomap->next_offset += bytes;

        hammer_modify_buffer_done(buffer);
        hammer_modify_volume_done(root_volume);

        hammer_unlock(&hmp->undo_lock);

        if (buffer)
                hammer_rel_buffer(buffer, 0);
        return(error);
}
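
/*
 * Usage sketch (hypothetical caller, added for illustration): an UNDO
 * record must capture the pre-modification image, so the media data is
 * saved before it is overwritten, e.g.:
 *
 *      error = hammer_generate_undo(trans, &buffer->io, buf_offset,
 *                                   ondisk_ptr, size);
 *      if (error == 0)
 *              ...now safe to modify ondisk_ptr...
 *
 * In-tree this is normally driven by the hammer_modify_*() path, which
 * generates the undo before the caller modifies the buffer or volume.
 */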

/*
 * UNDO HISTORY API
 *
 * It is not necessary to lay out an undo record for the same address space
 * multiple times.  Maintain a cache of recent undos.
 */

/*
 * Enter an undo into the history.  Return EALREADY if a previous undo
 * completely covers the request.
 */
int
hammer_enter_undo_history(hammer_mount_t hmp, hammer_off_t offset, int bytes)
{
        hammer_undo_t node;
        hammer_undo_t onode;

        node = RB_LOOKUP(hammer_und_rb_tree, &hmp->rb_undo_root, offset);
        if (node) {
                TAILQ_REMOVE(&hmp->undo_lru_list, node, lru_entry);
                TAILQ_INSERT_TAIL(&hmp->undo_lru_list, node, lru_entry);
                if (bytes <= node->bytes)
                        return(EALREADY);
                node->bytes = bytes;
                return(0);
        }
        if (hmp->undo_alloc != HAMMER_MAX_UNDOS) {
                node = &hmp->undos[hmp->undo_alloc++];
        } else {
                node = TAILQ_FIRST(&hmp->undo_lru_list);
                TAILQ_REMOVE(&hmp->undo_lru_list, node, lru_entry);
                RB_REMOVE(hammer_und_rb_tree, &hmp->rb_undo_root, node);
        }
        node->offset = offset;
        node->bytes = bytes;
        TAILQ_INSERT_TAIL(&hmp->undo_lru_list, node, lru_entry);
        onode = RB_INSERT(hammer_und_rb_tree, &hmp->rb_undo_root, node);
        KKASSERT(onode == NULL);
        return(0);
}
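
/*
 * Design note (added for clarity): the history is a fixed pool of
 * HAMMER_MAX_UNDOS entries indexed two ways: an RB tree keyed on offset
 * for the lookup above, and a TAILQ kept in LRU order so the least
 * recently used entry can be recycled once the pool is exhausted.
 * Recycling an entry is harmless; it only means a redundant UNDO record
 * may later be generated for that range.
 */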

void
hammer_clear_undo_history(hammer_mount_t hmp)
{
        RB_INIT(&hmp->rb_undo_root);
        TAILQ_INIT(&hmp->undo_lru_list);
        hmp->undo_alloc = 0;
}

/*
 * Return how much of the undo FIFO has been used.
 *
 * The calculation includes undo FIFO space still reserved from a previous
 * flush (because it must still be applied during recovery if a crash
 * occurs, and so cannot be overwritten yet).
 */
int64_t
hammer_undo_used(hammer_transaction_t trans)
{
        hammer_blockmap_t cundomap;
        hammer_blockmap_t dundomap;
        int64_t max_bytes;
        int64_t bytes;

        cundomap = &trans->hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
        dundomap = &trans->rootvol->ondisk->
                                vol0_blockmap[HAMMER_ZONE_UNDO_INDEX];

        if (dundomap->first_offset <= cundomap->next_offset) {
                bytes = cundomap->next_offset - dundomap->first_offset;
        } else {
                bytes = cundomap->alloc_offset - dundomap->first_offset +
                        (cundomap->next_offset & HAMMER_OFF_LONG_MASK);
        }
        max_bytes = cundomap->alloc_offset & HAMMER_OFF_SHORT_MASK;
        KKASSERT(bytes <= max_bytes);
        return(bytes);
}
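
/*
 * Worked example (hypothetical values): with a 1GB undo zone, a
 * first_offset 768MB into the zone, and a next_offset that has wrapped
 * around to 128MB, the second branch above computes
 *
 *      bytes = (1024MB - 768MB) + 128MB = 384MB
 *
 * i.e. the in-use region runs from first_offset to the end of the zone
 * and then from the start of the zone up to next_offset.
 */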

/*
 * Return how much of the undo FIFO is available for new records.
 */
int64_t
hammer_undo_space(hammer_transaction_t trans)
{
        hammer_blockmap_t rootmap;
        int64_t max_bytes;

        rootmap = &trans->hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
        max_bytes = rootmap->alloc_offset & HAMMER_OFF_SHORT_MASK;
        return(max_bytes - hammer_undo_used(trans));
}
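
/*
 * Note (added for clarity): hammer_generate_undo() panics unless the
 * space reported here stays at least two buffers (HAMMER_BUFSIZE*2)
 * ahead of the record being laid down; the extra headroom presumably
 * keeps the FIFO head from running into undo data that recovery may
 * still need.
 */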

int64_t
hammer_undo_max(hammer_mount_t hmp)
{
        hammer_blockmap_t rootmap;
        int64_t max_bytes;

        rootmap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
        max_bytes = rootmap->alloc_offset & HAMMER_OFF_SHORT_MASK;

        return(max_bytes);
}

static int
hammer_und_rb_compare(hammer_undo_t node1, hammer_undo_t node2)
{
        if (node1->offset < node2->offset)
                return(-1);
        if (node1->offset > node2->offset)
                return(1);
        return(0);
}