HAMMER 41B/Many: Cleanup.
[dragonfly.git] / sys / vfs / hammer / hammer_undo.c
/*
 * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_undo.c,v 1.14 2008/05/06 00:21:08 dillon Exp $
 */

/*
 * HAMMER undo - undo buffer/FIFO management.
 */

#include "hammer.h"

static int hammer_und_rb_compare(hammer_undo_t node1, hammer_undo_t node2);

RB_GENERATE2(hammer_und_rb_tree, hammer_undo, rb_node,
	     hammer_und_rb_compare, hammer_off_t, offset);

/*
 * Convert a zone-3 undo offset into a zone-2 buffer offset.
 */
hammer_off_t
hammer_undo_lookup(hammer_mount_t hmp, hammer_off_t zone3_off, int *errorp)
{
	hammer_volume_t root_volume;
	hammer_blockmap_t undomap;
	struct hammer_blockmap_layer2 *layer2;
	hammer_off_t result_offset;
	int i;

	KKASSERT((zone3_off & HAMMER_OFF_ZONE_MASK) == HAMMER_ZONE_UNDO);
	root_volume = hammer_get_root_volume(hmp, errorp);
	if (*errorp)
		return(0);
	undomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
	KKASSERT(HAMMER_ZONE_DECODE(undomap->alloc_offset) == HAMMER_ZONE_UNDO_INDEX);
	KKASSERT(zone3_off < undomap->alloc_offset);

	i = (zone3_off & HAMMER_OFF_SHORT_MASK) / HAMMER_LARGEBLOCK_SIZE;
	layer2 = &root_volume->ondisk->vol0_undo_array[i];
	result_offset = layer2->u.phys_offset +
			(zone3_off & HAMMER_LARGEBLOCK_MASK64);

	hammer_rel_volume(root_volume, 0);
	return(result_offset);
}

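/*
 * Note on the translation above (descriptive, symbolic only): the undo
 * zone is backed by an array of large-blocks whose zone-2 addresses are
 * recorded in the root volume's vol0_undo_array[].  A zone-3 offset thus
 * decomposes into a large-block index selecting the array entry, plus the
 * byte offset within that large-block:
 *
 *	i      = (zone3_off & HAMMER_OFF_SHORT_MASK) / HAMMER_LARGEBLOCK_SIZE
 *	result = vol0_undo_array[i].u.phys_offset +
 *		 (zone3_off & HAMMER_LARGEBLOCK_MASK64)
 */
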
/*
 * Generate an UNDO record for the block of data at the specified zone1
 * or zone2 offset.
 *
 * The recovery code will execute UNDOs in reverse order, allowing overlaps.
 * All the UNDOs are executed together so if we already laid one down we
 * do not have to lay another one down for the same range.
 */
int
hammer_generate_undo(hammer_transaction_t trans, hammer_io_t io,
		     hammer_off_t zone_off, void *base, int len)
{
	hammer_volume_t root_volume;
	hammer_volume_ondisk_t ondisk;
	hammer_blockmap_t undomap;
	hammer_buffer_t buffer = NULL;
	hammer_fifo_undo_t undo;
	hammer_fifo_tail_t tail;
	hammer_off_t next_offset;
	int error;
	int bytes;

	/*
	 * Enter the offset into our undo history.  If there is an existing
	 * undo we do not have to generate a new one.
	 */
	if (hammer_enter_undo_history(trans->hmp, zone_off, len) == EALREADY)
		return(0);

	root_volume = trans->rootvol;
	ondisk = root_volume->ondisk;
	undomap = &trans->hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];

	/* no undo recursion */
	hammer_modify_volume(NULL, root_volume, NULL, 0);

again:
	/*
	 * Allocate space in the FIFO
	 */
	bytes = ((len + HAMMER_HEAD_ALIGN_MASK) & ~HAMMER_HEAD_ALIGN_MASK) +
		sizeof(struct hammer_fifo_undo) +
		sizeof(struct hammer_fifo_tail);
	if (hammer_undo_space(trans->hmp) < bytes + HAMMER_BUFSIZE*2)
		panic("hammer: insufficient undo FIFO space!");

	next_offset = undomap->next_offset;

	/*
	 * Wrap next_offset
	 */
	if (undomap->next_offset == undomap->alloc_offset) {
		next_offset = HAMMER_ZONE_ENCODE(HAMMER_ZONE_UNDO_INDEX, 0);
		undomap->next_offset = next_offset;
		hkprintf("undo zone's next_offset wrapped\n");
	}

	undo = hammer_bread(trans->hmp, next_offset, &error, &buffer);
	hammer_modify_buffer(NULL, buffer, NULL, 0);

	/*
	 * We raced another thread, try again.
	 */
	if (undomap->next_offset != next_offset) {
		hammer_modify_buffer_done(buffer);
		goto again;
	}

	/*
	 * The FIFO entry would cross a buffer boundary, PAD to the end
	 * of the buffer and try again.  Due to our data alignment, the
	 * worst case (smallest) PAD record is 8 bytes.  PAD records only
	 * populate the first 8 bytes of hammer_fifo_head and the tail may
	 * be at the same offset as the head.
	 */
	if ((next_offset ^ (next_offset + bytes)) & ~HAMMER_BUFMASK64) {
		bytes = HAMMER_BUFSIZE - ((int)next_offset & HAMMER_BUFMASK);
		tail = (void *)((char *)undo + bytes - sizeof(*tail));
		if ((void *)undo != (void *)tail) {
			tail->tail_signature = HAMMER_TAIL_SIGNATURE;
			tail->tail_type = HAMMER_HEAD_TYPE_PAD;
			tail->tail_size = bytes;
		}
		undo->head.hdr_signature = HAMMER_HEAD_SIGNATURE;
		undo->head.hdr_type = HAMMER_HEAD_TYPE_PAD;
		undo->head.hdr_size = bytes;
		/* NO CRC */
		undomap->next_offset += bytes;
		hammer_modify_buffer_done(buffer);
		goto again;
	}
	if (hammer_debug_general & 0x0080)
		kprintf("undo %016llx %d %d\n", next_offset, bytes, len);

	/*
	 * We're good, create the entry.
	 */
	undo->head.hdr_signature = HAMMER_HEAD_SIGNATURE;
	undo->head.hdr_type = HAMMER_HEAD_TYPE_UNDO;
	undo->head.hdr_size = bytes;
	undo->head.reserved01 = 0;
	undo->head.hdr_crc = 0;
	undo->undo_offset = zone_off;
	undo->undo_data_bytes = len;
	bcopy(base, undo + 1, len);

	tail = (void *)((char *)undo + bytes - sizeof(*tail));
	tail->tail_signature = HAMMER_TAIL_SIGNATURE;
	tail->tail_type = HAMMER_HEAD_TYPE_UNDO;
	tail->tail_size = bytes;

	KKASSERT(bytes >= sizeof(undo->head));
	undo->head.hdr_crc = crc32(undo, HAMMER_FIFO_HEAD_CRCOFF) ^
			     crc32(&undo->head + 1, bytes - sizeof(undo->head));
	undomap->next_offset += bytes;

	hammer_modify_buffer_done(buffer);
	hammer_modify_volume_done(root_volume);

	if (buffer)
		hammer_rel_buffer(buffer, 0);
	return(error);
}
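
/*
 * Descriptive sketch of the record laid down above: it occupies 'bytes'
 * bytes of the FIFO and never crosses a buffer boundary.
 *
 *	+---------------------------+  <- next_offset
 *	| struct hammer_fifo_undo   |  head: signature, type, hdr_size, crc,
 *	|                           |  undo_offset, undo_data_bytes = len
 *	+---------------------------+
 *	| len bytes copied from base|  payload, plus alignment padding up to
 *	| (+ alignment padding)     |  HAMMER_HEAD_ALIGN_MASK
 *	+---------------------------+
 *	| struct hammer_fifo_tail   |  signature, type, tail_size
 *	+---------------------------+
 *
 * hdr_size and tail_size both cover the whole record, so the FIFO can be
 * scanned backwards as well as forwards during recovery.
 */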

/*
 * UNDO HISTORY API
 *
 * It is not necessary to lay out an undo record for the same address space
 * multiple times.  Maintain a cache of recent undos.
 */

/*
 * Enter an undo into the history.  Return EALREADY if the request completely
 * covers a previous request.
 */
int
hammer_enter_undo_history(hammer_mount_t hmp, hammer_off_t offset, int bytes)
{
	hammer_undo_t node;
	hammer_undo_t onode;

	node = RB_LOOKUP(hammer_und_rb_tree, &hmp->rb_undo_root, offset);
	if (node) {
		TAILQ_REMOVE(&hmp->undo_lru_list, node, lru_entry);
		TAILQ_INSERT_TAIL(&hmp->undo_lru_list, node, lru_entry);
		if (bytes <= node->bytes)
			return(EALREADY);
		node->bytes = bytes;
		return(0);
	}
	if (hmp->undo_alloc != HAMMER_MAX_UNDOS) {
		node = &hmp->undos[hmp->undo_alloc++];
	} else {
		node = TAILQ_FIRST(&hmp->undo_lru_list);
		TAILQ_REMOVE(&hmp->undo_lru_list, node, lru_entry);
		RB_REMOVE(hammer_und_rb_tree, &hmp->rb_undo_root, node);
	}
	node->offset = offset;
	node->bytes = bytes;
	TAILQ_INSERT_TAIL(&hmp->undo_lru_list, node, lru_entry);
	onode = RB_INSERT(hammer_und_rb_tree, &hmp->rb_undo_root, node);
	KKASSERT(onode == NULL);
	return(0);
}
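
/*
 * Note: the history above is only an optimization.  Entries come from a
 * fixed pool of HAMMER_MAX_UNDOS structures recycled in LRU order, so a
 * lookup miss (or a recycled entry) merely causes a redundant UNDO record
 * to be generated; redundant undos are harmless since recovery allows
 * overlapping records.
 */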

void
hammer_clear_undo_history(hammer_mount_t hmp)
{
	RB_INIT(&hmp->rb_undo_root);
	TAILQ_INIT(&hmp->undo_lru_list);
	hmp->undo_alloc = 0;
}

/*
 * Misc helper routines.  Return the used, available, and total undo
 * FIFO space.
 */
int64_t
hammer_undo_used(hammer_mount_t hmp)
{
	hammer_blockmap_t rootmap;
	int64_t max_bytes;
	int64_t bytes;

	rootmap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];

	if (rootmap->first_offset <= rootmap->next_offset) {
		bytes = rootmap->next_offset - rootmap->first_offset;
	} else {
		bytes = rootmap->alloc_offset - rootmap->first_offset +
			(rootmap->next_offset & HAMMER_OFF_LONG_MASK);
	}
	max_bytes = rootmap->alloc_offset & HAMMER_OFF_SHORT_MASK;
	KKASSERT(bytes <= max_bytes);
	return(bytes);
}
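
/*
 * Note: the undo zone is used as a circular FIFO delimited by first_offset
 * (presumably the start of the still-live undo range) and next_offset (the
 * next free byte).  When next_offset has wrapped below first_offset the
 * in-use region is the tail of the zone plus the wrapped head, which is
 * what the else-branch above computes.
 */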

int64_t
hammer_undo_space(hammer_mount_t hmp)
{
	hammer_blockmap_t rootmap;
	int64_t max_bytes;

	rootmap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
	max_bytes = rootmap->alloc_offset & HAMMER_OFF_SHORT_MASK;
	return(max_bytes - hammer_undo_used(hmp));
}

int64_t
hammer_undo_max(hammer_mount_t hmp)
{
	hammer_blockmap_t rootmap;
	int64_t max_bytes;

	rootmap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
	max_bytes = rootmap->alloc_offset & HAMMER_OFF_SHORT_MASK;

	return(max_bytes);
}

static int
hammer_und_rb_compare(hammer_undo_t node1, hammer_undo_t node2)
{
	if (node1->offset < node2->offset)
		return(-1);
	if (node1->offset > node2->offset)
		return(1);
	return(0);
}