/*
 * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * HAMMER undo - undo buffer/FIFO management.
 */

#include "hammer.h"
static int hammer_und_rb_compare(hammer_undo_t node1, hammer_undo_t node2);

RB_GENERATE2(hammer_und_rb_tree, hammer_undo, rb_node,
             hammer_und_rb_compare, hammer_off_t, offset);
/*
 * Convert a zone-3 undo offset into a zone-2 buffer offset.
 */
hammer_off_t
hammer_undo_lookup(hammer_mount_t hmp, hammer_off_t zone3_off, int *errorp)
{
	hammer_volume_t root_volume;
	hammer_blockmap_t undomap __debugvar;
	hammer_off_t result_offset;
	int i;

	KKASSERT((zone3_off & HAMMER_OFF_ZONE_MASK) == HAMMER_ZONE_UNDO);
	root_volume = hammer_get_root_volume(hmp, errorp);
	if (*errorp)
		return(0);
	undomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
	KKASSERT(HAMMER_ZONE_DECODE(undomap->alloc_offset) == HAMMER_ZONE_UNDO_INDEX);
	KKASSERT(zone3_off < undomap->alloc_offset);

	/*
	 * Take the zone-2 base of the big-block from vol0_undo_array[i]
	 * and add the offset of the zone-3 address within its big-block,
	 * which yields the zone-2 address.
	 */
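	/*
	 * Worked example (illustrative only; assumes the usual 8MB
	 * big-blocks, i.e. HAMMER_BIGBLOCK_SIZE == 0x800000):
	 *
	 *	zone3_off = (zone 3) | 0x00800010
	 *	i         = 0x00800010 / 0x800000 = 1
	 *	result    = vol0_undo_array[1] + 0x10
	 *
	 * i.e. the address lands 0x10 bytes into the second mapped
	 * big-block.
	 */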
	i = (zone3_off & HAMMER_OFF_SHORT_MASK) / HAMMER_BIGBLOCK_SIZE;
	result_offset = root_volume->ondisk->vol0_undo_array[i] +
			(zone3_off & HAMMER_BIGBLOCK_MASK64);

	hammer_rel_volume(root_volume, 0);
	return(result_offset);
}
/*
 * Generate UNDO record(s) for the block of data at the specified zone1
 * or zone2 offset.
 *
 * The recovery code will execute UNDOs in reverse order, allowing overlaps.
 * All the UNDOs are executed together so if we already laid one down we
 * do not have to lay another one down for the same range.
 *
 * For HAMMER version 4+ UNDO a 512 byte boundary is enforced and a PAD
 * will be laid down for any unused space.  UNDO FIFO media structures
 * will implement the hdr_seq field (it used to be reserved01), and
 * both flush and recovery mechanics will be very different.
 *
 * WARNING!  See also hammer_generate_redo() in hammer_redo.c
 */
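/*
 * For orientation, a fully-formed UNDO record in the FIFO looks roughly
 * like this (field sizes are illustrative, not normative):
 *
 *	struct hammer_fifo_undo  head;     hdr_seq, hdr_crc, undo_offset,
 *	                                   undo_data_bytes
 *	uint8_t                  data[];   copy of the overwritten media
 *	                                   data, rounded up to the record
 *	                                   alignment
 *	struct hammer_fifo_tail  tail;     mirrors the type and total size
 *
 * Records never cross a HAMMER_UNDO_ALIGN (512 byte) boundary; PADs fill
 * whatever would otherwise be left over.
 */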
int
hammer_generate_undo(hammer_transaction_t trans,
		     hammer_off_t zone_off, void *base, int len)
{
	hammer_mount_t hmp;
	hammer_volume_t root_volume;
	hammer_blockmap_t undomap;
	hammer_buffer_t buffer = NULL;
	hammer_fifo_undo_t undo;
	hammer_fifo_tail_t tail;
	hammer_off_t next_offset;
	int error;
	int bytes;
	int n;

	hmp = trans->hmp;
	/*
	 * A SYNC record may be required before we can lay down a general
	 * UNDO.  This ensures that the nominal recovery span contains
	 * at least one SYNC record telling the recovery code how far
	 * out-of-span it must go to run the REDOs.
	 */
	if ((hmp->flags & HAMMER_MOUNT_REDO_SYNC) == 0 &&
	    hmp->version >= HAMMER_VOL_VERSION_FOUR) {
		hammer_generate_redo_sync(trans);
	}
	/*
	 * Enter the offset into our undo history.  If there is an existing
	 * undo we do not have to generate a new one.
	 */
	if (hammer_enter_undo_history(hmp, zone_off, len) == EALREADY)
		return(0);

	root_volume = trans->rootvol;
	undomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];

	/* no undo recursion */
	hammer_modify_volume_noundo(NULL, root_volume);
	hammer_lock_ex(&hmp->undo_lock);

	/* undo had better not roll over (loose test) */
	if (hammer_undo_space(trans) < len + HAMMER_BUFSIZE*3)
		panic("HAMMER: insufficient undo FIFO space!");
	/*
	 * Loop until the undo for the entire range has been laid down.
	 */
	while (len) {
		/*
		 * Fetch the layout offset in the UNDO FIFO, wrap it as
		 * necessary.
		 */
		if (undomap->next_offset == undomap->alloc_offset) {
			undomap->next_offset =
				HAMMER_ZONE_ENCODE(HAMMER_ZONE_UNDO_INDEX, 0);
		}
		next_offset = undomap->next_offset;

		/*
		 * This is a tail-chasing FIFO; when we hit the start of
		 * a new buffer we don't have to read it in.
		 */
		if ((next_offset & HAMMER_BUFMASK) == 0) {
			undo = hammer_bnew(hmp, next_offset, &error, &buffer);
			hammer_format_undo(undo, hmp->undo_seqno ^ 0x40000000);
		} else {
			undo = hammer_bread(hmp, next_offset, &error, &buffer);
		}
		if (error)
			break;
		/* no undo recursion */
		hammer_modify_buffer_noundo(NULL, buffer);

		/*
		 * Calculate how big a media structure fits up to the next
		 * alignment point and how large a data payload we can
		 * accommodate.
		 *
		 * If n calculates to 0 or negative there is no room for
		 * anything but a PAD.
		 */
		bytes = HAMMER_UNDO_ALIGN -
			((int)next_offset & HAMMER_UNDO_MASK);
		n = bytes -
		    (int)sizeof(struct hammer_fifo_undo) -
		    (int)sizeof(struct hammer_fifo_tail);
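		/*
		 * Example with assumed sizes (32-byte undo header,
		 * 8-byte tail): at next_offset ...1e0, bytes = 512 -
		 * 480 = 32 and n = 32 - 32 - 8 = -8, so only a PAD
		 * fits.  At a fresh boundary, bytes = 512 and up to
		 * n = 472 payload bytes are available.
		 */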
		/*
		 * If available space is insufficient for any payload
		 * we have to lay down a PAD.
		 *
		 * The minimum PAD is 8 bytes and the head and tail will
		 * overlap each other in that case.  PADs do not have
		 * sequence numbers or CRCs.
		 *
		 * A PAD may not start on a boundary.  That is, every
		 * 512-byte block in the UNDO/REDO FIFO must begin with
		 * a record containing a sequence number.
		 */
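		/*
		 * E.g. with only 8 bytes left before the boundary,
		 * head and tail coincide (sizeof(tail) == 8 is assumed
		 * here) and the separate tail store is skipped below;
		 * the head fields alone describe the PAD.
		 */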
		if (n <= 0) {
			KKASSERT(bytes >= sizeof(struct hammer_fifo_tail));
			KKASSERT(((int)next_offset & HAMMER_UNDO_MASK) != 0);
			tail = (void *)((char *)undo + bytes - sizeof(*tail));
			if ((void *)undo != (void *)tail) {
				tail->tail_signature = HAMMER_TAIL_SIGNATURE;
				tail->tail_type = HAMMER_HEAD_TYPE_PAD;
				tail->tail_size = bytes;
			}
			undo->head.hdr_signature = HAMMER_HEAD_SIGNATURE;
			undo->head.hdr_type = HAMMER_HEAD_TYPE_PAD;
			undo->head.hdr_size = bytes;
			/* NO CRC OR SEQ NO */
			undomap->next_offset += bytes;
			hammer_modify_buffer_done(buffer);
			hammer_stats_undo += bytes;
			continue;
		}
		/*
		 * Calculate the actual payload and recalculate the size
		 * of the media structure as necessary.
		 */
		if (n > len) {
			n = len;
			bytes = ((n + HAMMER_HEAD_ALIGN_MASK) &
				 ~HAMMER_HEAD_ALIGN_MASK) +
				(int)sizeof(struct hammer_fifo_undo) +
				(int)sizeof(struct hammer_fifo_tail);
		}
		if (hammer_debug_general & 0x0080) {
			kprintf("undo %016llx %d %d\n",
				(long long)next_offset, bytes, n);
		}
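		/*
		 * Size arithmetic, worked through with the same assumed
		 * sizes as above (32-byte HAMMER_HEAD_ALIGN, 32-byte
		 * header, 8-byte tail): a final payload of n = 100
		 * rounds up to 128 bytes, so bytes = 128 + 32 + 8 = 168
		 * for the complete record.
		 */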
		undo->head.hdr_signature = HAMMER_HEAD_SIGNATURE;
		undo->head.hdr_type = HAMMER_HEAD_TYPE_UNDO;
		undo->head.hdr_size = bytes;
		undo->head.hdr_seq = hmp->undo_seqno++;
		undo->head.hdr_crc = 0;
		undo->undo_offset = zone_off;
		undo->undo_data_bytes = n;
		bcopy(base, undo + 1, n);

		tail = (void *)((char *)undo + bytes - sizeof(*tail));
		tail->tail_signature = HAMMER_TAIL_SIGNATURE;
		tail->tail_type = HAMMER_HEAD_TYPE_UNDO;
		tail->tail_size = bytes;

		KKASSERT(bytes >= sizeof(undo->head));
		undo->head.hdr_crc = crc32(undo, HAMMER_FIFO_HEAD_CRCOFF) ^
			crc32(&undo->head + 1, bytes - sizeof(undo->head));
		undomap->next_offset += bytes;
		hammer_stats_undo += bytes;
		/*
		 * Before we finish off the buffer we have to deal with any
		 * junk between the end of the media structure we just laid
		 * down and the UNDO alignment boundary.  We do this by laying
		 * down a dummy PAD.  Even though we will probably overwrite
		 * it almost immediately we have to do this so recovery runs
		 * can iterate the UNDO space without having to depend on
		 * the indices in the volume header.
		 *
		 * This dummy PAD will be overwritten on the next undo so
		 * we do not adjust undomap->next_offset.
		 */
		bytes = HAMMER_UNDO_ALIGN -
			((int)undomap->next_offset & HAMMER_UNDO_MASK);
		if (bytes != HAMMER_UNDO_ALIGN) {
			KKASSERT(bytes >= sizeof(struct hammer_fifo_tail));
			undo = (void *)(tail + 1);
			tail = (void *)((char *)undo + bytes - sizeof(*tail));
			if ((void *)undo != (void *)tail) {
				tail->tail_signature = HAMMER_TAIL_SIGNATURE;
				tail->tail_type = HAMMER_HEAD_TYPE_PAD;
				tail->tail_size = bytes;
			}
			undo->head.hdr_signature = HAMMER_HEAD_SIGNATURE;
			undo->head.hdr_type = HAMMER_HEAD_TYPE_PAD;
			undo->head.hdr_size = bytes;
			/* NO CRC OR SEQ NO */
		}
		hammer_modify_buffer_done(buffer);
		/*
		 * Adjust for loop
		 */
		len -= n;
		base = (char *)base + n;
		zone_off += n;
	}
	hammer_modify_volume_done(root_volume);
	hammer_unlock(&hmp->undo_lock);

	if (buffer)
		hammer_rel_buffer(buffer, 0);
	return(error);
}
/*
 * Preformat a new UNDO block.  We could read the old one in but we get
 * better performance if we just pre-format a new one.
 *
 * The recovery code always works forwards so the caller just makes sure the
 * seqno is not contiguous with prior UNDOs or ancient UNDOs now being
 * overwritten.
 *
 * The preformatted UNDO headers use the smallest possible sector size
 * (512) to ensure that any missed media writes are caught.
 *
 * NOTE: Also used by the REDO code.
 */
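/*
 * Illustrative layout of a preformatted buffer, assuming the usual 16KB
 * HAMMER_BUFSIZE (16384 / 512 = 32 records):
 *
 *	[DUMMY seq+0][DUMMY seq+1] ... [DUMMY seq+31]
 *
 * Each DUMMY record carries its own CRC, so an incomplete media write of
 * the buffer is detectable on a per-record basis during recovery.
 */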
void
hammer_format_undo(void *base, u_int32_t seqno)
{
	hammer_fifo_head_t head;
	hammer_fifo_tail_t tail;
	int i;
	int bytes = HAMMER_UNDO_ALIGN;

	bzero(base, HAMMER_BUFSIZE);

	for (i = 0; i < HAMMER_BUFSIZE; i += bytes) {
		head = (void *)((char *)base + i);
		tail = (void *)((char *)head + bytes - sizeof(*tail));

		head->hdr_signature = HAMMER_HEAD_SIGNATURE;
		head->hdr_type = HAMMER_HEAD_TYPE_DUMMY;
		head->hdr_size = bytes;
		head->hdr_seq = seqno++;
		head->hdr_crc = 0;

		tail->tail_signature = HAMMER_TAIL_SIGNATURE;
		tail->tail_type = HAMMER_HEAD_TYPE_DUMMY;
		tail->tail_size = bytes;

		head->hdr_crc = crc32(head, HAMMER_FIFO_HEAD_CRCOFF) ^
			crc32(head + 1, bytes - sizeof(*head));
	}
}
/*
 * HAMMER version 4+ conversion support.
 *
 * Convert a HAMMER version < 4 UNDO FIFO area to a 4+ UNDO FIFO area.
 * The 4+ UNDO FIFO area is backwards compatible.  The conversion is
 * needed to initialize the sequence space and place headers on the
 * new 512-byte undo boundary.
 */
int
hammer_upgrade_undo_4(hammer_transaction_t trans)
{
	hammer_mount_t hmp;
	hammer_volume_t root_volume;
	hammer_blockmap_t undomap;
	hammer_buffer_t buffer = NULL;
	hammer_fifo_head_t head;
	hammer_fifo_tail_t tail;
	hammer_off_t next_offset;
	u_int32_t seqno;
	int error;
	int bytes;

	hmp = trans->hmp;

	root_volume = trans->rootvol;

	/* no undo recursion */
	hammer_lock_ex(&hmp->undo_lock);
	hammer_modify_volume_noundo(NULL, root_volume);

	/*
	 * Adjust the in-core undomap and the on-disk undomap.
	 */
	next_offset = HAMMER_ZONE_ENCODE(HAMMER_ZONE_UNDO_INDEX, 0);
	undomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
	undomap->next_offset = next_offset;
	undomap->first_offset = next_offset;

	undomap = &root_volume->ondisk->vol0_blockmap[HAMMER_ZONE_UNDO_INDEX];
	undomap->next_offset = next_offset;
	undomap->first_offset = next_offset;

	/*
	 * Loop over the entire UNDO space creating DUMMY entries.  Sequence
	 * numbers are assigned.
	 */
	seqno = 0;
	bytes = HAMMER_UNDO_ALIGN;
	while (next_offset != undomap->alloc_offset) {
		head = hammer_bnew(hmp, next_offset, &error, &buffer);
		if (error)
			break;
		hammer_modify_buffer_noundo(NULL, buffer);
		tail = (void *)((char *)head + bytes - sizeof(*tail));

		head->hdr_signature = HAMMER_HEAD_SIGNATURE;
		head->hdr_type = HAMMER_HEAD_TYPE_DUMMY;
		head->hdr_size = bytes;
		head->hdr_seq = seqno;
		head->hdr_crc = 0;

		tail->tail_signature = HAMMER_TAIL_SIGNATURE;
		tail->tail_type = HAMMER_HEAD_TYPE_DUMMY;
		tail->tail_size = bytes;

		head->hdr_crc = crc32(head, HAMMER_FIFO_HEAD_CRCOFF) ^
			crc32(head + 1, bytes - sizeof(*head));
		hammer_modify_buffer_done(buffer);

		hammer_stats_undo += bytes;
		next_offset += HAMMER_UNDO_ALIGN;
		++seqno;
	}
	/*
	 * The sequence number will be the next sequence number to lay down.
	 */
	hmp->undo_seqno = seqno;
	kprintf("version upgrade seqno start %08x\n", seqno);

	hammer_modify_volume_done(root_volume);
	hammer_unlock(&hmp->undo_lock);

	if (buffer)
		hammer_rel_buffer(buffer, 0);
	return(error);
}
/*
 * UNDO HISTORY API
 *
 * It is not necessary to layout an undo record for the same address space
 * multiple times.  Maintain a cache of recent undos.
 */

/*
 * Enter an undo into the history.  Return EALREADY if the request completely
 * covers a previous request.
 */
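/*
 * Typical caller pattern, as in hammer_generate_undo() above: an UNDO
 * whose range is entirely covered by a still-pending UNDO can simply be
 * skipped.
 *
 *	if (hammer_enter_undo_history(hmp, zone_off, len) == EALREADY)
 *		return(0);
 */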
int
hammer_enter_undo_history(hammer_mount_t hmp, hammer_off_t offset, int bytes)
{
	hammer_undo_t node;
	hammer_undo_t onode __debugvar;

	node = RB_LOOKUP(hammer_und_rb_tree, &hmp->rb_undo_root, offset);
	if (node) {
		TAILQ_REMOVE(&hmp->undo_lru_list, node, lru_entry);
		TAILQ_INSERT_TAIL(&hmp->undo_lru_list, node, lru_entry);
		if (bytes <= node->bytes)
			return(EALREADY);
		node->bytes = bytes;
		return(0);
	}
	if (hmp->undo_alloc != HAMMER_MAX_UNDOS) {
		node = &hmp->undos[hmp->undo_alloc++];
	} else {
		node = TAILQ_FIRST(&hmp->undo_lru_list);
		TAILQ_REMOVE(&hmp->undo_lru_list, node, lru_entry);
		RB_REMOVE(hammer_und_rb_tree, &hmp->rb_undo_root, node);
	}
	node->offset = offset;
	node->bytes = bytes;
	TAILQ_INSERT_TAIL(&hmp->undo_lru_list, node, lru_entry);
	onode = RB_INSERT(hammer_und_rb_tree, &hmp->rb_undo_root, node);
	KKASSERT(onode == NULL);
	return(0);
}
void
hammer_clear_undo_history(hammer_mount_t hmp)
{
	RB_INIT(&hmp->rb_undo_root);
	TAILQ_INIT(&hmp->undo_lru_list);
	hmp->undo_alloc = 0;
}
/*
 * Return how much of the undo FIFO has been used.
 *
 * The calculation includes undo FIFO space still reserved from a previous
 * flush (because it will still be run on recovery if a crash occurs and
 * we can't overwrite it yet).
 */
int64_t
hammer_undo_used(hammer_transaction_t trans)
{
	hammer_blockmap_t cundomap;
	hammer_blockmap_t dundomap;
	int64_t max_bytes __debugvar;
	int64_t bytes;

	cundomap = &trans->hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
	dundomap = &trans->rootvol->ondisk->
			vol0_blockmap[HAMMER_ZONE_UNDO_INDEX];
	if (dundomap->first_offset <= cundomap->next_offset) {
		bytes = cundomap->next_offset - dundomap->first_offset;
	} else {
		bytes = cundomap->alloc_offset - dundomap->first_offset +
			(cundomap->next_offset & HAMMER_OFF_LONG_MASK);
	}
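	/*
	 * Wrapped-FIFO case with made-up numbers, treating the offsets
	 * as plain byte indices within the UNDO area: first_offset at
	 * byte 900 of a 1000-byte area and next_offset wrapped around
	 * to byte 50 yields (1000 - 900) + 50 = 150 bytes still in use.
	 */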
	max_bytes = cundomap->alloc_offset & HAMMER_OFF_SHORT_MASK;
	KKASSERT(bytes <= max_bytes);
	return(bytes);
}
/*
 * Return how much of the undo FIFO is available for new records.
 */
int64_t
hammer_undo_space(hammer_transaction_t trans)
{
	hammer_blockmap_t rootmap;
	int64_t max_bytes;

	rootmap = &trans->hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
	max_bytes = rootmap->alloc_offset & HAMMER_OFF_SHORT_MASK;
	return(max_bytes - hammer_undo_used(trans));
}
int64_t
hammer_undo_max(hammer_mount_t hmp)
{
	hammer_blockmap_t rootmap;
	int64_t max_bytes;

	rootmap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
	max_bytes = rootmap->alloc_offset & HAMMER_OFF_SHORT_MASK;

	return(max_bytes);
}
/*
 * Returns 1 if the undo buffer should be reclaimed on release.  The
 * only undo buffer we do NOT want to reclaim is the one at the current
 * UNDO FIFO write position.
 */
int
hammer_undo_reclaim(hammer_io_t io)
{
	hammer_blockmap_t undomap;
	hammer_off_t next_offset;

	undomap = &io->hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
	next_offset = undomap->next_offset & ~HAMMER_BUFMASK64;
	if (((struct hammer_buffer *)io)->zoneX_offset == next_offset)
		return(0);
	return(1);
}
static int
hammer_und_rb_compare(hammer_undo_t node1, hammer_undo_t node2)
{
	if (node1->offset < node2->offset)
		return(-1);
	if (node1->offset > node2->offset)
		return(1);
	return(0);
}