/*
 * Copyright (c) 2010 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * HAMMER redo - REDO record support for the UNDO/REDO FIFO.
 *
 * See also hammer_undo.c
 */

#include "hammer.h"

RB_GENERATE2(hammer_redo_rb_tree, hammer_inode, rb_redonode,
	     hammer_redo_rb_compare, hammer_off_t, redo_fifo_start);
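
/*
 * The red-black tree generated above keys inodes by redo_fifo_start, so
 * RB_FIRST() on hmp->rb_redo_root returns the inode whose earliest REDO
 * record lies furthest back in the UNDO/REDO FIFO.
 * hammer_generate_redo_sync() relies on this ordering to compute the
 * aggregate sync offset.
 */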

/*
 * HAMMER version 4+ REDO support.
 *
 * REDO records are used to improve fsync() performance.  Instead of having
 * to go through a complete double-flush cycle involving at least two disk
 * synchronizations, the fsync need only flush UNDO/REDO FIFO buffers through
 * the related REDO records, which is a single synchronization requiring
 * no track seeking.  If a recovery becomes necessary, the recovery code
 * will generate logical data writes based on the REDO records encountered.
 * That is, the recovery code will UNDO any partial meta-data/data writes
 * at the raw disk block level and then REDO the data writes at the logical
 * level.
 */
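
/*
 * Illustrative sketch (not a call site in this file): a frontend write
 * path might record dirty file data ahead of an fsync with something
 * like
 *
 *	hammer_generate_redo(trans, ip, file_offset, HAMMER_REDO_WRITE,
 *			     data, data_len);
 *
 * where file_offset/data/data_len are placeholder names.  Passing
 * base == NULL generates a data-less record; hammer_generate_redo_sync()
 * below uses that form with HAMMER_REDO_SYNC.
 */
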
int
hammer_generate_redo(hammer_transaction_t trans, hammer_inode_t ip,
		     hammer_off_t file_off, uint32_t flags,
		     void *base, int len)
{
	hammer_mount_t hmp;
	hammer_volume_t root_volume;
	hammer_blockmap_t undomap;
	hammer_buffer_t buffer = NULL;
	hammer_fifo_redo_t redo;
	hammer_fifo_tail_t tail;
	hammer_off_t next_offset;
	int error;
	int bytes;
	int n;

	/*
	 * Setup
	 */
	hmp = trans->hmp;

	root_volume = trans->rootvol;
	undomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];

	/*
	 * No undo recursion when modifying the root volume.
	 */
	hammer_modify_volume_noundo(NULL, root_volume);
	hammer_lock_ex(&hmp->undo_lock);

	/* undo had better not roll over (loose test) */
	if (hammer_undo_space(trans) < len + HAMMER_BUFSIZE*3)
		hpanic("insufficient UNDO/REDO FIFO space for redo!");

	/*
	 * Loop until the undo for the entire range has been laid down.
	 * Loop at least once (len might be 0 as a degenerate case).
	 */
	for (;;) {
		/*
		 * Fetch the layout offset in the UNDO FIFO, wrapping it
		 * as necessary.
		 */
		if (undomap->next_offset == undomap->alloc_offset) {
			undomap->next_offset =
				HAMMER_ZONE_ENCODE(HAMMER_ZONE_UNDO_INDEX, 0);
		}
		next_offset = undomap->next_offset;

		/*
		 * This is a tail-chasing FIFO; when we hit the start of a
		 * new buffer we don't have to read it in.
		 */
		if ((next_offset & HAMMER_BUFMASK) == 0) {
			redo = hammer_bnew(hmp, next_offset, &error, &buffer);
			hammer_format_undo(redo, hmp->undo_seqno ^ 0x40000000);
		} else {
			redo = hammer_bread(hmp, next_offset, &error, &buffer);
		}
		if (error)
			break;
		hammer_modify_buffer_noundo(NULL, buffer);

		/*
		 * Calculate how big a media structure fits up to the next
		 * alignment point and how large a data payload we can
		 * accommodate.
		 *
		 * If n calculates to 0 or negative there is no room for
		 * anything but a PAD.
		 */
		bytes = HAMMER_UNDO_ALIGN -
			((int)next_offset & HAMMER_UNDO_MASK);
		n = bytes -
		    (int)sizeof(struct hammer_fifo_redo) -
		    (int)sizeof(struct hammer_fifo_tail);

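		/*
		 * Worked example (illustrative sizes): with 512-byte UNDO
		 * alignment and next_offset sitting on a boundary, bytes
		 * computes to 512; subtracting the redo head and tail
		 * structures (roughly 56 + 8 bytes) leaves n ~ 448 bytes
		 * of payload room before a PAD must be laid down instead.
		 */
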
		/*
		 * If available space is insufficient for any payload
		 * we have to lay down a PAD.
		 *
		 * The minimum PAD is 8 bytes and the head and tail will
		 * overlap each other in that case.  PADs do not have
		 * sequence numbers or CRCs.
		 *
		 * A PAD may not start on a boundary.  That is, every
		 * 512-byte block in the UNDO/REDO FIFO must begin with
		 * a record containing a sequence number.
		 */
		if (n <= 0) {
			KKASSERT(bytes >= sizeof(struct hammer_fifo_tail));
			KKASSERT(((int)next_offset & HAMMER_UNDO_MASK) != 0);
			tail = (void *)((char *)redo + bytes - sizeof(*tail));
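			/*
			 * In the minimum 8-byte PAD case the head and
			 * tail occupy the same storage; write only the
			 * head.
			 */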
			if ((void *)redo != (void *)tail) {
				tail->tail_signature = HAMMER_TAIL_SIGNATURE;
				tail->tail_type = HAMMER_HEAD_TYPE_PAD;
				tail->tail_size = bytes;
			}
			redo->head.hdr_signature = HAMMER_HEAD_SIGNATURE;
			redo->head.hdr_type = HAMMER_HEAD_TYPE_PAD;
			redo->head.hdr_size = bytes;
			/* NO CRC OR SEQ NO */
			undomap->next_offset += bytes;
			hammer_modify_buffer_done(buffer);
			hammer_stats_redo += bytes;
			continue;
		}

		/*
		 * When generating an inode-related REDO record we track
		 * the point in the UNDO/REDO FIFO containing the inode's
		 * earliest REDO record.  See hammer_generate_redo_sync().
		 *
		 * redo_fifo_next is cleared when an inode is staged to
		 * the backend and then used to determine how to reassign
		 * redo_fifo_start after the inode flush completes.
		 */
		if (ip) {
			redo->redo_objid = ip->obj_id;
			redo->redo_localization = ip->obj_localization;
			if ((ip->flags & HAMMER_INODE_RDIRTY) == 0) {
				ip->redo_fifo_start = next_offset;
				if (RB_INSERT(hammer_redo_rb_tree,
					      &hmp->rb_redo_root, ip)) {
					hpanic("cannot insert inode %p on "
					       "redo FIFO", ip);
				}
				ip->flags |= HAMMER_INODE_RDIRTY;
			}
			if (ip->redo_fifo_next == 0)
				ip->redo_fifo_next = next_offset;
		} else {
			redo->redo_objid = 0;
			redo->redo_localization = 0;
		}

		/*
		 * Calculate the actual payload and recalculate the size
		 * of the media structure as necessary.  If no data buffer
		 * is supplied there is no payload.
		 */
		if (base == NULL) {
			n = 0;
		} else if (n > len) {
			n = len;
		}
		bytes = ((n + HAMMER_HEAD_ALIGN_MASK) &
			 ~HAMMER_HEAD_ALIGN_MASK) +
			(int)sizeof(struct hammer_fifo_redo) +
			(int)sizeof(struct hammer_fifo_tail);
		if (hammer_debug_general & 0x0080) {
			hdkprintf("redo %016jx %d %d\n",
				(intmax_t)next_offset, bytes, n);
		}

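		/*
		 * The payload is rounded up to the head alignment (e.g.
		 * n = 100 would round to 128, assuming a 32-byte
		 * HAMMER_HEAD_ALIGN), then head and tail overhead is
		 * added to get the final media record size.
		 */
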
		redo->head.hdr_signature = HAMMER_HEAD_SIGNATURE;
		redo->head.hdr_type = HAMMER_HEAD_TYPE_REDO;
		redo->head.hdr_size = bytes;
		redo->head.hdr_seq = hmp->undo_seqno++;
		redo->head.hdr_crc = 0;
		redo->redo_mtime = trans->time;
		redo->redo_offset = file_off;
		redo->redo_flags = flags;

		/*
		 * Incremental payload.  If no payload we throw the entire
		 * len into redo_data_bytes and will not loop.
		 */
		if (base) {
			redo->redo_data_bytes = n;
			bcopy(base, redo + 1, n);
			len -= n;
			base = (char *)base + n;
			file_off += n;
		} else {
			redo->redo_data_bytes = len;
			file_off += len;
			len = 0;
		}

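		/*
		 * (In the data-less case len drops to 0 above, so the
		 * bottom-of-loop check exits after a single iteration.)
		 */
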
		tail = (void *)((char *)redo + bytes - sizeof(*tail));
		tail->tail_signature = HAMMER_TAIL_SIGNATURE;
		tail->tail_type = HAMMER_HEAD_TYPE_REDO;
		tail->tail_size = bytes;

		KKASSERT(bytes >= sizeof(redo->head));
		hammer_crc_set_fifo_head(&redo->head, bytes);
		undomap->next_offset += bytes;
		hammer_stats_redo += bytes;

		/*
		 * Before we finish off the buffer we have to deal with any
		 * junk between the end of the media structure we just laid
		 * down and the UNDO alignment boundary.  We do this by laying
		 * down a dummy PAD.  Even though we will probably overwrite
		 * it almost immediately we have to do this so recovery runs
		 * can iterate the UNDO space without having to depend on
		 * the indices in the volume header.
		 *
		 * This dummy PAD will be overwritten on the next undo so
		 * we do not adjust undomap->next_offset.
		 */
		bytes = HAMMER_UNDO_ALIGN -
			((int)undomap->next_offset & HAMMER_UNDO_MASK);
		if (bytes != HAMMER_UNDO_ALIGN) {
			KKASSERT(bytes >= sizeof(struct hammer_fifo_tail));
			redo = (void *)(tail + 1);
			tail = (void *)((char *)redo + bytes - sizeof(*tail));
			if ((void *)redo != (void *)tail) {
				tail->tail_signature = HAMMER_TAIL_SIGNATURE;
				tail->tail_type = HAMMER_HEAD_TYPE_PAD;
				tail->tail_size = bytes;
			}
			redo->head.hdr_signature = HAMMER_HEAD_SIGNATURE;
			redo->head.hdr_type = HAMMER_HEAD_TYPE_PAD;
			redo->head.hdr_size = bytes;
			/* NO CRC OR SEQ NO */
		}
		hammer_modify_buffer_done(buffer);
		if (len == 0)
			break;
	}
	hammer_modify_volume_done(root_volume);
	hammer_unlock(&hmp->undo_lock);

	if (buffer)
		hammer_rel_buffer(buffer, 0);

	/*
	 * Make sure the nominal undo span contains at least one REDO_SYNC,
	 * otherwise the REDO recovery will not be triggered.
	 */
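	/*
	 * (Testing flags != HAMMER_REDO_SYNC prevents infinite recursion,
	 * as hammer_generate_redo_sync() itself calls hammer_generate_redo()
	 * with that flag.)
	 */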
	if ((hmp->flags & HAMMER_MOUNT_REDO_SYNC) == 0 &&
	    flags != HAMMER_REDO_SYNC) {
		hammer_generate_redo_sync(trans);
	}

	return(error);
}

/*
 * Generate a REDO SYNC record.  At least one such record must be generated
 * in the nominal recovery span for the recovery code to be able to run
 * REDOs outside of the span.
 *
 * The SYNC record contains the aggregate earliest UNDO/REDO FIFO offset
 * for all inodes with active REDOs.  This changes dynamically as inodes
 * get flushed.
 *
 * During recovery stage2 any new flush cycles must specify the original
 * redo sync offset.  That way a crash will re-run the REDOs, at least
 * up to the point where the UNDO FIFO does not overwrite the area.
 */
void
hammer_generate_redo_sync(hammer_transaction_t trans)
{
	hammer_mount_t hmp = trans->hmp;
	hammer_inode_t ip;
	hammer_off_t redo_fifo_start;

	if (hmp->flags & HAMMER_MOUNT_REDO_RECOVERY_RUN) {
		ip = NULL;
		redo_fifo_start = hmp->recover_stage2_offset;
	} else {
		ip = RB_FIRST(hammer_redo_rb_tree, &hmp->rb_redo_root);
		if (ip)
			redo_fifo_start = ip->redo_fifo_start;
		else
			redo_fifo_start = 0;
	}
	if (redo_fifo_start) {
		if (hammer_debug_io & 0x0004) {
			hdkprintf("SYNC IP %p %016jx\n",
				ip, (intmax_t)redo_fifo_start);
		}
		hammer_generate_redo(trans, NULL, redo_fifo_start,
				     HAMMER_REDO_SYNC, NULL, 0);
		trans->hmp->flags |= HAMMER_MOUNT_REDO_SYNC;
	}
}

/*
 * This is called when an inode is queued to the backend.
 */
void
hammer_redo_fifo_start_flush(hammer_inode_t ip)
{
	ip->redo_fifo_next = 0;
}
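
/*
 * Flush-cycle note: hammer_redo_fifo_start_flush() clears redo_fifo_next
 * when the inode is queued; any REDO generated while the flush is running
 * re-arms it (see hammer_generate_redo()), and hammer_redo_fifo_end_flush()
 * below promotes redo_fifo_next to redo_fifo_start when the flush is done.
 */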

/*
 * This is called when an inode backend flush is finished.  We have to make
 * sure that RDIRTY is not set unless dirty bufs are present.  Dirty bufs
 * can get destroyed through operations such as truncations and leave
 * us with a stale redo_fifo_next.
 */
void
hammer_redo_fifo_end_flush(hammer_inode_t ip)
{
	hammer_mount_t hmp = ip->hmp;

	if (ip->flags & HAMMER_INODE_RDIRTY) {
		RB_REMOVE(hammer_redo_rb_tree, &hmp->rb_redo_root, ip);
		ip->flags &= ~HAMMER_INODE_RDIRTY;
	}
	if ((ip->flags & HAMMER_INODE_BUFS) == 0)
		ip->redo_fifo_next = 0;
	if (ip->redo_fifo_next) {
		ip->redo_fifo_start = ip->redo_fifo_next;
		if (RB_INSERT(hammer_redo_rb_tree, &hmp->rb_redo_root, ip)) {
			hpanic("cannot reinsert inode %p on redo FIFO", ip);
		}
		ip->flags |= HAMMER_INODE_RDIRTY;
	}
}