sys/vfs/hammer: Fix terminology for undo fifo
sys/vfs/hammer/hammer_undo.c
/*
 * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * HAMMER undo - undo buffer/FIFO management.
 */

#include "hammer.h"

static int
hammer_und_rb_compare(hammer_undo_t node1, hammer_undo_t node2)
{
	if (node1->offset < node2->offset)
		return(-1);
	if (node1->offset > node2->offset)
		return(1);
	return(0);
}

RB_GENERATE2(hammer_und_rb_tree, hammer_undo, rb_node,
	     hammer_und_rb_compare, hammer_off_t, offset);
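
/*
 * The red-black tree generated here, together with the LRU list in the
 * hammer_mount, implements the undo history cache: recently undone
 * ranges are indexed by zone offset so a duplicate undo for the same
 * range can be skipped (see hammer_enter_undo_history()).
 */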

/*
 * Convert a zone-3 undo offset into a zone-2 buffer offset.
 */
hammer_off_t
hammer_undo_lookup(hammer_mount_t hmp, hammer_off_t zone3_off, int *errorp)
{
	hammer_volume_t root_volume;
	hammer_blockmap_t undomap __debugvar;
	hammer_off_t result_offset;
	int i;

	KKASSERT(hammer_is_zone_undo(zone3_off));
	root_volume = hammer_get_root_volume(hmp, errorp);
	if (*errorp)
		return(0);
	undomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
	KKASSERT(HAMMER_ZONE_DECODE(undomap->alloc_offset) == HAMMER_ZONE_UNDO_INDEX);
	KKASSERT(zone3_off < undomap->alloc_offset);

	/*
	 * The undo zone is mapped big-block by big-block:
	 * vol0_undo_array[i] holds the zone-2 base of undo big-block i,
	 * and adding the offset of the zone-3 address within its
	 * big-block yields the zone-2 address.
	 */
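	/*
	 * Illustrative example (hypothetical numbers): with 8MB
	 * big-blocks, a zone-3 offset with short encoding 0x01900000
	 * selects i = 3, and the result is
	 * vol0_undo_array[3] + 0x00100000.
	 */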
	i = HAMMER_OFF_SHORT_ENCODE(zone3_off) / HAMMER_BIGBLOCK_SIZE;
	result_offset = root_volume->ondisk->vol0_undo_array[i] +
			(zone3_off & HAMMER_BIGBLOCK_MASK64);

	hammer_rel_volume(root_volume, 0);
	return(result_offset);
}

/*
 * Generate UNDO record(s) for the block of data at the specified zone-1
 * or zone-2 offset.
 *
 * The recovery code will execute UNDOs in reverse order, allowing overlaps.
 * All the UNDOs are executed together so if we already laid one down we
 * do not have to lay another one down for the same range.
 *
 * For HAMMER version 4+, UNDO records are aligned to a 512-byte boundary
 * and a PAD will be laid down for any unused space.  UNDO FIFO media
 * structures will implement the hdr_seq field (it used to be reserved01),
 * and both flush and recovery mechanics will be very different.
 *
 * WARNING!  See also hammer_generate_redo() in hammer_redo.c
 */
int
hammer_generate_undo(hammer_transaction_t trans,
		     hammer_off_t zone_off, void *base, int len)
{
	hammer_mount_t hmp;
	hammer_volume_t root_volume;
	hammer_blockmap_t undomap;
	hammer_buffer_t buffer = NULL;
	hammer_fifo_undo_t undo;
	hammer_fifo_tail_t tail;
	hammer_off_t next_offset;
	int error;
	int bytes;
	int n;

	hmp = trans->hmp;

	/*
	 * A SYNC record may be required before we can lay down a general
	 * UNDO.  This ensures that the nominal recovery span contains
	 * at least one SYNC record telling the recovery code how far
	 * out-of-span it must go to run the REDOs.
	 */
	if ((hmp->flags & HAMMER_MOUNT_REDO_SYNC) == 0 &&
	    hmp->version >= HAMMER_VOL_VERSION_FOUR) {
		hammer_generate_redo_sync(trans);
	}

	/*
	 * Enter the offset into our undo history.  If there is an existing
	 * undo we do not have to generate a new one.
	 */
	if (hammer_enter_undo_history(hmp, zone_off, len) == EALREADY)
		return(0);

	root_volume = trans->rootvol;
	undomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];

	/* no undo recursion */
	hammer_modify_volume_noundo(NULL, root_volume);
	hammer_lock_ex(&hmp->undo_lock);

	/* undo had better not roll over (loose test) */
	if (hammer_undo_space(trans) < len + HAMMER_BUFSIZE*3)
		hpanic("insufficient UNDO/REDO FIFO space for undo!");

	/*
	 * Loop until the undo for the entire range has been laid down.
	 */
	while (len) {
		/*
		 * Fetch the layout offset in the UNDO FIFO and wrap it
		 * as necessary.
		 */
		if (undomap->next_offset == undomap->alloc_offset) {
			undomap->next_offset =
				HAMMER_ZONE_ENCODE(HAMMER_ZONE_UNDO_INDEX, 0);
		}
		next_offset = undomap->next_offset;

		/*
		 * This is a tail-chasing FIFO; when we hit the start of
		 * a new buffer we don't have to read it in.
		 */
		if ((next_offset & HAMMER_BUFMASK) == 0) {
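			/*
			 * The seqno handed to hammer_format_undo() is XORed
			 * with 0x40000000 so the preformatted DUMMY headers
			 * land well away from the live sequence space and
			 * are not mistaken for current records during a
			 * recovery scan.
			 */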
			undo = hammer_bnew(hmp, next_offset, &error, &buffer);
			hammer_format_undo(undo, hmp->undo_seqno ^ 0x40000000);
		} else {
			undo = hammer_bread(hmp, next_offset, &error, &buffer);
		}
		if (error)
			break;
		/* no undo recursion */
		hammer_modify_buffer_noundo(NULL, buffer);

		/*
		 * Calculate how big a media structure fits up to the next
		 * alignment point and how large a data payload we can
		 * accommodate.
		 *
		 * If n comes out zero or negative there is no room for
		 * anything but a PAD.
		 */
		bytes = HAMMER_UNDO_ALIGN -
			((int)next_offset & HAMMER_UNDO_MASK);
		n = bytes -
		    (int)sizeof(struct hammer_fifo_undo) -
		    (int)sizeof(struct hammer_fifo_tail);

		/*
		 * If available space is insufficient for any payload
		 * we have to lay down a PAD.
		 *
		 * The minimum PAD is 8 bytes and the head and tail will
		 * overlap each other in that case.  PADs do not have
		 * sequence numbers or CRCs.
		 *
		 * A PAD may not start on a boundary.  That is, every
		 * 512-byte block in the UNDO/REDO FIFO must begin with
		 * a record containing a sequence number.
		 */
		if (n <= 0) {
			KKASSERT(bytes >= sizeof(struct hammer_fifo_tail));
			KKASSERT(((int)next_offset & HAMMER_UNDO_MASK) != 0);
			tail = (void *)((char *)undo + bytes - sizeof(*tail));
			if ((void *)undo != (void *)tail) {
				tail->tail_signature = HAMMER_TAIL_SIGNATURE;
				tail->tail_type = HAMMER_HEAD_TYPE_PAD;
				tail->tail_size = bytes;
			}
			undo->head.hdr_signature = HAMMER_HEAD_SIGNATURE;
			undo->head.hdr_type = HAMMER_HEAD_TYPE_PAD;
			undo->head.hdr_size = bytes;
			/* NO CRC OR SEQ NO */
			undomap->next_offset += bytes;
			hammer_modify_buffer_done(buffer);
			hammer_stats_undo += bytes;
			continue;
		}

		/*
		 * Calculate the actual payload and recalculate the size
		 * of the media structure as necessary.
		 */
		if (n > len) {
			n = len;
			bytes = ((n + HAMMER_HEAD_ALIGN_MASK) &
				 ~HAMMER_HEAD_ALIGN_MASK) +
				(int)sizeof(struct hammer_fifo_undo) +
				(int)sizeof(struct hammer_fifo_tail);
		}
		if (hammer_debug_general & 0x0080) {
			hdkprintf("undo %016jx %d %d\n",
				(intmax_t)next_offset, bytes, n);
		}

		undo->head.hdr_signature = HAMMER_HEAD_SIGNATURE;
		undo->head.hdr_type = HAMMER_HEAD_TYPE_UNDO;
		undo->head.hdr_size = bytes;
		undo->head.hdr_seq = hmp->undo_seqno++;
		undo->head.hdr_crc = 0;
		undo->undo_offset = zone_off;
		undo->undo_data_bytes = n;
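		/* The data payload lands immediately after the undo header. */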
		bcopy(base, undo + 1, n);

		tail = (void *)((char *)undo + bytes - sizeof(*tail));
		tail->tail_signature = HAMMER_TAIL_SIGNATURE;
		tail->tail_type = HAMMER_HEAD_TYPE_UNDO;
		tail->tail_size = bytes;

		KKASSERT(bytes >= sizeof(undo->head));
		hammer_crc_set_fifo_head(&undo->head, bytes);
		undomap->next_offset += bytes;
		hammer_stats_undo += bytes;

		/*
		 * Before we finish off the buffer we have to deal with any
		 * junk between the end of the media structure we just laid
		 * down and the UNDO alignment boundary.  We do this by laying
		 * down a dummy PAD.  Even though we will probably overwrite
		 * it almost immediately we have to do this so recovery runs
		 * can iterate the UNDO space without having to depend on
		 * the indices in the volume header.
		 *
		 * This dummy PAD will be overwritten on the next undo so
		 * we do not adjust undomap->next_offset.
		 */
		bytes = HAMMER_UNDO_ALIGN -
			((int)undomap->next_offset & HAMMER_UNDO_MASK);
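		/*
		 * bytes == HAMMER_UNDO_ALIGN means the record ended exactly
		 * on an alignment boundary and no filler PAD is needed.
		 */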
		if (bytes != HAMMER_UNDO_ALIGN) {
			KKASSERT(bytes >= sizeof(struct hammer_fifo_tail));
			undo = (void *)(tail + 1);
			tail = (void *)((char *)undo + bytes - sizeof(*tail));
			if ((void *)undo != (void *)tail) {
				tail->tail_signature = HAMMER_TAIL_SIGNATURE;
				tail->tail_type = HAMMER_HEAD_TYPE_PAD;
				tail->tail_size = bytes;
			}
			undo->head.hdr_signature = HAMMER_HEAD_SIGNATURE;
			undo->head.hdr_type = HAMMER_HEAD_TYPE_PAD;
			undo->head.hdr_size = bytes;
			/* NO CRC OR SEQ NO */
		}
		hammer_modify_buffer_done(buffer);

		/*
		 * Adjust for loop
		 */
		len -= n;
		base = (char *)base + n;
		zone_off += n;
	}
	hammer_modify_volume_done(root_volume);
	hammer_unlock(&hmp->undo_lock);

	if (buffer)
		hammer_rel_buffer(buffer, 0);
	return(error);
}

/*
 * Preformat a new UNDO block.  We could read the old one in but we get
 * better performance if we just pre-format a new one.
 *
 * The recovery code always works forwards so the caller just makes sure the
 * seqno is not contiguous with prior UNDOs or ancient UNDOs now being
 * overwritten.
 *
 * The preformatted UNDO headers use the smallest possible sector size
 * (512) to ensure that any missed media writes are caught.
 *
 * NOTE: Also used by the REDO code.
 */
void
hammer_format_undo(void *base, uint32_t seqno)
{
	hammer_fifo_head_t head;
	hammer_fifo_tail_t tail;
	int i;
	int bytes = HAMMER_UNDO_ALIGN;

	bzero(base, HAMMER_BUFSIZE);

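	/* Carve the buffer into HAMMER_UNDO_ALIGN-sized DUMMY records. */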
	for (i = 0; i < HAMMER_BUFSIZE; i += bytes) {
		head = (void *)((char *)base + i);
		tail = (void *)((char *)head + bytes - sizeof(*tail));

		head->hdr_signature = HAMMER_HEAD_SIGNATURE;
		head->hdr_type = HAMMER_HEAD_TYPE_DUMMY;
		head->hdr_size = bytes;
		head->hdr_seq = seqno++;
		head->hdr_crc = 0;

		tail->tail_signature = HAMMER_TAIL_SIGNATURE;
		tail->tail_type = HAMMER_HEAD_TYPE_DUMMY;
		tail->tail_size = bytes;

		hammer_crc_set_fifo_head(head, bytes);
	}
}

/*
 * HAMMER version 4+ conversion support.
 *
 * Convert a HAMMER version < 4 UNDO FIFO area to a 4+ UNDO FIFO area.
 * The 4+ UNDO FIFO area is backwards compatible.  The conversion is
 * needed to initialize the sequence space and place headers on the
 * new 512-byte undo boundary.
 */
int
hammer_upgrade_undo_4(hammer_transaction_t trans)
{
	hammer_mount_t hmp;
	hammer_volume_t root_volume;
	hammer_blockmap_t undomap;
	hammer_buffer_t buffer = NULL;
	hammer_fifo_head_t head;
	hammer_fifo_tail_t tail;
	hammer_off_t next_offset;
	uint32_t seqno;
	int error;
	int bytes;

	hmp = trans->hmp;

	root_volume = trans->rootvol;

	/* no undo recursion */
	hammer_lock_ex(&hmp->undo_lock);
	hammer_modify_volume_noundo(NULL, root_volume);

	/*
	 * Adjust the in-core undomap and the on-disk undomap.
	 */
	next_offset = HAMMER_ZONE_ENCODE(HAMMER_ZONE_UNDO_INDEX, 0);
	undomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
	undomap->next_offset = next_offset;
	undomap->first_offset = next_offset;

	undomap = &root_volume->ondisk->vol0_blockmap[HAMMER_ZONE_UNDO_INDEX];
	undomap->next_offset = next_offset;
	undomap->first_offset = next_offset;

	/*
	 * Loop over the entire UNDO space creating DUMMY entries.  Sequence
	 * numbers are assigned.
	 */
	seqno = 0;
	bytes = HAMMER_UNDO_ALIGN;

	while (next_offset != undomap->alloc_offset) {
		head = hammer_bnew(hmp, next_offset, &error, &buffer);
		if (error)
			break;
		hammer_modify_buffer_noundo(NULL, buffer);

		head->hdr_signature = HAMMER_HEAD_SIGNATURE;
		head->hdr_type = HAMMER_HEAD_TYPE_DUMMY;
		head->hdr_size = bytes;
		head->hdr_seq = seqno;
		head->hdr_crc = 0;

		tail = (void *)((char *)head + bytes - sizeof(*tail));
		tail->tail_signature = HAMMER_TAIL_SIGNATURE;
		tail->tail_type = HAMMER_HEAD_TYPE_DUMMY;
		tail->tail_size = bytes;

		hammer_crc_set_fifo_head(head, bytes);
		hammer_modify_buffer_done(buffer);

		hammer_stats_undo += bytes;
		next_offset += HAMMER_UNDO_ALIGN;
		++seqno;
	}

	/*
	 * The sequence number will be the next sequence number to lay down.
	 */
	hmp->undo_seqno = seqno;
	hmkprintf(hmp, "version upgrade seqno start %08x\n", seqno);

	hammer_modify_volume_done(root_volume);
	hammer_unlock(&hmp->undo_lock);

	if (buffer)
		hammer_rel_buffer(buffer, 0);
	return (error);
}

/*
 * UNDO HISTORY API
 *
 * It is not necessary to lay out an undo record for the same address space
 * multiple times.  Maintain a cache of recent undos.
 */

/*
 * Enter an undo into the history.  Return EALREADY if the request completely
 * covers a previous request.
 */
int
hammer_enter_undo_history(hammer_mount_t hmp, hammer_off_t offset, int bytes)
{
	hammer_undo_t node;
	hammer_undo_t onode __debugvar;

	node = RB_LOOKUP(hammer_und_rb_tree, &hmp->rb_undo_root, offset);
	if (node) {
		TAILQ_REMOVE(&hmp->undo_lru_list, node, lru_entry);
		TAILQ_INSERT_TAIL(&hmp->undo_lru_list, node, lru_entry);
		if (bytes <= node->bytes)
			return(EALREADY);
		node->bytes = bytes;
		return(0);
	}
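	/*
	 * Allocate a new node from the static pool, or recycle the
	 * least-recently-used node once the pool is exhausted.
	 */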
	if (hmp->undo_alloc != HAMMER_MAX_UNDOS) {
		node = &hmp->undos[hmp->undo_alloc++];
	} else {
		node = TAILQ_FIRST(&hmp->undo_lru_list);
		TAILQ_REMOVE(&hmp->undo_lru_list, node, lru_entry);
		RB_REMOVE(hammer_und_rb_tree, &hmp->rb_undo_root, node);
	}
	node->offset = offset;
	node->bytes = bytes;
	TAILQ_INSERT_TAIL(&hmp->undo_lru_list, node, lru_entry);
	onode = RB_INSERT(hammer_und_rb_tree, &hmp->rb_undo_root, node);
	KKASSERT(onode == NULL);
	return(0);
}

void
hammer_clear_undo_history(hammer_mount_t hmp)
{
	RB_INIT(&hmp->rb_undo_root);
	TAILQ_INIT(&hmp->undo_lru_list);
	hmp->undo_alloc = 0;
}

/*
 * Return how much of the undo FIFO has been used.
 *
 * The calculation includes undo FIFO space still reserved from a previous
 * flush (because it will still be run on recovery if a crash occurs and
 * we can't overwrite it yet).
 */
int64_t
hammer_undo_used(hammer_transaction_t trans)
{
	hammer_blockmap_t cundomap;
	hammer_blockmap_t dundomap;
	int64_t max_bytes __debugvar;
	int64_t bytes;

	cundomap = &trans->hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
	dundomap = &trans->rootvol->ondisk->
				vol0_blockmap[HAMMER_ZONE_UNDO_INDEX];

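	/*
	 * If the FIFO has wrapped, the used space runs from first_offset
	 * to the end of the zone plus from the start of the zone up to
	 * next_offset.
	 */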
	if (dundomap->first_offset <= cundomap->next_offset) {
		bytes = cundomap->next_offset - dundomap->first_offset;
	} else {
		bytes = cundomap->alloc_offset - dundomap->first_offset +
			HAMMER_OFF_LONG_ENCODE(cundomap->next_offset);
	}
	max_bytes = HAMMER_OFF_SHORT_ENCODE(cundomap->alloc_offset);
	KKASSERT(bytes <= max_bytes);
	return(bytes);
}

/*
 * Return how much of the undo FIFO is available for new records.
 */
int64_t
hammer_undo_space(hammer_transaction_t trans)
{
	hammer_blockmap_t rootmap;
	int64_t max_bytes;

	rootmap = &trans->hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
	max_bytes = HAMMER_OFF_SHORT_ENCODE(rootmap->alloc_offset);
	return(max_bytes - hammer_undo_used(trans));
}

int64_t
hammer_undo_max(hammer_mount_t hmp)
{
	hammer_blockmap_t rootmap;
	int64_t max_bytes;

	rootmap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
	max_bytes = HAMMER_OFF_SHORT_ENCODE(rootmap->alloc_offset);

	return(max_bytes);
}

/*
 * Returns 1 if the undo buffer should be reclaimed on release.  The
 * only undo buffer we do NOT want to reclaim is the one at the current
 * append offset.
 */
int
hammer_undo_reclaim(hammer_io_t io)
{
	hammer_blockmap_t undomap;
	hammer_off_t next_offset;

	undomap = &io->hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
	next_offset = undomap->next_offset & ~HAMMER_BUFMASK64;
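	/* Do not reclaim the buffer containing the current append point. */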
	if (HAMMER_ITOB(io)->zoneX_offset == next_offset)
		return(0);
	return(1);
}