/* sys/vfs/hammer/hammer_undo.c */
/*
 * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * HAMMER undo - undo buffer/FIFO management.
 */

#include "hammer.h"

static int hammer_und_rb_compare(hammer_undo_t node1, hammer_undo_t node2);

RB_GENERATE2(hammer_und_rb_tree, hammer_undo, rb_node,
             hammer_und_rb_compare, hammer_off_t, offset);

/*
 * Convert a zone-3 undo offset into a zone-2 buffer offset.
 */
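/*
 * Illustrative decomposition (offsets hypothetical): with an 8MB
 * HAMMER_LARGEBLOCK_SIZE, zone-3 offset 0x3000000000900042 masks down
 * to 0x900042, giving large-block index 0x900042 / 8MB == 1 and
 * intra-block offset 0x100042, so the translated result is
 * vol0_undo_array[1] + 0x100042 in zone-2 space.
 */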
hammer_off_t
hammer_undo_lookup(hammer_mount_t hmp, hammer_off_t zone3_off, int *errorp)
{
	hammer_volume_t root_volume;
	hammer_blockmap_t undomap __debugvar;
	hammer_off_t result_offset;
	int i;

	KKASSERT((zone3_off & HAMMER_OFF_ZONE_MASK) == HAMMER_ZONE_UNDO);
	root_volume = hammer_get_root_volume(hmp, errorp);
	if (*errorp)
		return(0);
	undomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
	KKASSERT(HAMMER_ZONE_DECODE(undomap->alloc_offset) == HAMMER_ZONE_UNDO_INDEX);
	KKASSERT(zone3_off < undomap->alloc_offset);

	i = (zone3_off & HAMMER_OFF_SHORT_MASK) / HAMMER_LARGEBLOCK_SIZE;
	result_offset = root_volume->ondisk->vol0_undo_array[i] +
			(zone3_off & HAMMER_LARGEBLOCK_MASK64);

	hammer_rel_volume(root_volume, 0);
	return(result_offset);
}

/*
 * Generate UNDO record(s) for the block of data at the specified zone1
 * or zone2 offset.
 *
 * The recovery code will execute UNDOs in reverse order, allowing overlaps.
 * All the UNDOs are executed together so if we already laid one down we
 * do not have to lay another one down for the same range.
 *
 * For HAMMER version 4+ UNDO a 512-byte boundary is enforced and a PAD
 * will be laid down for any unused space.  UNDO FIFO media structures
 * will implement the hdr_seq field (it used to be reserved01), and
 * both flush and recovery mechanics will be very different.
 *
 * WARNING!  See also hammer_generate_redo() in hammer_redo.c
 */
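/*
 * Record layout sketch for reference (one FIFO entry as laid down by
 * this function; field sizes indicative only):
 *
 *	+--------------------------+  <-- next_offset
 *	| struct hammer_fifo_undo  |  head: signature/type/size/seq/crc
 *	| payload (n bytes)        |  copy of the data being overwritten
 *	| alignment fill           |  rounds up to HAMMER_HEAD_ALIGN
 *	| struct hammer_fifo_tail  |  tail: signature/type/size
 *	+--------------------------+  <-- next_offset + bytes
 *
 * A record never crosses a HAMMER_UNDO_ALIGN (512-byte) boundary; any
 * shortfall is filled with a PAD record instead.
 */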
int
hammer_generate_undo(hammer_transaction_t trans,
		     hammer_off_t zone_off, void *base, int len)
{
	hammer_mount_t hmp;
	hammer_volume_t root_volume;
	hammer_blockmap_t undomap;
	hammer_buffer_t buffer = NULL;
	hammer_fifo_undo_t undo;
	hammer_fifo_tail_t tail;
	hammer_off_t next_offset;
	int error;
	int bytes;
	int n;

	hmp = trans->hmp;

	/*
	 * A SYNC record may be required before we can lay down a general
	 * UNDO.  This ensures that the nominal recovery span contains
	 * at least one SYNC record telling the recovery code how far
	 * out-of-span it must go to run the REDOs.
	 */
	if ((hmp->flags & HAMMER_MOUNT_REDO_SYNC) == 0 &&
	    hmp->version >= HAMMER_VOL_VERSION_FOUR) {
		hammer_generate_redo_sync(trans);
	}

	/*
	 * Enter the offset into our undo history.  If there is an existing
	 * undo we do not have to generate a new one.
	 */
	if (hammer_enter_undo_history(hmp, zone_off, len) == EALREADY)
		return(0);

	root_volume = trans->rootvol;
	undomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];

	/* no undo recursion */
	hammer_modify_volume(NULL, root_volume, NULL, 0);
	hammer_lock_ex(&hmp->undo_lock);

	/* undo had better not roll over (loose test) */
	if (hammer_undo_space(trans) < len + HAMMER_BUFSIZE*3)
		panic("hammer: insufficient undo FIFO space!");

	/*
	 * Loop until the undo for the entire range has been laid down.
	 */
	while (len) {
		/*
		 * Fetch the layout offset in the UNDO FIFO and wrap it
		 * as necessary.
		 */
		if (undomap->next_offset == undomap->alloc_offset) {
			undomap->next_offset =
				HAMMER_ZONE_ENCODE(HAMMER_ZONE_UNDO_INDEX, 0);
		}
		next_offset = undomap->next_offset;

		/*
		 * This is a tail-chasing FIFO; when we hit the start of a
		 * new buffer we don't have to read it in.
		 */
		if ((next_offset & HAMMER_BUFMASK) == 0) {
			undo = hammer_bnew(hmp, next_offset, &error, &buffer);
			hammer_format_undo(undo, hmp->undo_seqno ^ 0x40000000);
		} else {
			undo = hammer_bread(hmp, next_offset, &error, &buffer);
		}
		if (error)
			break;
		hammer_modify_buffer(NULL, buffer, NULL, 0);
		/*
		 * Calculate how big a media structure fits up to the next
		 * alignment point and how large a data payload we can
		 * accommodate.
		 *
		 * If n calculates to 0 or negative there is no room for
		 * anything but a PAD.
		 */
		bytes = HAMMER_UNDO_ALIGN -
			((int)next_offset & HAMMER_UNDO_MASK);
		n = bytes -
		    (int)sizeof(struct hammer_fifo_undo) -
		    (int)sizeof(struct hammer_fifo_tail);

		/*
		 * If available space is insufficient for any payload
		 * we have to lay down a PAD.
		 *
		 * The minimum PAD is 8 bytes and the head and tail will
		 * overlap each other in that case.  PADs do not have
		 * sequence numbers or CRCs.
		 *
		 * A PAD may not start on a boundary.  That is, every
		 * 512-byte block in the UNDO/REDO FIFO must begin with
		 * a record containing a sequence number.
		 */
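		/*
		 * Worked example (illustrative): with 8 bytes left before
		 * the 512-byte boundary, bytes == 8 and n < 0.  Then
		 * tail == undo, the tail store below is skipped, and the
		 * 8-byte PAD consists solely of the head's signature,
		 * type, and size fields (hdr_seq and hdr_crc are never
		 * written for PADs).
		 */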
		if (n <= 0) {
			KKASSERT(bytes >= sizeof(struct hammer_fifo_tail));
			KKASSERT(((int)next_offset & HAMMER_UNDO_MASK) != 0);
			tail = (void *)((char *)undo + bytes - sizeof(*tail));
			if ((void *)undo != (void *)tail) {
				tail->tail_signature = HAMMER_TAIL_SIGNATURE;
				tail->tail_type = HAMMER_HEAD_TYPE_PAD;
				tail->tail_size = bytes;
			}
			undo->head.hdr_signature = HAMMER_HEAD_SIGNATURE;
			undo->head.hdr_type = HAMMER_HEAD_TYPE_PAD;
			undo->head.hdr_size = bytes;
			/* NO CRC OR SEQ NO */
			undomap->next_offset += bytes;
			hammer_modify_buffer_done(buffer);
			hammer_stats_undo += bytes;
			continue;
		}

		/*
		 * Calculate the actual payload and recalculate the size
		 * of the media structure as necessary.
		 */
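		/*
		 * Example of the recalculation (sizes assumed for
		 * illustration: 32-byte hammer_fifo_undo, 8-byte tail,
		 * HAMMER_HEAD_ALIGN of 8): a 13-byte remaining payload
		 * rounds up to 16, so bytes becomes 16 + 32 + 8 = 56.
		 */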
		if (n > len) {
			n = len;
			bytes = ((n + HAMMER_HEAD_ALIGN_MASK) &
				 ~HAMMER_HEAD_ALIGN_MASK) +
				(int)sizeof(struct hammer_fifo_undo) +
				(int)sizeof(struct hammer_fifo_tail);
		}
		if (hammer_debug_general & 0x0080) {
			kprintf("undo %016llx %d %d\n",
				(long long)next_offset, bytes, n);
		}

		undo->head.hdr_signature = HAMMER_HEAD_SIGNATURE;
		undo->head.hdr_type = HAMMER_HEAD_TYPE_UNDO;
		undo->head.hdr_size = bytes;
		undo->head.hdr_seq = hmp->undo_seqno++;
		undo->head.hdr_crc = 0;
		undo->undo_offset = zone_off;
		undo->undo_data_bytes = n;
		bcopy(base, undo + 1, n);

		tail = (void *)((char *)undo + bytes - sizeof(*tail));
		tail->tail_signature = HAMMER_TAIL_SIGNATURE;
		tail->tail_type = HAMMER_HEAD_TYPE_UNDO;
		tail->tail_size = bytes;

		KKASSERT(bytes >= sizeof(undo->head));
		undo->head.hdr_crc = crc32(undo, HAMMER_FIFO_HEAD_CRCOFF) ^
			     crc32(&undo->head + 1, bytes - sizeof(undo->head));
		undomap->next_offset += bytes;
		hammer_stats_undo += bytes;

		/*
		 * Before we finish off the buffer we have to deal with any
		 * junk between the end of the media structure we just laid
		 * down and the UNDO alignment boundary.  We do this by laying
		 * down a dummy PAD.  Even though we will probably overwrite
		 * it almost immediately we have to do this so recovery runs
		 * can iterate the UNDO space without having to depend on
		 * the indices in the volume header.
		 *
		 * This dummy PAD will be overwritten on the next undo so
		 * we do not adjust undomap->next_offset.
		 */
		bytes = HAMMER_UNDO_ALIGN -
			((int)undomap->next_offset & HAMMER_UNDO_MASK);
		if (bytes != HAMMER_UNDO_ALIGN) {
			KKASSERT(bytes >= sizeof(struct hammer_fifo_tail));
			undo = (void *)(tail + 1);
			tail = (void *)((char *)undo + bytes - sizeof(*tail));
			if ((void *)undo != (void *)tail) {
				tail->tail_signature = HAMMER_TAIL_SIGNATURE;
				tail->tail_type = HAMMER_HEAD_TYPE_PAD;
				tail->tail_size = bytes;
			}
			undo->head.hdr_signature = HAMMER_HEAD_SIGNATURE;
			undo->head.hdr_type = HAMMER_HEAD_TYPE_PAD;
			undo->head.hdr_size = bytes;
			/* NO CRC OR SEQ NO */
		}
		hammer_modify_buffer_done(buffer);

		/*
		 * Adjust for loop
		 */
		len -= n;
		base = (char *)base + n;
		zone_off += n;
	}
	hammer_modify_volume_done(root_volume);
	hammer_unlock(&hmp->undo_lock);

	if (buffer)
		hammer_rel_buffer(buffer, 0);
	return(error);
}

/*
 * Preformat a new UNDO block.  We could read the old one in but we get
 * better performance if we just pre-format a new one.
 *
 * The recovery code always works forwards so the caller just makes sure the
 * seqno is not contiguous with prior UNDOs or ancient UNDOs now being
 * overwritten.
 *
 * The preformatted UNDO headers use the smallest possible sector size
 * (512) to ensure that any missed media writes are caught.
 *
 * NOTE: Also used by the REDO code.
 */
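/*
 * Sketch of the result (assuming a 16KB HAMMER_BUFSIZE and 512-byte
 * HAMMER_UNDO_ALIGN): the buffer is carved into 32 DUMMY records with
 * ascending sequence numbers.  The generate-undo path above passes
 * hmp->undo_seqno ^ 0x40000000, keeping the preformatted sequence
 * space well away from live records so recovery cannot confuse them.
 */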
void
hammer_format_undo(void *base, u_int32_t seqno)
{
	hammer_fifo_head_t head;
	hammer_fifo_tail_t tail;
	int i;
	int bytes = HAMMER_UNDO_ALIGN;

	bzero(base, HAMMER_BUFSIZE);

	for (i = 0; i < HAMMER_BUFSIZE; i += bytes) {
		head = (void *)((char *)base + i);
		tail = (void *)((char *)head + bytes - sizeof(*tail));

		head->hdr_signature = HAMMER_HEAD_SIGNATURE;
		head->hdr_type = HAMMER_HEAD_TYPE_DUMMY;
		head->hdr_size = bytes;
		head->hdr_seq = seqno++;
		head->hdr_crc = 0;

		tail->tail_signature = HAMMER_TAIL_SIGNATURE;
		tail->tail_type = HAMMER_HEAD_TYPE_DUMMY;
		tail->tail_size = bytes;

		head->hdr_crc = crc32(head, HAMMER_FIFO_HEAD_CRCOFF) ^
			     crc32(head + 1, bytes - sizeof(*head));
	}
}

/*
 * HAMMER version 4+ conversion support.
 *
 * Convert a HAMMER version < 4 UNDO FIFO area to a 4+ UNDO FIFO area.
 * The 4+ UNDO FIFO area is backwards compatible.  The conversion is
 * needed to initialize the sequence space and place headers on the
 * new 512-byte undo boundary.
 */
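/*
 * Call-path sketch (assumed, for orientation): the userland `hammer
 * version-upgrade` utility drives a version-setting ioctl; when the
 * volume crosses to HAMMER_VOL_VERSION_FOUR that path is expected to
 * invoke this function once, under a transaction, to stamp the entire
 * FIFO with sequenced DUMMY records.
 */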
int
hammer_upgrade_undo_4(hammer_transaction_t trans)
{
	hammer_mount_t hmp;
	hammer_volume_t root_volume;
	hammer_blockmap_t undomap;
	hammer_buffer_t buffer = NULL;
	hammer_fifo_head_t head;
	hammer_fifo_tail_t tail;
	hammer_off_t next_offset;
	u_int32_t seqno;
	int error;
	int bytes;

	hmp = trans->hmp;

	root_volume = trans->rootvol;

	/* no undo recursion */
	hammer_lock_ex(&hmp->undo_lock);
	hammer_modify_volume(NULL, root_volume, NULL, 0);

	/*
	 * Adjust the in-core undomap and the on-disk undomap.
	 */
	next_offset = HAMMER_ZONE_ENCODE(HAMMER_ZONE_UNDO_INDEX, 0);
	undomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
	undomap->next_offset = next_offset;
	undomap->first_offset = next_offset;

	undomap = &root_volume->ondisk->vol0_blockmap[HAMMER_ZONE_UNDO_INDEX];
	undomap->next_offset = next_offset;
	undomap->first_offset = next_offset;

	/*
	 * Loop over the entire UNDO space creating DUMMY entries.  Sequence
	 * numbers are assigned.
	 */
	seqno = 0;
	bytes = HAMMER_UNDO_ALIGN;

	while (next_offset != undomap->alloc_offset) {
		head = hammer_bnew(hmp, next_offset, &error, &buffer);
		if (error)
			break;
		hammer_modify_buffer(NULL, buffer, NULL, 0);
		tail = (void *)((char *)head + bytes - sizeof(*tail));

		head->hdr_signature = HAMMER_HEAD_SIGNATURE;
		head->hdr_type = HAMMER_HEAD_TYPE_DUMMY;
		head->hdr_size = bytes;
		head->hdr_seq = seqno;
		head->hdr_crc = 0;

		tail->tail_signature = HAMMER_TAIL_SIGNATURE;
		tail->tail_type = HAMMER_HEAD_TYPE_DUMMY;
		tail->tail_size = bytes;

		head->hdr_crc = crc32(head, HAMMER_FIFO_HEAD_CRCOFF) ^
			     crc32(head + 1, bytes - sizeof(*head));
		hammer_modify_buffer_done(buffer);

		hammer_stats_undo += bytes;
		next_offset += HAMMER_UNDO_ALIGN;
		++seqno;
	}

	/*
	 * The sequence number will be the next sequence number to lay down.
	 */
	hmp->undo_seqno = seqno;
	kprintf("version upgrade seqno start %08x\n", seqno);

	hammer_modify_volume_done(root_volume);
	hammer_unlock(&hmp->undo_lock);

	if (buffer)
		hammer_rel_buffer(buffer, 0);
	return (error);
}

/*
 * UNDO HISTORY API
 *
 * It is not necessary to lay out an undo record for the same address space
 * multiple times.  Maintain a cache of recent undos.
 */
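/*
 * Usage sketch (mirrors the caller in hammer_generate_undo() above):
 *
 *	if (hammer_enter_undo_history(hmp, zone_off, len) == EALREADY)
 *		return(0);	(range already covered by a prior UNDO)
 *
 * The cache is a fixed pool of HAMMER_MAX_UNDOS entries indexed by an
 * RB tree and recycled in LRU order once the pool is exhausted.
 */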

/*
 * Enter an undo into the history.  Return EALREADY if the request completely
 * covers a previous request.
 */
int
hammer_enter_undo_history(hammer_mount_t hmp, hammer_off_t offset, int bytes)
{
	hammer_undo_t node;
	hammer_undo_t onode __debugvar;

	node = RB_LOOKUP(hammer_und_rb_tree, &hmp->rb_undo_root, offset);
	if (node) {
		TAILQ_REMOVE(&hmp->undo_lru_list, node, lru_entry);
		TAILQ_INSERT_TAIL(&hmp->undo_lru_list, node, lru_entry);
		if (bytes <= node->bytes)
			return(EALREADY);
		node->bytes = bytes;
		return(0);
	}
	if (hmp->undo_alloc != HAMMER_MAX_UNDOS) {
		node = &hmp->undos[hmp->undo_alloc++];
	} else {
		node = TAILQ_FIRST(&hmp->undo_lru_list);
		TAILQ_REMOVE(&hmp->undo_lru_list, node, lru_entry);
		RB_REMOVE(hammer_und_rb_tree, &hmp->rb_undo_root, node);
	}
	node->offset = offset;
	node->bytes = bytes;
	TAILQ_INSERT_TAIL(&hmp->undo_lru_list, node, lru_entry);
	onode = RB_INSERT(hammer_und_rb_tree, &hmp->rb_undo_root, node);
	KKASSERT(onode == NULL);
	return(0);
}

void
hammer_clear_undo_history(hammer_mount_t hmp)
{
	RB_INIT(&hmp->rb_undo_root);
	TAILQ_INIT(&hmp->undo_lru_list);
	hmp->undo_alloc = 0;
}

/*
 * Return how much of the undo FIFO has been used.
 *
 * The calculation includes undo FIFO space still reserved from a previous
 * flush (because it will still be run on recovery if a crash occurs and
 * we can't overwrite it yet).
 */
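/*
 * Wrap example (illustrative offsets): with alloc_offset spanning 64MB
 * of zone-3 space, first_offset at the 60MB mark and next_offset at
 * the 4MB mark, the FIFO has wrapped, so the in-use span is
 * (64MB - 60MB) + 4MB = 8MB.
 */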
int64_t
hammer_undo_used(hammer_transaction_t trans)
{
	hammer_blockmap_t cundomap;
	hammer_blockmap_t dundomap;
	int64_t max_bytes __debugvar;
	int64_t bytes;

	cundomap = &trans->hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
	dundomap = &trans->rootvol->ondisk->
				vol0_blockmap[HAMMER_ZONE_UNDO_INDEX];

	if (dundomap->first_offset <= cundomap->next_offset) {
		bytes = cundomap->next_offset - dundomap->first_offset;
	} else {
		bytes = cundomap->alloc_offset - dundomap->first_offset +
			(cundomap->next_offset & HAMMER_OFF_LONG_MASK);
	}
	max_bytes = cundomap->alloc_offset & HAMMER_OFF_SHORT_MASK;
	KKASSERT(bytes <= max_bytes);
	return(bytes);
}

/*
 * Return how much of the undo FIFO is available for new records.
 */
int64_t
hammer_undo_space(hammer_transaction_t trans)
{
	hammer_blockmap_t rootmap;
	int64_t max_bytes;

	rootmap = &trans->hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
	max_bytes = rootmap->alloc_offset & HAMMER_OFF_SHORT_MASK;
	return(max_bytes - hammer_undo_used(trans));
}

int64_t
hammer_undo_max(hammer_mount_t hmp)
{
	hammer_blockmap_t rootmap;
	int64_t max_bytes;

	rootmap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
	max_bytes = rootmap->alloc_offset & HAMMER_OFF_SHORT_MASK;

	return(max_bytes);
}

/*
 * Returns 1 if the undo buffer should be reclaimed on release.  The
 * only undo buffer we do NOT want to reclaim is the one at the current
 * append offset.
 */
int
hammer_undo_reclaim(hammer_io_t io)
{
	hammer_blockmap_t undomap;
	hammer_off_t next_offset;

	undomap = &io->hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
	next_offset = undomap->next_offset & ~HAMMER_BUFMASK64;
	if (((struct hammer_buffer *)io)->zoneX_offset == next_offset)
		return(0);
	return(1);
}

static int
hammer_und_rb_compare(hammer_undo_t node1, hammer_undo_t node2)
{
	if (node1->offset < node2->offset)
		return(-1);
	if (node1->offset > node2->offset)
		return(1);
	return(0);
}