HAMMER VFS - Version 4 part 1/many - UNDO FIFO layout work.
[dragonfly.git] sys/vfs/hammer/hammer_undo.c
/*
 * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_undo.c,v 1.20 2008/07/18 00:19:53 dillon Exp $
 */

/*
 * HAMMER undo - undo buffer/FIFO management.
 */

#include "hammer.h"

static int hammer_und_rb_compare(hammer_undo_t node1, hammer_undo_t node2);
static void hammer_format_undo(void *base, u_int32_t seqno);

RB_GENERATE2(hammer_und_rb_tree, hammer_undo, rb_node,
             hammer_und_rb_compare, hammer_off_t, offset);
/*
 * Convert a zone-3 undo offset into a zone-2 buffer offset.
 */
hammer_off_t
hammer_undo_lookup(hammer_mount_t hmp, hammer_off_t zone3_off, int *errorp)
{
        hammer_volume_t root_volume;
        hammer_blockmap_t undomap;
        hammer_off_t result_offset;
        int i;

        KKASSERT((zone3_off & HAMMER_OFF_ZONE_MASK) == HAMMER_ZONE_UNDO);
        root_volume = hammer_get_root_volume(hmp, errorp);
        if (*errorp)
                return(0);
        undomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
        KKASSERT(HAMMER_ZONE_DECODE(undomap->alloc_offset) == HAMMER_ZONE_UNDO_INDEX);
        KKASSERT(zone3_off < undomap->alloc_offset);

        i = (zone3_off & HAMMER_OFF_SHORT_MASK) / HAMMER_LARGEBLOCK_SIZE;
        result_offset = root_volume->ondisk->vol0_undo_array[i] +
                        (zone3_off & HAMMER_LARGEBLOCK_MASK64);

        hammer_rel_volume(root_volume, 0);
        return(result_offset);
}

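/*
 * Worked example (illustrative, assuming 8MB large-blocks back the
 * UNDO zone as in this tree): a zone-3 offset 20MB into the UNDO FIFO
 * selects vol0_undo_array[2] and adds the remaining 4MB, yielding the
 * zone-2 buffer offset of that large-block plus 4MB.
 */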
/*
 * Generate UNDO record(s) for the block of data at the specified zone1
 * or zone2 offset.
 *
 * The recovery code will execute UNDOs in reverse order, allowing overlaps.
 * All the UNDOs are executed together so if we already laid one down we
 * do not have to lay another one down for the same range.
 *
 * For HAMMER version 4+ UNDO a 512 byte boundary is enforced and a PAD
 * will be laid down for any unused space.  UNDO FIFO media structures
 * will implement the hdr_seq field (it used to be reserved01), and
 * both flush and recovery mechanics will be very different.
 */
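/*
 * Rough sketch of the aligned version 4+ layout this function lays down
 * (field widths are illustrative; see hammer_disk.h for the real
 * structures):
 *
 *      |<--------------- HAMMER_UNDO_ALIGN (512) --------------->|
 *      +------------------+--------------------+------+----------+
 *      | hammer_fifo_undo | payload            | tail | PAD      |
 *      | head w/ seq, crc | (undo_data_bytes)  |      | (if any) |
 *      +------------------+--------------------+------+----------+
 *
 * Every 512-byte boundary therefore begins with a record carrying a
 * sequence number, which is what the recovery scan iterates over.
 */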
int
hammer_generate_undo(hammer_transaction_t trans,
                     hammer_off_t zone_off, void *base, int len)
{
        hammer_mount_t hmp;
        hammer_volume_t root_volume;
        hammer_blockmap_t undomap;
        hammer_buffer_t buffer = NULL;
        hammer_fifo_undo_t undo;
        hammer_fifo_tail_t tail;
        hammer_off_t next_offset;
        int error;
        int bytes;
        int n;

        hmp = trans->hmp;

        /*
         * Enter the offset into our undo history.  If there is an existing
         * undo we do not have to generate a new one.
         */
        if (hammer_enter_undo_history(hmp, zone_off, len) == EALREADY)
                return(0);

        root_volume = trans->rootvol;
        undomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];

        /* no undo recursion */
        hammer_modify_volume(NULL, root_volume, NULL, 0);
        hammer_lock_ex(&hmp->undo_lock);

        /* undo had better not roll over (loose test) */
        if (hammer_undo_space(trans) < len + HAMMER_BUFSIZE*3)
                panic("hammer: insufficient undo FIFO space!");

        /*
         * Loop until the undo for the entire range has been laid down.
         */
        while (len) {
                /*
                 * Fetch the layout offset in the UNDO FIFO, wrap it as
                 * necessary.
                 */
                if (undomap->next_offset == undomap->alloc_offset) {
                        undomap->next_offset =
                                HAMMER_ZONE_ENCODE(HAMMER_ZONE_UNDO_INDEX, 0);
                }
                next_offset = undomap->next_offset;

                /*
                 * This is a tail-chasing FIFO, when we hit the start of a new
                 * buffer we don't have to read it in.
                 */
                if ((next_offset & HAMMER_BUFMASK) == 0) {
                        undo = hammer_bnew(hmp, next_offset, &error, &buffer);
                        hammer_format_undo(undo, hmp->undo_seqno ^ 0x40000000);
                } else {
                        undo = hammer_bread(hmp, next_offset, &error, &buffer);
                }
                if (error)
                        break;
                hammer_modify_buffer(NULL, buffer, NULL, 0);

                /*
                 * Calculate how big a media structure fits up to the next
                 * alignment point and how large a data payload we can
                 * accommodate.
                 *
                 * If n calculates to 0 or negative there is no room for
                 * anything but a PAD.
                 */
                bytes = HAMMER_UNDO_ALIGN -
                        ((int)next_offset & HAMMER_UNDO_MASK);
                n = bytes -
                    (int)sizeof(struct hammer_fifo_undo) -
                    (int)sizeof(struct hammer_fifo_tail);
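                /*
                 * Worked example (illustrative, assuming a 512-byte
                 * HAMMER_UNDO_ALIGN, a 32-byte hammer_fifo_undo and an
                 * 8-byte hammer_fifo_tail): at an offset 100 bytes into
                 * a block, bytes = 412 and n = 412 - 32 - 8 = 372 bytes
                 * of payload fit before the next boundary.
                 */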

                /*
                 * If available space is insufficient for any payload
                 * we have to lay down a PAD.
                 *
                 * The minimum PAD is 8 bytes and the head and tail will
                 * overlap each other in that case.  PADs do not have
                 * sequence numbers or CRCs.
                 *
                 * A PAD may not start on a boundary.  That is, every
                 * 512-byte block in the UNDO/REDO FIFO must begin with
                 * a record containing a sequence number.
                 */
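                /*
                 * For example, if only 8 bytes remain before the boundary
                 * the tail computes to the same address as the head, so
                 * only the overlapping signature/type/size fields are
                 * written once (see the pointer-equality check below).
                 */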
                if (n <= 0) {
                        KKASSERT(bytes >= sizeof(struct hammer_fifo_tail));
                        KKASSERT(((int)next_offset & HAMMER_UNDO_MASK) != 0);
                        tail = (void *)((char *)undo + bytes - sizeof(*tail));
                        if ((void *)undo != (void *)tail) {
                                tail->tail_signature = HAMMER_TAIL_SIGNATURE;
                                tail->tail_type = HAMMER_HEAD_TYPE_PAD;
                                tail->tail_size = bytes;
                        }
                        undo->head.hdr_signature = HAMMER_HEAD_SIGNATURE;
                        undo->head.hdr_type = HAMMER_HEAD_TYPE_PAD;
                        undo->head.hdr_size = bytes;
                        /* NO CRC OR SEQ NO */
                        undomap->next_offset += bytes;
                        hammer_modify_buffer_done(buffer);
                        hammer_stats_undo += bytes;
                        continue;
                }

                /*
                 * Calculate the actual payload and recalculate the size
                 * of the media structure as necessary.
                 */
                if (n > len) {
                        n = len;
                        bytes = ((n + HAMMER_HEAD_ALIGN_MASK) &
                                 ~HAMMER_HEAD_ALIGN_MASK) +
                                (int)sizeof(struct hammer_fifo_undo) +
                                (int)sizeof(struct hammer_fifo_tail);
                }
                if (hammer_debug_general & 0x0080) {
                        kprintf("undo %016llx %d %d\n",
                                (long long)next_offset, bytes, n);
                }

                undo->head.hdr_signature = HAMMER_HEAD_SIGNATURE;
                undo->head.hdr_type = HAMMER_HEAD_TYPE_UNDO;
                undo->head.hdr_size = bytes;
                undo->head.hdr_seq = hmp->undo_seqno++;
                undo->head.hdr_crc = 0;
                undo->undo_offset = zone_off;
                undo->undo_data_bytes = n;
                bcopy(base, undo + 1, n);

                tail = (void *)((char *)undo + bytes - sizeof(*tail));
                tail->tail_signature = HAMMER_TAIL_SIGNATURE;
                tail->tail_type = HAMMER_HEAD_TYPE_UNDO;
                tail->tail_size = bytes;

                KKASSERT(bytes >= sizeof(undo->head));
                undo->head.hdr_crc = crc32(undo, HAMMER_FIFO_HEAD_CRCOFF) ^
                        crc32(&undo->head + 1, bytes - sizeof(undo->head));
                undomap->next_offset += bytes;
                hammer_stats_undo += bytes;

                /*
                 * Before we finish off the buffer we have to deal with any
                 * junk between the end of the media structure we just laid
                 * down and the UNDO alignment boundary.  We do this by laying
                 * down a dummy PAD.  Even though we will probably overwrite
                 * it almost immediately we have to do this so recovery runs
                 * can iterate the UNDO space without having to depend on
                 * the indices in the volume header.
                 *
                 * This dummy PAD will be overwritten on the next undo so
                 * we do not adjust undomap->next_offset.
                 */
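                /*
                 * Example: if the record just laid down ends 72 bytes into
                 * a 512-byte block, the remaining 440 bytes get a dummy PAD
                 * header so a forward scan of the FIFO never encounters
                 * uninitialized space before the next boundary.
                 */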
                bytes = HAMMER_UNDO_ALIGN -
                        ((int)undomap->next_offset & HAMMER_UNDO_MASK);
                if (bytes != HAMMER_UNDO_ALIGN) {
                        KKASSERT(bytes >= sizeof(struct hammer_fifo_tail));
                        undo = (void *)(tail + 1);
                        tail = (void *)((char *)undo + bytes - sizeof(*tail));
                        if ((void *)undo != (void *)tail) {
                                tail->tail_signature = HAMMER_TAIL_SIGNATURE;
                                tail->tail_type = HAMMER_HEAD_TYPE_PAD;
                                tail->tail_size = bytes;
                        }
                        undo->head.hdr_signature = HAMMER_HEAD_SIGNATURE;
                        undo->head.hdr_type = HAMMER_HEAD_TYPE_PAD;
                        undo->head.hdr_size = bytes;
                        /* NO CRC OR SEQ NO */
                }
                hammer_modify_buffer_done(buffer);

                /*
                 * Adjust for loop
                 */
                len -= n;
                base = (char *)base + n;
                zone_off += n;
        }
        hammer_modify_volume_done(root_volume);
        hammer_unlock(&hmp->undo_lock);
        /* XXX flush volume header */

        if (buffer)
                hammer_rel_buffer(buffer, 0);
        return(error);
}
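
/*
 * Note: in this tree the hammer_modify_volume()/hammer_modify_buffer()
 * paths are expected to call hammer_generate_undo() for the byte range
 * being modified; the NULL-range calls above intentionally bypass that
 * (no undo recursion).
 */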

#if 0
/*
 * HAMMER version 4+ REDO support.
 *
 * Generate REDO record(s) for logical data writes to a file.  REDO records
 * are only created if the inode was previously synced (such that it will
 * still exist after any recovery), and only for a limited amount of write
 * data between fsyncs.
 *
 * REDO records are used to improve fsync() performance.  Instead of having
 * to go through a complete flush cycle involving at least two disk
 * synchronizations the fsync need only flush UNDO FIFO buffers through
 * the related REDO records, which is a single synchronization requiring
 * no track seeking.  If a recovery becomes necessary the recovery code
 * will generate logical data writes based on the REDO records encountered.
 * That is, the recovery code will UNDO any partial meta-data/data writes
 * at the raw disk block level and then REDO the data writes at the logical
 * level.
 */
int
hammer_generate_redo(hammer_transaction_t trans, hammer_inode_t ip,
                     hammer_off_t file_off, hammer_off_t zone_off,
                     void *base, int len)
{
}
#endif

/*
 * Preformat a new UNDO block.  We could read the old one in but we get
 * better performance if we just pre-format a new one.
 *
 * The recovery code always works forwards so the caller just makes sure the
 * seqno is not contiguous with prior UNDOs or ancient UNDOs now being
 * overwritten.
 */
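/*
 * Note: the one caller in this file passes (hmp->undo_seqno ^ 0x40000000)
 * as the starting seqno, which keeps the preformatted DUMMY records from
 * looking like a continuation of the live sequence.
 */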
static
void
hammer_format_undo(void *base, u_int32_t seqno)
{
        hammer_fifo_head_t head;
        hammer_fifo_tail_t tail;
        int i;
        int bytes = HAMMER_UNDO_ALIGN;

        bzero(base, HAMMER_BUFSIZE);

        for (i = 0; i < HAMMER_BUFSIZE; i += bytes) {
                head = (void *)((char *)base + i);
                tail = (void *)((char *)head + bytes - sizeof(*tail));

                head->hdr_signature = HAMMER_HEAD_SIGNATURE;
                head->hdr_type = HAMMER_HEAD_TYPE_DUMMY;
                head->hdr_size = bytes;
                head->hdr_seq = seqno++;
                head->hdr_crc = 0;

                tail->tail_signature = HAMMER_TAIL_SIGNATURE;
                tail->tail_type = HAMMER_HEAD_TYPE_DUMMY;
                tail->tail_size = bytes;

                head->hdr_crc = crc32(head, HAMMER_FIFO_HEAD_CRCOFF) ^
                        crc32(head + 1, bytes - sizeof(*head));
        }
}

/*
 * HAMMER version 4+ conversion support.
 *
 * Convert a HAMMER version < 4 UNDO FIFO area to a 4+ UNDO FIFO area.
 * The 4+ UNDO FIFO area is backwards compatible.  The conversion is
 * needed to initialize the sequence space and place headers on the
 * new 512-byte undo boundary.
 */
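/*
 * For example, a 128MB UNDO FIFO contains 128MB / 512 = 262144 alignment
 * slots, so the conversion lays down 262144 DUMMY records with sequence
 * numbers 0 through 262143 and the live sequence resumes at 262144.
 */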
int
hammer_upgrade_undo_4(hammer_transaction_t trans)
{
        hammer_mount_t hmp;
        hammer_volume_t root_volume;
        hammer_blockmap_t undomap;
        hammer_buffer_t buffer = NULL;
        hammer_fifo_head_t head;
        hammer_fifo_tail_t tail;
        hammer_off_t next_offset;
        u_int32_t seqno;
        int error;
        int bytes;

        hmp = trans->hmp;

        root_volume = trans->rootvol;

        /* no undo recursion */
        hammer_lock_ex(&hmp->undo_lock);
        hammer_modify_volume(NULL, root_volume, NULL, 0);

        /*
         * Adjust the in-core undomap and the on-disk undomap.
         */
        next_offset = HAMMER_ZONE_ENCODE(HAMMER_ZONE_UNDO_INDEX, 0);
        undomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
        undomap->next_offset = next_offset;
        undomap->first_offset = next_offset;

        undomap = &root_volume->ondisk->vol0_blockmap[HAMMER_ZONE_UNDO_INDEX];
        undomap->next_offset = next_offset;
        undomap->first_offset = next_offset;

        /*
         * Loop over the entire UNDO space creating DUMMY entries.  Sequence
         * numbers are assigned.
         */
        seqno = 0;
        bytes = HAMMER_UNDO_ALIGN;

        while (next_offset != undomap->alloc_offset) {
                head = hammer_bnew(hmp, next_offset, &error, &buffer);
                if (error)
                        break;
                hammer_modify_buffer(NULL, buffer, NULL, 0);

                head->hdr_signature = HAMMER_HEAD_SIGNATURE;
                head->hdr_type = HAMMER_HEAD_TYPE_DUMMY;
                head->hdr_size = bytes;
                head->hdr_seq = seqno;
                head->hdr_crc = 0;

                tail = (void *)((char *)head + bytes - sizeof(*tail));
                tail->tail_signature = HAMMER_TAIL_SIGNATURE;
                tail->tail_type = HAMMER_HEAD_TYPE_DUMMY;
                tail->tail_size = bytes;

                head->hdr_crc = crc32(head, HAMMER_FIFO_HEAD_CRCOFF) ^
                        crc32(head + 1, bytes - sizeof(*head));
                hammer_modify_buffer_done(buffer);

                hammer_stats_undo += bytes;
                next_offset += HAMMER_UNDO_ALIGN;
                ++seqno;
        }

        /*
         * The sequence number will be the next sequence number to lay down.
         */
        hmp->undo_seqno = seqno;
        kprintf("version upgrade seqno start %08x\n", seqno);

        hammer_modify_volume_done(root_volume);
        hammer_unlock(&hmp->undo_lock);

        if (buffer)
                hammer_rel_buffer(buffer, 0);
        return (error);
}

/*
 * UNDO HISTORY API
 *
 * It is not necessary to lay out an undo record for the same address space
 * multiple times.  Maintain a cache of recent undos.
 */
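/*
 * In this file hammer_generate_undo() enters each range via
 * hammer_enter_undo_history() and skips laying down a duplicate UNDO when
 * EALREADY is returned.  The cache is a fixed pool of HAMMER_MAX_UNDOS
 * entries recycled in LRU order.
 */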

/*
 * Enter an undo into the history.  Return EALREADY if the request completely
 * covers a previous request.
 */
int
hammer_enter_undo_history(hammer_mount_t hmp, hammer_off_t offset, int bytes)
{
        hammer_undo_t node;
        hammer_undo_t onode;

        node = RB_LOOKUP(hammer_und_rb_tree, &hmp->rb_undo_root, offset);
        if (node) {
                TAILQ_REMOVE(&hmp->undo_lru_list, node, lru_entry);
                TAILQ_INSERT_TAIL(&hmp->undo_lru_list, node, lru_entry);
                if (bytes <= node->bytes)
                        return(EALREADY);
                node->bytes = bytes;
                return(0);
        }
        if (hmp->undo_alloc != HAMMER_MAX_UNDOS) {
                node = &hmp->undos[hmp->undo_alloc++];
        } else {
                node = TAILQ_FIRST(&hmp->undo_lru_list);
                TAILQ_REMOVE(&hmp->undo_lru_list, node, lru_entry);
                RB_REMOVE(hammer_und_rb_tree, &hmp->rb_undo_root, node);
        }
        node->offset = offset;
        node->bytes = bytes;
        TAILQ_INSERT_TAIL(&hmp->undo_lru_list, node, lru_entry);
        onode = RB_INSERT(hammer_und_rb_tree, &hmp->rb_undo_root, node);
        KKASSERT(onode == NULL);
        return(0);
}

void
hammer_clear_undo_history(hammer_mount_t hmp)
{
        RB_INIT(&hmp->rb_undo_root);
        TAILQ_INIT(&hmp->undo_lru_list);
        hmp->undo_alloc = 0;
}

/*
 * Return how much of the undo FIFO has been used.
 *
 * The calculation includes undo FIFO space still reserved from a previous
 * flush (because it will still be run on recovery if a crash occurs and
 * we can't overwrite it yet).
 */
int64_t
hammer_undo_used(hammer_transaction_t trans)
{
        hammer_blockmap_t cundomap;
        hammer_blockmap_t dundomap;
        int64_t max_bytes;
        int64_t bytes;

        cundomap = &trans->hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
        dundomap = &trans->rootvol->ondisk->
                        vol0_blockmap[HAMMER_ZONE_UNDO_INDEX];

        if (dundomap->first_offset <= cundomap->next_offset) {
                bytes = cundomap->next_offset - dundomap->first_offset;
        } else {
                bytes = cundomap->alloc_offset - dundomap->first_offset +
                        (cundomap->next_offset & HAMMER_OFF_LONG_MASK);
        }
        max_bytes = cundomap->alloc_offset & HAMMER_OFF_SHORT_MASK;
        KKASSERT(bytes <= max_bytes);
        return(bytes);
}
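/*
 * Conceptually the result is (next_offset - first_offset) modulo the FIFO
 * size; the else-branch above handles the case where next_offset has
 * wrapped around below first_offset.
 */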

/*
 * Return how much of the undo FIFO is available for new records.
 */
int64_t
hammer_undo_space(hammer_transaction_t trans)
{
        hammer_blockmap_t rootmap;
        int64_t max_bytes;

        rootmap = &trans->hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
        max_bytes = rootmap->alloc_offset & HAMMER_OFF_SHORT_MASK;
        return(max_bytes - hammer_undo_used(trans));
}

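/*
 * Return the total size of the undo FIFO, in bytes.
 */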
int64_t
hammer_undo_max(hammer_mount_t hmp)
{
        hammer_blockmap_t rootmap;
        int64_t max_bytes;

        rootmap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
        max_bytes = rootmap->alloc_offset & HAMMER_OFF_SHORT_MASK;

        return(max_bytes);
}

/*
 * Returns 1 if the undo buffer should be reclaimed on release.  The
 * only undo buffer we do NOT want to reclaim is the one at the current
 * append offset.
 */
int
hammer_undo_reclaim(hammer_io_t io)
{
        hammer_blockmap_t undomap;
        hammer_off_t next_offset;

        undomap = &io->hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
        next_offset = undomap->next_offset & ~HAMMER_BUFMASK64;
        if (((struct hammer_buffer *)io)->zoneX_offset == next_offset)
                return(0);
        return(1);
}

static int
hammer_und_rb_compare(hammer_undo_t node1, hammer_undo_t node2)
{
        if (node1->offset < node2->offset)
                return(-1);
        if (node1->offset > node2->offset)
                return(1);
        return(0);
}