/*
 * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_blockmap.c,v 1.27 2008/07/31 22:30:33 dillon Exp $
 */

/*
 * HAMMER blockmap
 */
#include "hammer.h"

static int hammer_res_rb_compare(hammer_reserve_t res1, hammer_reserve_t res2);
static void hammer_reserve_setdelay(hammer_mount_t hmp,
                                    hammer_off_t base_offset,
                                    struct hammer_blockmap_layer2 *layer2);


/*
 * Reserved big-blocks red-black tree support
 */
RB_GENERATE2(hammer_res_rb_tree, hammer_reserve, rb_node,
             hammer_res_rb_compare, hammer_off_t, zone_offset);

static int
hammer_res_rb_compare(hammer_reserve_t res1, hammer_reserve_t res2)
{
        if (res1->zone_offset < res2->zone_offset)
                return(-1);
        if (res1->zone_offset > res2->zone_offset)
                return(1);
        return(0);
}
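
/*
 * Note: the RB_GENERATE2() form used above also emits an RB_LOOKUP()
 * fast path keyed directly on the zone_offset field, which is what the
 * RB_LOOKUP(hammer_res_rb_tree, ...) calls in this file rely on.
 */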

/*
 * Allocate bytes from a zone
 */
hammer_off_t
hammer_blockmap_alloc(hammer_transaction_t trans, int zone,
                      int bytes, int *errorp)
{
        hammer_mount_t hmp;
        hammer_volume_t root_volume;
        hammer_blockmap_t blockmap;
        hammer_blockmap_t freemap;
        hammer_reserve_t resv;
        struct hammer_blockmap_layer1 *layer1;
        struct hammer_blockmap_layer2 *layer2;
        hammer_buffer_t buffer1 = NULL;
        hammer_buffer_t buffer2 = NULL;
        hammer_buffer_t buffer3 = NULL;
        hammer_off_t tmp_offset;
        hammer_off_t next_offset;
        hammer_off_t result_offset;
        hammer_off_t layer1_offset;
        hammer_off_t layer2_offset;
        hammer_off_t base_off;
        int loops = 0;
        int offset;             /* offset within big-block */

        hmp = trans->hmp;

        /*
         * Deal with alignment and buffer-boundary issues.
         *
         * Be careful, certain primary alignments are used below to allocate
         * new blockmap blocks.
         */
        bytes = (bytes + 15) & ~15;
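        /*
         * (e.g. a 100-byte request rounds up to 112 here; all blockmap
         * allocations are made in 16-byte granules)
         */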
        KKASSERT(bytes > 0 && bytes <= HAMMER_XBUFSIZE);
        KKASSERT(zone >= HAMMER_ZONE_BTREE_INDEX && zone < HAMMER_MAX_ZONES);

        /*
         * Setup
         */
        root_volume = trans->rootvol;
        *errorp = 0;
        blockmap = &hmp->blockmap[zone];
        freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];
        KKASSERT(HAMMER_ZONE_DECODE(blockmap->next_offset) == zone);

        next_offset = blockmap->next_offset;
again:
        /*
         * Check for wrap
         */
        if (next_offset == HAMMER_ZONE_ENCODE(zone + 1, 0)) {
                if (++loops == 2) {
                        result_offset = 0;
                        *errorp = ENOSPC;
                        goto failed;
                }
                next_offset = HAMMER_ZONE_ENCODE(zone, 0);
        }

        /*
         * The allocation request may not cross a buffer boundary.  Special
         * large allocations must not cross a large-block boundary.
         */
        tmp_offset = next_offset + bytes - 1;
        if (bytes <= HAMMER_BUFSIZE) {
                if ((next_offset ^ tmp_offset) & ~HAMMER_BUFMASK64) {
                        next_offset = tmp_offset & ~HAMMER_BUFMASK64;
                        goto again;
                }
        } else {
                if ((next_offset ^ tmp_offset) & ~HAMMER_LARGEBLOCK_MASK64) {
                        next_offset = tmp_offset & ~HAMMER_LARGEBLOCK_MASK64;
                        goto again;
                }
        }
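        /*
         * (the XOR tests above are non-zero exactly when the first and last
         * byte of the request land in different buffers/big-blocks, in which
         * case next_offset was bumped to the next boundary)
         */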
        offset = (int)next_offset & HAMMER_LARGEBLOCK_MASK;

        /*
         * Dive layer 1.
         */
        layer1_offset = freemap->phys_offset +
                        HAMMER_BLOCKMAP_LAYER1_OFFSET(next_offset);
        layer1 = hammer_bread(hmp, layer1_offset, errorp, &buffer1);
        if (*errorp) {
                result_offset = 0;
                goto failed;
        }

        /*
         * Check CRC.
         */
        if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE)) {
                hammer_lock_ex(&hmp->blkmap_lock);
                if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE))
                        panic("CRC FAILED: LAYER1");
                hammer_unlock(&hmp->blkmap_lock);
        }

        /*
         * If we are at a big-block boundary and layer1 indicates no
         * free big-blocks, then we cannot allocate a new big-block in
         * layer2; skip to the next layer1 entry.
         */
        if (offset == 0 && layer1->blocks_free == 0) {
                next_offset = (next_offset + HAMMER_BLOCKMAP_LAYER2) &
                              ~HAMMER_BLOCKMAP_LAYER2_MASK;
                goto again;
        }
        KKASSERT(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);

        /*
         * Dive layer 2, each entry represents a large-block.
         */
        layer2_offset = layer1->phys_offset +
                        HAMMER_BLOCKMAP_LAYER2_OFFSET(next_offset);
        layer2 = hammer_bread(hmp, layer2_offset, errorp, &buffer2);
        if (*errorp) {
                result_offset = 0;
                goto failed;
        }

        /*
         * Check CRC.  This can race another thread that holds the lock
         * and is in the middle of modifying layer2.
         */
        if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE)) {
                hammer_lock_ex(&hmp->blkmap_lock);
                if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE))
                        panic("CRC FAILED: LAYER2");
                hammer_unlock(&hmp->blkmap_lock);
        }

        /*
         * Skip the layer if the zone is owned by someone other than us.
         */
        if (layer2->zone && layer2->zone != zone) {
                next_offset += (HAMMER_LARGEBLOCK_SIZE - offset);
                goto again;
        }
        if (offset < layer2->append_off) {
                next_offset += layer2->append_off - offset;
                goto again;
        }

        /*
         * We need the lock from this point on.  We have to re-check zone
         * ownership after acquiring the lock and also check for reservations.
         */
        hammer_lock_ex(&hmp->blkmap_lock);

        if (layer2->zone && layer2->zone != zone) {
                hammer_unlock(&hmp->blkmap_lock);
                next_offset += (HAMMER_LARGEBLOCK_SIZE - offset);
                goto again;
        }
        if (offset < layer2->append_off) {
                hammer_unlock(&hmp->blkmap_lock);
                next_offset += layer2->append_off - offset;
                goto again;
        }

        /*
         * The bigblock might be reserved by another zone.  If it is reserved
         * by our zone we may have to move next_offset past the append_off.
         */
        base_off = (next_offset &
                    (~HAMMER_LARGEBLOCK_MASK64 & ~HAMMER_OFF_ZONE_MASK)) |
                    HAMMER_ZONE_RAW_BUFFER;
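        /*
         * (base_off strips the intra-big-block bits and the zone bits from
         * next_offset and re-encodes the result as a zone-2 raw-buffer
         * address; reservations are keyed on these zone-2 big-block bases)
         */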
        resv = RB_LOOKUP(hammer_res_rb_tree, &hmp->rb_resv_root, base_off);
        if (resv) {
                if (resv->zone != zone) {
                        hammer_unlock(&hmp->blkmap_lock);
                        next_offset = (next_offset + HAMMER_LARGEBLOCK_SIZE) &
                                      ~HAMMER_LARGEBLOCK_MASK64;
                        goto again;
                }
                if (offset < resv->append_off) {
                        hammer_unlock(&hmp->blkmap_lock);
                        next_offset += resv->append_off - offset;
                        goto again;
                }
        }

        /*
         * Ok, we can allocate out of this layer2 big-block.  Assume ownership
         * of the layer for real.  At this point we've validated any
         * reservation that might exist and can just ignore resv.
         */
        if (layer2->zone == 0) {
                /*
                 * Assign the bigblock to our zone
                 */
                hammer_modify_buffer(trans, buffer1,
                                     layer1, sizeof(*layer1));
                --layer1->blocks_free;
                layer1->layer1_crc = crc32(layer1,
                                           HAMMER_LAYER1_CRCSIZE);
                hammer_modify_buffer_done(buffer1);
                hammer_modify_buffer(trans, buffer2,
                                     layer2, sizeof(*layer2));
                layer2->zone = zone;
                KKASSERT(layer2->bytes_free == HAMMER_LARGEBLOCK_SIZE);
                KKASSERT(layer2->append_off == 0);
                hammer_modify_volume_field(trans, trans->rootvol,
                                           vol0_stat_freebigblocks);
                --root_volume->ondisk->vol0_stat_freebigblocks;
                hmp->copy_stat_freebigblocks =
                        root_volume->ondisk->vol0_stat_freebigblocks;
                hammer_modify_volume_done(trans->rootvol);
        } else {
                hammer_modify_buffer(trans, buffer2,
                                     layer2, sizeof(*layer2));
        }
        KKASSERT(layer2->zone == zone);

        layer2->bytes_free -= bytes;
        KKASSERT(layer2->append_off <= offset);
        layer2->append_off = offset + bytes;
        layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);
        hammer_modify_buffer_done(buffer2);
        KKASSERT(layer2->bytes_free >= 0);

        if (resv) {
                KKASSERT(resv->append_off <= offset);
                resv->append_off = offset + bytes;
                resv->flags &= ~HAMMER_RESF_LAYER2FREE;
        }

        /*
         * If we are allocating from the base of a new buffer we can avoid
         * a disk read by calling hammer_bnew_ext().
         */
        if ((next_offset & HAMMER_BUFMASK) == 0) {
                hammer_bnew_ext(trans->hmp, next_offset, bytes,
                                errorp, &buffer3);
        }
        result_offset = next_offset;

        /*
         * Process allocated result_offset
         */
        hammer_modify_volume(NULL, root_volume, NULL, 0);
        blockmap->next_offset = next_offset + bytes;
        hammer_modify_volume_done(root_volume);
        hammer_unlock(&hmp->blkmap_lock);
failed:

        /*
         * Cleanup
         */
        if (buffer1)
                hammer_rel_buffer(buffer1, 0);
        if (buffer2)
                hammer_rel_buffer(buffer2, 0);
        if (buffer3)
                hammer_rel_buffer(buffer3, 0);

        return(result_offset);
}

/*
 * Frontend function - Reserve bytes in a zone.
 *
 * This code reserves bytes out of a blockmap without committing to any
 * meta-data modifications, allowing the front-end to directly issue disk
 * write I/O for large blocks of data.
 *
 * The backend later finalizes the reservation with hammer_blockmap_finalize()
 * upon committing the related record.
 */
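
/*
 * Illustrative sequence (a sketch, not a verbatim caller; the local
 * variable names are hypothetical):
 *
 *      resv = hammer_blockmap_reserve(hmp, zone, bytes, &zone_off, &error);
 *      ... frontend issues direct write I/O against zone_off ...
 *      hammer_blockmap_finalize(trans, resv, zone_off, bytes);
 *      hammer_blockmap_reserve_complete(hmp, resv);
 */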
hammer_reserve_t
hammer_blockmap_reserve(hammer_mount_t hmp, int zone, int bytes,
                        hammer_off_t *zone_offp, int *errorp)
{
        hammer_volume_t root_volume;
        hammer_blockmap_t blockmap;
        hammer_blockmap_t freemap;
        struct hammer_blockmap_layer1 *layer1;
        struct hammer_blockmap_layer2 *layer2;
        hammer_buffer_t buffer1 = NULL;
        hammer_buffer_t buffer2 = NULL;
        hammer_buffer_t buffer3 = NULL;
        hammer_off_t tmp_offset;
        hammer_off_t next_offset;
        hammer_off_t layer1_offset;
        hammer_off_t layer2_offset;
        hammer_off_t base_off;
        hammer_reserve_t resv;
        hammer_reserve_t resx;
        int loops = 0;
        int offset;

        /*
         * Setup
         */
        KKASSERT(zone >= HAMMER_ZONE_BTREE_INDEX && zone < HAMMER_MAX_ZONES);
        root_volume = hammer_get_root_volume(hmp, errorp);
        if (*errorp)
                return(NULL);
        blockmap = &hmp->blockmap[zone];
        freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];
        KKASSERT(HAMMER_ZONE_DECODE(blockmap->next_offset) == zone);

        /*
         * Deal with alignment and buffer-boundary issues.
         *
         * Be careful, certain primary alignments are used below to allocate
         * new blockmap blocks.
         */
        bytes = (bytes + 15) & ~15;
        KKASSERT(bytes > 0 && bytes <= HAMMER_XBUFSIZE);

        next_offset = blockmap->next_offset;
again:
        resv = NULL;
        /*
         * Check for wrap
         */
        if (next_offset == HAMMER_ZONE_ENCODE(zone + 1, 0)) {
                if (++loops == 2) {
                        *errorp = ENOSPC;
                        goto failed;
                }
                next_offset = HAMMER_ZONE_ENCODE(zone, 0);
        }

        /*
         * The allocation request may not cross a buffer boundary.  Special
         * large allocations must not cross a large-block boundary.
         */
        tmp_offset = next_offset + bytes - 1;
        if (bytes <= HAMMER_BUFSIZE) {
                if ((next_offset ^ tmp_offset) & ~HAMMER_BUFMASK64) {
                        next_offset = tmp_offset & ~HAMMER_BUFMASK64;
                        goto again;
                }
        } else {
                if ((next_offset ^ tmp_offset) & ~HAMMER_LARGEBLOCK_MASK64) {
                        next_offset = tmp_offset & ~HAMMER_LARGEBLOCK_MASK64;
                        goto again;
                }
        }
        offset = (int)next_offset & HAMMER_LARGEBLOCK_MASK;

        /*
         * Dive layer 1.
         */
        layer1_offset = freemap->phys_offset +
                        HAMMER_BLOCKMAP_LAYER1_OFFSET(next_offset);
        layer1 = hammer_bread(hmp, layer1_offset, errorp, &buffer1);
        if (*errorp)
                goto failed;

        /*
         * Check CRC.
         */
        if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE)) {
                hammer_lock_ex(&hmp->blkmap_lock);
                if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE))
                        panic("CRC FAILED: LAYER1");
                hammer_unlock(&hmp->blkmap_lock);
        }

        /*
         * If we are at a big-block boundary and layer1 indicates no
         * free big-blocks, then we cannot allocate a new big-block in
         * layer2; skip to the next layer1 entry.
         */
        if ((next_offset & HAMMER_LARGEBLOCK_MASK) == 0 &&
            layer1->blocks_free == 0) {
                next_offset = (next_offset + HAMMER_BLOCKMAP_LAYER2) &
                              ~HAMMER_BLOCKMAP_LAYER2_MASK;
                goto again;
        }
        KKASSERT(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);

        /*
         * Dive layer 2, each entry represents a large-block.
         */
        layer2_offset = layer1->phys_offset +
                        HAMMER_BLOCKMAP_LAYER2_OFFSET(next_offset);
        layer2 = hammer_bread(hmp, layer2_offset, errorp, &buffer2);
        if (*errorp)
                goto failed;

        /*
         * Check CRC if not allocating into uninitialized space (which we
         * aren't when reserving space).
         */
        if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE)) {
                hammer_lock_ex(&hmp->blkmap_lock);
                if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE))
                        panic("CRC FAILED: LAYER2");
                hammer_unlock(&hmp->blkmap_lock);
        }

        /*
         * Skip the layer if the zone is owned by someone other than us.
         */
        if (layer2->zone && layer2->zone != zone) {
                next_offset += (HAMMER_LARGEBLOCK_SIZE - offset);
                goto again;
        }
        if (offset < layer2->append_off) {
                next_offset += layer2->append_off - offset;
                goto again;
        }

        /*
         * We need the lock from this point on.  We have to re-check zone
         * ownership after acquiring the lock and also check for reservations.
         */
        hammer_lock_ex(&hmp->blkmap_lock);

        if (layer2->zone && layer2->zone != zone) {
                hammer_unlock(&hmp->blkmap_lock);
                next_offset += (HAMMER_LARGEBLOCK_SIZE - offset);
                goto again;
        }
        if (offset < layer2->append_off) {
                hammer_unlock(&hmp->blkmap_lock);
                next_offset += layer2->append_off - offset;
                goto again;
        }

        /*
         * The bigblock might be reserved by another zone.  If it is reserved
         * by our zone we may have to move next_offset past the append_off.
         */
        base_off = (next_offset &
                    (~HAMMER_LARGEBLOCK_MASK64 & ~HAMMER_OFF_ZONE_MASK)) |
                    HAMMER_ZONE_RAW_BUFFER;
        resv = RB_LOOKUP(hammer_res_rb_tree, &hmp->rb_resv_root, base_off);
        if (resv) {
                if (resv->zone != zone) {
                        hammer_unlock(&hmp->blkmap_lock);
                        next_offset = (next_offset + HAMMER_LARGEBLOCK_SIZE) &
                                      ~HAMMER_LARGEBLOCK_MASK64;
                        goto again;
                }
                if (offset < resv->append_off) {
                        hammer_unlock(&hmp->blkmap_lock);
                        next_offset += resv->append_off - offset;
                        goto again;
                }
                ++resv->refs;
                resx = NULL;
        } else {
                resx = kmalloc(sizeof(*resv), hmp->m_misc,
                               M_WAITOK | M_ZERO | M_USE_RESERVE);
                resx->refs = 1;
                resx->zone = zone;
                resx->zone_offset = base_off;
                if (layer2->bytes_free == HAMMER_LARGEBLOCK_SIZE)
                        resx->flags |= HAMMER_RESF_LAYER2FREE;
                resv = RB_INSERT(hammer_res_rb_tree, &hmp->rb_resv_root, resx);
                KKASSERT(resv == NULL);
                resv = resx;
                ++hammer_count_reservations;
        }
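        /*
         * (append_off is the big-block's high-water mark; recording it in
         * the reservation keeps later reservations and allocations in this
         * big-block appending behind us)
         */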
        resv->append_off = offset + bytes;

        /*
         * If we are not reserving a whole buffer but are at the start of
         * a new block, call hammer_bnew() to avoid a disk read.
         *
         * If we are reserving a whole buffer (or more), the caller will
         * probably use a direct read, so do nothing.
         */
        if (bytes < HAMMER_BUFSIZE && (next_offset & HAMMER_BUFMASK) == 0) {
                hammer_bnew(hmp, next_offset, errorp, &buffer3);
        }

        /*
         * Adjust our iterator (blockmap->next_offset).  The layer1 and
         * layer2 space beyond next_offset is uninitialized.  next_offset
         * must be big-block aligned.
         */
        blockmap->next_offset = next_offset + bytes;
        hammer_unlock(&hmp->blkmap_lock);

failed:
        if (buffer1)
                hammer_rel_buffer(buffer1, 0);
        if (buffer2)
                hammer_rel_buffer(buffer2, 0);
        if (buffer3)
                hammer_rel_buffer(buffer3, 0);
        hammer_rel_volume(root_volume, 0);
        *zone_offp = next_offset;

        return(resv);
}

#if 0
/*
 * Backend function - undo a portion of a reservation.
 */
void
hammer_blockmap_reserve_undo(hammer_mount_t hmp, hammer_reserve_t resv,
                         hammer_off_t zone_offset, int bytes)
{
        resv->bytes_freed += bytes;
}

#endif

/*
 * Dereference a reservation structure.  Upon the final release the
 * underlying big-block is checked and if it is entirely free we delete
 * any related HAMMER buffers to avoid potential conflicts with future
 * reuse of the big-block.
 */
void
hammer_blockmap_reserve_complete(hammer_mount_t hmp, hammer_reserve_t resv)
{
        hammer_off_t base_offset;

        KKASSERT(resv->refs > 0);
        KKASSERT((resv->zone_offset & HAMMER_OFF_ZONE_MASK) ==
                 HAMMER_ZONE_RAW_BUFFER);

        /*
         * Setting append_off to the max prevents any new allocations
         * from occurring while we are trying to dispose of the reservation,
         * allowing us to safely delete any related HAMMER buffers.
         */
        if (resv->refs == 1 && (resv->flags & HAMMER_RESF_LAYER2FREE)) {
                resv->append_off = HAMMER_LARGEBLOCK_SIZE;
                resv->flags &= ~HAMMER_RESF_LAYER2FREE;
                base_offset = resv->zone_offset & ~HAMMER_OFF_ZONE_MASK;
                base_offset = HAMMER_ZONE_ENCODE(resv->zone, base_offset);
                hammer_del_buffers(hmp, base_offset, resv->zone_offset,
                                   HAMMER_LARGEBLOCK_SIZE);
        }
        if (--resv->refs == 0) {
                KKASSERT((resv->flags & HAMMER_RESF_ONDELAY) == 0);
                RB_REMOVE(hammer_res_rb_tree, &hmp->rb_resv_root, resv);
                kfree(resv, hmp->m_misc);
                --hammer_count_reservations;
        }
}

/*
 * Prevent a potentially free big-block from being reused until after
 * the related flushes have completely cycled; otherwise crash recovery
 * could resurrect a data block that was already reused and overwritten.
 *
 * If the layer2 entry is still completely free when the reservation is
 * created, the reservation is flagged HAMMER_RESF_LAYER2FREE so the
 * big-block's buffers can be invalidated on final release.
 */
static void
hammer_reserve_setdelay(hammer_mount_t hmp, hammer_off_t base_offset,
                        struct hammer_blockmap_layer2 *layer2)
{
        hammer_reserve_t resv;

        /*
         * Allocate the reservation if necessary.
         */
again:
        resv = RB_LOOKUP(hammer_res_rb_tree, &hmp->rb_resv_root, base_offset);
        if (resv == NULL) {
                resv = kmalloc(sizeof(*resv), hmp->m_misc,
                               M_WAITOK | M_ZERO | M_USE_RESERVE);
                resv->zone_offset = base_offset;
                resv->refs = 0;
                /* XXX inherent lock until refs bumped later on */
                if (layer2->bytes_free == HAMMER_LARGEBLOCK_SIZE)
                        resv->flags |= HAMMER_RESF_LAYER2FREE;
                if (RB_INSERT(hammer_res_rb_tree, &hmp->rb_resv_root, resv)) {
                        kfree(resv, hmp->m_misc);
                        goto again;
                }
                ++hammer_count_reservations;
        }

        /*
         * Enter the reservation on the on-delay list, or move it if it
         * is already on the list.
         */
        if (resv->flags & HAMMER_RESF_ONDELAY) {
                TAILQ_REMOVE(&hmp->delay_list, resv, delay_entry);
                resv->flush_group = hmp->flusher.next + 1;
                TAILQ_INSERT_TAIL(&hmp->delay_list, resv, delay_entry);
        } else {
                ++resv->refs;
                ++hmp->rsv_fromdelay;
                resv->flags |= HAMMER_RESF_ONDELAY;
                resv->flush_group = hmp->flusher.next + 1;
                TAILQ_INSERT_TAIL(&hmp->delay_list, resv, delay_entry);
        }
}

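/*
 * Remove a reservation from the flush delay list and drop the reference
 * the list held on it via hammer_blockmap_reserve_complete().
 */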
void
hammer_reserve_clrdelay(hammer_mount_t hmp, hammer_reserve_t resv)
{
        KKASSERT(resv->flags & HAMMER_RESF_ONDELAY);
        resv->flags &= ~HAMMER_RESF_ONDELAY;
        TAILQ_REMOVE(&hmp->delay_list, resv, delay_entry);
        --hmp->rsv_fromdelay;
        hammer_blockmap_reserve_complete(hmp, resv);
}

/*
 * Backend function - free (offset, bytes) in a zone.
 *
 * XXX error return
 */
void
hammer_blockmap_free(hammer_transaction_t trans,
                     hammer_off_t zone_offset, int bytes)
{
        hammer_mount_t hmp;
        hammer_volume_t root_volume;
        hammer_blockmap_t blockmap;
        hammer_blockmap_t freemap;
        struct hammer_blockmap_layer1 *layer1;
        struct hammer_blockmap_layer2 *layer2;
        hammer_buffer_t buffer1 = NULL;
        hammer_buffer_t buffer2 = NULL;
        hammer_off_t layer1_offset;
        hammer_off_t layer2_offset;
        hammer_off_t base_off;
        int error;
        int zone;

        if (bytes == 0)
                return;
        hmp = trans->hmp;

        /*
         * Alignment
         */
        bytes = (bytes + 15) & ~15;
        KKASSERT(bytes <= HAMMER_XBUFSIZE);
        KKASSERT(((zone_offset ^ (zone_offset + (bytes - 1))) &
                  ~HAMMER_LARGEBLOCK_MASK64) == 0);

        /*
         * Basic zone validation & locking
         */
        zone = HAMMER_ZONE_DECODE(zone_offset);
        KKASSERT(zone >= HAMMER_ZONE_BTREE_INDEX && zone < HAMMER_MAX_ZONES);
        root_volume = trans->rootvol;
        error = 0;

        blockmap = &hmp->blockmap[zone];
        freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];

        /*
         * Dive layer 1.
         */
        layer1_offset = freemap->phys_offset +
                        HAMMER_BLOCKMAP_LAYER1_OFFSET(zone_offset);
        layer1 = hammer_bread(hmp, layer1_offset, &error, &buffer1);
        if (error)
                goto failed;
        KKASSERT(layer1->phys_offset &&
                 layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
        if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE)) {
                hammer_lock_ex(&hmp->blkmap_lock);
                if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE))
                        panic("CRC FAILED: LAYER1");
                hammer_unlock(&hmp->blkmap_lock);
        }

        /*
         * Dive layer 2, each entry represents a large-block.
         */
        layer2_offset = layer1->phys_offset +
                        HAMMER_BLOCKMAP_LAYER2_OFFSET(zone_offset);
        layer2 = hammer_bread(hmp, layer2_offset, &error, &buffer2);
        if (error)
                goto failed;
        if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE)) {
                hammer_lock_ex(&hmp->blkmap_lock);
                if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE))
                        panic("CRC FAILED: LAYER2");
                hammer_unlock(&hmp->blkmap_lock);
        }

        hammer_lock_ex(&hmp->blkmap_lock);

        hammer_modify_buffer(trans, buffer2, layer2, sizeof(*layer2));

        /*
         * Free space previously allocated via blockmap_alloc().
         */
        KKASSERT(layer2->zone == zone);
        layer2->bytes_free += bytes;
        KKASSERT(layer2->bytes_free <= HAMMER_LARGEBLOCK_SIZE);

        /*
         * If a big-block becomes entirely free we must create a covering
         * reservation to prevent premature reuse.  Note, however, that
         * the big-block and/or reservation may still have an append_off
         * that allows further (non-reused) allocations.
         *
         * Once the reservation has been made we re-check layer2 and if
         * the big-block is still entirely free we reset the layer2 entry.
         * The reservation will prevent premature reuse.
         *
         * NOTE: hammer_buffer's are only invalidated when the reservation
         * is completed, if the layer2 entry is still completely free at
         * that time.  Any allocations from the reservation that may have
         * occurred in the meantime, or active references on the reservation
         * from new pending allocations, will prevent the invalidation from
         * occurring.
         */
        if (layer2->bytes_free == HAMMER_LARGEBLOCK_SIZE) {
                base_off = (zone_offset &
                            (~HAMMER_LARGEBLOCK_MASK64 & ~HAMMER_OFF_ZONE_MASK)) |
                            HAMMER_ZONE_RAW_BUFFER;

                hammer_reserve_setdelay(hmp, base_off, layer2);
                if (layer2->bytes_free == HAMMER_LARGEBLOCK_SIZE) {
                        layer2->zone = 0;
                        layer2->append_off = 0;
                        hammer_modify_buffer(trans, buffer1,
                                             layer1, sizeof(*layer1));
                        ++layer1->blocks_free;
                        layer1->layer1_crc = crc32(layer1,
                                                   HAMMER_LAYER1_CRCSIZE);
                        hammer_modify_buffer_done(buffer1);
                        hammer_modify_volume_field(trans,
                                        trans->rootvol,
                                        vol0_stat_freebigblocks);
                        ++root_volume->ondisk->vol0_stat_freebigblocks;
                        hmp->copy_stat_freebigblocks =
                           root_volume->ondisk->vol0_stat_freebigblocks;
                        hammer_modify_volume_done(trans->rootvol);
                }
        }
        layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);
        hammer_modify_buffer_done(buffer2);
        hammer_unlock(&hmp->blkmap_lock);

failed:
        if (buffer1)
                hammer_rel_buffer(buffer1, 0);
        if (buffer2)
                hammer_rel_buffer(buffer2, 0);
}
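
/*
 * (note: per the XXX above, a hammer_bread() error aborts the free and
 * the space is simply not recovered; no error is returned to the caller)
 */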

/*
 * Backend function - finalize (offset, bytes) in a zone.
 *
 * Allocate space that was previously reserved by the frontend.
 */
int
hammer_blockmap_finalize(hammer_transaction_t trans,
                         hammer_reserve_t resv,
                         hammer_off_t zone_offset, int bytes)
{
        hammer_mount_t hmp;
        hammer_volume_t root_volume;
        hammer_blockmap_t blockmap;
        hammer_blockmap_t freemap;
        struct hammer_blockmap_layer1 *layer1;
        struct hammer_blockmap_layer2 *layer2;
        hammer_buffer_t buffer1 = NULL;
        hammer_buffer_t buffer2 = NULL;
        hammer_off_t layer1_offset;
        hammer_off_t layer2_offset;
        int error;
        int zone;
        int offset;

        if (bytes == 0)
                return(0);
        hmp = trans->hmp;

        /*
         * Alignment
         */
        bytes = (bytes + 15) & ~15;
        KKASSERT(bytes <= HAMMER_XBUFSIZE);

        /*
         * Basic zone validation & locking
         */
        zone = HAMMER_ZONE_DECODE(zone_offset);
        KKASSERT(zone >= HAMMER_ZONE_BTREE_INDEX && zone < HAMMER_MAX_ZONES);
        root_volume = trans->rootvol;
        error = 0;

        blockmap = &hmp->blockmap[zone];
        freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];

        /*
         * Dive layer 1.
         */
        layer1_offset = freemap->phys_offset +
                        HAMMER_BLOCKMAP_LAYER1_OFFSET(zone_offset);
        layer1 = hammer_bread(hmp, layer1_offset, &error, &buffer1);
        if (error)
                goto failed;
        KKASSERT(layer1->phys_offset &&
                 layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
        if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE)) {
                hammer_lock_ex(&hmp->blkmap_lock);
                if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE))
                        panic("CRC FAILED: LAYER1");
                hammer_unlock(&hmp->blkmap_lock);
        }

        /*
         * Dive layer 2, each entry represents a large-block.
         */
        layer2_offset = layer1->phys_offset +
                        HAMMER_BLOCKMAP_LAYER2_OFFSET(zone_offset);
        layer2 = hammer_bread(hmp, layer2_offset, &error, &buffer2);
        if (error)
                goto failed;
        if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE)) {
                hammer_lock_ex(&hmp->blkmap_lock);
                if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE))
                        panic("CRC FAILED: LAYER2");
                hammer_unlock(&hmp->blkmap_lock);
        }

        hammer_lock_ex(&hmp->blkmap_lock);

        hammer_modify_buffer(trans, buffer2, layer2, sizeof(*layer2));

        /*
         * Finalize some or all of the space covered by a current
         * reservation.  An allocation in the same layer may have
         * already assigned ownership.
         */
        if (layer2->zone == 0) {
                hammer_modify_buffer(trans, buffer1,
                                     layer1, sizeof(*layer1));
                --layer1->blocks_free;
                layer1->layer1_crc = crc32(layer1,
                                           HAMMER_LAYER1_CRCSIZE);
                hammer_modify_buffer_done(buffer1);
                layer2->zone = zone;
                KKASSERT(layer2->bytes_free == HAMMER_LARGEBLOCK_SIZE);
                KKASSERT(layer2->append_off == 0);
                hammer_modify_volume_field(trans,
                                trans->rootvol,
                                vol0_stat_freebigblocks);
                --root_volume->ondisk->vol0_stat_freebigblocks;
                hmp->copy_stat_freebigblocks =
                   root_volume->ondisk->vol0_stat_freebigblocks;
                hammer_modify_volume_done(trans->rootvol);
        }
        if (layer2->zone != zone)
                kprintf("layer2 zone mismatch %d %d\n", layer2->zone, zone);
        KKASSERT(layer2->zone == zone);
        layer2->bytes_free -= bytes;
        if (resv)
                resv->flags &= ~HAMMER_RESF_LAYER2FREE;

        /*
         * Finalizations can occur out of order, or combined with allocations.
         * append_off must be set to the highest allocated offset.
         */
        offset = ((int)zone_offset & HAMMER_LARGEBLOCK_MASK) + bytes;
        if (layer2->append_off < offset)
                layer2->append_off = offset;

        layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);
        hammer_modify_buffer_done(buffer2);
        hammer_unlock(&hmp->blkmap_lock);

failed:
        if (buffer1)
                hammer_rel_buffer(buffer1, 0);
        if (buffer2)
                hammer_rel_buffer(buffer2, 0);
        return(error);
}

/*
 * Return the number of free bytes in the big-block containing the
 * specified blockmap offset.
 */
int
hammer_blockmap_getfree(hammer_mount_t hmp, hammer_off_t zone_offset,
                        int *curp, int *errorp)
{
        hammer_volume_t root_volume;
        hammer_blockmap_t blockmap;
        hammer_blockmap_t freemap;
        struct hammer_blockmap_layer1 *layer1;
        struct hammer_blockmap_layer2 *layer2;
        hammer_buffer_t buffer = NULL;
        hammer_off_t layer1_offset;
        hammer_off_t layer2_offset;
        int bytes;
        int zone;

        zone = HAMMER_ZONE_DECODE(zone_offset);
        KKASSERT(zone >= HAMMER_ZONE_BTREE_INDEX && zone < HAMMER_MAX_ZONES);
        root_volume = hammer_get_root_volume(hmp, errorp);
        if (*errorp) {
                *curp = 0;
                return(0);
        }
        blockmap = &hmp->blockmap[zone];
        freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];

        /*
         * Dive layer 1.
         */
        layer1_offset = freemap->phys_offset +
                        HAMMER_BLOCKMAP_LAYER1_OFFSET(zone_offset);
        layer1 = hammer_bread(hmp, layer1_offset, errorp, &buffer);
        if (*errorp) {
                bytes = 0;
                goto failed;
        }
        KKASSERT(layer1->phys_offset);
        if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE)) {
                hammer_lock_ex(&hmp->blkmap_lock);
                if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE))
                        panic("CRC FAILED: LAYER1");
                hammer_unlock(&hmp->blkmap_lock);
        }

        /*
         * Dive layer 2, each entry represents a large-block.
         *
         * (reuse buffer, layer1 pointer becomes invalid)
         */
        layer2_offset = layer1->phys_offset +
                        HAMMER_BLOCKMAP_LAYER2_OFFSET(zone_offset);
        layer2 = hammer_bread(hmp, layer2_offset, errorp, &buffer);
        if (*errorp) {
                bytes = 0;
                goto failed;
        }
        if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE)) {
                hammer_lock_ex(&hmp->blkmap_lock);
                if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE))
                        panic("CRC FAILED: LAYER2");
                hammer_unlock(&hmp->blkmap_lock);
        }
        KKASSERT(layer2->zone == zone);

        bytes = layer2->bytes_free;

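        /*
         * (*curp tells the caller whether the zone's allocation iterator,
         * blockmap->next_offset, currently points into this big-block)
         */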
        if ((blockmap->next_offset ^ zone_offset) & ~HAMMER_LARGEBLOCK_MASK64)
                *curp = 0;
        else
                *curp = 1;
failed:
        if (buffer)
                hammer_rel_buffer(buffer, 0);
        hammer_rel_volume(root_volume, 0);
        if (hammer_debug_general & 0x0800) {
                kprintf("hammer_blockmap_getfree: %016llx -> %d\n",
                        zone_offset, bytes);
        }
        return(bytes);
}


/*
 * Lookup a blockmap offset.
 */
hammer_off_t
hammer_blockmap_lookup(hammer_mount_t hmp, hammer_off_t zone_offset,
                       int *errorp)
{
        hammer_volume_t root_volume;
        hammer_blockmap_t freemap;
        struct hammer_blockmap_layer1 *layer1;
        struct hammer_blockmap_layer2 *layer2;
        hammer_buffer_t buffer = NULL;
        hammer_off_t layer1_offset;
        hammer_off_t layer2_offset;
        hammer_off_t result_offset;
        hammer_off_t base_off;
        hammer_reserve_t resv;
        int zone;

        /*
         * Calculate the zone-2 offset.
         */
        zone = HAMMER_ZONE_DECODE(zone_offset);
        KKASSERT(zone >= HAMMER_ZONE_BTREE_INDEX && zone < HAMMER_MAX_ZONES);

        result_offset = (zone_offset & ~HAMMER_OFF_ZONE_MASK) |
                        HAMMER_ZONE_RAW_BUFFER;

        /*
         * We can actually stop here, normal blockmaps are now direct-mapped
         * onto the freemap and so represent zone-2 addresses.
         */
        if (hammer_verify_zone == 0) {
                *errorp = 0;
                return(result_offset);
        }

        /*
         * Validate the allocation zone
         */
        root_volume = hammer_get_root_volume(hmp, errorp);
        if (*errorp)
                return(0);
        freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];
        KKASSERT(freemap->phys_offset != 0);

        /*
         * Dive layer 1.
         */
        layer1_offset = freemap->phys_offset +
                        HAMMER_BLOCKMAP_LAYER1_OFFSET(zone_offset);
        layer1 = hammer_bread(hmp, layer1_offset, errorp, &buffer);
        if (*errorp)
                goto failed;
        KKASSERT(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
        if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE)) {
                hammer_lock_ex(&hmp->blkmap_lock);
                if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE))
                        panic("CRC FAILED: LAYER1");
                hammer_unlock(&hmp->blkmap_lock);
        }

        /*
         * Dive layer 2, each entry represents a large-block.
         */
        layer2_offset = layer1->phys_offset +
                        HAMMER_BLOCKMAP_LAYER2_OFFSET(zone_offset);
        layer2 = hammer_bread(hmp, layer2_offset, errorp, &buffer);

        if (*errorp)
                goto failed;
        if (layer2->zone == 0) {
                base_off = (zone_offset &
                            (~HAMMER_LARGEBLOCK_MASK64 & ~HAMMER_OFF_ZONE_MASK)) |
                            HAMMER_ZONE_RAW_BUFFER;
                resv = RB_LOOKUP(hammer_res_rb_tree, &hmp->rb_resv_root,
                                 base_off);
                KKASSERT(resv && resv->zone == zone);

        } else if (layer2->zone != zone) {
                panic("hammer_blockmap_lookup: bad zone %d/%d\n",
                        layer2->zone, zone);
        }
        if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE)) {
                hammer_lock_ex(&hmp->blkmap_lock);
                if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE))
                        panic("CRC FAILED: LAYER2");
                hammer_unlock(&hmp->blkmap_lock);
        }

failed:
        if (buffer)
                hammer_rel_buffer(buffer, 0);
        hammer_rel_volume(root_volume, 0);
        if (hammer_debug_general & 0x0800) {
                kprintf("hammer_blockmap_lookup: %016llx -> %016llx\n",
                        zone_offset, result_offset);
        }
        return(result_offset);
}


/*
 * Check space availability
 */
int
hammer_checkspace(hammer_mount_t hmp, int slop)
{
        const int in_size = sizeof(struct hammer_inode_data) +
                            sizeof(union hammer_btree_elm);
        const int rec_size = (sizeof(union hammer_btree_elm) * 2);
        int64_t usedbytes;

        usedbytes = hmp->rsv_inodes * in_size +
                    hmp->rsv_recs * rec_size +
                    hmp->rsv_databytes +
                    ((int64_t)hmp->rsv_fromdelay << HAMMER_LARGEBLOCK_BITS) +
                    ((int64_t)hidirtybufspace << 2) +
                    (slop << HAMMER_LARGEBLOCK_BITS);
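
        /*
         * (rsv_inodes/rsv_recs/rsv_databytes are byte-based estimates;
         * rsv_fromdelay and slop are counted in big-blocks and shifted by
         * HAMMER_LARGEBLOCK_BITS to convert to bytes.  The comparison below
         * converts usedbytes back to big-blocks.)
         */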

        hammer_count_extra_space_used = usedbytes;      /* debugging */

        if (hmp->copy_stat_freebigblocks >=
            (usedbytes >> HAMMER_LARGEBLOCK_BITS)) {
                return(0);
        }
        return (ENOSPC);
}