/*
 * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_freemap.c,v 1.15 2008/06/10 00:40:31 dillon Exp $
 */
/*
 * HAMMER freemap - bigblock allocator.  The freemap is a 2-layer blockmap
 * with one layer2 entry for each big-block in the filesystem.  Big blocks
 * are 8MB blocks.
 *
 * Our allocator is fairly straightforward, we just iterate through available
 * blocks looking for a free one.  We shortcut the iteration based on
 * layer1 availability.
 */
49 static int hammer_freemap_reserved(hammer_mount_t hmp, hammer_off_t zone2_base);
/*
 * Backend big-block allocation.
 *
 * Scan the 2-layer freemap starting at blockmap->next_offset for a free,
 * unreserved big-block, claim it for 'owner', decrement the free-block
 * statistics, and return the block's offset.  *errorp receives any
 * hammer_bread() error.  The scan is serialized under hmp->free_lock.
 *
 * NOTE(review): lines were dropped from this extract — the function's
 * return-type line, local declarations (hmp, vol_no, loops, the errorp
 * parameter), the enclosing scan loop, several braces/else clauses and
 * the ENOSPC exit path are not visible.  Surviving statements are kept
 * byte-for-byte, stray line-number prefixes included.
 */
55 hammer_freemap_alloc(hammer_transaction_t trans, hammer_off_t owner,
59 hammer_volume_ondisk_t ondisk;
60 hammer_off_t layer1_offset;
61 hammer_off_t layer2_offset;
62 hammer_off_t result_offset;
63 hammer_blockmap_t blockmap;
64 hammer_buffer_t buffer1 = NULL;
65 hammer_buffer_t buffer2 = NULL;
66 struct hammer_blockmap_layer1 *layer1;
67 struct hammer_blockmap_layer2 *layer2;
73 ondisk = trans->rootvol->ondisk;
/* Serialize freemap scanning/claiming against other allocators/freers. */
75 hammer_lock_ex(&hmp->free_lock);
/* Resume the scan where the last allocation left off. */
77 blockmap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];
78 result_offset = blockmap->next_offset;
79 vol_no = HAMMER_VOL_DECODE(result_offset);
/* Look up the layer1 entry covering the candidate offset. */
81 layer1_offset = blockmap->phys_offset +
82 HAMMER_BLOCKMAP_LAYER1_OFFSET(result_offset);
84 layer1 = hammer_bread(hmp, layer1_offset, errorp, &buffer1);
85 if (layer1->phys_offset == HAMMER_BLOCKMAP_UNAVAIL) {
/*
 * End-of-volume, try next volume.  Presumably vol_no is advanced
 * (and wrapped to 0) on a dropped line just above; 'loops' guards
 * against scanning the whole filesystem more than once — the ENOSPC
 * branch body is also missing from this extract (TODO confirm).
 */
91 if (vol_no >= hmp->nvolumes)
93 result_offset = HAMMER_ENCODE_RAW_BUFFER(vol_no, 0);
94 if (vol_no == 0 && ++loops == 2) {
/* Layer1 is valid: inspect the layer2 entry for this big-block. */
100 layer2_offset = layer1->phys_offset +
101 HAMMER_BLOCKMAP_LAYER2_OFFSET(result_offset);
102 layer2 = hammer_bread(hmp, layer2_offset, errorp,
/*
 * Candidate is free and not held by a zone-2 reservation: claim it.
 * Owner is recorded big-block aligned; layer1's free count and the
 * volume-0 freebigblocks statistic (plus its in-memory copy) are
 * updated under the appropriate modify/done brackets.
 */
105 if (layer2->u.owner == HAMMER_BLOCKMAP_FREE &&
106 !hammer_freemap_reserved(hmp, result_offset)) {
107 hammer_modify_buffer(trans, buffer2,
108 layer2, sizeof(*layer2));
109 layer2->u.owner = owner &
110 ~HAMMER_LARGEBLOCK_MASK64;
111 hammer_modify_buffer_done(buffer2);
112 hammer_modify_buffer(trans, buffer1,
113 layer1, sizeof(*layer1));
114 --layer1->blocks_free;
115 hammer_modify_buffer_done(buffer1);
116 hammer_modify_volume_field(trans,
118 vol0_stat_freebigblocks);
119 --ondisk->vol0_stat_freebigblocks;
120 hmp->copy_stat_freebigblocks =
121 ondisk->vol0_stat_freebigblocks;
122 hammer_modify_volume_done(trans->rootvol);
125 if (layer1->blocks_free == 0 ||
126 layer2->u.owner == HAMMER_BLOCKMAP_UNAVAIL) {
/*
 * layer2 has no free blocks remaining, skip to the next layer2
 * boundary; otherwise advance one big-block.  Either step may
 * cross into the next volume (the volume-crossing handling is on
 * dropped lines — TODO confirm against upstream).
 */
131 result_offset = (result_offset + HAMMER_BLOCKMAP_LAYER2) & ~HAMMER_BLOCKMAP_LAYER2_MASK;
132 if (HAMMER_VOL_DECODE(result_offset) != vol_no)
135 result_offset += HAMMER_LARGEBLOCK_SIZE;
136 if (HAMMER_VOL_DECODE(result_offset) != vol_no)
/* Record where the next allocation scan should start. */
141 hammer_modify_volume(trans, trans->rootvol, NULL, 0);
142 blockmap->next_offset = result_offset + HAMMER_LARGEBLOCK_SIZE;
143 hammer_modify_volume_done(trans->rootvol);
/* Cleanup: drop the lock and release any buffers we still hold. */
145 hammer_unlock(&hmp->free_lock);
147 hammer_rel_buffer(buffer1, 0);
149 hammer_rel_buffer(buffer2, 0);
150 return(result_offset);
/*
 * Backend big-block free.
 *
 * Return the big-block at phys_offset (which must be big-block aligned
 * and currently owned by 'owner') to the freemap, and install a delayed
 * reservation so the block cannot be re-allocated until a later flush
 * group — protecting not-yet-flushed data that still references it.
 *
 * NOTE(review): lines were dropped from this extract — the return-type
 * line, some local declarations/initialization (hmp), braces, and any
 * trailing statements after the final hammer_rel_buffer() are not
 * visible.  Surviving statements are kept byte-for-byte, stray
 * line-number prefixes included.
 */
157 hammer_freemap_free(hammer_transaction_t trans, hammer_off_t phys_offset,
158 hammer_off_t owner, int *errorp)
161 hammer_volume_ondisk_t ondisk;
162 hammer_off_t layer1_offset;
163 hammer_off_t layer2_offset;
164 hammer_blockmap_t blockmap;
165 hammer_buffer_t buffer1 = NULL;
166 hammer_buffer_t buffer2 = NULL;
167 struct hammer_blockmap_layer1 *layer1;
168 struct hammer_blockmap_layer2 *layer2;
169 hammer_reserve_t resv;
/* Caller contract: aligned offset, not already under a reservation. */
173 KKASSERT((phys_offset & HAMMER_LARGEBLOCK_MASK64) == 0);
174 KKASSERT(hammer_freemap_reserved(hmp, phys_offset) == 0);
/*
 * Create a reservation covering the freed block, keyed by zone offset,
 * tied to the next flush group, and queued on the delay list for later
 * teardown.
 */
179 resv = kmalloc(sizeof(*resv), M_HAMMER, M_WAITOK|M_ZERO);
181 resv->zone_offset = phys_offset;
182 resv->flush_group = hmp->flusher_next + 1;
183 RB_INSERT(hammer_res_rb_tree, &hmp->rb_resv_root, resv);
184 TAILQ_INSERT_TAIL(&hmp->delay_list, resv, delay_entry);
185 ++hammer_count_reservations;
187 hammer_lock_ex(&hmp->free_lock);
190 ondisk = trans->rootvol->ondisk;
/* Walk layer1 -> layer2 to the entry describing phys_offset. */
192 blockmap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];
193 layer1_offset = blockmap->phys_offset +
194 HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_offset);
195 layer1 = hammer_bread(hmp, layer1_offset, errorp, &buffer1);
197 KKASSERT(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
199 layer2_offset = layer1->phys_offset +
200 HAMMER_BLOCKMAP_LAYER2_OFFSET(phys_offset);
201 layer2 = hammer_bread(hmp, layer2_offset, errorp, &buffer2);
/* The block must really belong to 'owner' (big-block aligned). */
203 KKASSERT(layer2->u.owner == (owner & ~HAMMER_LARGEBLOCK_MASK64));
/* Mark free: bump layer1's free count, clear layer2 ownership. */
204 hammer_modify_buffer(trans, buffer1, layer1, sizeof(*layer1));
205 ++layer1->blocks_free;
206 hammer_modify_buffer_done(buffer1);
207 hammer_modify_buffer(trans, buffer2, layer2, sizeof(*layer2));
208 layer2->u.owner = HAMMER_BLOCKMAP_FREE;
209 hammer_modify_buffer_done(buffer2);
/* Update the on-disk statistic and mirror it into the in-memory copy. */
211 hammer_modify_volume_field(trans, trans->rootvol,
212 vol0_stat_freebigblocks);
213 ++ondisk->vol0_stat_freebigblocks;
214 hammer_modify_volume_done(trans->rootvol);
215 hmp->copy_stat_freebigblocks = ondisk->vol0_stat_freebigblocks;
/* Cleanup: drop the lock and release any buffers we still hold. */
217 hammer_unlock(&hmp->free_lock);
220 hammer_rel_buffer(buffer1, 0);
222 hammer_rel_buffer(buffer2, 0);
226 * Check whether a free block has been reserved in zone-2.
229 hammer_freemap_reserved(hammer_mount_t hmp, hammer_off_t zone2_base)
231 if (RB_LOOKUP(hammer_res_rb_tree, &hmp->rb_resv_root, zone2_base))
/*
 * Check space availability.
 *
 * Heuristic test of whether enough free big-blocks remain to cover the
 * mount's outstanding reservations (inodes, records, data buffers).
 *
 * NOTE(review): this definition is truncated in the extract — the
 * return-type line, braces, and the return statements following both
 * the quick check and the final comparison are not visible; surviving
 * statements are kept byte-for-byte, stray line-number prefixes
 * included.
 */
240 hammer_checkspace(hammer_mount_t hmp)
242 const int in_size = sizeof(struct hammer_inode_data) +
243 sizeof(union hammer_btree_elm);
244 const int rec_size = (sizeof(union hammer_btree_elm) * 2);
245 const int blkconv = HAMMER_LARGEBLOCK_SIZE / HAMMER_BUFSIZE;
246 const int limit_inodes = HAMMER_LARGEBLOCK_SIZE / in_size;
247 const int limit_recs = HAMMER_LARGEBLOCK_SIZE / rec_size;
/*
 * Quick and very dirty, not even using the right units (bigblocks
 * vs 16K buffers), but this catches almost everything.
 */
254 if (hmp->copy_stat_freebigblocks >= hmp->rsv_databufs + 8 &&
255 hmp->rsv_inodes < limit_inodes &&
256 hmp->rsv_recs < limit_recs &&
257 hmp->rsv_databytes < HAMMER_LARGEBLOCK_SIZE) {
/*
 * Do a more involved check: estimate big-blocks consumed by the
 * reservations (with slop) and compare against the free count.
 */
264 usedbigblocks = (hmp->rsv_inodes * in_size / HAMMER_LARGEBLOCK_SIZE) +
265 (hmp->rsv_recs * rec_size / HAMMER_LARGEBLOCK_SIZE) +
266 hmp->rsv_databufs / blkconv + 6;
267 if (hmp->copy_stat_freebigblocks >= usedbigblocks)