HAMMER 53D/Many: Stabilization
dragonfly.git: sys/vfs/hammer/hammer_freemap.c
/*
 * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_freemap.c,v 1.15 2008/06/10 00:40:31 dillon Exp $
 */

/*
 * HAMMER freemap - big-block allocator.  The freemap is a 2-layer blockmap
 * with one layer2 entry for each big-block in the filesystem.  Big-blocks
 * are 8MB blocks.
 *
 * Our allocator is fairly straightforward: we iterate through the available
 * blocks looking for a free one, shortcutting the iteration based on layer1
 * availability.
 */
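
/*
 * Rough sketch of the lookup performed below (the authoritative layout
 * macros live in hammer_disk.h; this is illustrative only):
 *
 *    zone-2 big-block offset
 *        |
 *        +- HAMMER_BLOCKMAP_LAYER1_OFFSET() -> layer1 entry.  One layer1
 *        |     entry summarizes a whole buffer's worth of layer2 entries
 *        |     via blocks_free, letting the scan skip the entire range
 *        |     when nothing is free there.
 *        |
 *        +- HAMMER_BLOCKMAP_LAYER2_OFFSET() -> layer2 entry.  One layer2
 *              entry per 8MB big-block; u.owner is HAMMER_BLOCKMAP_FREE,
 *              HAMMER_BLOCKMAP_UNAVAIL, or the caller-supplied owner
 *              offset masked to a big-block boundary.
 */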

#include "hammer.h"

static int hammer_freemap_reserved(hammer_mount_t hmp, hammer_off_t zone2_base);

/*
 * Backend big-block allocation
 */
hammer_off_t
hammer_freemap_alloc(hammer_transaction_t trans, hammer_off_t owner,
                     int *errorp)
{
        hammer_mount_t hmp;
        hammer_volume_ondisk_t ondisk;
        hammer_off_t layer1_offset;
        hammer_off_t layer2_offset;
        hammer_off_t result_offset;
        hammer_blockmap_t blockmap;
        hammer_buffer_t buffer1 = NULL;
        hammer_buffer_t buffer2 = NULL;
        struct hammer_blockmap_layer1 *layer1;
        struct hammer_blockmap_layer2 *layer2;
        int vol_no;
        int loops = 0;

        hmp = trans->hmp;
        *errorp = 0;
        ondisk = trans->rootvol->ondisk;

        hammer_lock_ex(&hmp->free_lock);

        blockmap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];
        result_offset = blockmap->next_offset;
        vol_no = HAMMER_VOL_DECODE(result_offset);
        for (;;) {
                layer1_offset = blockmap->phys_offset +
                                HAMMER_BLOCKMAP_LAYER1_OFFSET(result_offset);

                layer1 = hammer_bread(hmp, layer1_offset, errorp, &buffer1);
                if (layer1->phys_offset == HAMMER_BLOCKMAP_UNAVAIL) {
                        /*
                         * End-of-volume, try next volume.
                         */
new_volume:
                        ++vol_no;
                        if (vol_no >= hmp->nvolumes)
                                vol_no = 0;
                        result_offset = HAMMER_ENCODE_RAW_BUFFER(vol_no, 0);
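                        /*
                         * Give up only after wrapping back to volume 0 a
                         * second time; by then every volume has been
                         * scanned and no free big-block was found.
                         */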
                        if (vol_no == 0 && ++loops == 2) {
                                *errorp = ENOSPC;
                                result_offset = 0;
                                goto done;
                        }
                } else {
                        layer2_offset = layer1->phys_offset +
                                HAMMER_BLOCKMAP_LAYER2_OFFSET(result_offset);
                        layer2 = hammer_bread(hmp, layer2_offset, errorp,
                                              &buffer2);

                        if (layer2->u.owner == HAMMER_BLOCKMAP_FREE &&
                            !hammer_freemap_reserved(hmp, result_offset)) {
                                hammer_modify_buffer(trans, buffer2,
                                                     layer2, sizeof(*layer2));
                                layer2->u.owner = owner &
                                                ~HAMMER_LARGEBLOCK_MASK64;
                                hammer_modify_buffer_done(buffer2);
                                hammer_modify_buffer(trans, buffer1,
                                                     layer1, sizeof(*layer1));
                                --layer1->blocks_free;
                                hammer_modify_buffer_done(buffer1);
                                hammer_modify_volume_field(trans,
                                                     trans->rootvol,
                                                     vol0_stat_freebigblocks);
                                --ondisk->vol0_stat_freebigblocks;
                                hmp->copy_stat_freebigblocks =
                                        ondisk->vol0_stat_freebigblocks;
                                hammer_modify_volume_done(trans->rootvol);
                                break;
                        }
                        if (layer1->blocks_free == 0 ||
                            layer2->u.owner == HAMMER_BLOCKMAP_UNAVAIL) {
                                /*
                                 * No free big-blocks remain under this
                                 * layer1 entry (or the big-block is
                                 * unavailable), skip to the next layer2
                                 * boundary.
                                 */
                                result_offset = (result_offset +
                                                 HAMMER_BLOCKMAP_LAYER2) &
                                                ~HAMMER_BLOCKMAP_LAYER2_MASK;
                                if (HAMMER_VOL_DECODE(result_offset) != vol_no)
                                        goto new_volume;
                        } else {
                                result_offset += HAMMER_LARGEBLOCK_SIZE;
                                if (HAMMER_VOL_DECODE(result_offset) != vol_no)
                                        goto new_volume;
                        }
                }
        }
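        /*
         * Remember where to resume scanning: advance the freemap's
         * next_offset hint just past the big-block we allocated.
         */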
        hammer_modify_volume(trans, trans->rootvol, NULL, 0);
        blockmap->next_offset = result_offset + HAMMER_LARGEBLOCK_SIZE;
        hammer_modify_volume_done(trans->rootvol);
done:
        hammer_unlock(&hmp->free_lock);
        if (buffer1)
                hammer_rel_buffer(buffer1, 0);
        if (buffer2)
                hammer_rel_buffer(buffer2, 0);
        return(result_offset);
}

/*
 * Backend big-block free
 */
void
hammer_freemap_free(hammer_transaction_t trans, hammer_off_t phys_offset,
                    hammer_off_t owner, int *errorp)
{
        hammer_mount_t hmp;
        hammer_volume_ondisk_t ondisk;
        hammer_off_t layer1_offset;
        hammer_off_t layer2_offset;
        hammer_blockmap_t blockmap;
        hammer_buffer_t buffer1 = NULL;
        hammer_buffer_t buffer2 = NULL;
        struct hammer_blockmap_layer1 *layer1;
        struct hammer_blockmap_layer2 *layer2;
        hammer_reserve_t resv;

        hmp = trans->hmp;

        KKASSERT((phys_offset & HAMMER_LARGEBLOCK_MASK64) == 0);
        KKASSERT(hammer_freemap_reserved(hmp, phys_offset) == 0);

        /*
         * Create a reservation
         */
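        /*
         * The reservation is keyed to the next flush group and placed on
         * the delay list.  While it exists, hammer_freemap_reserved()
         * makes hammer_freemap_alloc() skip this big-block, so it cannot
         * be handed out again while the free is still in-flight.
         */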
        resv = kmalloc(sizeof(*resv), M_HAMMER, M_WAITOK|M_ZERO);
        resv->refs = 1;
        resv->zone_offset = phys_offset;
        resv->flush_group = hmp->flusher_next + 1;
        RB_INSERT(hammer_res_rb_tree, &hmp->rb_resv_root, resv);
        TAILQ_INSERT_TAIL(&hmp->delay_list, resv, delay_entry);
        ++hammer_count_reservations;

        hammer_lock_ex(&hmp->free_lock);

        *errorp = 0;
        ondisk = trans->rootvol->ondisk;

        blockmap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];
        layer1_offset = blockmap->phys_offset +
                        HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_offset);
        layer1 = hammer_bread(hmp, layer1_offset, errorp, &buffer1);

        KKASSERT(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);

        layer2_offset = layer1->phys_offset +
                        HAMMER_BLOCKMAP_LAYER2_OFFSET(phys_offset);
        layer2 = hammer_bread(hmp, layer2_offset, errorp, &buffer2);

        KKASSERT(layer2->u.owner == (owner & ~HAMMER_LARGEBLOCK_MASK64));
        hammer_modify_buffer(trans, buffer1, layer1, sizeof(*layer1));
        ++layer1->blocks_free;
        hammer_modify_buffer_done(buffer1);
        hammer_modify_buffer(trans, buffer2, layer2, sizeof(*layer2));
        layer2->u.owner = HAMMER_BLOCKMAP_FREE;
        hammer_modify_buffer_done(buffer2);

        hammer_modify_volume_field(trans, trans->rootvol,
                                   vol0_stat_freebigblocks);
        ++ondisk->vol0_stat_freebigblocks;
        hammer_modify_volume_done(trans->rootvol);
        hmp->copy_stat_freebigblocks = ondisk->vol0_stat_freebigblocks;

        hammer_unlock(&hmp->free_lock);

        if (buffer1)
                hammer_rel_buffer(buffer1, 0);
        if (buffer2)
                hammer_rel_buffer(buffer2, 0);
}

/*
 * Check whether a free block has been reserved in zone-2.
 */
static int
hammer_freemap_reserved(hammer_mount_t hmp, hammer_off_t zone2_base)
{
        if (RB_LOOKUP(hammer_res_rb_tree, &hmp->rb_resv_root, zone2_base))
                return(1);
        return(0);
}

/*
 * Check space availability
 */
int
hammer_checkspace(hammer_mount_t hmp)
{
        const int in_size = sizeof(struct hammer_inode_data) +
                            sizeof(union hammer_btree_elm);
        const int rec_size = (sizeof(union hammer_btree_elm) * 2);
        const int blkconv = HAMMER_LARGEBLOCK_SIZE / HAMMER_BUFSIZE;
        const int limit_inodes = HAMMER_LARGEBLOCK_SIZE / in_size;
        const int limit_recs = HAMMER_LARGEBLOCK_SIZE / rec_size;
        int usedbigblocks;

        /*
         * Quick and very dirty, not even using the right units (big-blocks
         * vs 16K buffers), but this catches almost everything.
         */
        if (hmp->copy_stat_freebigblocks >= hmp->rsv_databufs + 8 &&
            hmp->rsv_inodes < limit_inodes &&
            hmp->rsv_recs < limit_recs &&
            hmp->rsv_databytes < HAMMER_LARGEBLOCK_SIZE) {
                return(0);
        }

        /*
         * Do a more involved check: convert the reserved inodes, records,
         * and data buffers into an estimated number of big-blocks and
         * compare against the free big-block count, with a slop of 6.
         */
        usedbigblocks = (hmp->rsv_inodes * in_size / HAMMER_LARGEBLOCK_SIZE) +
                        (hmp->rsv_recs * rec_size / HAMMER_LARGEBLOCK_SIZE) +
                        hmp->rsv_databufs / blkconv + 6;
        if (hmp->copy_stat_freebigblocks >= usedbigblocks)
                return(0);
        return(ENOSPC);
}