/* sbin/hammer/ondisk.c */
1 /*
2  * Copyright (c) 2007 The DragonFly Project.  All rights reserved.
3  * 
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@backplane.com>
6  * 
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in
15  *    the documentation and/or other materials provided with the
16  *    distribution.
17  * 3. Neither the name of The DragonFly Project nor the names of its
18  *    contributors may be used to endorse or promote products derived
19  *    from this software without specific, prior written permission.
20  * 
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
25  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  * 
34  * $DragonFly: src/sbin/hammer/ondisk.c,v 1.9 2008/01/24 02:16:47 dillon Exp $
35  */
36
37 #include <sys/types.h>
38 #include <assert.h>
39 #include <stdio.h>
40 #include <stdlib.h>
41 #include <stdarg.h>
42 #include <string.h>
43 #include <unistd.h>
44 #include <err.h>
45 #include <fcntl.h>
46 #include "hammer_util.h"
47
/* Forward declarations for file-local helpers defined below. */
static void initbuffer(hammer_alist_t live, hammer_fsbuf_head_t head,
                        u_int64_t type);
static void alloc_new_buffer(struct cluster_info *cluster, hammer_alist_t live,
                        u_int64_t type, int32_t nelements);
#if 0
static void readhammerbuf(struct volume_info *vol, void *data,
                        int64_t offset);
#endif
static void writehammerbuf(struct volume_info *vol, const void *data,
                        int64_t offset);
58
59
/* a-list layout templates wired into every volume/supercl/cluster a-list */
struct hammer_alist_config Buf_alist_config;
struct hammer_alist_config Vol_normal_alist_config;
struct hammer_alist_config Vol_super_alist_config;
struct hammer_alist_config Supercl_alist_config;
struct hammer_alist_config Clu_master_alist_config;
struct hammer_alist_config Clu_slave_alist_config;
uuid_t Hammer_FSType;           /* expected fs type; compared in setup_volume */
uuid_t Hammer_FSId;             /* fs id shared by all volumes; set from 1st */
int64_t BootAreaSize;           /* presumably set by callers — not set here */
int64_t MemAreaSize;            /* presumably set by callers — not set here */
int     UsingSuperClusters;     /* non-zero: new volumes use superclusters */
int     NumVolumes;
int     RootVolNo = -1;         /* -1 until the first volume header is read */
struct volume_list VolList = TAILQ_HEAD_INITIALIZER(VolList);
74
/*
 * Initialize the global a-list templates.  Must run before any volume,
 * supercluster, or cluster structure is set up, since those wire their
 * a-lists to these configs (see setup_volume/get_supercl/get_cluster).
 */
void
init_alist_templates(void)
{
        /*
         * Initialize the alist templates we will be using
         */
        hammer_alist_template(&Buf_alist_config, HAMMER_FSBUF_MAXBLKS,
                              1, HAMMER_FSBUF_METAELMS, 0);
        hammer_alist_template(&Vol_normal_alist_config, HAMMER_VOL_MAXCLUSTERS,
                              1, HAMMER_VOL_METAELMS_1LYR, 0);
        /* two-layer volume a-list; super template call must follow */
        hammer_alist_template(&Vol_super_alist_config,
                          HAMMER_VOL_MAXSUPERCLUSTERS * HAMMER_SCL_MAXCLUSTERS,
                              HAMMER_SCL_MAXCLUSTERS, HAMMER_VOL_METAELMS_2LYR,
                              0);
        hammer_super_alist_template(&Vol_super_alist_config);
        hammer_alist_template(&Supercl_alist_config, HAMMER_VOL_MAXCLUSTERS,
                              1, HAMMER_SUPERCL_METAELMS, 0);
        hammer_alist_template(&Clu_master_alist_config, HAMMER_CLU_MAXBUFFERS,
                              1, HAMMER_CLU_MASTER_METAELMS, 0);
        /* two-layer cluster-slave a-list; buffer template call must follow */
        hammer_alist_template(&Clu_slave_alist_config,
                              HAMMER_CLU_MAXBUFFERS * HAMMER_FSBUF_MAXBLKS,
                              HAMMER_FSBUF_MAXBLKS, HAMMER_CLU_SLAVE_METAELMS,
                              1);
        hammer_buffer_alist_template(&Clu_slave_alist_config);
}
100
101 /*
102  * Lookup the requested information structure and related on-disk buffer.
103  * Missing structures are created.
104  */
105
106 struct volume_info *
107 setup_volume(int32_t vol_no, const char *filename, int isnew, int oflags)
108 {
109         struct volume_info *vol;
110         struct volume_info *scan;
111         struct hammer_volume_ondisk *ondisk;
112         int n;
113
114         /*
115          * Allocate the volume structure
116          */
117         vol = malloc(sizeof(*vol));
118         bzero(vol, sizeof(*vol));
119         TAILQ_INIT(&vol->cluster_list);
120         TAILQ_INIT(&vol->supercl_list);
121         vol->name = strdup(filename);
122         vol->fd = open(filename, oflags);
123         if (vol->fd < 0) {
124                 free(vol->name);
125                 free(vol);
126                 err(1, "setup_volume: %s: Open failed", filename);
127         }
128
129         /*
130          * Read or initialize the volume header
131          */
132         vol->ondisk = ondisk = malloc(HAMMER_BUFSIZE);
133         if (isnew) {
134                 bzero(ondisk, HAMMER_BUFSIZE);
135                 vol->using_supercl = UsingSuperClusters;
136         } else {
137                 n = pread(vol->fd, ondisk, HAMMER_BUFSIZE, 0);
138                 if (n != HAMMER_BUFSIZE) {
139                         err(1, "setup_volume: %s: Read failed at offset 0",
140                             filename);
141                 }
142                 if (ondisk->vol_flags & HAMMER_VOLF_USINGSUPERCL)
143                         vol->using_supercl = 1;
144                 vol_no = ondisk->vol_no;
145                 if (RootVolNo < 0) {
146                         RootVolNo = ondisk->vol_rootvol;
147                 } else if (RootVolNo != (int)ondisk->vol_rootvol) {
148                         errx(1, "setup_volume: %s: root volume disagreement: "
149                                 "%d vs %d",
150                                 vol->name, RootVolNo, ondisk->vol_rootvol);
151                 }
152
153                 if (bcmp(&Hammer_FSType, &ondisk->vol_fstype, sizeof(Hammer_FSType)) != 0) {
154                         errx(1, "setup_volume: %s: Header does not indicate "
155                                 "that this is a hammer volume", vol->name);
156                 }
157                 if (TAILQ_EMPTY(&VolList)) {
158                         Hammer_FSId = vol->ondisk->vol_fsid;
159                 } else if (bcmp(&Hammer_FSId, &ondisk->vol_fsid, sizeof(Hammer_FSId)) != 0) {
160                         errx(1, "setup_volume: %s: FSId does match other "
161                                 "volumes!", vol->name);
162                 }
163         }
164         vol->vol_no = vol_no;
165         if (vol->using_supercl) {
166                 vol->clu_alist.config = &Vol_super_alist_config;
167                 vol->clu_alist.meta = ondisk->vol_almeta.super;
168                 vol->clu_alist.info = vol;
169         } else {
170                 vol->clu_alist.config = &Vol_normal_alist_config;
171                 vol->clu_alist.meta = ondisk->vol_almeta.normal;
172         }
173         vol->buf_alist.config = &Buf_alist_config;
174         vol->buf_alist.meta = ondisk->head.buf_almeta;
175
176         if (isnew) {
177                 hammer_alist_init(&vol->clu_alist, 0, 0, HAMMER_ASTATE_ALLOC);
178                 initbuffer(&vol->buf_alist, &ondisk->head, HAMMER_FSBUF_VOLUME);
179                 vol->cache.modified = 1;
180         }
181
182         /*
183          * Link the volume structure in
184          */
185         TAILQ_FOREACH(scan, &VolList, entry) {
186                 if (scan->vol_no == vol_no) {
187                         errx(1, "setup_volume %s: Duplicate volume number %d "
188                                 "against %s", filename, vol_no, scan->name);
189                 }
190         }
191         TAILQ_INSERT_TAIL(&VolList, vol, entry);
192         return(vol);
193 }
194
195 struct volume_info *
196 get_volume(int32_t vol_no)
197 {
198         struct volume_info *vol;
199
200         TAILQ_FOREACH(vol, &VolList, entry) {
201                 if (vol->vol_no == vol_no)
202                         break;
203         }
204         if (vol == NULL)
205                 errx(1, "get_volume: Volume %d does not exist!", vol_no);
206         ++vol->cache.refs;
207         /* not added to or removed from hammer cache */
208         return(vol);
209 }
210
211 void
212 rel_volume(struct volume_info *volume)
213 {
214         /* not added to or removed from hammer cache */
215         --volume->cache.refs;
216 }
217
/*
 * Obtain the supercl_info for supercluster scl_no of vol, creating the
 * in-memory structure and loading (or, when isnew != 0, formatting) its
 * on-disk image as needed.  The returned structure is referenced; release
 * with rel_supercl().  Only valid on volumes using superclusters.
 */
struct supercl_info *
get_supercl(struct volume_info *vol, int32_t scl_no, hammer_alloc_state_t isnew)
{
        struct hammer_supercl_ondisk *ondisk;
        struct supercl_info *scl;
        int32_t scl_group;
        int64_t scl_group_size;
        int64_t clusterSize = vol->ondisk->vol_clsize;
        int n;

        assert(vol->using_supercl);

        /* reuse an existing tracking structure if we have one */
        TAILQ_FOREACH(scl, &vol->supercl_list, entry) {
                if (scl->scl_no == scl_no)
                        break;
        }
        if (scl == NULL) {
                /*
                 * Allocate the scl
                 */
                scl = malloc(sizeof(*scl));
                bzero(scl, sizeof(*scl));
                scl->scl_no = scl_no;
                scl->volume = vol;
                TAILQ_INSERT_TAIL(&vol->supercl_list, scl, entry);
                ++vol->cache.refs;      /* supercl holds a ref on its volume */
                scl->cache.u.supercl = scl;
                hammer_cache_add(&scl->cache, ISSUPERCL);

                /*
                 * Calculate the super-cluster's offset in the volume.
                 *
                 * The arrangement is [scl * N][N * 32768 clusters], repeat.
                 * N is typically 16.
                 */
                scl_group = scl_no / HAMMER_VOL_SUPERCLUSTER_GROUP;
                scl_group_size = ((int64_t)HAMMER_BUFSIZE *
                                  HAMMER_VOL_SUPERCLUSTER_GROUP) +
                                  ((int64_t)HAMMER_VOL_SUPERCLUSTER_GROUP *
                                  clusterSize * HAMMER_SCL_MAXCLUSTERS);
                scl->scl_offset = vol->ondisk->vol_clo_beg +
                                  scl_group * scl_group_size +
                                  (scl_no % HAMMER_VOL_SUPERCLUSTER_GROUP) *
                                  HAMMER_BUFSIZE;
        }
        ++scl->cache.refs;
        hammer_cache_flush();   /* may write back other dirty structures */
        if ((ondisk = scl->ondisk) == NULL) {
                /*
                 * First access: allocate the backing block, wire up the
                 * a-lists, and read from disk unless we are formatting.
                 */
                scl->ondisk = ondisk = malloc(HAMMER_BUFSIZE);
                scl->clu_alist.config = &Supercl_alist_config;
                scl->clu_alist.meta = ondisk->scl_meta;
                scl->buf_alist.config = &Buf_alist_config;
                scl->buf_alist.meta = ondisk->head.buf_almeta;
                if (isnew == 0) {
                        n = pread(vol->fd, ondisk, HAMMER_BUFSIZE,
                                  scl->scl_offset);
                        if (n != HAMMER_BUFSIZE) {
                                err(1, "get_supercl: %s:%d Read failed "
                                    "at offset %lld",
                                    vol->name, scl_no, scl->scl_offset);
                        }
                }
        }
        if (isnew) {
                /* format: zero the block and initialize both a-lists */
                bzero(ondisk, HAMMER_BUFSIZE);
                hammer_alist_init(&scl->clu_alist, 0, 0, isnew);
                initbuffer(&scl->buf_alist, &ondisk->head,
                           HAMMER_FSBUF_SUPERCL);
                scl->cache.modified = 1;
        }
        return(scl);
}
290
291 void
292 rel_supercl(struct supercl_info *supercl)
293 {
294         struct volume_info *volume;
295
296         assert(supercl->cache.refs > 0);
297         if (--supercl->cache.refs == 0) {
298                 if (supercl->cache.delete) {
299                         volume = supercl->volume;
300                         if (supercl->cache.modified)
301                                 flush_supercl(supercl);
302                         TAILQ_REMOVE(&volume->supercl_list, supercl, entry);
303                         hammer_cache_del(&supercl->cache);
304                         free(supercl->ondisk);
305                         free(supercl);
306                         rel_volume(volume);
307                 }
308         }
309 }
310
/*
 * Obtain the cluster_info for clu_no of vol, creating the in-memory
 * structure and loading (or, when isnew != 0, formatting) its on-disk
 * header as needed.  The returned structure is referenced; release with
 * rel_cluster().
 */
struct cluster_info *
get_cluster(struct volume_info *vol, int32_t clu_no, hammer_alloc_state_t isnew)
{
        struct hammer_cluster_ondisk *ondisk;
        struct cluster_info *cl;
        int32_t scl_group;
        int64_t scl_group_size;
        int64_t clusterSize = vol->ondisk->vol_clsize;
        int n;

        /* reuse an existing tracking structure if we have one */
        TAILQ_FOREACH(cl, &vol->cluster_list, entry) {
                if (cl->clu_no == clu_no)
                        break;
        }
        if (cl == NULL) {
                /*
                 * Allocate the cluster
                 */
                cl = malloc(sizeof(*cl));
                bzero(cl, sizeof(*cl));
                TAILQ_INIT(&cl->buffer_list);
                cl->clu_no = clu_no;
                cl->volume = vol;
                TAILQ_INSERT_TAIL(&vol->cluster_list, cl, entry);
                ++vol->cache.refs;      /* cluster holds a ref on its volume */
                cl->cache.u.cluster = cl;
                hammer_cache_add(&cl->cache, ISCLUSTER);
                if (vol->using_supercl) {
                        /*
                         * NOTE(review): get_supercl() already returns a
                         * referenced supercl; the extra ++refs here looks
                         * doubled relative to the single rel_supercl() in
                         * rel_cluster() — confirm intent.
                         */
                        cl->supercl = get_supercl(vol, clu_no / HAMMER_SCL_MAXCLUSTERS, 0);
                        ++cl->supercl->cache.refs;
                }

                /*
                 * Calculate the cluster's offset in the volume
                 *
                 * The arrangement is [scl * N][N * 32768 clusters], repeat.
                 * N is typically 16.
                 *
                 * Note that the cluster offset calculation is slightly
                 * different from the supercluster offset calculation due
                 * to the way the grouping works.
                 */
                if (vol->using_supercl) {
                        scl_group = clu_no / HAMMER_VOL_SUPERCLUSTER_GROUP /
                                    HAMMER_SCL_MAXCLUSTERS;
                        scl_group_size = 
                                ((int64_t)HAMMER_BUFSIZE *
                                HAMMER_VOL_SUPERCLUSTER_GROUP) +
                                ((int64_t)HAMMER_VOL_SUPERCLUSTER_GROUP *
                                clusterSize * HAMMER_SCL_MAXCLUSTERS);
                        scl_group_size += HAMMER_VOL_SUPERCLUSTER_GROUP *
                                          HAMMER_BUFSIZE;
                        cl->clu_offset =
                                vol->ondisk->vol_clo_beg +
                                scl_group * scl_group_size +
                                (HAMMER_BUFSIZE * HAMMER_VOL_SUPERCLUSTER_GROUP) +
                                 ((int64_t)clu_no % ((int64_t)HAMMER_SCL_MAXCLUSTERS * HAMMER_VOL_SUPERCLUSTER_GROUP)) *
                                 clusterSize;
                } else {
                        /* simple linear layout without superclusters */
                        cl->clu_offset = vol->ondisk->vol_clo_beg +
                                         (int64_t)clu_no * clusterSize;
                }
        }
        ++cl->cache.refs;
        hammer_cache_flush();   /* may write back other dirty structures */
        if ((ondisk = cl->ondisk) == NULL) {
                /*
                 * First access: allocate the backing block, wire up the
                 * four cluster a-lists, and read from disk unless we are
                 * formatting.
                 */
                cl->ondisk = ondisk = malloc(HAMMER_BUFSIZE);
                cl->alist_master.config = &Clu_master_alist_config;
                cl->alist_master.meta = ondisk->clu_master_meta;
                cl->alist_btree.config = &Clu_slave_alist_config;
                cl->alist_btree.meta = ondisk->clu_btree_meta;
                cl->alist_btree.info = cl;
                cl->alist_record.config = &Clu_slave_alist_config;
                cl->alist_record.meta = ondisk->clu_record_meta;
                cl->alist_record.info = cl;
                cl->alist_mdata.config = &Clu_slave_alist_config;
                cl->alist_mdata.meta = ondisk->clu_mdata_meta;
                cl->alist_mdata.info = cl;
                if (isnew == 0) {
                        n = pread(vol->fd, ondisk, HAMMER_BUFSIZE,
                                  cl->clu_offset);
                        if (n != HAMMER_BUFSIZE) {
                                err(1, "get_cluster: %s:%d Read failed "
                                    "at offset %lld",
                                    vol->name, clu_no, cl->clu_offset);
                        }
                }
        }
        if (isnew) {
                /* format: zero the block and initialize all four a-lists */
                bzero(ondisk, HAMMER_BUFSIZE);
                hammer_alist_init(&cl->alist_master, 0, 0, isnew);
                hammer_alist_init(&cl->alist_btree, 0, 0, HAMMER_ASTATE_ALLOC);
                hammer_alist_init(&cl->alist_record, 0, 0, HAMMER_ASTATE_ALLOC);
                hammer_alist_init(&cl->alist_mdata, 0, 0, HAMMER_ASTATE_ALLOC);
                cl->cache.modified = 1;
        }
        return(cl);
}
409
410 void
411 rel_cluster(struct cluster_info *cluster)
412 {
413         struct volume_info *volume;
414         struct supercl_info *supercl;
415
416         assert(cluster->cache.refs > 0);
417         if (--cluster->cache.refs == 0) {
418                 if (cluster->cache.delete) {
419                         volume = cluster->volume;
420                         supercl = cluster->supercl;
421                         if (cluster->cache.modified)
422                                 flush_cluster(cluster);
423                         TAILQ_REMOVE(&volume->cluster_list, cluster, entry);
424                         hammer_cache_del(&cluster->cache);
425                         free(cluster->ondisk);
426                         free(cluster);
427                         rel_volume(volume);
428                         if (supercl)
429                                 rel_supercl(supercl);
430                 }
431         }
432 }
433
/*
 * Acquire the specified buffer.
 * 
 * We are formatting a new buffer if buf_type != 0
 */
struct buffer_info *
get_buffer(struct cluster_info *cl, int32_t buf_no, int64_t buf_type)
{
        hammer_fsbuf_ondisk_t ondisk;
        struct buffer_info *buf;
        int n;

        /*
         * Find the buffer.  Note that buffer 0 corresponds to the cluster
         * header and should never be requested.
         */
        assert(buf_no != 0);
        TAILQ_FOREACH(buf, &cl->buffer_list, entry) {
                if (buf->buf_no == buf_no)
                        break;
        }
        if (buf == NULL) {
                /* first access: create the tracking structure */
                buf = malloc(sizeof(*buf));
                bzero(buf, sizeof(*buf));
                buf->buf_no = buf_no;
                buf->buf_offset = cl->clu_offset + buf_no * HAMMER_BUFSIZE;
                buf->cluster = cl;
                buf->volume = cl->volume;
                TAILQ_INSERT_TAIL(&cl->buffer_list, buf, entry);
                ++cl->cache.refs;       /* buffer holds a ref on its cluster */
                buf->cache.u.buffer = buf;
                hammer_cache_add(&buf->cache, ISBUFFER);
        }
        ++buf->cache.refs;
        hammer_cache_flush();   /* may write back other dirty structures */
        if ((ondisk = buf->ondisk) == NULL) {
                /*
                 * Allocate the backing block and wire up its a-list; read
                 * it from disk unless we are about to format it.
                 */
                buf->ondisk = ondisk = malloc(HAMMER_BUFSIZE);
                buf->alist.config = &Buf_alist_config;
                buf->alist.meta = ondisk->head.buf_almeta;
                if (buf_type == 0) {
                        n = pread(cl->volume->fd, ondisk, HAMMER_BUFSIZE,
                                  buf->buf_offset);
                        if (n != HAMMER_BUFSIZE) {
                                err(1, "get_buffer: %s:%d:%d Read failed at "
                                       "offset %lld",
                                    cl->volume->name, buf->cluster->clu_no,
                                    buf_no, buf->buf_offset);
                        }
                }
        }
        if (buf_type) {
                /* format: stamp the header type and reset the a-list */
                bzero(ondisk, HAMMER_BUFSIZE);
                initbuffer(&buf->alist, &ondisk->head, buf_type);
                buf->cache.modified = 1;
        }
        return(buf);
}
491
492 void
493 rel_buffer(struct buffer_info *buffer)
494 {
495         struct cluster_info *cluster;
496
497         assert(buffer->cache.refs > 0);
498         if (--buffer->cache.refs == 0) {
499                 if (buffer->cache.delete) {
500                         cluster = buffer->cluster;
501                         if (buffer->cache.modified)
502                                 flush_buffer(buffer);
503                         TAILQ_REMOVE(&cluster->buffer_list, buffer, entry);
504                         hammer_cache_del(&buffer->cache);
505                         free(buffer->ondisk);
506                         free(buffer);
507                         rel_cluster(cluster);
508                 }
509         }
510 }
511
512 /*
513  * Retrieve a pointer to a B-Tree node given a cluster offset.  The underlying
514  * bufp is freed if non-NULL and a referenced buffer is loaded into it.
515  */
516 hammer_node_ondisk_t
517 get_node(struct cluster_info *cl, int32_t offset, struct buffer_info **bufp)
518 {
519         struct buffer_info *buf;
520
521         if (*bufp)
522                 rel_buffer(*bufp);
523         *bufp = buf = get_buffer(cl, offset / HAMMER_BUFSIZE, 0);
524         if (buf->ondisk->head.buf_type != HAMMER_FSBUF_BTREE) {
525                 errx(1, "get_node %d:%d:%d - not a B-Tree node buffer!",
526                      cl->volume->vol_no, cl->clu_no, offset);
527         }
528         return((void *)((char *)buf->ondisk + (offset & HAMMER_BUFMASK)));
529 }
530
531 /*
532  * Allocate HAMMER elements - btree nodes, data storage, and record elements
533  */
534 void *
535 alloc_btree_element(struct cluster_info *cluster, int32_t *offp)
536 {
537         struct buffer_info *buf;
538         hammer_alist_t live;
539         int32_t elm_no;
540         void *item;
541
542         live = &cluster->alist_btree;
543         elm_no = hammer_alist_alloc_fwd(live, 1, cluster->ondisk->idx_index);
544         if (elm_no == HAMMER_ALIST_BLOCK_NONE)
545                 elm_no = hammer_alist_alloc_fwd(live, 1, 0);
546         if (elm_no == HAMMER_ALIST_BLOCK_NONE) {
547                 alloc_new_buffer(cluster, live,
548                                  HAMMER_FSBUF_BTREE, HAMMER_BTREE_NODES);
549                 ++cluster->ondisk->stat_idx_bufs;
550                 ++cluster->volume->ondisk->vol_stat_idx_bufs;
551                 ++cluster->volume->ondisk->vol0_stat_idx_bufs;
552                 elm_no = hammer_alist_alloc(live, 1);
553                 assert(elm_no != HAMMER_ALIST_BLOCK_NONE);
554         }
555         cluster->ondisk->idx_index = elm_no;
556         buf = get_buffer(cluster, elm_no / HAMMER_FSBUF_MAXBLKS, 0);
557         assert(buf->ondisk->head.buf_type != 0);
558         item = &buf->ondisk->btree.nodes[elm_no & HAMMER_FSBUF_BLKMASK];
559         *offp = buf->buf_no * HAMMER_BUFSIZE +
560                 ((char *)item - (char *)buf->ondisk);
561         return(item);
562 }
563
564 void *
565 alloc_data_element(struct cluster_info *cluster, int32_t bytes, int32_t *offp)
566 {
567         struct buffer_info *buf;
568         hammer_alist_t live;
569         int32_t elm_no;
570         int32_t nblks = (bytes + HAMMER_DATA_BLKMASK) & ~HAMMER_DATA_BLKMASK;
571         void *item;
572
573         /*
574          * Try to allocate a btree-node.  If elm_no is HAMMER_ALIST_BLOCK_NONE
575          * and buf is non-NULL we have to initialize a new buffer's a-list.
576          */
577         live = &cluster->alist_mdata;
578         elm_no = hammer_alist_alloc_fwd(live, nblks, cluster->ondisk->idx_data);
579         if (elm_no == HAMMER_ALIST_BLOCK_NONE)
580                 elm_no = hammer_alist_alloc_fwd(live, 1, 0);
581         if (elm_no == HAMMER_ALIST_BLOCK_NONE) {
582                 alloc_new_buffer(cluster, live,
583                                  HAMMER_FSBUF_DATA, HAMMER_DATA_NODES);
584                 ++cluster->ondisk->stat_data_bufs;
585                 ++cluster->volume->ondisk->vol_stat_data_bufs;
586                 ++cluster->volume->ondisk->vol0_stat_data_bufs;
587                 elm_no = hammer_alist_alloc(live, nblks);
588                 assert(elm_no != HAMMER_ALIST_BLOCK_NONE);
589         }
590         cluster->ondisk->idx_index = elm_no;
591         buf = get_buffer(cluster, elm_no / HAMMER_FSBUF_MAXBLKS, 0);
592         assert(buf->ondisk->head.buf_type != 0);
593         item = &buf->ondisk->data.data[elm_no & HAMMER_FSBUF_BLKMASK];
594         *offp = buf->buf_no * HAMMER_BUFSIZE +
595                 ((char *)item - (char *)buf->ondisk);
596         return(item);
597 }
598
/*
 * Allocate one record element from the cluster's record a-list.  Records
 * are allocated top-down (reverse), opposite to btree/data allocations.
 * Returns a pointer to the record and stores its cluster-relative byte
 * offset in *offp.
 */
void *
alloc_record_element(struct cluster_info *cluster, int32_t *offp,
                     u_int8_t rec_type)
{
        struct buffer_info *buf;
        hammer_alist_t live;
        int32_t elm_no;
        void *item;

        live = &cluster->alist_record;
        /* try the hint, then rescan from the top, then grow the cluster */
        elm_no = hammer_alist_alloc_rev(live, 1, cluster->ondisk->idx_record);
        if (elm_no == HAMMER_ALIST_BLOCK_NONE)
                elm_no = hammer_alist_alloc_rev(live, 1,HAMMER_ALIST_BLOCK_MAX);
        if (elm_no == HAMMER_ALIST_BLOCK_NONE) {
                alloc_new_buffer(cluster, live,
                                 HAMMER_FSBUF_RECORDS, HAMMER_RECORD_NODES);
                ++cluster->ondisk->stat_rec_bufs;
                ++cluster->volume->ondisk->vol_stat_rec_bufs;
                ++cluster->volume->ondisk->vol0_stat_rec_bufs;
                elm_no = hammer_alist_alloc_rev(live, 1,HAMMER_ALIST_BLOCK_MAX);
                assert(elm_no != HAMMER_ALIST_BLOCK_NONE);
        }
        cluster->ondisk->idx_record = elm_no;
        buf = get_buffer(cluster, elm_no / HAMMER_FSBUF_MAXBLKS, 0);
        assert(buf->ondisk->head.buf_type != 0);
        item = &buf->ondisk->record.recs[elm_no & HAMMER_FSBUF_BLKMASK];
        *offp = buf->buf_no * HAMMER_BUFSIZE +
                ((char *)item - (char *)buf->ondisk);
        ++cluster->ondisk->stat_records;
        /*
         * NOTE(review): cluster-pointer records are counted twice here —
         * confirm this accounting is intentional.
         */
        if (rec_type == HAMMER_RECTYPE_CLUSTER)
                ++cluster->ondisk->stat_records;
        return(item);
}
632
633 static void
634 alloc_new_buffer(struct cluster_info *cluster, hammer_alist_t live,
635                  u_int64_t type, int32_t nelements)
636 {
637         int32_t buf_no;
638         struct buffer_info *buf;
639
640         if (type == HAMMER_FSBUF_RECORDS) {
641                 buf_no = hammer_alist_alloc_rev(&cluster->alist_master, 1,
642                                                 HAMMER_ALIST_BLOCK_MAX);
643         } else {
644                 buf_no = hammer_alist_alloc_fwd(&cluster->alist_master, 1, 
645                                                 0);
646         }
647         assert(buf_no != HAMMER_ALIST_BLOCK_NONE);
648         buf = get_buffer(cluster, buf_no, type);
649         hammer_alist_free(live, buf_no * HAMMER_FSBUF_MAXBLKS, nelements);
650         /* XXX modified bit for multiple gets/rels */
651 }
652
/*
 * Flush various tracking structures to disk
 */
660 void
661 flush_all_volumes(void)
662 {
663         struct volume_info *vol;
664
665         TAILQ_FOREACH(vol, &VolList, entry)
666                 flush_volume(vol);
667 }
668
669 void
670 flush_volume(struct volume_info *vol)
671 {
672         struct supercl_info *supercl;
673         struct cluster_info *cl;
674
675         TAILQ_FOREACH(supercl, &vol->supercl_list, entry)
676                 flush_supercl(supercl);
677         TAILQ_FOREACH(cl, &vol->cluster_list, entry)
678                 flush_cluster(cl);
679         writehammerbuf(vol, vol->ondisk, 0);
680         vol->cache.modified = 0;
681 }
682
683 void
684 flush_supercl(struct supercl_info *supercl)
685 {
686         int64_t supercl_offset;
687
688         supercl_offset = supercl->scl_offset;
689         writehammerbuf(supercl->volume, supercl->ondisk, supercl_offset);
690         supercl->cache.modified = 0;
691 }
692
693 void
694 flush_cluster(struct cluster_info *cl)
695 {
696         struct buffer_info *buf;
697         int64_t cluster_offset;
698
699         TAILQ_FOREACH(buf, &cl->buffer_list, entry)
700                 flush_buffer(buf);
701         cluster_offset = cl->clu_offset;
702         writehammerbuf(cl->volume, cl->ondisk, cluster_offset);
703         cl->cache.modified = 0;
704 }
705
/*
 * Write a buffer's backing block to its volume and clear its dirty bit.
 */
void
flush_buffer(struct buffer_info *buf)
{
        writehammerbuf(buf->volume, buf->ondisk, buf->buf_offset);
        buf->cache.modified = 0;
}
712
/*
 * Generic buffer initialization
 */
/*
 * Stamp the filesystem-buffer header with its type and reset the block
 * a-list to an all-free state.
 */
static void
initbuffer(hammer_alist_t live, hammer_fsbuf_head_t head, u_int64_t type)
{
        head->buf_type = type;
        hammer_alist_init(live, 0, 0, HAMMER_ASTATE_ALLOC);
}
722
#if 0
/*
 * Core I/O operations
 */
/*
 * Read one HAMMER_BUFSIZE block from the volume at the given byte
 * offset.  Compiled out (unused); kept for symmetry with
 * writehammerbuf().
 */
static void
readhammerbuf(struct volume_info *vol, void *data, int64_t offset)
{
        ssize_t n;

        n = pread(vol->fd, data, HAMMER_BUFSIZE, offset);
        if (n != HAMMER_BUFSIZE)
                err(1, "Read volume %d (%s)", vol->vol_no, vol->name);
}

#endif
738
739 static void
740 writehammerbuf(struct volume_info *vol, const void *data, int64_t offset)
741 {
742         ssize_t n;
743
744         n = pwrite(vol->fd, data, HAMMER_BUFSIZE, offset);
745         if (n != HAMMER_BUFSIZE)
746                 err(1, "Write volume %d (%s)", vol->vol_no, vol->name);
747 }
748
/*
 * Print a formatted error message to stderr, append a newline, and
 * terminate the program with exit status 1.
 */
void
panic(const char *ctl, ...)
{
        va_list ap;

        va_start(ap, ctl);
        vfprintf(stderr, ctl, ap);
        va_end(ap);
        fputc('\n', stderr);
        exit(1);
}
760