sbin/hammer/ondisk.c — DragonFly BSD HAMMER filesystem utility, on-disk structure support code.
1 /*
2  * Copyright (c) 2007 The DragonFly Project.  All rights reserved.
3  * 
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@backplane.com>
6  * 
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in
15  *    the documentation and/or other materials provided with the
16  *    distribution.
17  * 3. Neither the name of The DragonFly Project nor the names of its
18  *    contributors may be used to endorse or promote products derived
19  *    from this software without specific, prior written permission.
20  * 
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
25  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  * 
34  * $DragonFly: src/sbin/hammer/ondisk.c,v 1.7 2008/01/03 06:48:45 dillon Exp $
35  */
36
37 #include <sys/types.h>
38 #include <assert.h>
39 #include <stdio.h>
40 #include <stdlib.h>
41 #include <stdarg.h>
42 #include <string.h>
43 #include <unistd.h>
44 #include <err.h>
45 #include <fcntl.h>
46 #include "hammer_util.h"
47
48 static void initbuffer(hammer_alist_t live, hammer_fsbuf_head_t head,
49                         u_int64_t type);
50 static void alloc_new_buffer(struct cluster_info *cluster, hammer_alist_t live,
51                         u_int64_t type, int32_t nelements);
52 #if 0
53 static void readhammerbuf(struct volume_info *vol, void *data,
54                         int64_t offset);
55 #endif
56 static void writehammerbuf(struct volume_info *vol, const void *data,
57                         int64_t offset);
58
59
60 struct hammer_alist_config Buf_alist_config;
61 struct hammer_alist_config Vol_normal_alist_config;
62 struct hammer_alist_config Vol_super_alist_config;
63 struct hammer_alist_config Supercl_alist_config;
64 struct hammer_alist_config Clu_master_alist_config;
65 struct hammer_alist_config Clu_slave_alist_config;
66 uuid_t Hammer_FSType;
67 uuid_t Hammer_FSId;
68 int64_t ClusterSize;
69 int64_t BootAreaSize;
70 int64_t MemAreaSize;
71 int     UsingSuperClusters;
72 int     NumVolumes;
73 struct volume_list VolList = TAILQ_HEAD_INITIALIZER(VolList);
74
/*
 * One-time initialization of the global a-list (allocation bitmap)
 * templates.  Each template describes the geometry of one kind of
 * allocation map (blocks per map, blocks per radix leaf, and number
 * of meta-elements); live a-lists later bind their meta storage to
 * one of these configs.
 *
 * Must be called before any volume/cluster/buffer is set up, since
 * setup_volume() and friends point their a-lists at these configs.
 */
void
init_alist_templates(void)
{
        /*
         * Initialize the alist templates we will be using
         */
        hammer_alist_template(&Buf_alist_config, HAMMER_FSBUF_MAXBLKS,
                              1, HAMMER_FSBUF_METAELMS);
        hammer_alist_template(&Vol_normal_alist_config, HAMMER_VOL_MAXCLUSTERS,
                              1, HAMMER_VOL_METAELMS_1LYR);
        hammer_alist_template(&Vol_super_alist_config,
                          HAMMER_VOL_MAXSUPERCLUSTERS * HAMMER_SCL_MAXCLUSTERS,
                              HAMMER_SCL_MAXCLUSTERS, HAMMER_VOL_METAELMS_2LYR);
        /* the super template additionally gets recursion callbacks hooked */
        hammer_super_alist_template(&Vol_super_alist_config);
        hammer_alist_template(&Supercl_alist_config, HAMMER_VOL_MAXCLUSTERS,
                              1, HAMMER_SUPERCL_METAELMS);
        hammer_alist_template(&Clu_master_alist_config, HAMMER_CLU_MAXBUFFERS,
                              1, HAMMER_CLU_MASTER_METAELMS);
        hammer_alist_template(&Clu_slave_alist_config,
                              HAMMER_CLU_MAXBUFFERS * HAMMER_FSBUF_MAXBLKS,
                              HAMMER_FSBUF_MAXBLKS, HAMMER_CLU_SLAVE_METAELMS);
        /* the slave template recurses into per-buffer a-lists */
        hammer_buffer_alist_template(&Clu_slave_alist_config);
}
98
99 /*
100  * Lookup the requested information structure and related on-disk buffer.
101  * Missing structures are created.
102  */
103
104 struct volume_info *
105 setup_volume(int32_t vol_no, const char *filename, int isnew, int oflags)
106 {
107         struct volume_info *vol;
108         struct volume_info *scan;
109         struct hammer_volume_ondisk *ondisk;
110         int n;
111
112         /*
113          * Allocate the volume structure
114          */
115         vol = malloc(sizeof(*vol));
116         bzero(vol, sizeof(*vol));
117         TAILQ_INIT(&vol->cluster_list);
118         TAILQ_INIT(&vol->supercl_list);
119         vol->name = strdup(filename);
120         vol->fd = open(filename, oflags);
121         if (vol->fd < 0) {
122                 free(vol->name);
123                 free(vol);
124                 err(1, "setup_volume: %s: Open failed", filename);
125         }
126
127         /*
128          * Read or initialize the volume header
129          */
130         vol->ondisk = ondisk = malloc(HAMMER_BUFSIZE);
131         if (isnew) {
132                 bzero(ondisk, HAMMER_BUFSIZE);
133                 vol->using_supercl = UsingSuperClusters;
134         } else {
135                 n = pread(vol->fd, ondisk, HAMMER_BUFSIZE, 0);
136                 if (n != HAMMER_BUFSIZE) {
137                         err(1, "setup_volume: %s: Read failed at offset 0",
138                             filename);
139                 }
140                 if (ondisk->vol_flags & HAMMER_VOLF_USINGSUPERCL)
141                         vol->using_supercl = 1;
142                 vol_no = ondisk->vol_no;
143
144                 if (bcmp(&Hammer_FSType, &ondisk->vol_fstype, sizeof(Hammer_FSType)) != 0) {
145                         errx(1, "setup_volume: %s: Header does not indicate "
146                                 "that this is a hammer volume", vol->name);
147                 }
148                 if (TAILQ_EMPTY(&VolList)) {
149                         Hammer_FSId = vol->ondisk->vol_fsid;
150                 } else if (bcmp(&Hammer_FSId, &ondisk->vol_fsid, sizeof(Hammer_FSId)) != 0) {
151                         errx(1, "setup_volume: %s: FSId does match other "
152                                 "volumes!", vol->name);
153                 }
154         }
155         vol->vol_no = vol_no;
156         if (UsingSuperClusters) {
157                 vol->clu_alist.config = &Vol_super_alist_config;
158                 vol->clu_alist.meta = ondisk->vol_almeta.super;
159                 vol->clu_alist.info = vol;
160         } else {
161                 vol->clu_alist.config = &Vol_normal_alist_config;
162                 vol->clu_alist.meta = ondisk->vol_almeta.normal;
163         }
164         vol->buf_alist.config = &Buf_alist_config;
165         vol->buf_alist.meta = ondisk->head.buf_almeta;
166
167         if (isnew) {
168                 hammer_alist_init(&vol->clu_alist, 0, 0, HAMMER_ASTATE_ALLOC);
169                 initbuffer(&vol->buf_alist, &ondisk->head, HAMMER_FSBUF_VOLUME);
170                 vol->cache.modified = 1;
171         }
172
173         /*
174          * Link the volume structure in
175          */
176         TAILQ_FOREACH(scan, &VolList, entry) {
177                 if (scan->vol_no == vol_no) {
178                         errx(1, "setup_volume %s: Duplicate volume number %d "
179                                 "against %s", filename, vol_no, scan->name);
180                 }
181         }
182         TAILQ_INSERT_TAIL(&VolList, vol, entry);
183         return(vol);
184 }
185
186 struct volume_info *
187 get_volume(int32_t vol_no)
188 {
189         struct volume_info *vol;
190
191         TAILQ_FOREACH(vol, &VolList, entry) {
192                 if (vol->vol_no == vol_no)
193                         break;
194         }
195         if (vol == NULL)
196                 errx(1, "get_volume: Volume %d does not exist!", vol_no);
197         ++vol->cache.refs;
198         /* not added to or removed from hammer cache */
199         return(vol);
200 }
201
202 void
203 rel_volume(struct volume_info *volume)
204 {
205         /* not added to or removed from hammer cache */
206         --volume->cache.refs;
207 }
208
209 struct supercl_info *
210 get_supercl(struct volume_info *vol, int32_t scl_no, hammer_alloc_state_t isnew)
211 {
212         struct hammer_supercl_ondisk *ondisk;
213         struct supercl_info *scl;
214         int32_t scl_group;
215         int64_t scl_group_size;
216         int n;
217
218         assert(vol->using_supercl);
219
220         TAILQ_FOREACH(scl, &vol->supercl_list, entry) {
221                 if (scl->scl_no == scl_no)
222                         break;
223         }
224         if (scl == NULL) {
225                 /*
226                  * Allocate the scl
227                  */
228                 scl = malloc(sizeof(*scl));
229                 bzero(scl, sizeof(*scl));
230                 scl->scl_no = scl_no;
231                 scl->volume = vol;
232                 TAILQ_INSERT_TAIL(&vol->supercl_list, scl, entry);
233                 ++vol->cache.refs;
234                 scl->cache.u.supercl = scl;
235                 hammer_cache_add(&scl->cache, ISSUPERCL);
236
237                 /*
238                  * Calculate the super-cluster's offset in the volume.
239                  *
240                  * The arrangement is [scl * N][N * 32768 clusters], repeat.
241                  * N is typically 16.
242                  */
243                 scl_group = scl_no / HAMMER_VOL_SUPERCLUSTER_GROUP;
244                 scl_group_size = ((int64_t)HAMMER_BUFSIZE *
245                                   HAMMER_VOL_SUPERCLUSTER_GROUP) +
246                                   ((int64_t)HAMMER_VOL_SUPERCLUSTER_GROUP *
247                                   ClusterSize * HAMMER_SCL_MAXCLUSTERS);
248                 scl->scl_offset = vol->ondisk->vol_clo_beg +
249                                   scl_group * scl_group_size +
250                                   (scl_no % HAMMER_VOL_SUPERCLUSTER_GROUP) *
251                                   HAMMER_BUFSIZE;
252         }
253         ++scl->cache.refs;
254         hammer_cache_flush();
255         if ((ondisk = scl->ondisk) == NULL) {
256                 scl->ondisk = ondisk = malloc(HAMMER_BUFSIZE);
257                 scl->clu_alist.config = &Supercl_alist_config;
258                 scl->clu_alist.meta = ondisk->scl_meta;
259                 scl->buf_alist.config = &Buf_alist_config;
260                 scl->buf_alist.meta = ondisk->head.buf_almeta;
261                 if (isnew == 0) {
262                         n = pread(vol->fd, ondisk, HAMMER_BUFSIZE,
263                                   scl->scl_offset);
264                         if (n != HAMMER_BUFSIZE) {
265                                 err(1, "get_supercl: %s:%d Read failed "
266                                     "at offset %lld",
267                                     vol->name, scl_no, scl->scl_offset);
268                         }
269                 }
270         }
271         if (isnew) {
272                 bzero(ondisk, HAMMER_BUFSIZE);
273                 hammer_alist_init(&scl->clu_alist, 0, 0, isnew);
274                 initbuffer(&scl->buf_alist, &ondisk->head,
275                            HAMMER_FSBUF_SUPERCL);
276                 scl->cache.modified = 1;
277         }
278         return(scl);
279 }
280
281 void
282 rel_supercl(struct supercl_info *supercl)
283 {
284         struct volume_info *volume;
285
286         assert(supercl->cache.refs > 0);
287         if (--supercl->cache.refs == 0) {
288                 if (supercl->cache.delete) {
289                         volume = supercl->volume;
290                         if (supercl->cache.modified)
291                                 flush_supercl(supercl);
292                         TAILQ_REMOVE(&volume->supercl_list, supercl, entry);
293                         hammer_cache_del(&supercl->cache);
294                         free(supercl->ondisk);
295                         free(supercl);
296                         rel_volume(volume);
297                 }
298         }
299 }
300
301 struct cluster_info *
302 get_cluster(struct volume_info *vol, int32_t clu_no, hammer_alloc_state_t isnew)
303 {
304         struct hammer_cluster_ondisk *ondisk;
305         struct cluster_info *cl;
306         int32_t scl_group;
307         int64_t scl_group_size;
308         int n;
309
310         TAILQ_FOREACH(cl, &vol->cluster_list, entry) {
311                 if (cl->clu_no == clu_no)
312                         break;
313         }
314         if (cl == NULL) {
315                 /*
316                  * Allocate the cluster
317                  */
318                 cl = malloc(sizeof(*cl));
319                 bzero(cl, sizeof(*cl));
320                 TAILQ_INIT(&cl->buffer_list);
321                 cl->clu_no = clu_no;
322                 cl->volume = vol;
323                 TAILQ_INSERT_TAIL(&vol->cluster_list, cl, entry);
324                 ++vol->cache.refs;
325                 cl->cache.u.cluster = cl;
326                 hammer_cache_add(&cl->cache, ISCLUSTER);
327                 if (vol->using_supercl) {
328                         cl->supercl = get_supercl(vol, clu_no / HAMMER_SCL_MAXCLUSTERS, 0);
329                         ++cl->supercl->cache.refs;
330                 }
331
332                 /*
333                  * Calculate the cluster's offset in the volume
334                  *
335                  * The arrangement is [scl * N][N * 32768 clusters], repeat.
336                  * N is typically 16.
337                  *
338                  * Note that the cluster offset calculation is slightly
339                  * different from the supercluster offset calculation due
340                  * to the way the grouping works.
341                  */
342                 if (vol->using_supercl) {
343                         scl_group = clu_no / HAMMER_VOL_SUPERCLUSTER_GROUP /
344                                     HAMMER_SCL_MAXCLUSTERS;
345                         scl_group_size = 
346                                 ((int64_t)HAMMER_BUFSIZE *
347                                 HAMMER_VOL_SUPERCLUSTER_GROUP) +
348                                 ((int64_t)HAMMER_VOL_SUPERCLUSTER_GROUP *
349                                 ClusterSize * HAMMER_SCL_MAXCLUSTERS);
350                         scl_group_size += HAMMER_VOL_SUPERCLUSTER_GROUP *
351                                           HAMMER_BUFSIZE;
352                         cl->clu_offset =
353                                 vol->ondisk->vol_clo_beg +
354                                 scl_group * scl_group_size +
355                                 (HAMMER_BUFSIZE * HAMMER_VOL_SUPERCLUSTER_GROUP) +
356                                  ((int64_t)clu_no % ((int64_t)HAMMER_SCL_MAXCLUSTERS * HAMMER_VOL_SUPERCLUSTER_GROUP)) *
357                                  ClusterSize;
358                 } else {
359                         cl->clu_offset = vol->ondisk->vol_clo_beg +
360                                          (int64_t)clu_no * ClusterSize;
361                 }
362         }
363         ++cl->cache.refs;
364         hammer_cache_flush();
365         if ((ondisk = cl->ondisk) == NULL) {
366                 cl->ondisk = ondisk = malloc(HAMMER_BUFSIZE);
367                 cl->alist_master.config = &Clu_master_alist_config;
368                 cl->alist_master.meta = ondisk->clu_master_meta;
369                 cl->alist_btree.config = &Clu_slave_alist_config;
370                 cl->alist_btree.meta = ondisk->clu_btree_meta;
371                 cl->alist_btree.info = cl;
372                 cl->alist_record.config = &Clu_slave_alist_config;
373                 cl->alist_record.meta = ondisk->clu_record_meta;
374                 cl->alist_record.info = cl;
375                 cl->alist_mdata.config = &Clu_slave_alist_config;
376                 cl->alist_mdata.meta = ondisk->clu_mdata_meta;
377                 cl->alist_mdata.info = cl;
378                 if (isnew == 0) {
379                         n = pread(vol->fd, ondisk, HAMMER_BUFSIZE,
380                                   cl->clu_offset);
381                         if (n != HAMMER_BUFSIZE) {
382                                 err(1, "get_cluster: %s:%d Read failed "
383                                     "at offset %lld",
384                                     vol->name, clu_no, cl->clu_offset);
385                         }
386                 }
387         }
388         if (isnew) {
389                 bzero(ondisk, HAMMER_BUFSIZE);
390                 hammer_alist_init(&cl->alist_master, 0, 0, isnew);
391                 hammer_alist_init(&cl->alist_btree, 0, 0, HAMMER_ASTATE_ALLOC);
392                 hammer_alist_init(&cl->alist_record, 0, 0, HAMMER_ASTATE_ALLOC);
393                 hammer_alist_init(&cl->alist_mdata, 0, 0, HAMMER_ASTATE_ALLOC);
394                 cl->cache.modified = 1;
395         }
396         return(cl);
397 }
398
399 void
400 rel_cluster(struct cluster_info *cluster)
401 {
402         struct volume_info *volume;
403         struct supercl_info *supercl;
404
405         assert(cluster->cache.refs > 0);
406         if (--cluster->cache.refs == 0) {
407                 if (cluster->cache.delete) {
408                         volume = cluster->volume;
409                         supercl = cluster->supercl;
410                         if (cluster->cache.modified)
411                                 flush_cluster(cluster);
412                         TAILQ_REMOVE(&volume->cluster_list, cluster, entry);
413                         hammer_cache_del(&cluster->cache);
414                         free(cluster->ondisk);
415                         free(cluster);
416                         rel_volume(volume);
417                         if (supercl)
418                                 rel_supercl(supercl);
419                 }
420         }
421 }
422
423 /*
424  * Acquire the specified buffer.
425  * 
426  * We are formatting a new buffer is buf_type != 0
427  */
428 struct buffer_info *
429 get_buffer(struct cluster_info *cl, int32_t buf_no, int64_t buf_type)
430 {
431         hammer_fsbuf_ondisk_t ondisk;
432         struct buffer_info *buf;
433         int n;
434
435         /*
436          * Find the buffer.  Note that buffer 0 corresponds to the cluster
437          * header and should never be requested.
438          */
439         assert(buf_no != 0);
440         TAILQ_FOREACH(buf, &cl->buffer_list, entry) {
441                 if (buf->buf_no == buf_no)
442                         break;
443         }
444         if (buf == NULL) {
445                 buf = malloc(sizeof(*buf));
446                 bzero(buf, sizeof(*buf));
447                 buf->buf_no = buf_no;
448                 buf->buf_offset = cl->clu_offset + buf_no * HAMMER_BUFSIZE;
449                 buf->cluster = cl;
450                 buf->volume = cl->volume;
451                 TAILQ_INSERT_TAIL(&cl->buffer_list, buf, entry);
452                 ++cl->cache.refs;
453                 buf->cache.u.buffer = buf;
454                 hammer_cache_add(&buf->cache, ISBUFFER);
455         }
456         ++buf->cache.refs;
457         hammer_cache_flush();
458         if ((ondisk = buf->ondisk) == NULL) {
459                 buf->ondisk = ondisk = malloc(HAMMER_BUFSIZE);
460                 buf->alist.config = &Buf_alist_config;
461                 buf->alist.meta = ondisk->head.buf_almeta;
462                 if (buf_type == 0) {
463                         n = pread(cl->volume->fd, ondisk, HAMMER_BUFSIZE,
464                                   buf->buf_offset);
465                         if (n != HAMMER_BUFSIZE) {
466                                 err(1, "get_buffer: %s:%d:%d Read failed at "
467                                        "offset %lld",
468                                     cl->volume->name, buf->cluster->clu_no,
469                                     buf_no, buf->buf_offset);
470                         }
471                 }
472         }
473         if (buf_type) {
474                 bzero(ondisk, HAMMER_BUFSIZE);
475                 initbuffer(&buf->alist, &ondisk->head, buf_type);
476                 buf->cache.modified = 1;
477         }
478         return(buf);
479 }
480
481 void
482 rel_buffer(struct buffer_info *buffer)
483 {
484         struct cluster_info *cluster;
485
486         assert(buffer->cache.refs > 0);
487         if (--buffer->cache.refs == 0) {
488                 if (buffer->cache.delete) {
489                         cluster = buffer->cluster;
490                         if (buffer->cache.modified)
491                                 flush_buffer(buffer);
492                         TAILQ_REMOVE(&cluster->buffer_list, buffer, entry);
493                         hammer_cache_del(&buffer->cache);
494                         free(buffer->ondisk);
495                         free(buffer);
496                         rel_cluster(cluster);
497                 }
498         }
499 }
500
501 /*
502  * Allocate HAMMER elements - btree nodes, data storage, and record elements
503  */
504 void *
505 alloc_btree_element(struct cluster_info *cluster, int32_t *offp)
506 {
507         struct buffer_info *buf;
508         hammer_alist_t live;
509         int32_t elm_no;
510         void *item;
511
512         live = &cluster->alist_btree;
513         elm_no = hammer_alist_alloc_fwd(live, 1, cluster->ondisk->idx_index);
514         if (elm_no == HAMMER_ALIST_BLOCK_NONE)
515                 elm_no = hammer_alist_alloc_fwd(live, 1, 0);
516         if (elm_no == HAMMER_ALIST_BLOCK_NONE) {
517                 alloc_new_buffer(cluster, live,
518                                  HAMMER_FSBUF_BTREE, HAMMER_BTREE_NODES);
519                 ++cluster->ondisk->stat_idx_bufs;
520                 ++cluster->volume->ondisk->vol_stat_idx_bufs;
521                 ++cluster->volume->ondisk->vol0_stat_idx_bufs;
522                 elm_no = hammer_alist_alloc(live, 1);
523                 assert(elm_no != HAMMER_ALIST_BLOCK_NONE);
524         }
525         cluster->ondisk->idx_index = elm_no;
526         buf = get_buffer(cluster, elm_no / HAMMER_FSBUF_MAXBLKS, 0);
527         assert(buf->ondisk->head.buf_type != 0);
528         item = &buf->ondisk->btree.nodes[elm_no & HAMMER_FSBUF_BLKMASK];
529         *offp = buf->buf_no * HAMMER_BUFSIZE +
530                 ((char *)item - (char *)buf->ondisk);
531         return(item);
532 }
533
534 void *
535 alloc_data_element(struct cluster_info *cluster, int32_t bytes, int32_t *offp)
536 {
537         struct buffer_info *buf;
538         hammer_alist_t live;
539         int32_t elm_no;
540         int32_t nblks = (bytes + HAMMER_DATA_BLKMASK) & ~HAMMER_DATA_BLKMASK;
541         void *item;
542
543         /*
544          * Try to allocate a btree-node.  If elm_no is HAMMER_ALIST_BLOCK_NONE
545          * and buf is non-NULL we have to initialize a new buffer's a-list.
546          */
547         live = &cluster->alist_mdata;
548         elm_no = hammer_alist_alloc_fwd(live, nblks, cluster->ondisk->idx_data);
549         if (elm_no == HAMMER_ALIST_BLOCK_NONE)
550                 elm_no = hammer_alist_alloc_fwd(live, 1, 0);
551         if (elm_no == HAMMER_ALIST_BLOCK_NONE) {
552                 alloc_new_buffer(cluster, live,
553                                  HAMMER_FSBUF_DATA, HAMMER_DATA_NODES);
554                 ++cluster->ondisk->stat_data_bufs;
555                 ++cluster->volume->ondisk->vol_stat_data_bufs;
556                 ++cluster->volume->ondisk->vol0_stat_data_bufs;
557                 elm_no = hammer_alist_alloc(live, nblks);
558                 assert(elm_no != HAMMER_ALIST_BLOCK_NONE);
559         }
560         cluster->ondisk->idx_index = elm_no;
561         buf = get_buffer(cluster, elm_no / HAMMER_FSBUF_MAXBLKS, 0);
562         assert(buf->ondisk->head.buf_type != 0);
563         item = &buf->ondisk->data.data[elm_no & HAMMER_FSBUF_BLKMASK];
564         *offp = buf->buf_no * HAMMER_BUFSIZE +
565                 ((char *)item - (char *)buf->ondisk);
566         return(item);
567 }
568
569 void *
570 alloc_record_element(struct cluster_info *cluster, int32_t *offp)
571 {
572         struct buffer_info *buf;
573         hammer_alist_t live;
574         int32_t elm_no;
575         void *item;
576
577         live = &cluster->alist_record;
578         elm_no = hammer_alist_alloc_rev(live, 1, cluster->ondisk->idx_record);
579         if (elm_no == HAMMER_ALIST_BLOCK_NONE)
580                 elm_no = hammer_alist_alloc_rev(live, 1,HAMMER_ALIST_BLOCK_MAX);
581         if (elm_no == HAMMER_ALIST_BLOCK_NONE) {
582                 alloc_new_buffer(cluster, live,
583                                  HAMMER_FSBUF_RECORDS, HAMMER_RECORD_NODES);
584                 ++cluster->ondisk->stat_rec_bufs;
585                 ++cluster->volume->ondisk->vol_stat_rec_bufs;
586                 ++cluster->volume->ondisk->vol0_stat_rec_bufs;
587                 elm_no = hammer_alist_alloc_rev(live, 1,HAMMER_ALIST_BLOCK_MAX);
588                 assert(elm_no != HAMMER_ALIST_BLOCK_NONE);
589         }
590         cluster->ondisk->idx_record = elm_no;
591         buf = get_buffer(cluster, elm_no / HAMMER_FSBUF_MAXBLKS, 0);
592         assert(buf->ondisk->head.buf_type != 0);
593         item = &buf->ondisk->record.recs[elm_no & HAMMER_FSBUF_BLKMASK];
594         *offp = buf->buf_no * HAMMER_BUFSIZE +
595                 ((char *)item - (char *)buf->ondisk);
596         return(item);
597 }
598
599 static void
600 alloc_new_buffer(struct cluster_info *cluster, hammer_alist_t live,
601                  u_int64_t type, int32_t nelements)
602 {
603         int32_t buf_no;
604         struct buffer_info *buf;
605
606         if (type == HAMMER_FSBUF_RECORDS) {
607                 buf_no = hammer_alist_alloc_rev(&cluster->alist_master, 1,
608                                                 HAMMER_ALIST_BLOCK_MAX);
609         } else {
610                 buf_no = hammer_alist_alloc_fwd(&cluster->alist_master, 1, 
611                                                 0);
612         }
613         assert(buf_no != HAMMER_ALIST_BLOCK_NONE);
614         buf = get_buffer(cluster, buf_no, type);
615         hammer_alist_free(live, buf_no * HAMMER_FSBUF_MAXBLKS, nelements);
616 /*      rel_buffer(buffer);XXX modified bit for multiple gets/rels */
617 }
618
/*
 * Flush various tracking structures to disk
 */
626 void
627 flush_all_volumes(void)
628 {
629         struct volume_info *vol;
630
631         TAILQ_FOREACH(vol, &VolList, entry)
632                 flush_volume(vol);
633 }
634
635 void
636 flush_volume(struct volume_info *vol)
637 {
638         struct supercl_info *supercl;
639         struct cluster_info *cl;
640
641         TAILQ_FOREACH(supercl, &vol->supercl_list, entry)
642                 flush_supercl(supercl);
643         TAILQ_FOREACH(cl, &vol->cluster_list, entry)
644                 flush_cluster(cl);
645         writehammerbuf(vol, vol->ondisk, 0);
646         vol->cache.modified = 0;
647 }
648
649 void
650 flush_supercl(struct supercl_info *supercl)
651 {
652         int64_t supercl_offset;
653
654         supercl_offset = supercl->scl_offset;
655         writehammerbuf(supercl->volume, supercl->ondisk, supercl_offset);
656         supercl->cache.modified = 0;
657 }
658
659 void
660 flush_cluster(struct cluster_info *cl)
661 {
662         struct buffer_info *buf;
663         int64_t cluster_offset;
664
665         TAILQ_FOREACH(buf, &cl->buffer_list, entry)
666                 flush_buffer(buf);
667         cluster_offset = cl->clu_offset;
668         writehammerbuf(cl->volume, cl->ondisk, cluster_offset);
669         cl->cache.modified = 0;
670 }
671
/*
 * Write a buffer's on-disk image back to its volume at buf_offset and
 * mark it clean.
 */
void
flush_buffer(struct buffer_info *buf)
{
        writehammerbuf(buf->volume, buf->ondisk, buf->buf_offset);
        buf->cache.modified = 0;
}
678
679 /*
680  * Generic buffer initialization
681  */
682 static void
683 initbuffer(hammer_alist_t live, hammer_fsbuf_head_t head, u_int64_t type)
684 {
685         head->buf_type = type;
686         hammer_alist_init(live, 0, 0, HAMMER_ASTATE_ALLOC);
687 }
688
#if 0
/*
 * Core I/O operations
 */
/*
 * Read one HAMMER_BUFSIZE block from the volume at the given byte
 * offset into data.  Exits via err() on a short or failed read.
 * Currently unused - compiled out.
 */
static void
readhammerbuf(struct volume_info *vol, void *data, int64_t offset)
{
        ssize_t n;

        n = pread(vol->fd, data, HAMMER_BUFSIZE, offset);
        if (n != HAMMER_BUFSIZE)
                err(1, "Read volume %d (%s)", vol->vol_no, vol->name);
}

#endif
704
705 static void
706 writehammerbuf(struct volume_info *vol, const void *data, int64_t offset)
707 {
708         ssize_t n;
709
710         n = pwrite(vol->fd, data, HAMMER_BUFSIZE, offset);
711         if (n != HAMMER_BUFSIZE)
712                 err(1, "Write volume %d (%s)", vol->vol_no, vol->name);
713 }
714
/*
 * Print a printf-style message to stderr, append a newline, and
 * terminate the program with exit status 1.
 */
void
panic(const char *ctl, ...)
{
        va_list ap;

        va_start(ap, ctl);
        vfprintf(stderr, ctl, ap);
        va_end(ap);
        fputc('\n', stderr);
        exit(1);
}
726