2 * Copyright (c) 2007 The DragonFly Project. All rights reserved.
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@backplane.com>
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * 3. Neither the name of The DragonFly Project nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific, prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * $DragonFly: src/sys/vfs/hammer/hammer_disk.h,v 1.1 2007/10/12 18:57:45 dillon Exp $
42 * The structures below represent the on-disk format for a HAMMER
43 * filesystem. Note that all fields for on-disk structures are naturally
44 * aligned. The host endian format is used - compatibility is possible
45 * if the implementation detects reversed endian and adjusts data accordingly.
47 * Most of HAMMER revolves around the concept of an object identifier. An
48 * obj_id is a 64 bit quantity which uniquely identifies a filesystem object
49 * FOR THE ENTIRE LIFE OF THE FILESYSTEM. This uniqueness allows backups
50 * and mirrors to retain varying amounts of filesystem history by removing
51 * any possibility of conflict through identifier reuse.
 * A HAMMER filesystem may span multiple volumes.
55 * A HAMMER filesystem uses a 16K filesystem buffer size. All filesystem
56 * I/O is done in multiples of 16K. Most buffer-sized headers such as those
57 * used by volumes, super-clusters, clusters, and basic filesystem buffers
 * use fixed-sized A-lists which are heavily dependent on HAMMER_BUFSIZE.
/*
 * All HAMMER filesystem I/O is performed in multiples of the 16K
 * filesystem buffer size.  HAMMER_BUFMASK masks the intra-buffer
 * byte offset (valid because HAMMER_BUFSIZE is a power of 2).
 */
#define HAMMER_BUFSIZE	16384
#define HAMMER_BUFMASK	(HAMMER_BUFSIZE - 1)
 * HAMMER transaction ids are 64 bit unsigned integers and are usually
65 * synchronized with the time of day in nanoseconds.
typedef u_int64_t hammer_tid_t;	/* 64-bit transaction id, see comment above */
70 * Most HAMMER data structures are embedded in 16K filesystem buffers.
71 * All filesystem buffers except those designated as pure-data buffers
72 * contain this 128-byte header.
74 * This structure contains an embedded A-List used to manage space within
75 * the filesystem buffer. It is not used by volume or cluster header
76 * buffers, or by pure-data buffers. The granularity is variable and
77 * depends on the type of filesystem buffer. BLKSIZE is just a minimum.
/*
 * Common 128-byte header embedded in every filesystem buffer except
 * pure-data buffers.  The trailing A-list meta elements manage space
 * within the buffer; the allocation granularity depends on the buffer
 * type.
 */
#define HAMMER_FSBUF_HEAD_SIZE	128
#define HAMMER_FSBUF_MAXBLKS	256
#define HAMMER_FSBUF_METAELMS	HAMMER_ALIST_METAELMS_256_1LYR	/* 11 */

struct hammer_fsbuf_head {
	/*
	 * NOTE(review): this extraction is missing one or two leading
	 * fields (original lines 85-86, presumably the buf_type magic
	 * number) and the closing brace -- verify against the original
	 * header before relying on this layout.
	 */
	u_int32_t buf_reserved07;
	u_int32_t reserved[6];
	struct hammer_almeta buf_almeta[HAMMER_FSBUF_METAELMS];

typedef struct hammer_fsbuf_head *hammer_fsbuf_head_t;
95 * Note: Pure-data buffers contain pure-data and have no buf_type.
96 * Piecemeal data buffers do have a header and use HAMMER_FSBUF_DATA.
/*
 * buf_type magic numbers identifying each filesystem buffer type,
 * derived from the ASCII strings noted on each line.  Pure-data
 * buffers carry no header and thus no buf_type; piecemeal data
 * buffers use HAMMER_FSBUF_DATA.  HAMMER_FSBUF_VOLUME_REV is the
 * byte-swapped form of HAMMER_FSBUF_VOLUME, used to detect a
 * reverse-endian volume.
 */
#define HAMMER_FSBUF_VOLUME	0xC8414D4DC5523031ULL	/* HAMMER01 */
#define HAMMER_FSBUF_SUPERCL	0xC8414D52C3555052ULL	/* HAMRSUPR */
#define HAMMER_FSBUF_CLUSTER	0xC8414D52C34C5553ULL	/* HAMRCLUS */
#define HAMMER_FSBUF_RECORDS	0xC8414D52D2454353ULL	/* HAMRRECS */
#define HAMMER_FSBUF_BTREE	0xC8414D52C2545245ULL	/* HAMRBTRE */
#define HAMMER_FSBUF_DATA	0xC8414D52C4415441ULL	/* HAMRDATA */

#define HAMMER_FSBUF_VOLUME_REV	0x313052C54D4D41C8ULL	/* (reverse endian) */
108 * The B-Tree structures need hammer_fsbuf_head.
110 #include "hammer_btree.h"
113 * HAMMER Volume header
115 * A HAMMER filesystem is built from any number of block devices, Each block
116 * device contains a volume header followed by however many super-clusters
117 * and clusters fit into the volume. Clusters cannot be migrated but the
118 * data they contain can, so HAMMER can use a truncated cluster for any
119 * extra space at the end of the volume.
121 * The volume containing the root cluster is designated as the master volume.
122 * The root cluster designation can be moved to any volume.
124 * The volume header takes up an entire 16K filesystem buffer and includes
125 * a one or two-layered A-list to manage the clusters making up the volume.
126 * A volume containing up to 32768 clusters (2TB) can be managed with a
127 * single-layered A-list. A two-layer A-list is capable of managing up
128 * to 16384 super-clusters with each super-cluster containing 32768 clusters
129 * (32768 TB per volume total). The number of volumes is limited to 32768
130 * but it only takes 512 to fill out a 64 bit address space so for all
131 * intents and purposes the filesystem has no limits.
133 * cluster addressing within a volume depends on whether a single or
 * dual-layer A-list is used. If a dual-layer A-list is used a 16K
135 * super-cluster buffer is needed for every 16384 clusters in the volume.
136 * However, because the A-list's hinting is grouped in multiples of 16
137 * we group 16 super-cluster buffers together (starting just after the
138 * volume header), followed by 16384x16 clusters, and repeat.
 * NOTE: A 32768-element single-layer and 16384-element dual-layer A-list
/*
 * Cluster-management limits for a volume: a 1-layer A-list addresses
 * up to 32768 clusters directly; a 2-layer A-list addresses up to
 * 16384 super-clusters of 32768 clusters each.  Super-cluster buffers
 * are grouped 16 at a time to match the A-list's hint grouping.
 */
#define HAMMER_VOL_MAXCLUSTERS		32768	/* 1-layer */
#define HAMMER_VOL_MAXSUPERCLUSTERS	16384	/* 2-layer */
#define HAMMER_VOL_SUPERCLUSTER_GROUP	16
#define HAMMER_VOL_METAELMS_1LYR	HAMMER_ALIST_METAELMS_32K_1LYR
#define HAMMER_VOL_METAELMS_2LYR	HAMMER_ALIST_METAELMS_16K_2LYR
/*
 * HAMMER volume header.  Occupies one full 16K filesystem buffer at
 * the front of each block device making up the filesystem.
 */
struct hammer_volume_ondisk {
	struct hammer_fsbuf_head head;
	int64_t vol_beg;	/* byte offset of first cluster in volume */
	int64_t vol_end;	/* byte offset of volume EOF */
	int64_t vol_locked;	/* reserved clusters are >= this offset */

	uuid_t vol_fsid;	/* identify filesystem */
	uuid_t vol_fstype;	/* identify filesystem type */
	char vol_name[64];	/* Name of volume */

	int32_t vol_no;		/* volume number within filesystem */
	int32_t vol_count;	/* number of volumes making up FS */

	u_int32_t vol_version;	/* version control information */
	u_int32_t vol_segsize;	/* cluster size power of 2, 512M max */
	u_int32_t vol_flags;	/* volume flags (HAMMER_VOLF_*) */
	u_int32_t vol_rootvol;	/* which volume is the root volume? */

	int32_t vol_clsize;	/* cluster size (same for all volumes) */
	u_int32_t vol_reserved05;
	u_int32_t vol_reserved06;
	u_int32_t vol_reserved07;

	/*
	 * These fields are initialized and space is reserved in every
	 * volume making up a HAMMER filesystem, but only the master volume
	 * contains valid data.
	 */
	int32_t vol0_rootcluster;	/* root cluster no (index) in rootvol */
	u_int32_t vol0_reserved02;
	u_int32_t vol0_reserved03;
	hammer_tid_t vol0_nexttid;	/* next TID */
	u_int64_t vol0_recid;		/* fs-wide record id allocator */

	/*
	 * Meta elements for the volume header's A-list, which is either a
	 * 1-layer A-list capable of managing 32768 clusters, or a 2-layer
	 * A-list capable of managing 16384 super-clusters (each of which
	 * can handle 32768 clusters).
	 *
	 * NOTE(review): original lines 182-185 and the structure's closing
	 * brace are missing from this extraction -- verify the complete
	 * field list against the original header.
	 */
	hammer_almeta_t super[HAMMER_VOL_METAELMS_2LYR];
	hammer_almeta_t normal[HAMMER_VOL_METAELMS_1LYR];

	u_int32_t vol0_bitmap[1024];
/*
 * Per-volume flags stored in vol_flags.
 */
#define HAMMER_VOLF_VALID		0x0001	/* valid entry */
#define HAMMER_VOLF_OPEN		0x0002	/* volume is open */
#define HAMMER_VOLF_SUPERCL_ENABLE	0x0004	/* enable supercluster layer */
#define HAMMER_VOLF_SUPERCL_RESERVE	0x0008	/* supercluster layout */
204 * HAMMER Super-cluster header
206 * A super-cluster is used to increase the maximum size of a volume.
207 * HAMMER's volume header can manage up to 32768 direct clusters or
208 * 16384 super-clusters. Each super-cluster (which is basically just
209 * a 16K filesystem buffer) can manage up to 32768 clusters. So adding
210 * a super-cluster layer allows a HAMMER volume to be sized upwards of
211 * around 32768TB instead of 2TB.
213 * Any volume initially formatted to be over 32G reserves space for the layer
214 * but the layer is only enabled if the volume exceeds 2TB.
/*
 * HAMMER super-cluster header.  A super-cluster is a 16K filesystem
 * buffer whose A-list manages up to 32768 clusters, extending a
 * volume beyond the 2TB addressable with direct clusters (see the
 * volume header comment above).
 */
#define HAMMER_SUPERCL_METAELMS	HAMMER_ALIST_METAELMS_32K_1LYR

struct hammer_supercl_ondisk {
	struct hammer_fsbuf_head head;
	uuid_t vol_fsid;	/* identify filesystem - sanity check */
	uuid_t vol_fstype;	/* identify filesystem type - sanity check */
	int32_t reserved[1024];

	hammer_almeta_t scl_meta[HAMMER_SUPERCL_METAELMS];
	/* NOTE(review): closing brace missing from this extraction */
228 * HAMMER Cluster header
230 * A cluster is limited to 64MB and is made up of 4096 16K filesystem
231 * buffers. The cluster header contains four A-lists to manage these
234 * master_alist - This is a non-layered A-list which manages pure-data
235 * allocations and allocations on behalf of other A-lists.
237 * btree_alist - This is a layered A-list which manages filesystem buffers
238 * containing B-Tree nodes.
240 * record_alist - This is a layered A-list which manages filesystem buffers
241 * containing records.
243 * mdata_alist - This is a layered A-list which manages filesystem buffers
244 * containing piecemeal record data.
246 * General storage management works like this: All the A-lists except the
247 * master start in an all-allocated state. Now lets say you wish to allocate
 * a B-Tree node out of the btree_alist. If the allocation fails you allocate
249 * a pure data block out of master_alist and then free that block in
250 * btree_alist, thereby assigning more space to the btree_alist, and then
251 * retry your allocation out of the btree_alist. In the reverse direction,
252 * filesystem buffers can be garbage collected back to master_alist simply
253 * by doing whole-buffer allocations in btree_alist and then freeing the
254 * space in master_alist. The whole-buffer-allocation approach to garbage
255 * collection works because A-list allocations are always power-of-2 sized
/*
 * A cluster is made up of at most 4096 16K filesystem buffers (64MB).
 * The master A-list is non-layered; the three slave A-lists (btree,
 * record, mdata) are layered -- see the comment above.
 */
#define HAMMER_CLU_MAXBUFFERS		4096
#define HAMMER_CLU_MASTER_METAELMS	HAMMER_ALIST_METAELMS_4K_1LYR
#define HAMMER_CLU_SLAVE_METAELMS	HAMMER_ALIST_METAELMS_4K_2LYR
/*
 * HAMMER cluster header.  Holds the cluster's identity, append/prepend
 * points, B-Tree linkage to the parent cluster, and the four A-lists
 * (one master, three slave) that manage the cluster's buffers.
 */
struct hammer_cluster_ondisk {
	struct hammer_fsbuf_head head;
	uuid_t vol_fsid;	/* identify filesystem - sanity check */
	uuid_t vol_fstype;	/* identify filesystem type - sanity check */

	u_int64_t clu_gen;	/* identify generation number of cluster */
	u_int64_t clu_unused01;

	hammer_tid_t clu_id;	/* unique cluster self identification */
	int32_t vol_no;		/* cluster contained in volume (sanity) */
	u_int32_t clu_flags;	/* cluster flags */

	int32_t clu_start;	/* start of data (byte offset) */
	int32_t clu_limit;	/* end of data (byte offset) */
	int32_t clu_no;		/* cluster index in volume (sanity) */
	u_int32_t clu_reserved03;

	u_int32_t clu_reserved04;
	u_int32_t clu_reserved05;
	u_int32_t clu_reserved06;
	u_int32_t clu_reserved07;

	int32_t idx_data;	/* data append point (byte offset) */
	int32_t idx_index;	/* index append point (byte offset) */
	int32_t idx_record;	/* record prepend point (byte offset) */
	u_int32_t idx_reserved03;

	/*
	 * Specify the range of information stored in this cluster as two
	 * btree elements.  These elements exist as separate records that
	 * point to us in the parent cluster's B-Tree.
	 *
	 * Note that clu_btree_end is range-inclusive, not range-exclusive,
	 * i.e. 0-1023 instead of 0,1024.
	 */
	struct hammer_base_elm clu_btree_beg;
	struct hammer_base_elm clu_btree_end;

	/*
	 * The cluster's B-Tree root can change as a side effect of insertion
	 * and deletion operations, so store an offset instead of embedding
	 * the root node in the header.
	 */
	int32_t clu_btree_root;
	int32_t clu_btree_parent_vol_no;
	int32_t clu_btree_parent_clu_no;
	hammer_tid_t clu_btree_parent_clu_id;

	/* NOTE(review): presumably the highest rec_id known to be synced
	 * (cf. rec_id "iterator for recovery" below) -- verify */
	u_int64_t synchronized_rec_id;

	hammer_almeta_t clu_master_meta[HAMMER_CLU_MASTER_METAELMS];
	hammer_almeta_t clu_btree_meta[HAMMER_CLU_SLAVE_METAELMS];
	hammer_almeta_t clu_record_meta[HAMMER_CLU_SLAVE_METAELMS];
	hammer_almeta_t clu_mdata_meta[HAMMER_CLU_SLAVE_METAELMS];
	/* NOTE(review): closing brace missing from this extraction */
319 * HAMMER records are 96 byte entities encoded into 16K filesystem buffers.
320 * Each record has a 64 byte header and a 32 byte extension. 170 records
321 * fit into each buffer. Storage is managed by the buffer's A-List.
323 * Each record may have an explicit data reference to a block of data up
324 * to 2^31-1 bytes in size within the current cluster. Note that multiple
325 * records may share the same or overlapping data references.
329 * All HAMMER records have a common 64-byte base and a 32-byte extension.
331 * Many HAMMER record types reference out-of-band data within the cluster.
332 * This data can also be stored in-band in the record itself if it is small
333 * enough. Either way, (data_offset, data_len) points to it.
335 * Key comparison order: obj_id, rec_type, key, create_tid
/*
 * Common 64-byte base shared by all HAMMER records; each record type
 * adds a 32-byte extension (see the structures below).  Hex comments
 * give the byte offset of each field.
 *
 * Key comparison order: obj_id, rec_type, key, create_tid.
 */
struct hammer_base_record {
	int64_t obj_id;		/* 00 object record is associated with */
	int64_t key;		/* 08 indexing key (offset or namekey) */

	hammer_tid_t create_tid;/* 10 transaction id for record creation */
	hammer_tid_t delete_tid;/* 18 transaction id for record update/delete */

	u_int16_t rec_type;	/* 20 type of record */
	u_int16_t obj_type;	/* 22 type of object (if inode) */
	u_int32_t data_offset;	/* 24 intra-cluster data reference */
				/*    An offset of 0 indicates zero-fill */
	int32_t data_len;	/* 28 size of data (remainder zero-fill) */
	u_int32_t data_crc;	/* 2C data sanity check */
	u_int64_t rec_id;	/* 30 record id (iterator for recovery) */
	u_int64_t reserved07;	/* 38 */
	/* NOTE(review): closing brace missing from this extraction */
356 * Record types are fairly straightforward. The B-Tree includes the record
357 * type in its index sort.
359 * In particular please note that it is possible to create a pseudo-
360 * filesystem within a HAMMER filesystem by creating a special object
361 * type within a directory. Pseudo-filesystems are used as replication
362 * targets and even though they are built within a HAMMER filesystem they
363 * get their own obj_id space (and thus can serve as a replication target)
364 * and look like a mount point to the system.
/*
 * Record types (rec_type), included in the B-Tree index sort.  A
 * pseudo-filesystem is created via a special object type within a
 * directory; it gets its own obj_id space (serving as a replication
 * target) while looking like a mount point to the system.
 */
#define HAMMER_RECTYPE_UNKNOWN		0
#define HAMMER_RECTYPE_INODE		1	/* inode in obj_id space */
#define HAMMER_RECTYPE_PSEUDO_INODE	2	/* pseudo filesystem */
#define HAMMER_RECTYPE_DATA_CREATE	0x10
#define HAMMER_RECTYPE_DATA_ZEROFILL	0x11
#define HAMMER_RECTYPE_DATA_DELETE	0x12
#define HAMMER_RECTYPE_DATA_UPDATE	0x13
#define HAMMER_RECTYPE_DIR_CREATE	0x20
#define HAMMER_RECTYPE_DIR_DELETE	0x22
#define HAMMER_RECTYPE_DIR_UPDATE	0x23
#define HAMMER_RECTYPE_DB_CREATE	0x30
#define HAMMER_RECTYPE_DB_DELETE	0x32
#define HAMMER_RECTYPE_DB_UPDATE	0x33
#define HAMMER_RECTYPE_EXT_CREATE	0x40	/* ext attributes */
#define HAMMER_RECTYPE_EXT_DELETE	0x42
#define HAMMER_RECTYPE_EXT_UPDATE	0x43
/*
 * Object types stored in obj_type.  Values 0x10/0x11 appear to be
 * B-Tree range sentinels (cf. clu_btree_beg/clu_btree_end) rather
 * than real file objects.
 */
#define HAMMER_OBJTYPE_DIRECTORY	1
#define HAMMER_OBJTYPE_REGFILE		2
#define HAMMER_OBJTYPE_DBFILE		3
#define HAMMER_OBJTYPE_FIFO		4
#define HAMMER_OBJTYPE_CDEV		5
#define HAMMER_OBJTYPE_BDEV		6
#define HAMMER_OBJTYPE_SOFTLINK		7
#define HAMMER_OBJTYPE_PSEUDOFS		8	/* pseudo filesystem obj */

#define HAMMER_OBJTYPE_CLUSTER_BEG	0x10
#define HAMMER_OBJTYPE_CLUSTER_END	0x11
/*
 * Generic full-sized record.
 *
 * NOTE(review): the remainder of this structure (original lines
 * 399-402, presumably filler to pad the record to full size, plus the
 * closing brace) is missing from this extraction -- verify.
 */
struct hammer_generic_record {
	struct hammer_base_record base;
404 * A HAMMER inode record.
406 * This forms the basis for a filesystem object. obj_id is the inode number,
407 * key1 represents the pseudo filesystem id for security partitioning
408 * (preventing cross-links and/or restricting a NFS export and specifying the
409 * security policy), and key2 represents the data retention policy id.
411 * Inode numbers are 64 bit quantities which uniquely identify a filesystem
412 * object for the ENTIRE life of the filesystem, even after the object has
413 * been deleted. For all intents and purposes inode numbers are simply
414 * allocated by incrementing a sequence space.
416 * There is an important distinction between the data stored in the inode
417 * record and the record's data reference. The record references a
418 * hammer_inode_data structure but the filesystem object size and hard link
419 * count is stored in the inode record itself. This allows multiple inodes
420 * to share the same hammer_inode_data structure. This is possible because
421 * any modifications will lay out new data. The HAMMER implementation need
422 * not use the data-sharing ability when laying down new records.
424 * A HAMMER inode is subject to the same historical storage requirements
425 * as any other record. In particular any change in filesystem or hard link
426 * count will lay down a new inode record when the filesystem is synced to
427 * disk. This can lead to a lot of junk records which get cleaned up by
428 * the data retention policy.
430 * The ino_atime and ino_mtime fields are a special case. Modifications to
431 * these fields do NOT lay down a new record by default, though the values
432 * are effectively frozen for snapshots which access historical versions
433 * of the inode record due to other operations. This means that atime will
434 * not necessarily be accurate in snapshots, backups, or mirrors. mtime
435 * will be accurate in backups and mirrors since it can be regenerated from
436 * the mirroring stream.
438 * Because nlinks is historically retained the hardlink count will be
439 * accurate when accessing a HAMMER filesystem snapshot.
/*
 * HAMMER inode record.  obj_id is the inode number.  The object size
 * and hard-link count are stored in the record itself rather than in
 * the referenced hammer_inode_data, allowing multiple inodes to share
 * one data structure (see the comment above).  atime/mtime updates do
 * not lay down new records by default.
 */
struct hammer_inode_record {
	struct hammer_base_record base;
	u_int64_t ino_atime;	/* last access time (not historical) */
	u_int64_t ino_mtime;	/* last modified time (not historical) */
	u_int64_t ino_size;	/* filesystem object size */
	u_int64_t ino_nlinks;	/* hard links */
	/* NOTE(review): closing brace missing from this extraction */
450 * Data records specify the entire contents of a regular file object,
451 * including attributes. Small amounts of data can theoretically be
 * embedded in the record itself but the use of this ability versus using
453 * an out-of-band data reference depends on the implementation.
/*
 * Data record -- specifies the contents of a regular file object,
 * including attributes (see the comment above).
 *
 * NOTE(review): the remainder of this structure and its closing brace
 * (original lines 457-459) are missing from this extraction -- verify.
 */
struct hammer_data_record {
	struct hammer_base_record base;
461 * A directory entry specifies the HAMMER filesystem object id, a copy of
462 * the file type, and file name (either embedded or as out-of-band data).
463 * If the file name is short enough to fit into den_name[] (including a
464 * terminating nul) then it will be embedded in the record, otherwise it
465 * is stored out-of-band. The base record's data reference always points
466 * to the nul-terminated filename regardless.
 * Directory entries are indexed with a 128 bit namekey rather than an
469 * offset. A portion of the namekey is an iterator or randomizer to deal
/*
 * Directory entry record.  References the target object id, caches
 * the file type, and embeds short file names (including terminating
 * nul) in den_name[]; longer names are stored out-of-band.  The base
 * record's data reference always points at the nul-terminated name.
 */
struct hammer_entry_record {
	struct hammer_base_record base;
	u_int64_t obj_id;	/* object being referenced */
	u_int64_t reserved01;
	u_int8_t den_type;	/* cached file type */
	char den_name[15];	/* short file names fit in record */
	/* NOTE(review): closing brace missing from this extraction */
/*
 * Hammer rollup record: union of all on-disk record types.  Every
 * variant begins with the common hammer_base_record.
 */
union hammer_record_ondisk {
	struct hammer_base_record base;
	struct hammer_generic_record generic;
	struct hammer_inode_record inode;
	struct hammer_data_record data;
	struct hammer_entry_record entry;
	/* NOTE(review): closing brace missing from this extraction */

typedef union hammer_record_ondisk *hammer_record_ondisk_t;
/*
 * Filesystem buffer for records: a fsbuf head followed by as many
 * records as fit in the remaining 16K.
 */
#define HAMMER_RECORD_NODES	\
	((HAMMER_BUFSIZE - sizeof(struct hammer_fsbuf_head)) / \
	sizeof(union hammer_record_ondisk))

struct hammer_fsbuf_recs {
	struct hammer_fsbuf_head head;
	/* NOTE(review): original line 502 (a field or padding) and the
	 * closing brace are missing from this extraction -- verify */
	union hammer_record_ondisk recs[HAMMER_RECORD_NODES];
/*
 * Filesystem buffer for piecemeal data, managed as an array of
 * 64-byte blocks.  This does not apply to dedicated pure-data
 * buffers, which have no header.
 */
#define HAMMER_DATA_SIZE	(HAMMER_BUFSIZE - sizeof(struct hammer_fsbuf_head))
#define HAMMER_DATA_BLKSIZE	64
#define HAMMER_DATA_NODES	(HAMMER_DATA_SIZE / HAMMER_DATA_BLKSIZE)

struct hammer_fsbuf_data {
	struct hammer_fsbuf_head head;
	u_int8_t data[HAMMER_DATA_NODES][HAMMER_DATA_BLKSIZE];
	/* NOTE(review): closing brace missing from this extraction */
522 * HAMMER UNIX Attribute data
524 * The data reference in a HAMMER inode record points to this structure. Any
525 * modifications to the contents of this structure will result in a record
526 * replacement operation.
528 * state_sum allows a filesystem object to be validated to a degree by
529 * generating a checksum of all of its pieces (in no particular order) and
530 * checking it against this field.
/*
 * HAMMER UNIX attribute data, referenced by the inode record's data
 * reference.  Any modification results in a record replacement
 * operation.  state_sum is a cumulative checksum over all pieces of
 * the filesystem object (in no particular order).
 */
struct hammer_inode_data {
	u_int16_t version;	/* inode data version */
	u_int16_t mode;		/* basic unix permissions */
	u_int32_t uflags;	/* chflags */
	u_int64_t reserved01;
	u_int64_t reserved02;
	u_int64_t state_sum;	/* cumulative checksum */
	/* NOTE(review): original lines 539-542 (remaining fields and the
	 * closing brace) are missing from this extraction -- verify */

#define HAMMER_INODE_DATA_VERSION	1
/*
 * Rollup of the various structures embedded as record data.
 *
 * NOTE(review): the enclosing union/struct declaration (original
 * lines 547-548) and any remaining members are missing from this
 * extraction -- only this one member is visible; verify against the
 * original header.
 */
	struct hammer_inode_data inode;
/*
 * A-list function library support available to both the kernel and
 * userland.
 *
 * NOTE(review): original lines 561 and 563 are missing from this
 * extraction -- one or more prototypes may have been dropped.
 */
void hammer_alist_template(hammer_alist_config_t bl, int32_t blocks,
			int32_t base_radix, int32_t maxmeta);
void hammer_alist_init(hammer_alist_config_t bl, hammer_almeta_t meta);
int32_t hammer_alist_alloc(hammer_alist_t live, int32_t count);
int32_t hammer_alist_alloc_rev(hammer_alist_t live, int32_t count);
int32_t hammer_alist_alloc_from(hammer_alist_t live, int32_t cnt, int32_t beg);
void hammer_alist_free(hammer_alist_t live, int32_t blkno, int32_t count);