1 /*
2  * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@backplane.com>
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in
15  *    the documentation and/or other materials provided with the
16  *    distribution.
17  * 3. Neither the name of The DragonFly Project nor the names of its
18  *    contributors may be used to endorse or promote products derived
19  *    from this software without specific, prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
25  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  */
34
35 #ifndef VFS_HAMMER_HAMMER_H_
36 #define VFS_HAMMER_HAMMER_H_
37
38 /*
39  * This header file contains structures used internally by the HAMMERFS
40  * implementation.  See hammer_disk.h for on-disk structures.
41  */
42
43 #include <sys/param.h>
44 #ifdef _KERNEL
45 #include <sys/kernel.h>
46 #include <sys/systm.h>
47 #endif
48 #include <sys/conf.h>
49 #include <sys/tree.h>
50 #include <sys/malloc.h>
51 #include <sys/mount.h>
52 #include <sys/vnode.h>
53 #include <sys/proc.h>
54 #include <sys/priv.h>
55 #include <sys/dirent.h>
56 #include <sys/stat.h>
57 #include <sys/fcntl.h>
58 #include <sys/lockf.h>
59 #include <sys/file.h>
60 #include <sys/event.h>
61 #include <sys/buf.h>
62 #include <sys/queue.h>
63 #include <sys/ktr.h>
64 #include <sys/limits.h>
65 #include <sys/sysctl.h>
66 #include <vm/swap_pager.h>
67 #include <vm/vm_extern.h>
68
69 #include "hammer_disk.h"
70 #include "hammer_mount.h"
71 #include "hammer_ioctl.h"
72
73 #if defined(_KERNEL) || defined(_KERNEL_STRUCTURES)
74
75 MALLOC_DECLARE(M_HAMMER);
76
77 /*
78  * Kernel trace
79  */
80 #if !defined(KTR_HAMMER)
81 #define KTR_HAMMER      KTR_ALL
82 #endif
83 /* KTR_INFO_MASTER_EXTERN(hammer); */
84
85 /*
86  * Misc structures
87  */
88 struct hammer_mount;
89 struct hammer_inode;
90 struct hammer_volume;
91 struct hammer_buffer;
92 struct hammer_node;
93 struct hammer_undo;
94 struct hammer_reserve;
95 struct hammer_io;
96
97 /*
98  * Key structure used for custom RB tree inode lookups.  This prototypes
99  * the function hammer_ino_rb_tree_RB_LOOKUP_INFO(root, info).
100  */
101 typedef struct hammer_inode_info {
102         int64_t         obj_id;         /* (key) object identifier */
103         hammer_tid_t    obj_asof;       /* (key) snapshot transid or 0 */
104         uint32_t        obj_localization; /* (key) pseudo-fs id for upper 16 bits */
105         union {
106                 hammer_btree_leaf_elm_t leaf;
107         } u;
108 } *hammer_inode_info_t;
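
/*
 * Illustrative sketch, not part of the original interface: filling in the
 * lookup key before calling the RB_LOOKUP_INFO function prototyped above.
 * The wrapper name is hypothetical; rb_inos_root lives in struct
 * hammer_mount, which is declared later in this header.
 */
#if 0
static __inline struct hammer_inode *
example_inode_lookup(struct hammer_mount *hmp, int64_t obj_id,
                     hammer_tid_t asof, uint32_t localization)
{
        struct hammer_inode_info info;

        info.obj_id = obj_id;                   /* (key) object id */
        info.obj_asof = asof;                   /* (key) snapshot tid or 0 */
        info.obj_localization = localization;   /* (key) pseudo-fs id */
        return (hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &info));
}
#endif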
109
110 typedef enum hammer_transaction_type {
111         HAMMER_TRANS_RO,
112         HAMMER_TRANS_STD,
113         HAMMER_TRANS_FLS
114 } hammer_transaction_type_t;
115
116 /*
117  * HAMMER Transaction tracking
118  */
119 struct hammer_transaction {
120         hammer_transaction_type_t type;
121         struct hammer_mount *hmp;
122         hammer_tid_t    tid;
123         uint64_t        time;
124         uint32_t        time32;
125         int             sync_lock_refs;
126         int             flags;
127         struct hammer_volume *rootvol;
128 };
129
130 typedef struct hammer_transaction *hammer_transaction_t;
131
132 #define HAMMER_TRANSF_NEWINODE  0x0001
133 #define HAMMER_TRANSF_CRCDOM    0x0004  /* EDOM on CRC error, less critical */
134
135 /*
136  * HAMMER locks
137  */
138 struct hammer_lock {
139         volatile u_int  refs;           /* active references */
140         volatile u_int  lockval;        /* lock count and control bits */
141         struct thread   *lowner;        /* owner if exclusively held */
142         struct thread   *rowner;        /* owner if exclusively held */
143 };
144
145 #define HAMMER_REFS_LOCKED      0x40000000      /* transition check */
146 #define HAMMER_REFS_WANTED      0x20000000      /* transition check */
147 #define HAMMER_REFS_CHECK       0x10000000      /* transition check */
148
149 #define HAMMER_REFS_FLAGS       (HAMMER_REFS_LOCKED | \
150                                  HAMMER_REFS_WANTED | \
151                                  HAMMER_REFS_CHECK)
152
153 #define HAMMER_LOCKF_EXCLUSIVE  0x40000000
154 #define HAMMER_LOCKF_WANTED     0x20000000
155
156 static __inline int
157 hammer_notlocked(struct hammer_lock *lock)
158 {
159         return(lock->lockval == 0);
160 }
161
162 static __inline int
163 hammer_islocked(struct hammer_lock *lock)
164 {
165         return(lock->lockval != 0);
166 }
167
168 /*
169  * Returns the number of refs on the object.
170  */
171 static __inline int
172 hammer_isactive(struct hammer_lock *lock)
173 {
174         return(lock->refs & ~HAMMER_REFS_FLAGS);
175 }
176
177 static __inline int
178 hammer_oneref(struct hammer_lock *lock)
179 {
180         return((lock->refs & ~HAMMER_REFS_FLAGS) == 1);
181 }
182
183 static __inline int
184 hammer_norefs(struct hammer_lock *lock)
185 {
186         return((lock->refs & ~HAMMER_REFS_FLAGS) == 0);
187 }
188
189 static __inline int
190 hammer_norefsorlock(struct hammer_lock *lock)
191 {
192         return(lock->refs == 0);
193 }
194
195 static __inline int
196 hammer_refsorlock(struct hammer_lock *lock)
197 {
198         return(lock->refs != 0);
199 }
200
201 /*
202  * Return if we specifically own the lock exclusively.
203  */
204 static __inline int
205 hammer_lock_excl_owned(struct hammer_lock *lock, thread_t td)
206 {
207         if ((lock->lockval & HAMMER_LOCKF_EXCLUSIVE) &&
208             lock->lowner == td) {
209                 return(1);
210         }
211         return(0);
212 }
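
/*
 * Illustrative sketch, not part of the original interface: a caller that
 * must already hold a HAMMER lock exclusively can assert ownership with
 * hammer_lock_excl_owned() before touching protected state.  The wrapper
 * itself is hypothetical.
 */
#if 0
static __inline void
example_assert_lock_owned(struct hammer_lock *lock)
{
        KKASSERT(hammer_islocked(lock));
        KKASSERT(hammer_lock_excl_owned(lock, curthread));
}
#endif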
213
214 /*
215  * Flush state, used by various structures
216  */
217 typedef enum hammer_inode_state {
218         HAMMER_FST_IDLE,
219         HAMMER_FST_SETUP,
220         HAMMER_FST_FLUSH
221 } hammer_inode_state_t;
222
223 /*
224  * Pseudo-filesystem extended data tracking
225  */
226 struct hammer_pseudofs_inmem;
227 RB_HEAD(hammer_pfs_rb_tree, hammer_pseudofs_inmem);
228 RB_PROTOTYPE2(hammer_pfs_rb_tree, hammer_pseudofs_inmem, rb_node,
229               hammer_pfs_rb_compare, uint32_t);
230
231 struct hammer_pseudofs_inmem {
232         RB_ENTRY(hammer_pseudofs_inmem) rb_node;
233         struct hammer_lock      lock;
234         uint32_t                localization;
235         hammer_tid_t            create_tid;
236         int                     flags;
237         udev_t                  fsid_udev;
238         struct hammer_pseudofs_data pfsd;
239 };
240
241 typedef struct hammer_pseudofs_inmem *hammer_pseudofs_inmem_t;
242
243 /*
244  * Cache object ids.  A fixed number of objid cache structures are
245  * created to reserve object id's for newly created files in multiples
246  * of 100,000, localized to a particular directory, and recycled as
247  * needed.  This allows parallel create operations in different
248  * directories to retain fairly localized object ids which in turn
249  * improves reblocking performance and layout.
250  */
251 #define OBJID_CACHE_SIZE        2048
252 #define OBJID_CACHE_BULK_BITS   10              /* 10 bits (1024)       */
253 #define OBJID_CACHE_BULK        (32 * 32)       /* two level (1024)     */
254 #define OBJID_CACHE_BULK_MASK   (OBJID_CACHE_BULK - 1)
255 #define OBJID_CACHE_BULK_MASK64 ((uint64_t)(OBJID_CACHE_BULK - 1))
256
257 typedef struct hammer_objid_cache {
258         TAILQ_ENTRY(hammer_objid_cache) entry;
259         struct hammer_inode             *dip;
260         hammer_tid_t                    base_tid;
261         int                             count;
262         uint32_t                        bm0;
263         uint32_t                        bm1[32];
264 } *hammer_objid_cache_t;
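
/*
 * Illustrative sketch, not part of the original interface: an index in the
 * range [0, OBJID_CACHE_BULK) splits into a bm0 summary bit (upper 5 bits)
 * and a bm1 word/bit (lower 5 bits), giving the 32 x 32 two-level bitmap
 * used above.  The helper and the meaning of a set bit are assumptions.
 */
#if 0
static __inline int
example_objid_bit_is_set(hammer_objid_cache_t cache, int n)
{
        KKASSERT(n >= 0 && n < OBJID_CACHE_BULK);
        return ((cache->bm1[n >> 5] & (1U << (n & 31))) != 0);
}
#endif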
265
266 /*
267  * Associate an inode with a B-Tree node to cache search start positions
268  */
269 typedef struct hammer_node_cache {
270         TAILQ_ENTRY(hammer_node_cache)  entry;
271         struct hammer_node              *node;
272         struct hammer_inode             *ip;
273 } *hammer_node_cache_t;
274
275 TAILQ_HEAD(hammer_node_cache_list, hammer_node_cache);
276
277 /*
278  * Live dedup cache
279  */
280 struct hammer_dedup_cache;
281 RB_HEAD(hammer_dedup_crc_rb_tree, hammer_dedup_cache);
282 RB_PROTOTYPE2(hammer_dedup_crc_rb_tree, hammer_dedup_cache, crc_entry,
283                 hammer_dedup_crc_rb_compare, hammer_crc_t);
284
285 RB_HEAD(hammer_dedup_off_rb_tree, hammer_dedup_cache);
286 RB_PROTOTYPE2(hammer_dedup_off_rb_tree, hammer_dedup_cache, off_entry,
287                 hammer_dedup_off_rb_compare, hammer_off_t);
288
289 typedef struct hammer_dedup_cache {
290         RB_ENTRY(hammer_dedup_cache) crc_entry;
291         RB_ENTRY(hammer_dedup_cache) off_entry;
292         TAILQ_ENTRY(hammer_dedup_cache) lru_entry;
293         struct hammer_mount *hmp;
294         int64_t obj_id;
295         uint32_t localization;
296         off_t file_offset;
297         int bytes;
298         hammer_off_t data_offset;
299         hammer_crc_t crc;
300 } *hammer_dedup_cache_t;
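
/*
 * Illustrative sketch, not part of the original interface: the live dedup
 * cache is indexed both by data CRC and by data offset.  A CRC probe can
 * go through hammer_dedup_cache_lookup(), which is declared later in this
 * header; the wrapper shown here is hypothetical.
 */
#if 0
static __inline int
example_dedup_candidate_exists(struct hammer_mount *hmp, hammer_crc_t crc)
{
        return (hammer_dedup_cache_lookup(hmp, crc) != NULL);
}
#endif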
301
302 /*
303  * Structure used to organize flush groups.  Flush groups must be
304  * organized into chunks in order to avoid blowing out the UNDO FIFO.
305  * Without this a 'sync' could end up flushing 50,000 inodes in a single
306  * transaction.
307  */
308 RB_HEAD(hammer_fls_rb_tree, hammer_inode);
309 RB_PROTOTYPE(hammer_fls_rb_tree, hammer_inode, rb_flsnode,
310               hammer_ino_rb_compare);
311
312 struct hammer_flush_group {
313         TAILQ_ENTRY(hammer_flush_group) flush_entry;
314         struct hammer_fls_rb_tree       flush_tree;
315         int                             seq;            /* our seq no */
316         int                             total_count;    /* record load */
317         int                             running;        /* group is running */
318         int                             closed;
319         int                             refs;
320 };
321
322 typedef struct hammer_flush_group *hammer_flush_group_t;
323
324 TAILQ_HEAD(hammer_flush_group_list, hammer_flush_group);
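
/*
 * Illustrative sketch, not part of the original interface: summing the
 * record load of every queued flush group by walking flush_group_list,
 * which is embedded in struct hammer_mount later in this header.  The
 * helper is hypothetical.
 */
#if 0
static __inline int
example_total_flush_load(struct hammer_mount *hmp)
{
        hammer_flush_group_t flg;
        int total = 0;

        TAILQ_FOREACH(flg, &hmp->flush_group_list, flush_entry) {
                total += flg->total_count;
        }
        return (total);
}
#endif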
325
326 /*
327  * Structure used to represent an inode in-memory.
328  *
329  * The record and data associated with an inode may be out of sync with
330  * the disk (xDIRTY flags), or not even on the disk at all (ONDISK flag
331  * clear).
332  *
333  * An inode may also hold a cache of unsynchronized records, used for
334  * databases and directories only.  Unsynchronized regular file data is
335  * stored in the buffer cache.
336  *
337  * NOTE: A file which is created and destroyed within the initial
338  * synchronization period can wind up not doing any disk I/O at all.
339  *
340  * Finally, an inode may cache numerous disk-referencing B-Tree cursors.
341  */
342 RB_HEAD(hammer_ino_rb_tree, hammer_inode);
343 RB_PROTOTYPEX(hammer_ino_rb_tree, INFO, hammer_inode, rb_node,
344               hammer_ino_rb_compare, hammer_inode_info_t);
345
346 RB_HEAD(hammer_redo_rb_tree, hammer_inode);
347 RB_PROTOTYPE2(hammer_redo_rb_tree, hammer_inode, rb_redonode,
348               hammer_redo_rb_compare, hammer_off_t);
349
350 struct hammer_record;
351 RB_HEAD(hammer_rec_rb_tree, hammer_record);
352 RB_PROTOTYPEX(hammer_rec_rb_tree, INFO, hammer_record, rb_node,
353               hammer_rec_rb_compare, hammer_btree_leaf_elm_t);
354
355 TAILQ_HEAD(hammer_record_list, hammer_record);
356 TAILQ_HEAD(hammer_node_list, hammer_node);
357
358 struct hammer_inode {
359         RB_ENTRY(hammer_inode)  rb_node;
360         hammer_inode_state_t    flush_state;
361         hammer_flush_group_t    flush_group;
362         RB_ENTRY(hammer_inode)  rb_flsnode;     /* when on flush list */
363         RB_ENTRY(hammer_inode)  rb_redonode;    /* when INODE_RDIRTY is set */
364         struct hammer_record_list target_list;  /* target of dependent recs */
365         int64_t                 obj_id;         /* (key) object identifier */
366         hammer_tid_t            obj_asof;       /* (key) snapshot or 0 */
367         uint32_t                obj_localization; /* (key) pseudo-fs id for upper 16 bits */
368         struct hammer_mount     *hmp;
369         hammer_objid_cache_t    objid_cache;
370         int                     flags;
371         int                     error;          /* flush error */
372         int                     cursor_ip_refs; /* sanity */
373 #if 0
374         int                     cursor_exclreq_count;
375 #endif
376         int                     rsv_recs;
377         struct vnode            *vp;
378         hammer_pseudofs_inmem_t pfsm;
379         struct lockf            advlock;
380         struct hammer_lock      lock;           /* sync copy interlock */
381         off_t                   trunc_off;
382         struct hammer_btree_leaf_elm ino_leaf;  /* in-memory cache */
383         struct hammer_inode_data ino_data;      /* in-memory cache */
384         struct hammer_rec_rb_tree rec_tree;     /* in-memory cache */
385         int                     rec_generation;
386
387         /*
388          * search initiate cache
389          * cache[0] - this inode
390          * cache[1] - related data, the content depends on situations
391          * cache[2] - for dip to cache ip to shortcut B-Tree search
392          * cache[3] - related data copied from dip to a new ip's cache[1]
393          */
394         struct hammer_node_cache cache[4];
395
396         /*
397          * When a demark is created to synchronize an inode to
398          * disk, certain fields are copied so the front-end VOPs
399          * can continue to run in parallel with the synchronization
400          * occurring in the background.
401          */
402         int             sync_flags;             /* to-sync flags cache */
403         off_t           sync_trunc_off;         /* to-sync truncation */
404         off_t           save_trunc_off;         /* write optimization */
405         struct hammer_btree_leaf_elm sync_ino_leaf; /* to-sync cache */
406         struct hammer_inode_data sync_ino_data; /* to-sync cache */
407         size_t          redo_count;
408
409         /*
410          * Track the earliest offset in the UNDO/REDO FIFO containing
411          * REDO records.  This is staged to the backend during flush
412          * sequences.  While the inode is staged redo_fifo_next is used
413          * to track the earliest offset for rotation into redo_fifo_start
414          * on completion of the flush.
415          */
416         hammer_off_t    redo_fifo_start;
417         hammer_off_t    redo_fifo_next;
418 };
419
420 typedef struct hammer_inode *hammer_inode_t;
421
422 #define VTOI(vp)        ((struct hammer_inode *)(vp)->v_data)
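
/*
 * Illustrative sketch, not part of the original interface: VTOI() recovers
 * the hammer_inode from a vnode's v_data, as a VOP handler would.  The
 * helper is hypothetical.
 */
#if 0
static __inline int64_t
example_vp_to_obj_id(struct vnode *vp)
{
        hammer_inode_t ip = VTOI(vp);

        return (ip ? ip->obj_id : 0);
}
#endif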
423
424 /*
425  * NOTE: DDIRTY does not include atime or mtime and does not include
426  *       write-append size changes.  SDIRTY handles write-append size
427  *       changes.
428  *
429  *       REDO indicates that REDO logging is active, creating a definitive
430  *       stream of REDO records in the UNDO/REDO log for writes and
431  *       truncations, including boundary records when/if REDO is turned off.
432  *       REDO is typically enabled by fsync() and turned off if excessive
433  *       writes occur without an fsync().
434  *
435  *       RDIRTY indicates that REDO records were laid down in the UNDO/REDO
436  *       FIFO (even if REDO is turned off some might still be active) and are
437  *       still being tracked for this inode.  See hammer_redo.c.
438  */
439 #define HAMMER_INODE_DDIRTY     0x0001  /* in-memory ino_data is dirty */
440                                         /* (not including atime/mtime) */
441 #define HAMMER_INODE_RSV_INODES 0x0002  /* hmp->rsv_inodes bumped */
442 #define HAMMER_INODE_CONN_DOWN  0x0004  /* include in downward recursion */
443 #define HAMMER_INODE_XDIRTY     0x0008  /* in-memory records */
444 #define HAMMER_INODE_ONDISK     0x0010  /* inode is on-disk (else not yet) */
445 #define HAMMER_INODE_FLUSH      0x0020  /* flush on last ref */
446 #define HAMMER_INODE_DELETED    0x0080  /* inode delete (backend) */
447 #define HAMMER_INODE_DELONDISK  0x0100  /* delete synchronized to disk */
448 #define HAMMER_INODE_RO         0x0200  /* read-only (because of as-of) */
449 #define HAMMER_INODE_RECSW      0x0400  /* waiting on data record flush */
450 #define HAMMER_INODE_DONDISK    0x0800  /* data records may be on disk */
451 #define HAMMER_INODE_BUFS       0x1000  /* dirty high level bps present */
452 #define HAMMER_INODE_REFLUSH    0x2000  /* flush on dependency / reflush */
453 #define HAMMER_INODE_RECLAIM    0x4000  /* trying to reclaim */
454 #define HAMMER_INODE_FLUSHW     0x8000  /* someone waiting for flush */
455
456 #define HAMMER_INODE_TRUNCATED  0x00010000
457 #define HAMMER_INODE_DELETING   0x00020000 /* inode delete request (frontend)*/
458 #define HAMMER_INODE_RESIGNAL   0x00040000 /* re-signal on re-flush */
459 #define HAMMER_INODE_ATIME      0x00100000 /* in-memory atime modified */
460 #define HAMMER_INODE_MTIME      0x00200000 /* in-memory mtime modified */
461 #define HAMMER_INODE_WOULDBLOCK 0x00400000 /* re-issue to new flush group */
462 #define HAMMER_INODE_DUMMY      0x00800000 /* dummy inode covering bad file */
463 #define HAMMER_INODE_SDIRTY     0x01000000 /* in-memory ino_data.size is dirty*/
464 #define HAMMER_INODE_REDO       0x02000000 /* REDO logging active */
465 #define HAMMER_INODE_RDIRTY     0x04000000 /* REDO records active in fifo */
466 #define HAMMER_INODE_SLAVEFLUSH 0x08000000 /* being flushed by slave */
467
468 #define HAMMER_INODE_MODMASK    (HAMMER_INODE_DDIRTY|HAMMER_INODE_SDIRTY|   \
469                                  HAMMER_INODE_XDIRTY|HAMMER_INODE_BUFS|     \
470                                  HAMMER_INODE_ATIME|HAMMER_INODE_MTIME|     \
471                                  HAMMER_INODE_TRUNCATED|HAMMER_INODE_DELETING)
472
473 #define HAMMER_INODE_MODMASK_NOXDIRTY   \
474                                 (HAMMER_INODE_MODMASK & ~HAMMER_INODE_XDIRTY)
475
476 #define HAMMER_INODE_MODMASK_NOREDO     \
477                                 (HAMMER_INODE_DDIRTY|                       \
478                                  HAMMER_INODE_XDIRTY|                       \
479                                  HAMMER_INODE_TRUNCATED|HAMMER_INODE_DELETING)
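
/*
 * Illustrative sketch, not part of the original interface: a frontend test
 * for "does this inode carry any modification that requires a flush",
 * using the MODMASK aggregate defined above.  The helper is hypothetical.
 */
#if 0
static __inline int
example_inode_is_modified(hammer_inode_t ip)
{
        return ((ip->flags & HAMMER_INODE_MODMASK) != 0);
}
#endif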
480
481 #define HAMMER_FLUSH_SIGNAL     0x0001
482 #define HAMMER_FLUSH_RECURSION  0x0002
483
484 /*
485  * Used by the inode reclaim code to pipeline reclaims and avoid
486  * blowing out kernel memory or letting the flusher get too far
487  * behind.  The reclaim wakes up when count reaches 0 or the
488  * timer expires.
489  */
490 struct hammer_reclaim {
491         TAILQ_ENTRY(hammer_reclaim) entry;
492         int     count;
493 };
494
495 /*
496  * Track who is creating the greatest burden on the
497  * inode cache.
498  */
499 struct hammer_inostats {
500         pid_t           pid;    /* track user process */
501         int             ltick;  /* last tick */
502         int             count;  /* count (degenerates) */
503 };
504
505 #define HAMMER_INOSTATS_HSIZE   32
506 #define HAMMER_INOSTATS_HMASK   (HAMMER_INOSTATS_HSIZE - 1)
507
508 /*
509  * Structure used to represent an unsynchronized record in-memory.  These
510  * records typically represent directory entries.  Only non-historical
511  * records are kept in-memory.
512  *
513  * Records are organized as a per-inode RB-Tree.  If the inode is not
514  * on disk then neither are any records and the in-memory record tree
515  * represents the entire contents of the inode.  If the inode is on disk
516  * then the on-disk B-Tree is scanned in parallel with the in-memory
517  * RB-Tree to synthesize the current state of the file.
518  *
519  * Records are also used to enforce the ordering of directory create/delete
520  * operations.  A new inode will not be flushed to disk unless its related
521  * directory entry is also being flushed at the same time.  A directory entry
522  * will not be removed unless its related inode is also being removed at the
523  * same time.
524  */
525 typedef enum hammer_record_type {
526         HAMMER_MEM_RECORD_GENERAL,      /* misc record */
527         HAMMER_MEM_RECORD_INODE,        /* inode record */
528         HAMMER_MEM_RECORD_ADD,          /* positive memory cache record */
529         HAMMER_MEM_RECORD_DEL,          /* negative delete-on-disk record */
530         HAMMER_MEM_RECORD_DATA          /* bulk-data record w/on-disk ref */
531 } hammer_record_type_t;
532
533 struct hammer_record {
534         RB_ENTRY(hammer_record)         rb_node;
535         TAILQ_ENTRY(hammer_record)      target_entry;
536         hammer_inode_state_t            flush_state;
537         hammer_flush_group_t            flush_group;
538         hammer_record_type_t            type;
539         struct hammer_lock              lock;
540         struct hammer_reserve           *resv;
541         struct hammer_inode             *ip;
542         struct hammer_inode             *target_ip;
543         struct hammer_btree_leaf_elm    leaf;
544         hammer_data_ondisk_t            data;
545         int                             flags;
546         int                             gflags;
547         hammer_off_t                    zone2_offset;   /* direct-write only */
548 };
549
550 typedef struct hammer_record *hammer_record_t;
551
552 /*
553  * Record flags.  Note that FE can only be set by the frontend if the
554  * record has not been interlocked by the backend w/ BE.
555  */
556 #define HAMMER_RECF_ALLOCDATA           0x0001
557 #define HAMMER_RECF_ONRBTREE            0x0002
558 #define HAMMER_RECF_DELETED_FE          0x0004  /* deleted (frontend) */
559 #define HAMMER_RECF_DELETED_BE          0x0008  /* deleted (backend) */
560 #define HAMMER_RECF_COMMITTED           0x0010  /* committed to the B-Tree */
561 #define HAMMER_RECF_INTERLOCK_BE        0x0020  /* backend interlock */
562 #define HAMMER_RECF_WANTED              0x0040  /* wanted by the frontend */
563 #define HAMMER_RECF_DEDUPED             0x0080  /* will be live-dedup'ed */
564 #define HAMMER_RECF_CONVERT_DELETE      0x0100  /* special case */
565 #define HAMMER_RECF_REDO                0x1000  /* REDO was laid down */
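
/*
 * Illustrative sketch, not part of the original interface: an in-memory
 * record is effectively dead once either the frontend or the backend has
 * marked it deleted.  The predicate below is hypothetical; real callers
 * also have to honor the BE interlock described above.
 */
#if 0
static __inline int
example_record_is_deleted(hammer_record_t rec)
{
        return ((rec->flags & (HAMMER_RECF_DELETED_FE |
                               HAMMER_RECF_DELETED_BE)) != 0);
}
#endif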
566
567 /*
568  * These flags must be separate to deal with SMP races
569  */
570 #define HAMMER_RECG_DIRECT_IO           0x0001  /* related direct I/O running */
571 #define HAMMER_RECG_DIRECT_WAIT         0x0002  /* waiting on related direct I/O */
572 #define HAMMER_RECG_DIRECT_INVAL        0x0004  /* buffer alias invalidation */
573 /*
574  * hammer_create_at_cursor() and hammer_delete_at_cursor() flags.
575  */
576 #define HAMMER_CREATE_MODE_UMIRROR      0x0001
577 #define HAMMER_CREATE_MODE_SYS          0x0002
578
579 #define HAMMER_DELETE_ADJUST            0x0001
580 #define HAMMER_DELETE_DESTROY           0x0002
581
582 /*
583  * In-memory structures representing on-disk structures.
584  */
585 RB_HEAD(hammer_vol_rb_tree, hammer_volume);
586 RB_HEAD(hammer_buf_rb_tree, hammer_buffer);
587 RB_HEAD(hammer_nod_rb_tree, hammer_node);
588 RB_HEAD(hammer_und_rb_tree, hammer_undo);
589 RB_HEAD(hammer_res_rb_tree, hammer_reserve);
590 RB_HEAD(hammer_mod_rb_tree, hammer_io);
591
592 RB_PROTOTYPE2(hammer_vol_rb_tree, hammer_volume, rb_node,
593               hammer_vol_rb_compare, int32_t);
594 RB_PROTOTYPE2(hammer_buf_rb_tree, hammer_buffer, rb_node,
595               hammer_buf_rb_compare, hammer_off_t);
596 RB_PROTOTYPE2(hammer_nod_rb_tree, hammer_node, rb_node,
597               hammer_nod_rb_compare, hammer_off_t);
598 RB_PROTOTYPE2(hammer_und_rb_tree, hammer_undo, rb_node,
599               hammer_und_rb_compare, hammer_off_t);
600 RB_PROTOTYPE2(hammer_res_rb_tree, hammer_reserve, rb_node,
601               hammer_res_rb_compare, hammer_off_t);
602 RB_PROTOTYPE2(hammer_mod_rb_tree, hammer_io, rb_node,
603               hammer_mod_rb_compare, hammer_off_t);
604
605 /*
606  * IO management - embedded at the head of various in-memory structures
607  *
608  * VOLUME       - hammer_volume containing meta-data
609  * META_BUFFER  - hammer_buffer containing meta-data
610  * UNDO_BUFFER  - hammer_buffer containing undo-data
611  * DATA_BUFFER  - hammer_buffer containing pure-data
612  * DUMMY        - hammer_buffer not containing valid data
613  *
614  * Dirty volume headers and dirty meta-data buffers are locked until the
615  * flusher can sequence them out.  Dirty pure-data buffers can be written.
616  * Clean buffers can be passively released.
617  */
618 typedef enum hammer_io_type {
619         HAMMER_STRUCTURE_VOLUME,
620         HAMMER_STRUCTURE_META_BUFFER,
621         HAMMER_STRUCTURE_UNDO_BUFFER,
622         HAMMER_STRUCTURE_DATA_BUFFER,
623         HAMMER_STRUCTURE_DUMMY
624 } hammer_io_type_t;
625
626 /*
627  * XXX: struct hammer_io can't directly embed LIST_ENTRY() at offset 0,
628  * since a list head in struct buf expects a struct called worklist for
629  * list entries.  HAMMER needs to define and use struct worklist.
630  */
631 struct worklist {
632         LIST_ENTRY(worklist) node;
633 };
634
635 TAILQ_HEAD(hammer_io_list, hammer_io);
636 typedef struct hammer_io_list *hammer_io_list_t;
637
638 struct hammer_io {
639         struct worklist         worklist; /* must be at offset 0 */
640         struct hammer_lock      lock;
641         enum hammer_io_type     type;
642         struct hammer_mount     *hmp;
643         struct hammer_volume    *volume;
644         RB_ENTRY(hammer_io)     rb_node;     /* if modified */
645         TAILQ_ENTRY(hammer_io)  iorun_entry; /* iorun_list */
646         struct hammer_mod_rb_tree *mod_root;
647         struct buf              *bp;
648         int64_t                 offset;    /* volume offset */
649         int                     bytes;     /* buffer cache buffer size */
650         int                     modify_refs;
651
652         /*
653          * These can be modified at any time by the backend while holding
654          * io_token, due to bio_done and hammer_io_complete() callbacks.
655          */
656         u_int           running : 1;    /* bp write IO in progress */
657         u_int           waiting : 1;    /* someone is waiting on us */
658         u_int           ioerror : 1;    /* abort on io-error */
659         u_int           unusedA : 29;
660
661         /*
662          * These can only be modified by the frontend while holding
663          * fs_token, or by the backend while holding the io interlocked
664          * with no references (which will block the frontend when it
665          * tries to reference it).
666          *
667          * WARNING! SMP RACES will create havoc if the callbacks ever tried
668          *          to modify any of these outside the above restrictions.
669          */
670         u_int           modified : 1;   /* bp's data was modified */
671         u_int           released : 1;   /* bp released (w/ B_LOCKED set) */
672         u_int           waitdep : 1;    /* flush waits for dependencies */
673         u_int           recovered : 1;  /* has recovery ref */
674         u_int           waitmod : 1;    /* waiting for modify_refs */
675         u_int           reclaim : 1;    /* reclaim requested */
676         u_int           gencrc : 1;     /* crc needs to be generated */
677         u_int           unusedB : 25;
678 };
679
680 typedef struct hammer_io *hammer_io_t;
681
682 #define HAMMER_CLUSTER_SIZE     (64 * 1024)
683 #if HAMMER_CLUSTER_SIZE > MAXBSIZE
684 #undef  HAMMER_CLUSTER_SIZE
685 #define HAMMER_CLUSTER_SIZE     MAXBSIZE
686 #endif
687
688 /*
689  * In-memory volume representing an on-disk volume.
690  */
691 struct hammer_volume {
692         struct hammer_io io; /* must be at offset 0 */
693         RB_ENTRY(hammer_volume) rb_node;
694         hammer_volume_ondisk_t ondisk;
695         int32_t vol_no;
696         hammer_off_t maxbuf_off; /* Maximum buffer offset (zone-2) */
697         char    *vol_name;
698         struct vnode *devvp;
699         int     vol_flags;
700 };
701
702 typedef struct hammer_volume *hammer_volume_t;
703
704 #define HAMMER_ITOV(iop) ((hammer_volume_t)(iop))
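
/*
 * Illustrative sketch, not part of the original interface: HAMMER_ITOV()
 * (and HAMMER_ITOB() below) rely on the embedded hammer_io being the first
 * member of the containing structure, so an io pointer can be cast back to
 * its owner once the type has been checked.  The helper is hypothetical.
 */
#if 0
static __inline hammer_volume_t
example_io_to_volume(hammer_io_t io)
{
        KKASSERT(io->type == HAMMER_STRUCTURE_VOLUME);
        return (HAMMER_ITOV(io));
}
#endif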
705
706 /*
707  * In-memory buffer representing an on-disk buffer.
708  */
709 struct hammer_buffer {
710         struct hammer_io io; /* must be at offset 0 */
711         RB_ENTRY(hammer_buffer) rb_node;
712         void *ondisk;
713         hammer_off_t zoneX_offset;
714         hammer_off_t zone2_offset;
715         struct hammer_reserve *resv;
716         struct hammer_node_list node_list;
717 };
718
719 typedef struct hammer_buffer *hammer_buffer_t;
720
721 #define HAMMER_ITOB(iop) ((hammer_buffer_t)(iop))
722
723 /*
724  * In-memory B-Tree node, representing an on-disk B-Tree node.
725  *
726  * This is a hang-on structure which is backed by a hammer_buffer,
727  * and used for fine-grained locking of B-Tree nodes in order to
728  * properly control lock ordering.
729  */
730 struct hammer_node {
731         struct hammer_lock      lock;           /* node-by-node lock */
732         TAILQ_ENTRY(hammer_node) entry;         /* per-buffer linkage */
733         RB_ENTRY(hammer_node)   rb_node;        /* per-mount linkage */
734         hammer_off_t            node_offset;    /* full offset spec */
735         struct hammer_mount     *hmp;
736         struct hammer_buffer    *buffer;        /* backing buffer */
737         hammer_node_ondisk_t    ondisk;         /* ptr to on-disk structure */
738         TAILQ_HEAD(, hammer_cursor) cursor_list;  /* deadlock recovery */
739         struct hammer_node_cache_list cache_list; /* passive caches */
740         int                     flags;
741 #if 0
742         int                     cursor_exclreq_count;
743 #endif
744 };
745
746 #define HAMMER_NODE_DELETED     0x0001
747 #define HAMMER_NODE_FLUSH       0x0002
748 #define HAMMER_NODE_CRCGOOD     0x0004
749 #define HAMMER_NODE_NEEDSCRC    0x0008
750 #define HAMMER_NODE_NEEDSMIRROR 0x0010
751 #define HAMMER_NODE_CRCBAD      0x0020
752 #define HAMMER_NODE_NONLINEAR   0x0040          /* linear heuristic */
753
754 #define HAMMER_NODE_CRCANY      (HAMMER_NODE_CRCGOOD | HAMMER_NODE_CRCBAD)
755
756 typedef struct hammer_node      *hammer_node_t;
757
758 /*
759  * List of locked nodes.  This structure is used to lock potentially large
760  * numbers of nodes as an aid for complex B-Tree operations.
761  */
762 struct hammer_node_lock;
763 TAILQ_HEAD(hammer_node_lock_list, hammer_node_lock);
764
765 struct hammer_node_lock {
766         TAILQ_ENTRY(hammer_node_lock) entry;
767         struct hammer_node_lock_list  list;
768         struct hammer_node_lock       *parent;
769         hammer_node_t   node;
770         hammer_node_ondisk_t copy;      /* copy of on-disk data */
771         int             index;          /* index of this node in parent */
772         int             count;          /* count children */
773         int             flags;
774 };
775
776 typedef struct hammer_node_lock *hammer_node_lock_t;
777
778 #define HAMMER_NODE_LOCK_UPDATED        0x0001
779 #define HAMMER_NODE_LOCK_LCACHE         0x0002
780
781 /*
782  * The reserve structure prevents the blockmap from allocating
783  * out of a reserved big-block.  Such reservations are used by
784  * the direct-write mechanism.
785  *
786  * The structure is also used to hold off on reallocations of
787  * big-blocks from the freemap until flush dependencies have
788  * been dealt with.
789  */
790 struct hammer_reserve {
791         RB_ENTRY(hammer_reserve) rb_node;
792         TAILQ_ENTRY(hammer_reserve) delay_entry;
793         int             flg_no;
794         int             flags;
795         int             refs;
796         int             zone;
797         int             append_off;
798         int32_t         bytes_free;
799         hammer_off_t    zone_offset;
800 };
801
802 typedef struct hammer_reserve *hammer_reserve_t;
803
804 #define HAMMER_RESF_ONDELAY     0x0001
805 #define HAMMER_RESF_LAYER2FREE  0x0002
806
807 #include "hammer_cursor.h"
808
809 /*
810  * The undo structure tracks recent undos to avoid laying down duplicate
811  * undos within a flush group, saving us a significant amount of overhead.
812  *
813  * This is strictly a heuristic.
814  */
815 #define HAMMER_MAX_UNDOS                1024
816 #define HAMMER_MAX_FLUSHERS             4
817
818 struct hammer_undo {
819         RB_ENTRY(hammer_undo)   rb_node;
820         TAILQ_ENTRY(hammer_undo) lru_entry;
821         hammer_off_t            offset;
822         int                     bytes;
823 };
824
825 typedef struct hammer_undo *hammer_undo_t;
826
827 struct hammer_flusher_info;
828 TAILQ_HEAD(hammer_flusher_info_list, hammer_flusher_info);
829
830 struct hammer_flusher {
831         int             signal;         /* flusher thread sequencer */
832         int             done;           /* last completed flush group */
833         int             next;           /* next unallocated flg seqno */
834         int             group_lock;     /* lock sequencing of the next flush */
835         int             exiting;        /* request master exit */
836         thread_t        td;             /* master flusher thread */
837         hammer_tid_t    tid;            /* last flushed transaction id */
838         int             finalize_want;          /* serialize finalization */
839         struct hammer_lock finalize_lock;       /* serialize finalization */
840         struct hammer_transaction trans;        /* shared transaction */
841         struct hammer_flusher_info_list run_list;
842         struct hammer_flusher_info_list ready_list;
843 };
844
845 #define HAMMER_FLUSH_UNDOS_RELAXED      0
846 #define HAMMER_FLUSH_UNDOS_FORCED       1
847 #define HAMMER_FLUSH_UNDOS_AUTO         2
848 /*
849  * Internal hammer mount data structure
850  */
851 struct hammer_mount {
852         struct mount *mp;
853         struct hammer_ino_rb_tree rb_inos_root;
854         struct hammer_redo_rb_tree rb_redo_root;
855         struct hammer_vol_rb_tree rb_vols_root;
856         struct hammer_nod_rb_tree rb_nods_root;
857         struct hammer_und_rb_tree rb_undo_root;
858         struct hammer_res_rb_tree rb_resv_root;
859         struct hammer_buf_rb_tree rb_bufs_root;
860         struct hammer_pfs_rb_tree rb_pfsm_root;
861
862         struct hammer_dedup_crc_rb_tree rb_dedup_crc_root;
863         struct hammer_dedup_off_rb_tree rb_dedup_off_root;
864
865         struct hammer_volume *rootvol;
866         struct hammer_base_elm root_btree_beg;
867         struct hammer_base_elm root_btree_end;
868
869         struct malloc_type      *m_misc;
870         struct malloc_type      *m_inodes;
871
872         int     flags;          /* HAMMER_MOUNT_xxx flags */
873         int     hflags;
874         int     ronly;
875         int     nvolumes;
876         int     master_id;      /* -1 or 0-15 for mirroring */
877         int     version;        /* hammer filesystem version to use */
878         int     rsv_inodes;     /* reserved space due to dirty inodes */
879         int64_t rsv_databytes;  /* reserved space due to record data */
880         int     rsv_recs;       /* reserved space due to dirty records */
881         int     rsv_fromdelay;  /* big-blocks reserved due to flush delay */
882         int     undo_rec_limit; /* based on size of undo area */
883
884         int     volume_to_remove; /* volume that is currently being removed */
885
886         int     count_inodes;   /* total number of inodes */
887         int     count_iqueued;  /* inodes queued to flusher */
888         int     count_reclaims; /* inodes pending reclaim by flusher */
889
890         struct hammer_flusher flusher;
891
892         u_int   check_interrupt;
893         u_int   check_yield;
894         uuid_t  fsid;
895         struct hammer_mod_rb_tree volu_root;    /* dirty volume headers */
896         struct hammer_mod_rb_tree undo_root;    /* dirty undo buffers */
897         struct hammer_mod_rb_tree data_root;    /* dirty data buffers */
898         struct hammer_mod_rb_tree meta_root;    /* dirty meta bufs    */
899         struct hammer_mod_rb_tree lose_root;    /* loose buffers      */
900         long    locked_dirty_space;             /* meta/volu count    */
901         long    io_running_space;               /* io_token */
902         int     objid_cache_count;
903         int     dedup_cache_count;
904         int     error;                          /* critical I/O error */
905         struct krate    krate;                  /* rate limited kprintf */
906         struct krate    kdiag;                  /* rate limited kprintf */
907         hammer_tid_t    asof;                   /* snapshot mount */
908         hammer_tid_t    next_tid;
909         hammer_tid_t    flush_tid1;             /* flusher tid sequencing */
910         hammer_tid_t    flush_tid2;             /* flusher tid sequencing */
911         int64_t copy_stat_freebigblocks;        /* number of free big-blocks */
912         uint32_t        undo_seqno;             /* UNDO/REDO FIFO seqno */
913         uint32_t        recover_stage2_seqno;   /* REDO recovery seqno */
914         hammer_off_t    recover_stage2_offset;  /* REDO recovery offset */
915
916         struct netexport export;
917         struct hammer_lock sync_lock;
918         struct hammer_lock undo_lock;
919         struct hammer_lock blkmap_lock;
920         struct hammer_lock snapshot_lock;
921         struct hammer_lock volume_lock;
922         struct hammer_blockmap  blockmap[HAMMER_MAX_ZONES];
923         struct hammer_undo      undos[HAMMER_MAX_UNDOS];
924         int                     undo_alloc;
925         TAILQ_HEAD(, hammer_undo)  undo_lru_list;
926         TAILQ_HEAD(, hammer_reserve) delay_list;
927         struct hammer_flush_group_list  flush_group_list;
928         hammer_flush_group_t    fill_flush_group;
929         hammer_flush_group_t    next_flush_group;
930         TAILQ_HEAD(, hammer_objid_cache) objid_cache_list;
931         TAILQ_HEAD(, hammer_dedup_cache) dedup_lru_list;
932         hammer_dedup_cache_t    dedup_free_cache;
933         TAILQ_HEAD(, hammer_reclaim) reclaim_list;
934         TAILQ_HEAD(, hammer_io) iorun_list;
935
936         struct lwkt_token       fs_token;       /* high level */
937         struct lwkt_token       io_token;       /* low level (IO callback) */
938
939         struct hammer_inostats  inostats[HAMMER_INOSTATS_HSIZE];
940         uint64_t volume_map[4];  /* 256-bit volume number bitmap */
941 };
942
943 typedef struct hammer_mount     *hammer_mount_t;
944
945 #define HAMMER_MOUNT_CRITICAL_ERROR     0x0001
946 #define HAMMER_MOUNT_FLUSH_RECOVERY     0x0002
947 #define HAMMER_MOUNT_REDO_SYNC          0x0004
948 #define HAMMER_MOUNT_REDO_RECOVERY_REQ  0x0008
949 #define HAMMER_MOUNT_REDO_RECOVERY_RUN  0x0010
950
951 #define HAMMER_VOLUME_NUMBER_ADD(hmp, vol)                      \
952         (hmp)->volume_map[(vol)->vol_no >> 6] |=                \
953         ((uint64_t)1 << ((vol)->vol_no & ((1 << 6) - 1)))
954
955 #define HAMMER_VOLUME_NUMBER_DEL(hmp, vol)                      \
956         (hmp)->volume_map[(vol)->vol_no >> 6] &=                \
957         ~((uint64_t)1 << ((vol)->vol_no & ((1 << 6) - 1)))
958
959 #define HAMMER_VOLUME_NUMBER_IS_SET(hmp, n)                     \
960         (((hmp)->volume_map[(n) >> 6] &                         \
961         ((uint64_t)1 << ((n) & ((1 << 6) - 1)))) != 0)
962
963 #define HAMMER_VOLUME_NUMBER_FOREACH(hmp, n)                    \
964         for (n = 0; n < HAMMER_MAX_VOLUMES; n++)                \
965                 if (HAMMER_VOLUME_NUMBER_IS_SET(hmp, n))
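
/*
 * Illustrative sketch, not part of the original interface: counting the
 * installed volumes by iterating the 256-bit volume_map with the FOREACH
 * helper defined above.  The function is hypothetical.
 */
#if 0
static __inline int
example_count_installed_volumes(hammer_mount_t hmp)
{
        int n;
        int count = 0;

        HAMMER_VOLUME_NUMBER_FOREACH(hmp, n)
                ++count;
        return (count);
}
#endif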
966
967 /*
968  * Minimum buffer cache bufs required to rebalance the B-Tree.
969  * This is because we must hold the children and the children's children
970  * locked.  Even this might not be enough if things are horribly out
971  * of balance.
972  */
973 #define HAMMER_REBALANCE_MIN_BUFS       \
974         (HAMMER_BTREE_LEAF_ELMS * HAMMER_BTREE_LEAF_ELMS)
975
976 #endif  /* _KERNEL || _KERNEL_STRUCTURES */
977
978 #if defined(_KERNEL)
979 /*
980  * checkspace slop (8MB chunks), higher numbers are more conservative.
981  */
982 #define HAMMER_CHKSPC_REBLOCK   25
983 #define HAMMER_CHKSPC_MIRROR    20
984 #define HAMMER_CHKSPC_WRITE     20
985 #define HAMMER_CHKSPC_CREATE    20
986 #define HAMMER_CHKSPC_REMOVE    10
987 #define HAMMER_CHKSPC_EMERGENCY 0
988
989 extern struct vop_ops hammer_vnode_vops;
990 extern struct vop_ops hammer_spec_vops;
991 extern struct vop_ops hammer_fifo_vops;
992
993 extern int hammer_debug_io;
994 extern int hammer_debug_general;
995 extern int hammer_debug_inode;
996 extern int hammer_debug_locks;
997 extern int hammer_debug_btree;
998 extern int hammer_debug_tid;
999 extern int hammer_debug_recover;
1000 extern int hammer_debug_critical;
1001 extern int hammer_cluster_enable;
1002 extern int hammer_live_dedup;
1003 extern int hammer_tdmux_ticks;
1004 extern int hammer_count_fsyncs;
1005 extern int hammer_count_inodes;
1006 extern int hammer_count_iqueued;
1007 extern int hammer_count_reclaims;
1008 extern int hammer_count_records;
1009 extern int hammer_count_record_datas;
1010 extern int hammer_count_volumes;
1011 extern int hammer_count_buffers;
1012 extern int hammer_count_nodes;
1013 extern int64_t hammer_stats_btree_lookups;
1014 extern int64_t hammer_stats_btree_searches;
1015 extern int64_t hammer_stats_btree_inserts;
1016 extern int64_t hammer_stats_btree_deletes;
1017 extern int64_t hammer_stats_btree_elements;
1018 extern int64_t hammer_stats_btree_splits;
1019 extern int64_t hammer_stats_btree_iterations;
1020 extern int64_t hammer_stats_btree_root_iterations;
1021 extern int64_t hammer_stats_record_iterations;
1022 extern int64_t hammer_stats_file_read;
1023 extern int64_t hammer_stats_file_write;
1024 extern int64_t hammer_stats_file_iopsr;
1025 extern int64_t hammer_stats_file_iopsw;
1026 extern int64_t hammer_stats_disk_read;
1027 extern int64_t hammer_stats_disk_write;
1028 extern int64_t hammer_stats_inode_flushes;
1029 extern int64_t hammer_stats_commits;
1030 extern int64_t hammer_stats_undo;
1031 extern int64_t hammer_stats_redo;
1032 extern long hammer_count_dirtybufspace;
1033 extern int hammer_count_refedbufs;
1034 extern int hammer_count_reservations;
1035 extern long hammer_count_io_running_read;
1036 extern long hammer_count_io_running_write;
1037 extern int hammer_count_io_locked;
1038 extern long hammer_limit_dirtybufspace;
1039 extern int hammer_limit_recs;
1040 extern int hammer_limit_inode_recs;
1041 extern int hammer_limit_reclaims;
1042 extern int hammer_live_dedup_cache_size;
1043 extern int hammer_limit_redo;
1044 extern int hammer_verify_zone;
1045 extern int hammer_verify_data;
1046 extern int hammer_double_buffer;
1047 extern int hammer_btree_full_undo;
1048 extern int hammer_yield_check;
1049 extern int hammer_fsync_mode;
1050 extern int hammer_autoflush;
1051 extern int64_t hammer_contention_count;
1052
1053 extern int64_t hammer_live_dedup_vnode_bcmps;
1054 extern int64_t hammer_live_dedup_device_bcmps;
1055 extern int64_t hammer_live_dedup_findblk_failures;
1056 extern int64_t hammer_live_dedup_bmap_saves;
1057
1058 void    hammer_critical_error(hammer_mount_t hmp, hammer_inode_t ip,
1059                         int error, const char *msg);
1060 int     hammer_vop_inactive(struct vop_inactive_args *);
1061 int     hammer_vop_reclaim(struct vop_reclaim_args *);
1062 int     hammer_get_vnode(struct hammer_inode *ip, struct vnode **vpp);
1063 struct hammer_inode *hammer_get_inode(hammer_transaction_t trans,
1064                         hammer_inode_t dip, int64_t obj_id,
1065                         hammer_tid_t asof, uint32_t localization,
1066                         int flags, int *errorp);
1067 struct hammer_inode *hammer_get_dummy_inode(hammer_transaction_t trans,
1068                         hammer_inode_t dip, int64_t obj_id,
1069                         hammer_tid_t asof, uint32_t localization,
1070                         int flags, int *errorp);
1071 struct hammer_inode *hammer_find_inode(hammer_transaction_t trans,
1072                         int64_t obj_id, hammer_tid_t asof,
1073                         uint32_t localization);
1074 void    hammer_scan_inode_snapshots(hammer_mount_t hmp,
1075                         hammer_inode_info_t iinfo,
1076                         int (*callback)(hammer_inode_t ip, void *data),
1077                         void *data);
1078 void    hammer_put_inode(struct hammer_inode *ip);
1079 void    hammer_put_inode_ref(struct hammer_inode *ip);
1080 void    hammer_inode_waitreclaims(hammer_transaction_t trans);
1081 void    hammer_inode_dirty(struct hammer_inode *ip);
1082
1083 int     hammer_unload_volume(hammer_volume_t volume, void *data);
1084 int     hammer_adjust_volume_mode(hammer_volume_t volume, void *data __unused);
1085
1086 int     hammer_unload_buffer(hammer_buffer_t buffer, void *data);
1087 int     hammer_install_volume(hammer_mount_t hmp, const char *volname,
1088                         struct vnode *devvp, void *data);
1089 int     hammer_mountcheck_volumes(hammer_mount_t hmp);
1090 int     hammer_get_installed_volumes(hammer_mount_t hmp);
1091
1092 int     hammer_mem_add(hammer_record_t record);
1093 int     hammer_ip_lookup(hammer_cursor_t cursor);
1094 int     hammer_ip_first(hammer_cursor_t cursor);
1095 int     hammer_ip_next(hammer_cursor_t cursor);
1096 int     hammer_ip_resolve_data(hammer_cursor_t cursor);
1097 int     hammer_ip_delete_record(hammer_cursor_t cursor, hammer_inode_t ip,
1098                         hammer_tid_t tid);
1099 int     hammer_create_at_cursor(hammer_cursor_t cursor,
1100                         hammer_btree_leaf_elm_t leaf, void *udata, int mode);
1101 int     hammer_delete_at_cursor(hammer_cursor_t cursor, int delete_flags,
1102                         hammer_tid_t delete_tid, uint32_t delete_ts,
1103                         int track, int64_t *stat_bytes);
1104 int     hammer_ip_check_directory_empty(hammer_transaction_t trans,
1105                         hammer_inode_t ip);
1106 int     hammer_sync_hmp(hammer_mount_t hmp, int waitfor);
1107 int     hammer_queue_inodes_flusher(hammer_mount_t hmp, int waitfor);
1108
1109 hammer_record_t
1110         hammer_alloc_mem_record(hammer_inode_t ip, int data_len);
1111 void    hammer_flush_record_done(hammer_record_t record, int error);
1112 void    hammer_wait_mem_record_ident(hammer_record_t record, const char *ident);
1113 void    hammer_rel_mem_record(hammer_record_t record);
1114
1115 int     hammer_cursor_up(hammer_cursor_t cursor);
1116 int     hammer_cursor_up_locked(hammer_cursor_t cursor);
1117 int     hammer_cursor_down(hammer_cursor_t cursor);
1118 int     hammer_cursor_upgrade(hammer_cursor_t cursor);
1119 int     hammer_cursor_upgrade_node(hammer_cursor_t cursor);
1120 void    hammer_cursor_downgrade(hammer_cursor_t cursor);
1121 int     hammer_cursor_upgrade2(hammer_cursor_t c1, hammer_cursor_t c2);
1122 void    hammer_cursor_downgrade2(hammer_cursor_t c1, hammer_cursor_t c2);
1123 int     hammer_cursor_seek(hammer_cursor_t cursor, hammer_node_t node,
1124                         int index);
1125 void    hammer_lock_ex_ident(struct hammer_lock *lock, const char *ident);
1126 int     hammer_lock_ex_try(struct hammer_lock *lock);
1127 void    hammer_lock_sh(struct hammer_lock *lock);
1128 int     hammer_lock_sh_try(struct hammer_lock *lock);
1129 int     hammer_lock_upgrade(struct hammer_lock *lock, int shcount);
1130 void    hammer_lock_downgrade(struct hammer_lock *lock, int shcount);
1131 int     hammer_lock_status(struct hammer_lock *lock);
1132 void    hammer_unlock(struct hammer_lock *lock);
1133 void    hammer_ref(struct hammer_lock *lock);
1134 int     hammer_ref_interlock(struct hammer_lock *lock);
1135 int     hammer_ref_interlock_true(struct hammer_lock *lock);
1136 void    hammer_ref_interlock_done(struct hammer_lock *lock);
1137 void    hammer_rel(struct hammer_lock *lock);
1138 int     hammer_rel_interlock(struct hammer_lock *lock, int locked);
1139 void    hammer_rel_interlock_done(struct hammer_lock *lock, int orig_locked);
1140 int     hammer_get_interlock(struct hammer_lock *lock);
1141 int     hammer_try_interlock_norefs(struct hammer_lock *lock);
1142 void    hammer_put_interlock(struct hammer_lock *lock, int error);
1143
1144 void    hammer_sync_lock_ex(hammer_transaction_t trans);
1145 void    hammer_sync_lock_sh(hammer_transaction_t trans);
1146 int     hammer_sync_lock_sh_try(hammer_transaction_t trans);
1147 void    hammer_sync_unlock(hammer_transaction_t trans);
1148
1149 uint32_t hammer_to_unix_xid(uuid_t *uuid);
1150 void hammer_guid_to_uuid(uuid_t *uuid, uint32_t guid);
1151 void    hammer_time_to_timespec(uint64_t xtime, struct timespec *ts);
1152 uint64_t hammer_timespec_to_time(struct timespec *ts);
1153 int     hammer_str_to_tid(const char *str, int *ispfsp,
1154                         hammer_tid_t *tidp, uint32_t *localizationp);
1155 hammer_tid_t hammer_alloc_objid(hammer_mount_t hmp, hammer_inode_t dip,
1156                         int64_t namekey);
1157 void hammer_clear_objid(hammer_inode_t dip);
1158 void hammer_destroy_objid_cache(hammer_mount_t hmp);
1159
1160 int hammer_dedup_crc_rb_compare(hammer_dedup_cache_t dc1,
1161                         hammer_dedup_cache_t dc2);
1162 int hammer_dedup_off_rb_compare(hammer_dedup_cache_t dc1,
1163                         hammer_dedup_cache_t dc2);
1164 hammer_dedup_cache_t hammer_dedup_cache_add(hammer_inode_t ip,
1165                         hammer_btree_leaf_elm_t leaf);
1166 hammer_dedup_cache_t hammer_dedup_cache_lookup(hammer_mount_t hmp,
1167                         hammer_crc_t crc);
1168 void hammer_dedup_cache_inval(hammer_mount_t hmp, hammer_off_t base_offset);
1169 void hammer_destroy_dedup_cache(hammer_mount_t hmp);
1170 void hammer_dump_dedup_cache(hammer_mount_t hmp);
1171 int hammer_dedup_validate(hammer_dedup_cache_t dcp, int zone, int bytes,
1172                         void *data);
1173
1174 int hammer_enter_undo_history(hammer_mount_t hmp, hammer_off_t offset,
1175                         int bytes);
1176 void hammer_clear_undo_history(hammer_mount_t hmp);
1177 enum vtype hammer_get_vnode_type(uint8_t obj_type);
1178 int hammer_get_dtype(uint8_t obj_type);
1179 uint8_t hammer_get_obj_type(enum vtype vtype);
1180 int64_t hammer_direntry_namekey(hammer_inode_t dip, const void *name, int len,
1181                         uint32_t *max_iterationsp);
1182 int     hammer_nohistory(hammer_inode_t ip);
1183
1184 int     hammer_init_cursor(hammer_transaction_t trans, hammer_cursor_t cursor,
1185                         hammer_node_cache_t cache, hammer_inode_t ip);
1186 void    hammer_normalize_cursor(hammer_cursor_t cursor);
1187 void    hammer_done_cursor(hammer_cursor_t cursor);
1188 int     hammer_recover_cursor(hammer_cursor_t cursor);
1189 void    hammer_unlock_cursor(hammer_cursor_t cursor);
1190 int     hammer_lock_cursor(hammer_cursor_t cursor);
1191 hammer_cursor_t hammer_push_cursor(hammer_cursor_t ocursor);
1192 void    hammer_pop_cursor(hammer_cursor_t ocursor, hammer_cursor_t ncursor);
1193
1194 void    hammer_cursor_replaced_node(hammer_node_t onode, hammer_node_t nnode);
1195 void    hammer_cursor_removed_node(hammer_node_t onode, hammer_node_t parent,
1196                         int index);
1197 void    hammer_cursor_split_node(hammer_node_t onode, hammer_node_t nnode,
1198                         int index);
1199 void    hammer_cursor_moved_element(hammer_node_t oparent, int pindex,
1200                         hammer_node_t onode, int oindex,
1201                         hammer_node_t nnode, int nindex);
1202 void    hammer_cursor_parent_changed(hammer_node_t node, hammer_node_t oparent,
1203                         hammer_node_t nparent, int nindex);
1204 void    hammer_cursor_inserted_element(hammer_node_t node, int index);
1205 void    hammer_cursor_deleted_element(hammer_node_t node, int index);
1206 void    hammer_cursor_invalidate_cache(hammer_cursor_t cursor);
1207
1208 int     hammer_btree_lookup(hammer_cursor_t cursor);
1209 int     hammer_btree_first(hammer_cursor_t cursor);
1210 int     hammer_btree_last(hammer_cursor_t cursor);
1211 int     hammer_btree_extract(hammer_cursor_t cursor, int flags);
1212 int     hammer_btree_iterate(hammer_cursor_t cursor);
1213 int     hammer_btree_iterate_reverse(hammer_cursor_t cursor);
1214 int     hammer_btree_insert(hammer_cursor_t cursor,
1215                             hammer_btree_leaf_elm_t elm, int *doprop);
1216 int     hammer_btree_delete(hammer_cursor_t cursor, int *ndelete);
1217 void    hammer_btree_do_propagation(hammer_cursor_t cursor,
1218                             hammer_btree_leaf_elm_t leaf);
1219 int     hammer_btree_cmp(hammer_base_elm_t key1, hammer_base_elm_t key2);
1220 int     hammer_btree_chkts(hammer_tid_t ts, hammer_base_elm_t key);
1221 int     hammer_btree_correct_rhb(hammer_cursor_t cursor, hammer_tid_t tid);
1222 int     hammer_btree_correct_lhb(hammer_cursor_t cursor, hammer_tid_t tid);
1223
1224 int     btree_set_parent_of_child(hammer_transaction_t trans,
1225                         hammer_node_t node,
1226                         hammer_btree_elm_t elm);
1227 void    hammer_node_lock_init(hammer_node_lock_t parent, hammer_node_t node);
1228 void    hammer_btree_lcache_init(hammer_mount_t hmp, hammer_node_lock_t lcache,
1229                         int depth);
1230 void    hammer_btree_lcache_free(hammer_mount_t hmp, hammer_node_lock_t lcache);
1231 int     hammer_btree_lock_children(hammer_cursor_t cursor, int depth,
1232                         hammer_node_lock_t parent,
1233                         hammer_node_lock_t lcache);
1234 void    hammer_btree_lock_copy(hammer_cursor_t cursor,
1235                         hammer_node_lock_t parent);
1236 int     hammer_btree_sync_copy(hammer_cursor_t cursor,
1237                         hammer_node_lock_t parent);
1238 void    hammer_btree_unlock_children(hammer_mount_t hmp,
1239                         hammer_node_lock_t parent,
1240                         hammer_node_lock_t lcache);
1241 int     hammer_btree_search_node(hammer_base_elm_t elm, hammer_node_ondisk_t node);
1242 hammer_node_t hammer_btree_get_parent(hammer_transaction_t trans,
1243                         hammer_node_t node, int *parent_indexp,
1244                         int *errorp, int try_exclusive);
1245
1246 void    hammer_print_btree_node(hammer_node_ondisk_t ondisk);
1247 void    hammer_print_btree_elm(hammer_btree_elm_t elm);
1248
1249 void    *hammer_bread(hammer_mount_t hmp, hammer_off_t off,
1250                         int *errorp, struct hammer_buffer **bufferp);
1251 void    *hammer_bnew(hammer_mount_t hmp, hammer_off_t off,
1252                         int *errorp, struct hammer_buffer **bufferp);
1253 void    *hammer_bread_ext(hammer_mount_t hmp, hammer_off_t off, int bytes,
1254                         int *errorp, struct hammer_buffer **bufferp);
1255 void    *hammer_bnew_ext(hammer_mount_t hmp, hammer_off_t off, int bytes,
1256                         int *errorp, struct hammer_buffer **bufferp);
1257
hammer_volume_t hammer_get_root_volume(hammer_mount_t hmp, int *errorp);

hammer_volume_t hammer_get_volume(hammer_mount_t hmp,
                        int32_t vol_no, int *errorp);
hammer_buffer_t hammer_get_buffer(hammer_mount_t hmp, hammer_off_t buf_offset,
                        int bytes, int isnew, int *errorp);
void            hammer_sync_buffers(hammer_mount_t hmp,
                        hammer_off_t base_offset, int bytes);
int             hammer_del_buffers(hammer_mount_t hmp,
                        hammer_off_t base_offset,
                        hammer_off_t zone2_offset, int bytes,
                        int report_conflicts);

int             hammer_ref_volume(hammer_volume_t volume);
int             hammer_ref_buffer(hammer_buffer_t buffer);
void            hammer_flush_buffer_nodes(hammer_buffer_t buffer);

void            hammer_rel_volume(hammer_volume_t volume, int locked);
void            hammer_rel_buffer(hammer_buffer_t buffer, int locked);

int             hammer_vfs_export(struct mount *mp, int op,
                        const struct export_args *export);
hammer_node_t   hammer_get_node(hammer_transaction_t trans,
                        hammer_off_t node_offset, int isnew, int *errorp);
void            hammer_ref_node(hammer_node_t node);
hammer_node_t   hammer_ref_node_safe(hammer_transaction_t trans,
                        hammer_node_cache_t cache, int *errorp);
void            hammer_rel_node(hammer_node_t node);
void            hammer_delete_node(hammer_transaction_t trans,
                        hammer_node_t node);
void            hammer_cache_node(hammer_node_cache_t cache,
                        hammer_node_t node);
void            hammer_uncache_node(hammer_node_cache_t cache);
void            hammer_flush_node(hammer_node_t node, int locked);

void hammer_dup_buffer(struct hammer_buffer **bufferp,
                        struct hammer_buffer *buffer);
hammer_node_t hammer_alloc_btree(hammer_transaction_t trans,
                        hammer_off_t hint, int *errorp);
void *hammer_alloc_data(hammer_transaction_t trans, int32_t data_len,
                        uint16_t rec_type, hammer_off_t *data_offsetp,
                        struct hammer_buffer **data_bufferp,
                        hammer_off_t hint, int *errorp);

int hammer_generate_undo(hammer_transaction_t trans,
                        hammer_off_t zone_offset, void *base, int len);
int hammer_generate_redo(hammer_transaction_t trans, hammer_inode_t ip,
                        hammer_off_t file_offset, uint32_t flags,
                        void *base, int len);
void hammer_generate_redo_sync(hammer_transaction_t trans);
void hammer_redo_fifo_start_flush(hammer_inode_t ip);
void hammer_redo_fifo_end_flush(hammer_inode_t ip);

void hammer_format_undo(void *base, uint32_t seqno);
int hammer_upgrade_undo_4(hammer_transaction_t trans);

void hammer_put_volume(struct hammer_volume *volume, int flush);
void hammer_put_buffer(struct hammer_buffer *buffer, int flush);

hammer_off_t hammer_freemap_alloc(hammer_transaction_t trans,
                        hammer_off_t owner, int *errorp);
void hammer_freemap_free(hammer_transaction_t trans, hammer_off_t phys_offset,
                        hammer_off_t owner, int *errorp);
int _hammer_checkspace(hammer_mount_t hmp, int slop, int64_t *resp);
hammer_off_t hammer_blockmap_alloc(hammer_transaction_t trans, int zone,
                        int bytes, hammer_off_t hint, int *errorp);
hammer_reserve_t hammer_blockmap_reserve(hammer_mount_t hmp, int zone,
                        int bytes, hammer_off_t *zone_offp, int *errorp);
hammer_reserve_t hammer_blockmap_reserve_dedup(hammer_mount_t hmp, int zone,
                        int bytes, hammer_off_t zone_offset, int *errorp);
void hammer_blockmap_reserve_complete(hammer_mount_t hmp,
                        hammer_reserve_t resv);
void hammer_reserve_clrdelay(hammer_mount_t hmp, hammer_reserve_t resv);
void hammer_blockmap_free(hammer_transaction_t trans,
                        hammer_off_t zone_offset, int bytes);
int hammer_blockmap_dedup(hammer_transaction_t trans,
                        hammer_off_t zone_offset, int bytes);
int hammer_blockmap_finalize(hammer_transaction_t trans,
                        hammer_reserve_t resv,
                        hammer_off_t zone_offset, int bytes);
int hammer_blockmap_getfree(hammer_mount_t hmp, hammer_off_t zone_offset,
                        int *curp, int *errorp);
hammer_off_t hammer_blockmap_lookup_verify(hammer_mount_t hmp,
                        hammer_off_t zone_offset, int *errorp);

hammer_off_t hammer_undo_lookup(hammer_mount_t hmp, hammer_off_t zone_offset,
                        int *errorp);
int64_t hammer_undo_used(hammer_transaction_t trans);
int64_t hammer_undo_space(hammer_transaction_t trans);
int64_t hammer_undo_max(hammer_mount_t hmp);
int hammer_undo_reclaim(hammer_io_t io);

void hammer_start_transaction(hammer_transaction_t trans,
                              struct hammer_mount *hmp);
void hammer_simple_transaction(hammer_transaction_t trans,
                              struct hammer_mount *hmp);
void hammer_start_transaction_fls(hammer_transaction_t trans,
                                  struct hammer_mount *hmp);
void hammer_done_transaction(hammer_transaction_t trans);
hammer_tid_t hammer_alloc_tid(hammer_mount_t hmp, int count);
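
/*
 * Editor's note -- illustrative sketch of the usual transaction bracket
 * (assumed usage, simplified):
 *
 *      struct hammer_transaction trans;
 *
 *      hammer_simple_transaction(&trans, hmp);
 *      ... perform lookups or other read-mostly work ...
 *      hammer_done_transaction(&trans);
 *
 * Modifying paths use hammer_start_transaction() and the flusher uses
 * hammer_start_transaction_fls(); all are paired with
 * hammer_done_transaction().
 */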

void hammer_modify_inode(hammer_transaction_t trans, hammer_inode_t ip, int flags);
void hammer_flush_inode(hammer_inode_t ip, int flags);
void hammer_flush_inode_done(hammer_inode_t ip, int error);
void hammer_wait_inode(hammer_inode_t ip);

int  hammer_create_inode(hammer_transaction_t trans, struct vattr *vap,
                        struct ucred *cred, struct hammer_inode *dip,
                        const char *name, int namelen,
                        hammer_pseudofs_inmem_t pfsm,
                        struct hammer_inode **ipp);
void hammer_rel_inode(hammer_inode_t ip, int flush);
int hammer_reload_inode(hammer_inode_t ip, void *arg __unused);
int hammer_ino_rb_compare(hammer_inode_t ip1, hammer_inode_t ip2);
int hammer_redo_rb_compare(hammer_inode_t ip1, hammer_inode_t ip2);
int hammer_destroy_inode_callback(hammer_inode_t ip, void *data __unused);

int hammer_sync_inode(hammer_transaction_t trans, hammer_inode_t ip);
void hammer_test_inode(hammer_inode_t dip);
void hammer_inode_unloadable_check(hammer_inode_t ip, int getvp);
int hammer_update_atime_quick(hammer_inode_t ip);

int  hammer_ip_add_direntry(hammer_transaction_t trans,
                        hammer_inode_t dip, const char *name, int bytes,
                        hammer_inode_t nip);
int  hammer_ip_del_direntry(hammer_transaction_t trans,
                        hammer_cursor_t cursor, hammer_inode_t dip,
                        hammer_inode_t ip);
void hammer_ip_replace_bulk(hammer_mount_t hmp, hammer_record_t record);
hammer_record_t hammer_ip_add_bulk(hammer_inode_t ip, off_t file_offset,
                        void *data, int bytes, int *errorp);
int  hammer_ip_frontend_trunc(struct hammer_inode *ip, off_t file_size);
int  hammer_ip_add_record(hammer_transaction_t trans,
                        hammer_record_t record);
int  hammer_ip_delete_range(hammer_cursor_t cursor, hammer_inode_t ip,
                        int64_t ran_beg, int64_t ran_end, int truncating);
int  hammer_ip_delete_clean(hammer_cursor_t cursor, hammer_inode_t ip,
                        int *countp);
int  hammer_ip_sync_data(hammer_cursor_t cursor, hammer_inode_t ip,
                        int64_t offset, void *data, int bytes);
int  hammer_ip_sync_record_cursor(hammer_cursor_t cursor, hammer_record_t rec);
hammer_pseudofs_inmem_t  hammer_load_pseudofs(hammer_transaction_t trans,
                        uint32_t localization, int *errorp);
int  hammer_mkroot_pseudofs(hammer_transaction_t trans, struct ucred *cred,
                        hammer_pseudofs_inmem_t pfsm, hammer_inode_t dip);
int  hammer_save_pseudofs(hammer_transaction_t trans,
                        hammer_pseudofs_inmem_t pfsm);
int  hammer_unload_pseudofs(hammer_transaction_t trans, uint32_t localization);
void hammer_rel_pseudofs(hammer_mount_t hmp, hammer_pseudofs_inmem_t pfsm);
int hammer_ioctl(hammer_inode_t ip, u_long com, caddr_t data, int fflag,
                        struct ucred *cred);

void hammer_io_init(hammer_io_t io, hammer_volume_t volume,
                        enum hammer_io_type type);
int hammer_io_read(struct vnode *devvp, struct hammer_io *io, int limit);
void hammer_io_advance(struct hammer_io *io);
int hammer_io_new(struct vnode *devvp, struct hammer_io *io);
int hammer_io_inval(hammer_volume_t volume, hammer_off_t zone2_offset);
struct buf *hammer_io_release(struct hammer_io *io, int flush);
void hammer_io_flush(struct hammer_io *io, int reclaim);
void hammer_io_wait(struct hammer_io *io);
void hammer_io_waitdep(struct hammer_io *io);
void hammer_io_wait_all(hammer_mount_t hmp, const char *ident, int doflush);
int hammer_io_direct_read(hammer_mount_t hmp, struct bio *bio,
                        hammer_btree_leaf_elm_t leaf);
int hammer_io_indirect_read(hammer_mount_t hmp, struct bio *bio,
                        hammer_btree_leaf_elm_t leaf);
int hammer_io_direct_write(hammer_mount_t hmp, struct bio *bio,
                        hammer_record_t record);
void hammer_io_direct_wait(hammer_record_t record);
void hammer_io_direct_uncache(hammer_mount_t hmp, hammer_btree_leaf_elm_t leaf);
void hammer_io_write_interlock(hammer_io_t io);
void hammer_io_done_interlock(hammer_io_t io);
void hammer_io_clear_modify(struct hammer_io *io, int inval);
void hammer_io_clear_modlist(struct hammer_io *io);
void hammer_io_flush_sync(hammer_mount_t hmp);
void hammer_io_clear_error(struct hammer_io *io);
void hammer_io_clear_error_noassert(struct hammer_io *io);
void hammer_io_notmeta(hammer_buffer_t buffer);
void hammer_io_limit_backlog(hammer_mount_t hmp);

void hammer_modify_volume(hammer_transaction_t trans, hammer_volume_t volume,
                        void *base, int len);
void hammer_modify_buffer(hammer_transaction_t trans, hammer_buffer_t buffer,
                        void *base, int len);
void hammer_modify_volume_done(hammer_volume_t volume);
void hammer_modify_buffer_done(hammer_buffer_t buffer);

int hammer_ioc_reblock(hammer_transaction_t trans, hammer_inode_t ip,
                        struct hammer_ioc_reblock *reblock);
int hammer_ioc_rebalance(hammer_transaction_t trans, hammer_inode_t ip,
                        struct hammer_ioc_rebalance *rebal);
int hammer_ioc_prune(hammer_transaction_t trans, hammer_inode_t ip,
                        struct hammer_ioc_prune *prune);
int hammer_ioc_mirror_read(hammer_transaction_t trans, hammer_inode_t ip,
                        struct hammer_ioc_mirror_rw *mirror);
int hammer_ioc_mirror_write(hammer_transaction_t trans, hammer_inode_t ip,
                        struct hammer_ioc_mirror_rw *mirror);
int hammer_ioc_set_pseudofs(hammer_transaction_t trans, hammer_inode_t ip,
                        struct ucred *cred, struct hammer_ioc_pseudofs_rw *pfs);
int hammer_ioc_get_pseudofs(hammer_transaction_t trans, hammer_inode_t ip,
                        struct hammer_ioc_pseudofs_rw *pfs);
int hammer_ioc_destroy_pseudofs(hammer_transaction_t trans, hammer_inode_t ip,
                        struct hammer_ioc_pseudofs_rw *pfs);
int hammer_ioc_downgrade_pseudofs(hammer_transaction_t trans, hammer_inode_t ip,
                        struct hammer_ioc_pseudofs_rw *pfs);
int hammer_ioc_upgrade_pseudofs(hammer_transaction_t trans, hammer_inode_t ip,
                        struct hammer_ioc_pseudofs_rw *pfs);
int hammer_ioc_wait_pseudofs(hammer_transaction_t trans, hammer_inode_t ip,
                        struct hammer_ioc_pseudofs_rw *pfs);
int hammer_ioc_scan_pseudofs(hammer_transaction_t trans, hammer_inode_t ip,
                        struct hammer_ioc_pseudofs_rw *pfs);
int hammer_ioc_volume_add(hammer_transaction_t trans, hammer_inode_t ip,
                        struct hammer_ioc_volume *ioc);
int hammer_ioc_volume_del(hammer_transaction_t trans, hammer_inode_t ip,
                        struct hammer_ioc_volume *ioc);
int hammer_ioc_volume_list(hammer_transaction_t trans, hammer_inode_t ip,
                        struct hammer_ioc_volume_list *ioc);
int hammer_ioc_dedup(hammer_transaction_t trans, hammer_inode_t ip,
                        struct hammer_ioc_dedup *dedup);

int hammer_signal_check(hammer_mount_t hmp);

void hammer_flusher_create(hammer_mount_t hmp);
void hammer_flusher_destroy(hammer_mount_t hmp);
void hammer_flusher_sync(hammer_mount_t hmp);
int  hammer_flusher_async(hammer_mount_t hmp, hammer_flush_group_t flg);
int  hammer_flusher_async_one(hammer_mount_t hmp);
int hammer_flusher_running(hammer_mount_t hmp);
void hammer_flusher_wait(hammer_mount_t hmp, int seq);
void hammer_flusher_wait_next(hammer_mount_t hmp);
int  hammer_flusher_meta_limit(hammer_mount_t hmp);
int  hammer_flusher_meta_halflimit(hammer_mount_t hmp);
int  hammer_flusher_undo_exhausted(hammer_transaction_t trans, int quarter);
void hammer_flusher_clean_loose_ios(hammer_mount_t hmp);
void hammer_flusher_finalize(hammer_transaction_t trans, int final);
int  hammer_flusher_haswork(hammer_mount_t hmp);
int  hammer_flush_dirty(hammer_mount_t hmp, int max_count);
void hammer_flusher_flush_undos(hammer_mount_t hmp, int already_flushed);

int hammer_recover_stage1(hammer_mount_t hmp, hammer_volume_t rootvol);
int hammer_recover_stage2(hammer_mount_t hmp, hammer_volume_t rootvol);
void hammer_recover_flush_buffers(hammer_mount_t hmp,
                        hammer_volume_t root_volume, int final);

void hammer_crc_set_blockmap(hammer_blockmap_t blockmap);
void hammer_crc_set_volume(hammer_volume_ondisk_t ondisk);
void hammer_crc_set_leaf(void *data, hammer_btree_leaf_elm_t leaf);

int hammer_crc_test_blockmap(hammer_blockmap_t blockmap);
int hammer_crc_test_volume(hammer_volume_ondisk_t ondisk);
int hammer_crc_test_btree(hammer_node_ondisk_t ondisk);
int hammer_crc_test_leaf(void *data, hammer_btree_leaf_elm_t leaf);
udev_t hammer_fsid_to_udev(uuid_t *uuid);


int hammer_blocksize(int64_t file_offset);
int hammer_blockoff(int64_t file_offset);
int64_t hammer_blockdemarc(int64_t file_offset1, int64_t file_offset2);

/*
 * Shortcut for _hammer_checkspace(), used all over the code.
 */
static __inline int
hammer_checkspace(hammer_mount_t hmp, int slop)
{
        return(_hammer_checkspace(hmp, slop, NULL));
}
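
/*
 * Editor's note -- example caller pattern (illustrative only; the
 * HAMMER_CHKSPC_* slop constants are defined elsewhere in this header):
 *
 *      if ((error = hammer_checkspace(hmp, HAMMER_CHKSPC_CREATE)) != 0)
 *              return (error);
 */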

static __inline void
hammer_wait_mem_record(hammer_record_t record)
{
        hammer_wait_mem_record_ident(record, "hmmwai");
}

static __inline void
hammer_lock_ex(struct hammer_lock *lock)
{
        hammer_lock_ex_ident(lock, "hmrlck");
}

static __inline void
hammer_modify_volume_noundo(hammer_transaction_t trans, hammer_volume_t volume)
{
        hammer_modify_volume(trans, volume, NULL, 0);
}

static __inline void
hammer_modify_buffer_noundo(hammer_transaction_t trans, hammer_buffer_t buffer)
{
        hammer_modify_buffer(trans, buffer, NULL, 0);
}

/*
 * Indicate that a B-Tree node is being modified.
 */
static __inline void
hammer_modify_node_noundo(hammer_transaction_t trans, hammer_node_t node)
{
        KKASSERT((node->flags & HAMMER_NODE_CRCBAD) == 0);
        hammer_modify_buffer(trans, node->buffer, NULL, 0);
}

static __inline void
hammer_modify_node_all(hammer_transaction_t trans, struct hammer_node *node)
{
        KKASSERT((node->flags & HAMMER_NODE_CRCBAD) == 0);
        hammer_modify_buffer(trans, node->buffer,
                             node->ondisk, sizeof(*node->ondisk));
}

static __inline void
hammer_modify_node(hammer_transaction_t trans, hammer_node_t node,
                   void *base, int len)
{
        hammer_crc_t *crcptr;

        KKASSERT((char *)base >= (char *)node->ondisk &&
                 (char *)base + len <=
                    (char *)node->ondisk + sizeof(*node->ondisk));
        KKASSERT((node->flags & HAMMER_NODE_CRCBAD) == 0);

        if (hammer_btree_full_undo) {
                hammer_modify_node_all(trans, node);
        } else {
                hammer_modify_buffer(trans, node->buffer, base, len);
                crcptr = &node->ondisk->crc;
                hammer_modify_buffer(trans, node->buffer,
                                     crcptr, sizeof(hammer_crc_t));
                --node->buffer->io.modify_refs; /* only want one ref */
        }
}

/*
 * Indicate that the specified modifications have been completed.
 *
 * Do not try to generate the crc here; it is very expensive to do so and
 * a sequence of insertions or deletions can result in many calls to this
 * function on the same node.
 */
static __inline void
hammer_modify_node_done(hammer_node_t node)
{
        node->flags |= HAMMER_NODE_CRCGOOD;
        if ((node->flags & HAMMER_NODE_NEEDSCRC) == 0) {
                node->flags |= HAMMER_NODE_NEEDSCRC;
                node->buffer->io.gencrc = 1;
                hammer_ref_node(node);
        }
        hammer_modify_buffer_done(node->buffer);
}
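
/*
 * Editor's note -- sketch of the modify/done pairing used when editing a
 * B-Tree element in place (illustrative; the element and field shown are
 * arbitrary examples):
 *
 *      hammer_modify_node(trans, node, &elm->base, sizeof(elm->base));
 *      elm->base.delete_tid = tid;
 *      hammer_modify_node_done(node);
 *
 * hammer_modify_node() records undo for the range (or the whole node when
 * hammer_btree_full_undo is set); hammer_modify_node_done() flags the node
 * so its CRC is regenerated before the buffer is written out.
 */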

static __inline int
hammer_btree_extract_leaf(hammer_cursor_t cursor)
{
        return(hammer_btree_extract(cursor, 0));
}

static __inline int
hammer_btree_extract_data(hammer_cursor_t cursor)
{
        return(hammer_btree_extract(cursor, HAMMER_CURSOR_GET_DATA));
}

static __inline void
hammer_crc_set_btree(hammer_node_ondisk_t ondisk)
{
        ondisk->crc = crc32(&ondisk->crc + 1, HAMMER_BTREE_CRCSIZE);
}

/*
 * Lookup a blockmap offset.
 */
static __inline hammer_off_t
hammer_blockmap_lookup(hammer_mount_t hmp, hammer_off_t zone_offset,
                        int *errorp)
{
#if defined INVARIANTS
        int zone = HAMMER_ZONE_DECODE(zone_offset);
        KKASSERT(hammer_is_zone2_mapped_index(zone));
#endif

        /*
         * We can actually skip blockmap verify by default,
         * as normal blockmaps are now direct-mapped onto the freemap
         * and so represent zone-2 addresses.
         */
        if (hammer_verify_zone == 0) {
                *errorp = 0;
                return hammer_xlate_to_zone2(zone_offset);
        }

        return hammer_blockmap_lookup_verify(hmp, zone_offset, errorp);
}

#define hammer_modify_volume_field(trans, vol, field)           \
        hammer_modify_volume(trans, vol, &(vol)->ondisk->field, \
                             sizeof((vol)->ondisk->field))

#define hammer_modify_node_field(trans, node, field)            \
        hammer_modify_node(trans, node, &(node)->ondisk->field, \
                             sizeof((node)->ondisk->field))
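
/*
 * Editor's note -- example use of the field helpers (illustrative;
 * vol0_stat_inodes is assumed to be one of the root volume's on-disk
 * counters):
 *
 *      hammer_modify_volume_field(trans, trans->rootvol, vol0_stat_inodes);
 *      ++trans->rootvol->ondisk->vol0_stat_inodes;
 *      hammer_modify_volume_done(trans->rootvol);
 */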

/*
 * The HAMMER_INODE_CAP_DIR_LOCAL_INO capability is set on newly
 * created directories for HAMMER version 2 or greater and causes
 * directory entries to be placed in the inode localization zone in
 * the B-Tree instead of the misc zone.
 *
 * This greatly improves locality of reference between directory entries
 * and their inodes.
 */
static __inline uint32_t
hammer_dir_localization(hammer_inode_t dip)
{
        return(HAMMER_DIR_INODE_LOCALIZATION(&dip->ino_data));
}
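
/*
 * Editor's note -- sketch of how the result is typically combined with
 * the directory's PFS localization when forming directory-entry B-Tree
 * keys (assumed usage):
 *
 *      cursor.key_beg.localization = dip->obj_localization +
 *                                    hammer_dir_localization(dip);
 */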

static __inline
hammer_io_t
hammer_buf_peek_io(struct buf *bp)
{
        return((hammer_io_t)LIST_FIRST(&bp->b_dep));
}

static __inline
void
hammer_buf_attach_io(struct buf *bp, hammer_io_t io)
{
        /* struct buf and struct hammer_io are 1:1 */
        KKASSERT(hammer_buf_peek_io(bp) == NULL);
        LIST_INSERT_HEAD(&bp->b_dep, &io->worklist, node);
}
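
/*
 * Editor's note -- example (illustrative): I/O completion and dependency
 * code can recover the hammer_io associated with a struct buf through the
 * b_dep list head, e.g.
 *
 *      hammer_io_t io = hammer_buf_peek_io(bp);
 *      if (io != NULL)
 *              ... operate on io ...
 */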

#define hkprintf(format, args...)                       \
        kprintf("HAMMER: "format,## args)
#define hvkprintf(vol, format, args...)                 \
        kprintf("HAMMER(%s) "format, vol->ondisk->vol_label,## args)
#define hmkprintf(hmp, format, args...)                 \
        kprintf("HAMMER(%s) "format, hmp->mp->mnt_stat.f_mntfromname,## args)
#define hdkprintf(format, args...)                      \
        kprintf("%s: "format, __func__,## args)

#define hkrateprintf(rate, format, args...)             \
        krateprintf(rate, "HAMMER: "format,## args)
#define hvkrateprintf(rate, vol, format, args...)       \
        krateprintf(rate, "HAMMER(%s) "format, vol->ondisk->vol_label,## args)
#define hmkrateprintf(rate, hmp, format, args...)       \
        krateprintf(rate, "HAMMER(%s) "format, hmp->mp->mnt_stat.f_mntfromname,## args)
#define hdkrateprintf(rate, format, args...)            \
        krateprintf(rate, "%s: "format, __func__,## args)

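/*
 * Editor's note -- example messages (the message strings here are
 * illustrative only):
 *
 *      hkprintf("recovery complete\n");
 *      hvkprintf(volume, "unexpected error %d\n", error);
 *      hmkprintf(hmp, "inode %016jx flushed\n", (uintmax_t)obj_id);
 */
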
#define hpanic(format, args...)                         \
        panic("%s: "format, __func__,## args)
#endif  /* _KERNEL */

#endif /* !VFS_HAMMER_HAMMER_H_ */