1 /*
2  * Copyright (c) 2011-2015 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@dragonflybsd.org>
6  * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  *
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in
16  *    the documentation and/or other materials provided with the
17  *    distribution.
18  * 3. Neither the name of The DragonFly Project nor the names of its
19  *    contributors may be used to endorse or promote products derived
20  *    from this software without specific, prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
25  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
26  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
27  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
28  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
30  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
31  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
32  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33  * SUCH DAMAGE.
34  */
35
36 /*
37  * HAMMER2 IN-MEMORY CACHE OF MEDIA STRUCTURES
38  *
39  * This header file contains structures used internally by the HAMMER2
40  * implementation.  See hammer2_disk.h for on-disk structures.
41  *
42  * There is an in-memory representation of all on-media data structures.
43  * Almost everything is represented by a hammer2_chain structure in-memory.
44  * Other higher-level structures typically map to chains.
45  *
46  * A great deal of data is accessed simply via its buffer cache buffer,
47  * which is mapped for the duration of the chain's lock.  Hammer2 must
48  * implement its own buffer cache layer on top of the system layer to
49  * allow for different threads to lock different sub-block-sized buffers.
50  *
51  * When modifications are made to a chain a new filesystem block must be
52  * allocated.  Multiple modifications do not typically allocate new blocks
53  * until the current block has been flushed.  Flushes do not block the
54  * front-end unless the front-end operation crosses the current inode being
55  * flushed.
56  *
57  * The in-memory representation may remain cached (for example in order to
58  * placemark clustering locks) even after the related data has been
59  * detached.
60  */
61
62 #ifndef _VFS_HAMMER2_HAMMER2_H_
63 #define _VFS_HAMMER2_HAMMER2_H_
64
65 #include <sys/param.h>
66 #include <sys/types.h>
67 #include <sys/kernel.h>
68 #include <sys/conf.h>
69 #include <sys/systm.h>
70 #include <sys/tree.h>
71 #include <sys/malloc.h>
72 #include <sys/mount.h>
73 #include <sys/vnode.h>
74 #include <sys/proc.h>
75 #include <sys/mountctl.h>
76 #include <sys/priv.h>
77 #include <sys/stat.h>
78 #include <sys/thread.h>
79 #include <sys/globaldata.h>
80 #include <sys/lockf.h>
81 #include <sys/buf.h>
82 #include <sys/queue.h>
83 #include <sys/limits.h>
84 #include <sys/dmsg.h>
85 #include <sys/mutex.h>
86 #include <sys/kern_syscall.h>
87
88 #include <sys/signal2.h>
89 #include <sys/buf2.h>
90 #include <sys/mutex2.h>
91 #include <sys/thread2.h>
92
93 #include "hammer2_disk.h"
94 #include "hammer2_mount.h"
95 #include "hammer2_ioctl.h"
96
97 struct hammer2_io;
98 struct hammer2_iocb;
99 struct hammer2_chain;
100 struct hammer2_cluster;
101 struct hammer2_inode;
102 struct hammer2_dev;
103 struct hammer2_pfs;
104 struct hammer2_span;
105 struct hammer2_state;
106 struct hammer2_msg;
107
108 /*
109  * Mutex and lock shims.  Hammer2 requires support for asynchronous and
110  * abortable locks, and both exclusive and shared spinlocks.  Normal
111  * synchronous non-abortable locks can be substituted for spinlocks.
112  */
113 typedef mtx_t                           hammer2_mtx_t;
114 typedef mtx_link_t                      hammer2_mtx_link_t;
115 typedef mtx_state_t                     hammer2_mtx_state_t;
116
117 typedef struct spinlock                 hammer2_spin_t;
118
119 #define hammer2_mtx_ex                  mtx_lock_ex_quick
120 #define hammer2_mtx_sh                  mtx_lock_sh_quick
121 #define hammer2_mtx_unlock              mtx_unlock
122 #define hammer2_mtx_owned               mtx_owned
123 #define hammer2_mtx_init                mtx_init
124 #define hammer2_mtx_temp_release        mtx_lock_temp_release
125 #define hammer2_mtx_temp_restore        mtx_lock_temp_restore
126 #define hammer2_mtx_refs                mtx_lockrefs
127
128 #define hammer2_spin_init               spin_init
129 #define hammer2_spin_sh                 spin_lock_shared
130 #define hammer2_spin_ex                 spin_lock
131 #define hammer2_spin_unsh               spin_unlock_shared
132 #define hammer2_spin_unex               spin_unlock
133
134 /*
135  * General lock support
136  */
137 static __inline
138 int
139 hammer2_mtx_upgrade(hammer2_mtx_t *mtx)
140 {
141         int wasexclusive;
142
143         if (mtx_islocked_ex(mtx)) {
144                 wasexclusive = 1;
145         } else {
146                 mtx_unlock(mtx);
147                 mtx_lock_ex_quick(mtx);
148                 wasexclusive = 0;
149         }
150         return wasexclusive;
151 }
152
153 /*
154  * Downgrade an inode lock from exclusive to shared only if the inode
155  * lock was previously shared.  If the inode lock was previously exclusive,
156  * this is a NOP.
157  */
158 static __inline
159 void
160 hammer2_mtx_downgrade(hammer2_mtx_t *mtx, int wasexclusive)
161 {
162         if (wasexclusive == 0)
163                 mtx_downgrade(mtx);
164 }
165
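/*
 * Informal usage sketch (the variable "mtx" is assumed to be a hammer2_mtx_t
 * the caller already holds, shared or exclusive): hammer2_mtx_upgrade()
 * guarantees exclusivity (note the non-atomic unlock/relock in the shared
 * case), and the returned state lets hammer2_mtx_downgrade() restore the
 * original shared lock afterwards.
 *
 *      int wasexclusive;
 *
 *      wasexclusive = hammer2_mtx_upgrade(mtx);
 *      ... perform work requiring an exclusive lock ...
 *      hammer2_mtx_downgrade(mtx, wasexclusive);
 */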
166 /*
167  * The xid tracks internal transactional updates.
168  *
169  * XXX fix-me, really needs to be 64-bits
170  */
171 typedef uint32_t hammer2_xid_t;
172
173 #define HAMMER2_XID_MIN 0x00000000U
174 #define HAMMER2_XID_MAX 0x7FFFFFFFU
175
176 /*
177  * The chain structure tracks a portion of the media topology from the
178  * root (volume) down.  Chains represent volumes, inodes, indirect blocks,
179  * data blocks, and freemap nodes and leafs.
180  *
181  * The chain structure utilizes a simple singly-homed topology and the
182  * chain's in-memory topology will move around as the chains do, due mainly
183  * to renames and indirect block creation.
184  *
185  * Block Table Updates
186  *
187  *      Block table updates for insertions and updates are delayed until the
188  *      flush.  This allows us to avoid having to modify the parent chain
189  *      all the way to the root.
190  *
191  *      Block table deletions are performed immediately (modifying the parent
192  *      in the process) because the flush code uses the chain structure to
193  *      track delayed updates and the chain will be (likely) gone or moved to
194  *      another location in the topology after a deletion.
195  *
196  *      A prior iteration of the code tried to keep the relationship intact
197  *      on deletes by doing a delete-duplicate operation on the chain, but
198  *      it added way too much complexity to the codebase.
199  *
200  * Flush Synchronization
201  *
202  *      The flush code must flush modified chains bottom-up.  Because chain
203  *      structures can shift around and are NOT topologically stable,
204  *      modified chains are independently indexed for the flush.  As the flush
205  *      runs it modifies (or further modifies) and updates the parents,
206  *      propagating the flush all the way to the volume root.
207  *
208  *      Modifying front-end operations can occur during a flush but will block
209  *      in two cases: (1) when the front-end tries to operate on the inode
210  *      currently in the midst of being flushed and (2) if the front-end
211  *      crosses an inode currently being flushed (such as during a rename).
212  *      So, for example, if you rename directory "x" to "a/b/c/d/e/f/g/x" and
213  *      the flusher is currently working on "a/b/c", the rename will block
214  *      temporarily in order to ensure that "x" exists in one place or the
215  *      other.
216  *
217  *      Meta-data statistics are updated by the flusher.  The front-end will
218  *      make estimates but meta-data must be fully synchronized only during a
219  *      flush in order to ensure that it remains correct across a crash.
220  *
221  *      Multiple flush synchronizations can theoretically be in-flight at the
222  *      same time but the implementation is not coded to handle the case and
223  *      currently serializes them.
224  *
225  * Snapshots:
226  *
227  *      Snapshots currently require the subdirectory tree being snapshotted
228  *      to be flushed.  The snapshot then creates a new super-root inode which
229  *      copies the flushed blockdata of the directory or file that was
230  *      snapshotted.
231  *
232  * RBTREE NOTES:
233  *
234  *      - Note that the radix tree runs in powers of 2 only so sub-trees
235  *        cannot straddle edges.
236  */
237 RB_HEAD(hammer2_chain_tree, hammer2_chain);
238 TAILQ_HEAD(h2_flush_list, hammer2_chain);
239 TAILQ_HEAD(h2_core_list, hammer2_chain);
240 TAILQ_HEAD(h2_iocb_list, hammer2_iocb);
241
242 #define CHAIN_CORE_DELETE_BMAP_ENTRIES  \
243         (HAMMER2_PBUFSIZE / sizeof(hammer2_blockref_t) / sizeof(uint32_t))
244
245 struct hammer2_chain_core {
246         hammer2_mtx_t   lock;
247         hammer2_spin_t  spin;
248         struct hammer2_chain_tree rbtree; /* sub-chains */
249         int             live_zero;      /* blockref array opt */
250         u_int           flags;
251         u_int           live_count;     /* live (not deleted) chains in tree */
252         u_int           chain_count;    /* live + deleted chains under core */
253         int             generation;     /* generation number (inserts only) */
254 };
255
256 typedef struct hammer2_chain_core hammer2_chain_core_t;
257
258 #define HAMMER2_CORE_UNUSED0001         0x0001
259 #define HAMMER2_CORE_COUNTEDBREFS       0x0002
260
261 RB_HEAD(hammer2_io_tree, hammer2_io);
262
263 /*
264  * IOCB - IO callback (into chain, cluster, or manual request)
265  */
266 struct hammer2_iocb {
267         TAILQ_ENTRY(hammer2_iocb) entry;
268         void (*callback)(struct hammer2_iocb *iocb);
269         struct hammer2_io       *dio;
270         struct hammer2_cluster  *cluster;
271         struct hammer2_chain    *chain;
272         void                    *ptr;
273         off_t                   lbase;
274         int                     lsize;
275         uint32_t                flags;
276         int                     error;
277 };
278
279 typedef struct hammer2_iocb hammer2_iocb_t;
280
281 #define HAMMER2_IOCB_INTERLOCK  0x00000001
282 #define HAMMER2_IOCB_ONQ        0x00000002
283 #define HAMMER2_IOCB_DONE       0x00000004
284 #define HAMMER2_IOCB_INPROG     0x00000008
285 #define HAMMER2_IOCB_UNUSED10   0x00000010
286 #define HAMMER2_IOCB_QUICK      0x00010000
287 #define HAMMER2_IOCB_ZERO       0x00020000
288 #define HAMMER2_IOCB_READ       0x00040000
289 #define HAMMER2_IOCB_WAKEUP     0x00080000
290
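/*
 * Informal usage sketch (the callback name "xyz_callback" is hypothetical;
 * real initiators live in hammer2_io.c and hammer2_cluster.c): a caller
 * fills in an iocb with a completion callback and the logical buffer
 * extent, then hands it to hammer2_io_getblk() (declared near the end of
 * this header).  Completion is communicated through the flags above.
 *
 *      hammer2_iocb_t iocb;
 *
 *      bzero(&iocb, sizeof(iocb));
 *      iocb.callback = xyz_callback;
 *      iocb.chain = chain;
 *      iocb.lbase = lbase;
 *      iocb.lsize = lsize;
 *      hammer2_io_getblk(chain->hmp, lbase, lsize, &iocb);
 */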
291 /*
292  * DIO - Management structure wrapping system buffer cache.
293  *
294  *       Used for multiple purposes, including aggregating small,
295  *       concurrent requests by chains into larger DIOs.
296  */
297 struct hammer2_io {
298         RB_ENTRY(hammer2_io) rbnode;    /* indexed by device offset */
299         struct h2_iocb_list iocbq;
300         struct spinlock spin;
301         struct hammer2_dev *hmp;
302         struct buf      *bp;
303         off_t           pbase;
304         int             psize;
305         int             refs;
306         int             act;                    /* activity */
307 };
308
309 typedef struct hammer2_io hammer2_io_t;
310
311 #define HAMMER2_DIO_INPROG      0x80000000      /* bio in progress */
312 #define HAMMER2_DIO_GOOD        0x40000000      /* dio->bp is stable */
313 #define HAMMER2_DIO_WAITING     0x20000000      /* (old) */
314 #define HAMMER2_DIO_DIRTY       0x10000000      /* flush on last drop */
315
316 #define HAMMER2_DIO_MASK        0x0FFFFFFF
317
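/*
 * Informal usage sketch of the synchronous DIO interface declared near the
 * end of this header ("hmp", "lbase" and "lsize" are assumed to describe a
 * device-backed extent):
 *
 *      hammer2_io_t *dio = NULL;
 *      char *data;
 *      int error;
 *
 *      error = hammer2_io_bread(hmp, lbase, lsize, &dio);
 *      if (error == 0)
 *              data = hammer2_io_data(dio, lbase);
 *      ...
 *      if (dio)
 *              hammer2_io_putblk(&dio);
 */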
318 /*
319  * Primary chain structure keeps track of the topology in-memory.
320  */
321 struct hammer2_chain {
322         hammer2_chain_core_t    core;
323         RB_ENTRY(hammer2_chain) rbnode;         /* live chain(s) */
324         hammer2_blockref_t      bref;
325         struct hammer2_chain    *parent;
326         struct hammer2_state    *state;         /* if active cache msg */
327         struct hammer2_dev      *hmp;
328         struct hammer2_pfs      *pmp;           /* A PFS or super-root (spmp) */
329
330         hammer2_xid_t   flush_xid;              /* flush sequencing */
331         hammer2_key_t   data_count;             /* deltas to apply */
332         hammer2_key_t   inode_count;            /* deltas to apply */
333         hammer2_key_t   data_count_up;          /* deltas to apply */
334         hammer2_key_t   inode_count_up;         /* deltas to apply */
335         hammer2_io_t    *dio;                   /* physical data buffer */
336         u_int           bytes;                  /* physical data size */
337         u_int           flags;
338         u_int           refs;
339         u_int           lockcnt;
340         hammer2_media_data_t *data;             /* data pointer shortcut */
341         TAILQ_ENTRY(hammer2_chain) flush_node;  /* flush list */
342 };
343
344 typedef struct hammer2_chain hammer2_chain_t;
345
346 int hammer2_chain_cmp(hammer2_chain_t *chain1, hammer2_chain_t *chain2);
347 RB_PROTOTYPE(hammer2_chain_tree, hammer2_chain, rbnode, hammer2_chain_cmp);
348
349 /*
350  * Special notes on flags:
351  *
352  * INITIAL - This flag allows a chain to be created and for storage to
353  *           be allocated without having to immediately instantiate the
354  *           related buffer.  The data is assumed to be all-zeros.  It
355  *           is primarily used for indirect blocks.
356  *
357  * MODIFIED- The chain's media data has been modified.
358  * UPDATE  - Chain might not be modified but parent blocktable needs update
359  *
360  * BMAPPED - Indicates that the chain is present in the parent blockmap.
361  * BMAPUPD - Indicates that the chain is present but needs to be updated
362  *           in the parent blockmap.
363  */
364 #define HAMMER2_CHAIN_MODIFIED          0x00000001      /* dirty chain data */
365 #define HAMMER2_CHAIN_ALLOCATED         0x00000002      /* kmalloc'd chain */
366 #define HAMMER2_CHAIN_DESTROY           0x00000004
367 #define HAMMER2_CHAIN_UNLINKED          0x00000008      /* unlinked file */
368 #define HAMMER2_CHAIN_DELETED           0x00000010      /* deleted chain */
369 #define HAMMER2_CHAIN_INITIAL           0x00000020      /* initial create */
370 #define HAMMER2_CHAIN_UPDATE            0x00000040      /* need parent update */
371 #define HAMMER2_CHAIN_DEFERRED          0x00000080      /* flush depth defer */
372 #define HAMMER2_CHAIN_IOFLUSH           0x00000100      /* bawrite on put */
373 #define HAMMER2_CHAIN_ONFLUSH           0x00000200      /* on a flush list */
374 #define HAMMER2_CHAIN_UNUSED00000400    0x00000400
375 #define HAMMER2_CHAIN_VOLUMESYNC        0x00000800      /* needs volume sync */
376 #define HAMMER2_CHAIN_UNUSED00001000    0x00001000
377 #define HAMMER2_CHAIN_UNUSED00002000    0x00002000
378 #define HAMMER2_CHAIN_ONRBTREE          0x00004000      /* on parent RB tree */
379 #define HAMMER2_CHAIN_SNAPSHOT          0x00008000      /* snapshot special */
380 #define HAMMER2_CHAIN_EMBEDDED          0x00010000      /* embedded data */
381 #define HAMMER2_CHAIN_RELEASE           0x00020000      /* don't keep around */
382 #define HAMMER2_CHAIN_BMAPPED           0x00040000      /* present in blkmap */
383 #define HAMMER2_CHAIN_BMAPUPD           0x00080000      /* +needs updating */
384 #define HAMMER2_CHAIN_UNUSED00100000    0x00100000
385 #define HAMMER2_CHAIN_UNUSED00200000    0x00200000
386 #define HAMMER2_CHAIN_PFSBOUNDARY       0x00400000      /* super->pfs inode */
387
388 #define HAMMER2_CHAIN_FLUSH_MASK        (HAMMER2_CHAIN_MODIFIED |       \
389                                          HAMMER2_CHAIN_UPDATE |         \
390                                          HAMMER2_CHAIN_ONFLUSH)
391
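/*
 * Informal sketch: the flush scan tests this mask to decide whether a chain
 * still requires flush-related work (dirty media data, a pending parent
 * blocktable update, or presence on a flush list):
 *
 *      if (chain->flags & HAMMER2_CHAIN_FLUSH_MASK) {
 *              ... chain still needs attention during the flush ...
 *      }
 */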
392 /*
393  * Flags passed to hammer2_chain_lookup() and hammer2_chain_next()
394  *
395  * NOTE: MATCHIND allows an indirect block / freemap node to be returned
396  *       when the passed key range matches the radix.  Remember that key_end
397  *       is inclusive (e.g. {0x000,0xFFF}, not {0x000,0x1000}).
398  */
399 #define HAMMER2_LOOKUP_NOLOCK           0x00000001      /* ref only */
400 #define HAMMER2_LOOKUP_NODATA           0x00000002      /* data left NULL */
401 #define HAMMER2_LOOKUP_SHARED           0x00000100
402 #define HAMMER2_LOOKUP_MATCHIND         0x00000200      /* return all chains */
403 #define HAMMER2_LOOKUP_UNUSED0400       0x00000400
404 #define HAMMER2_LOOKUP_ALWAYS           0x00000800      /* resolve data */
405
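/*
 * Informal iteration sketch over an inclusive key range using the lookup
 * interface declared below ("parent" is assumed to be a locked chain, e.g.
 * obtained from hammer2_chain_lookup_init()).  hammer2_chain_next()
 * releases the previously returned chain as it advances.
 *
 *      hammer2_key_t key_next;
 *      hammer2_chain_t *chain;
 *      int cache_index = -1;
 *      int ddflag;
 *
 *      chain = hammer2_chain_lookup(&parent, &key_next,
 *                                   key_beg, key_end,
 *                                   &cache_index, 0, &ddflag);
 *      while (chain) {
 *              ... examine chain->bref ...
 *              chain = hammer2_chain_next(&parent, chain, &key_next,
 *                                         key_next, key_end,
 *                                         &cache_index, 0);
 *      }
 *      hammer2_chain_lookup_done(parent);
 */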
406 /*
407  * Flags passed to hammer2_chain_modify() and hammer2_chain_resize()
408  *
409  * NOTE: OPTDATA allows us to avoid instantiating buffers for INDIRECT
410  *       blocks in the INITIAL-create state.
411  */
412 #define HAMMER2_MODIFY_OPTDATA          0x00000002      /* data can be NULL */
413 #define HAMMER2_MODIFY_NO_MODIFY_TID    0x00000004
414 #define HAMMER2_MODIFY_UNUSED0008       0x00000008
415 #define HAMMER2_MODIFY_NOREALLOC        0x00000010
416
417 /*
418  * Flags passed to hammer2_chain_lock()
419  */
420 #define HAMMER2_RESOLVE_NEVER           1
421 #define HAMMER2_RESOLVE_MAYBE           2
422 #define HAMMER2_RESOLVE_ALWAYS          3
423 #define HAMMER2_RESOLVE_MASK            0x0F
424
425 #define HAMMER2_RESOLVE_SHARED          0x10    /* request shared lock */
426 #define HAMMER2_RESOLVE_NOREF           0x20    /* already ref'd on lock */
427
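/*
 * Informal usage sketch ("chain" assumed to be a valid hammer2_chain_t *):
 * a read-only consumer typically references and locks a chain with shared
 * data resolution, then releases it in the reverse order.
 *
 *      hammer2_chain_ref(chain);
 *      hammer2_chain_lock(chain, HAMMER2_RESOLVE_ALWAYS |
 *                                HAMMER2_RESOLVE_SHARED);
 *      ... chain->data is now valid for reading ...
 *      hammer2_chain_unlock(chain);
 *      hammer2_chain_drop(chain);
 */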
428 /*
429  * Flags passed to hammer2_chain_delete()
430  */
431 #define HAMMER2_DELETE_PERMANENT        0x0001
432 #define HAMMER2_DELETE_NOSTATS          0x0002
433
434 #define HAMMER2_INSERT_NOSTATS          0x0002
435 #define HAMMER2_INSERT_PFSROOT          0x0004
436
437 /*
438  * Flags passed to hammer2_chain_delete_duplicate()
439  */
440 #define HAMMER2_DELDUP_RECORE           0x0001
441
442 /*
443  * Cluster different types of storage together for allocations
444  */
445 #define HAMMER2_FREECACHE_INODE         0
446 #define HAMMER2_FREECACHE_INDIR         1
447 #define HAMMER2_FREECACHE_DATA          2
448 #define HAMMER2_FREECACHE_UNUSED3       3
449 #define HAMMER2_FREECACHE_TYPES         4
450
451 /*
452  * hammer2_freemap_alloc() block preference
453  */
454 #define HAMMER2_OFF_NOPREF              ((hammer2_off_t)-1)
455
456 /*
457  * BMAP read-ahead maximum parameters
458  */
459 #define HAMMER2_BMAP_COUNT              16      /* max bmap read-ahead */
460 #define HAMMER2_BMAP_BYTES              (HAMMER2_PBUFSIZE * HAMMER2_BMAP_COUNT)
461
462 /*
463  * hammer2_freemap_adjust()
464  */
465 #define HAMMER2_FREEMAP_DORECOVER       1
466 #define HAMMER2_FREEMAP_DOMAYFREE       2
467 #define HAMMER2_FREEMAP_DOREALFREE      3
468
469 /*
470  * HAMMER2 cluster - A set of chains representing the same entity.
471  *
472  * hammer2_cluster typically represents a temporary set of representative
473  * chains.  The one exception is that a hammer2_cluster is embedded in
474  * hammer2_inode.  This embedded cluster is ONLY used to track the
475  * representative chains and cannot be directly locked.
476  *
477  * A cluster is usually temporary (and thus per-thread) for locking purposes,
478  * allowing us to embed the asynchronous storage required for cluster
479  * operations in the cluster itself and adjust the state and status without
480  * having to worry too much about SMP issues.
481  *
482  * The exception is the cluster embedded in the hammer2_inode structure.
483  * This is used to cache the cluster state on an inode-by-inode basis.
484  * Individual hammer2_chain structures not incorporated into clusters might
485  * also stick around to cache miscellaneous elements.
486  *
487  * Because the cluster is a 'working copy' and is usually subject to cluster
488  * quorum rules, it is quite possible for us to end up with an insufficient
489  * number of live chains to execute an operation.  If an insufficient number
490  * of chains remain in a working copy, the operation may have to be
491  * downgraded, retried, stalled until the requisite number of chains is
492  * available, or possibly even error out depending on the mount type.
493  */
494 #define HAMMER2_MAXCLUSTER      8
495
496 struct hammer2_cluster_item {
497         hammer2_mtx_link_t      async_link;
498         hammer2_chain_t         *chain;
499         struct hammer2_cluster  *cluster;       /* link back to cluster */
500         int                     cache_index;
501         int                     unused01;
502 };
503
504 typedef struct hammer2_cluster_item hammer2_cluster_item_t;
505
506 struct hammer2_cluster {
507         int                     unused01;
508         int                     refs;           /* track for deallocation */
509         struct hammer2_pfs      *pmp;
510         uint32_t                flags;
511         int                     nchains;
512         hammer2_iocb_t          iocb;
513         hammer2_chain_t         *focus;         /* current focus (or mod) */
514         hammer2_cluster_item_t  array[HAMMER2_MAXCLUSTER];
515 };
516
517 typedef struct hammer2_cluster  hammer2_cluster_t;
518
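/*
 * Informal sketch: walking the chains backing a cluster ("cluster" assumed
 * valid).  Slots without a backing chain hold a NULL pointer, and
 * cluster->focus points at the chain currently used to satisfy data
 * accesses.
 *
 *      int i;
 *
 *      for (i = 0; i < cluster->nchains; ++i) {
 *              hammer2_chain_t *chain = cluster->array[i].chain;
 *              if (chain == NULL)
 *                      continue;
 *              ... per-chain work ...
 *      }
 */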
519 /*
520  * WRHARD       - Hard mounts can write fully synchronized
521  * RDHARD       - Hard mounts can read fully synchronized
522  * WRSOFT       - Soft mounts can write to at least the SOFT_MASTER
523  * RDSOFT       - Soft mounts can read from at least a SOFT_SLAVE
524  * RDSLAVE      - slaves are accessible (possibly unsynchronized or remote).
525  * MSYNCED      - All masters are fully synchronized
526  * SSYNCED      - All known local slaves are fully synchronized to masters
527  *
528  * All available masters are always incorporated.  All PFSs belonging to a
529  * cluster (master, slave, copy, whatever) always try to synchronize the
530  * total number of known masters in the PFS's root inode.
531  *
532  * A cluster might have access to many slaves, copies, or caches, but we
533  * have a limited number of cluster slots.  Any such elements which are
534  * directly mounted from block device(s) will always be incorporated.   Note
535  * that SSYNCED only applies to such elements which are directly mounted,
536  * not to any remote slaves, copies, or caches that could be available.  These
537  * bits are used to monitor and drive our synchronization threads.
538  *
539  * When asking the question 'is any data accessible at all', then a simple
540  * test against (RDHARD|RDSOFT|RDSLAVE) gives you the answer.  If any of
541  * these bits are set the object can be read with certain caveats:
542  * RDHARD - no caveats.  RDSOFT - authoritative but might not be synchronized.
543  * RDSLAVE - not authoritative; has some data but it could be old or
544  * incomplete.
545  *
546  * When both soft and hard mounts are available, data will be read and written
547  * via the soft mount only.  But all might be in the cluster because
548  * background synchronization threads still need to do their work.
549  */
550 #define HAMMER2_CLUSTER_INODE   0x00000001      /* embedded in inode */
551 #define HAMMER2_CLUSTER_NOSYNC  0x00000002      /* not in sync (cumulative) */
552 #define HAMMER2_CLUSTER_WRHARD  0x00000100      /* hard-mount can write */
553 #define HAMMER2_CLUSTER_RDHARD  0x00000200      /* hard-mount can read */
554 #define HAMMER2_CLUSTER_WRSOFT  0x00000400      /* soft-mount can write */
555 #define HAMMER2_CLUSTER_RDSOFT  0x00000800      /* soft-mount can read */
556 #define HAMMER2_CLUSTER_MSYNCED 0x00001000      /* all masters synchronized */
557 #define HAMMER2_CLUSTER_SSYNCED 0x00002000      /* known slaves synchronized */
558
559 #define HAMMER2_CLUSTER_ANYDATA ( HAMMER2_CLUSTER_RDHARD |      \
560                                   HAMMER2_CLUSTER_RDSOFT |      \
561                                   HAMMER2_CLUSTER_RDSLAVE)
562
563 #define HAMMER2_CLUSTER_RDOK    ( HAMMER2_CLUSTER_RDHARD |      \
564                                   HAMMER2_CLUSTER_RDSOFT)
565
566 #define HAMMER2_CLUSTER_WROK    ( HAMMER2_CLUSTER_WRHARD |      \
567                                   HAMMER2_CLUSTER_WRSOFT)
568
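/*
 * Informal sketch: answering "is any data accessible at all" and "can we
 * write" with the rollup masks above ("cluster" assumed valid):
 *
 *      if (cluster->flags & HAMMER2_CLUSTER_ANYDATA) {
 *              ... something is readable, subject to the caveats above ...
 *      }
 *      if ((cluster->flags & HAMMER2_CLUSTER_WROK) == 0) {
 *              ... no writable element, operation must stall or fail ...
 *      }
 */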
569
570 RB_HEAD(hammer2_inode_tree, hammer2_inode);
571
572 /*
573  * A hammer2 inode.
574  *
575  * NOTE: The inode-embedded cluster is never used directly for I/O (since
576  *       it may be shared).  Instead it will be replicated-in and synchronized
577  *       back out if changed.
578  */
579 struct hammer2_inode {
580         RB_ENTRY(hammer2_inode) rbnode;         /* inumber lookup (HL) */
581         hammer2_mtx_t           lock;           /* inode lock */
582         struct hammer2_pfs      *pmp;           /* PFS mount */
583         struct hammer2_inode    *pip;           /* parent inode */
584         struct vnode            *vp;
585         hammer2_cluster_t       cluster;
586         struct lockf            advlock;
587         hammer2_tid_t           inum;
588         u_int                   flags;
589         u_int                   refs;           /* +vpref, +flushref */
590         uint8_t                 comp_heuristic;
591         hammer2_off_t           size;
592         uint64_t                mtime;
593 };
594
595 typedef struct hammer2_inode hammer2_inode_t;
596
597 #define HAMMER2_INODE_MODIFIED          0x0001
598 #define HAMMER2_INODE_SROOT             0x0002  /* kmalloc special case */
599 #define HAMMER2_INODE_RENAME_INPROG     0x0004
600 #define HAMMER2_INODE_ONRBTREE          0x0008
601 #define HAMMER2_INODE_RESIZED           0x0010
602 #define HAMMER2_INODE_MTIME             0x0020
603
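/*
 * Informal usage sketch ("vp" assumed to be a hammer2 vnode): VNOPS
 * typically resolve the inode from the vnode and take the inode lock,
 * which returns a locked per-call cluster replicated from ip->cluster
 * (see the NOTE above).  VTOI() and the lock helpers are declared later
 * in this header.
 *
 *      hammer2_inode_t *ip = VTOI(vp);
 *      hammer2_cluster_t *cluster;
 *
 *      cluster = hammer2_inode_lock_sh(ip);
 *      ... read-only access to the inode's media data via cluster ...
 *      hammer2_inode_unlock_sh(ip, cluster);
 */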
604 int hammer2_inode_cmp(hammer2_inode_t *ip1, hammer2_inode_t *ip2);
605 RB_PROTOTYPE2(hammer2_inode_tree, hammer2_inode, rbnode, hammer2_inode_cmp,
606                 hammer2_tid_t);
607
608 /*
609  * inode-unlink side-structure
610  */
611 struct hammer2_inode_unlink {
612         TAILQ_ENTRY(hammer2_inode_unlink) entry;
613         hammer2_inode_t *ip;
614 };
615 TAILQ_HEAD(h2_unlk_list, hammer2_inode_unlink);
616
617 typedef struct hammer2_inode_unlink hammer2_inode_unlink_t;
618
619 /*
620  * Cluster node synchronization thread element.
621  *
622  * Multiple syncthrs can hang off of a hammer2_pfs structure, typically one
623  * for each block device that is part of the PFS.  Synchronization threads
624  * for PFSs accessed over the network are handled by their respective hosts.
625  *
626  * Synchronization threads are responsible for keeping a local node
627  * synchronized to the greater cluster.
628  *
629  * A syncthr can also hang off each hammer2_dev's super-root PFS (spmp).
630  * This thread is responsible for automatic bulkfree and dedup scans.
631  */
632 struct hammer2_syncthr {
633         struct hammer2_pfs *pmp;
634         kdmsg_state_t   *span;
635         thread_t        td;
636         uint32_t        flags;
637         uint32_t        unused01;
638         struct lock     lk;
639 };
640
641 typedef struct hammer2_syncthr hammer2_syncthr_t;
642
643 #define HAMMER2_SYNCTHR_UNMOUNTING      0x0001  /* unmount request */
644 #define HAMMER2_SYNCTHR_DEV             0x0002  /* related to dev, not pfs */
645 #define HAMMER2_SYNCTHR_SPANNED         0x0004  /* LNK_SPAN active */
646 #define HAMMER2_SYNCTHR_REMASTER        0x0008  /* remaster request */
647 #define HAMMER2_SYNCTHR_STOP            0x0010  /* exit request */
648 #define HAMMER2_SYNCTHR_FREEZE          0x0020  /* force idle */
649 #define HAMMER2_SYNCTHR_FROZEN          0x0040  /* thread is frozen */
650
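/*
 * Informal usage sketch: the mount path can start a PFS's initial
 * synchronization thread with the helpers declared at the end of this
 * header; hammer2_syncthr_primary() is the thread body and the thread
 * state is embedded in the pfs as pmp->primary_thr.
 *
 *      hammer2_syncthr_create(&pmp->primary_thr, pmp,
 *                             hammer2_syncthr_primary);
 *      ...
 *      hammer2_syncthr_delete(&pmp->primary_thr);
 */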
651 /*
652  * A hammer2 transaction and flush sequencing structure.
653  *
654  * This global structure is tied into hammer2_dev and is used
655  * to sequence modifying operations and flushes.
656  *
657  * (a) Any modifying operations with sync_tid >= flush_tid will stall until
658  *     all modifying operations with sync_tid < flush_tid complete.
659  *
660  *     The flush related to flush_tid stalls until all modifying operations
661  *     with sync_tid < flush_tid complete.
662  *
663  * (b) Once unstalled, modifying operations with sync_tid > flush_tid are
664  *     allowed to run.  All modifications cause modify/duplicate operations
665  *     to occur on the related chains.  Note that most INDIRECT blocks will
666  *     be unaffected because the modifications just overload the RBTREE
667  *     structurally instead of actually modifying the indirect blocks.
668  *
669  * (c) The actual flush unstalls and RUNS CONCURRENTLY with (b), but only
670  *     utilizes the chain structures with sync_tid <= flush_tid.  The
671  *     flush will modify related indirect blocks and inodes in-place
672  *     (rather than duplicate) since the adjustments are compatible with
673  *     (b)'s RBTREE overloading
674  *
675  *     SPECIAL NOTE:  Inode modifications have to also propagate along any
676  *                    modify/duplicate chains.  File writes detect the flush
677  *                    and force out the conflicting buffer cache buffer(s)
678  *                    before reusing them.
679  *
680  * (d) Snapshots can be made instantly but must be flushed and disconnected
681  *     from their duplicative source before they can be mounted.  This is
682  *     because while H2's on-media structure supports forks, its in-memory
683  *     structure only supports very simple forking for background flushing
684  *     purposes.
685  *
686  * TODO: Flush merging.  When fsync() is called on multiple discrete files
687  *       concurrently there is no reason to stall the second fsync.
688  *       The final flush that reaches the root can cover both fsync()s.
689  *
690  *     The chains typically terminate as they fly onto the disk.  The flush
691  *     ultimately reaches the volume header.
692  */
693 struct hammer2_trans {
694         TAILQ_ENTRY(hammer2_trans) entry;
695         struct hammer2_pfs      *pmp;
696         hammer2_xid_t           sync_xid;
697         hammer2_tid_t           inode_tid;      /* inode number assignment */
698         thread_t                td;             /* pointer */
699         int                     flags;
700         int                     blocked;
701         uint8_t                 inodes_created;
702         uint8_t                 dummy[7];
703 };
704
705 typedef struct hammer2_trans hammer2_trans_t;
706
707 #define HAMMER2_TRANS_ISFLUSH           0x0001  /* formal flush */
708 #define HAMMER2_TRANS_CONCURRENT        0x0002  /* concurrent w/flush */
709 #define HAMMER2_TRANS_BUFCACHE          0x0004  /* from bioq strategy write */
710 #define HAMMER2_TRANS_NEWINODE          0x0008  /* caller allocating inode */
711 #define HAMMER2_TRANS_UNUSED0010        0x0010
712 #define HAMMER2_TRANS_PREFLUSH          0x0020  /* preflush state */
713
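/*
 * Informal usage sketch ("pmp" assumed to be the relevant hammer2_pfs_t *):
 * modifying front-end operations bracket their work in a transaction; a
 * formal flush passes HAMMER2_TRANS_ISFLUSH instead of 0.
 *
 *      hammer2_trans_t trans;
 *
 *      hammer2_trans_init(&trans, pmp, 0);
 *      ... chain/cluster/inode modifications ...
 *      hammer2_trans_done(&trans);
 */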
714 #define HAMMER2_FREEMAP_HEUR_NRADIX     4       /* pwr 2 PBUFRADIX-MINIORADIX */
715 #define HAMMER2_FREEMAP_HEUR_TYPES      8
716 #define HAMMER2_FREEMAP_HEUR            (HAMMER2_FREEMAP_HEUR_NRADIX * \
717                                          HAMMER2_FREEMAP_HEUR_TYPES)
718
719 /*
720  * Transaction Rendezvous
721  */
722 TAILQ_HEAD(hammer2_trans_queue, hammer2_trans);
723
724 struct hammer2_trans_manage {
725         hammer2_xid_t           flush_xid;      /* last flush transaction */
726         hammer2_xid_t           alloc_xid;
727         struct lock             translk;        /* lockmgr lock */
728         struct hammer2_trans_queue transq;      /* modifying transactions */
729         int                     flushcnt;       /* track flush trans */
730 };
731
732 typedef struct hammer2_trans_manage hammer2_trans_manage_t;
733
734 /*
735  * Global (per partition) management structure, represents a hard block
736  * device.  Typically referenced by hammer2_chain structures when applicable.
737  * Typically not used for network-managed elements.
738  *
739  * Note that a single hammer2_dev can be indirectly tied to multiple system
740  * mount points.  There is no direct relationship.  System mounts are
741  * per-cluster-id, not per-block-device, and a single hard mount might contain
742  * many PFSs and those PFSs might combine together in various ways to form
743  * the set of available clusters.
744  */
745 struct hammer2_dev {
746         struct vnode    *devvp;         /* device vnode */
747         int             ronly;          /* read-only mount */
748         int             pmp_count;      /* number of actively mounted PFSs */
749         TAILQ_ENTRY(hammer2_dev) mntentry; /* hammer2_mntlist */
750
751         struct malloc_type *mchain;
752         int             nipstacks;
753         int             maxipstacks;
754         kdmsg_iocom_t   iocom;          /* volume-level dmsg interface */
755         struct spinlock io_spin;        /* iotree access */
756         struct hammer2_io_tree iotree;
757         int             iofree_count;
758         hammer2_chain_t vchain;         /* anchor chain (topology) */
759         hammer2_chain_t fchain;         /* anchor chain (freemap) */
760         struct spinlock list_spin;
761         struct h2_flush_list    flushq; /* flush seeds */
762         struct hammer2_pfs *spmp;       /* super-root pmp for transactions */
763         struct lock     vollk;          /* lockmgr lock */
764         hammer2_off_t   heur_freemap[HAMMER2_FREEMAP_HEUR];
765         int             volhdrno;       /* last volhdrno written */
766         hammer2_volume_data_t voldata;
767         hammer2_volume_data_t volsync;  /* synchronized voldata */
768 };
769
770 typedef struct hammer2_dev hammer2_dev_t;
771
772 /*
773  * Per-cluster management structure.  This structure will be tied to a
774  * system mount point if the system is mounting the PFS, but is also used
775  * to manage clusters encountered during the super-root scan or received
776  * via LNK_SPANs that might not be mounted.
777  *
778  * This structure is also used to represent the super-root that hangs off
779  * of a hard mount point.  The super-root is not really a cluster element.
780  * In this case the spmp_hmp field will be non-NULL.  It's just easier to do
781  * this than to special case super-root manipulation in the hammer2_chain*
782  * code as being only hammer2_dev-related.
783  *
784  * pfs_mode and pfs_nmasters are rollup fields which critically describe
785  * how elements of the cluster act on the cluster.  pfs_mode is only applicable
786  * when a PFS is mounted by the system.  pfs_nmasters is our best guess as to
787  * how many masters have been configured for a cluster and is always
788  * applicable.
789  *
790  * WARNING! Portions of this structure have deferred initialization.  In
791  *          particular, if not mounted there will be no ihidden or wthread.
792  *          Unmounted network PFSs will also be missing iroot, and numerous
793  *          other fields will not be initialized prior to mount.
794  *
795  *          Synchronization threads are chain-specific and only applicable
796  *          to local hard PFS entries.  A hammer2_pfs structure may contain
797  *          more than one when multiple hard PFSs are present on the local
798  *          machine which require synchronization monitoring.  Most PFSs
799  *          (such as snapshots) are 1xMASTER PFSs which do not need a
800  *          synchronization thread.
801  *
802  * WARNING! The chains making up pfs->iroot's cluster are accounted for in
803  *          hammer2_dev->pmp_count when the pfs is associated with a mount
804  *          point.
805  */
806 struct hammer2_pfs {
807         struct mount            *mp;
808         TAILQ_ENTRY(hammer2_pfs) mntentry;      /* hammer2_pfslist */
809         uuid_t                  pfs_clid;
810         hammer2_dev_t           *spmp_hmp;      /* only if super-root pmp */
811         hammer2_inode_t         *iroot;         /* PFS root inode */
812         hammer2_inode_t         *ihidden;       /* PFS hidden directory */
813         struct lock             lock;           /* PFS lock for certain ops */
814         hammer2_off_t           inode_count;    /* copy of inode_count */
815         struct netexport        export;         /* nfs export */
816         int                     ronly;          /* read-only mount */
817         struct malloc_type      *minode;
818         struct malloc_type      *mmsg;
819         struct spinlock         inum_spin;      /* inumber lookup */
820         struct hammer2_inode_tree inum_tree;    /* (not applicable to spmp) */
821         hammer2_tid_t           alloc_tid;
822         hammer2_tid_t           flush_tid;
823         hammer2_tid_t           inode_tid;
824         uint8_t                 pfs_nmasters;   /* total masters */
825         uint8_t                 pfs_mode;       /* operating mode PFSMODE */
826         uint8_t                 unused01;
827         uint8_t                 unused02;
828         uint32_t                unused03;
829         long                    inmem_inodes;
830         uint32_t                inmem_dirty_chains;
831         int                     count_lwinprog; /* logical write in prog */
832         struct spinlock         list_spin;
833         struct h2_unlk_list     unlinkq;        /* last-close unlink */
834         hammer2_syncthr_t       primary_thr;
835         thread_t                wthread_td;     /* write thread td */
836         struct bio_queue_head   wthread_bioq;   /* logical buffer bioq */
837         hammer2_mtx_t           wthread_mtx;    /* interlock */
838         int                     wthread_destroy;/* termination sequencing */
839 };
840
841 typedef struct hammer2_pfs hammer2_pfs_t;
842
843 #define HAMMER2_DIRTYCHAIN_WAITING      0x80000000
844 #define HAMMER2_DIRTYCHAIN_MASK         0x7FFFFFFF
845
846 #define HAMMER2_LWINPROG_WAITING        0x80000000
847 #define HAMMER2_LWINPROG_MASK           0x7FFFFFFF
848
849 /*
850  * Bulkscan
851  */
852 #define HAMMER2_BULK_ABORT      0x00000001
853
854 /*
855  * Misc
856  */
857 #if defined(_KERNEL)
858
859 MALLOC_DECLARE(M_HAMMER2);
860
861 #define VTOI(vp)        ((hammer2_inode_t *)(vp)->v_data)
862 #define ITOV(ip)        ((ip)->vp)
863
864 /*
865  * Currently locked chains retain the locked buffer cache buffer for
866  * indirect blocks, and indirect blocks can be one of two sizes.  The
867  * device buffer has to match the case to avoid deadlocking recursive
868  * chains that might otherwise try to access different offsets within
869  * the same device buffer.
870  */
871 static __inline
872 int
873 hammer2_devblkradix(int radix)
874 {
875 #if 0
876         if (radix <= HAMMER2_LBUFRADIX) {
877                 return (HAMMER2_LBUFRADIX);
878         } else {
879                 return (HAMMER2_PBUFRADIX);
880         }
881 #endif
882         return (HAMMER2_PBUFRADIX);
883 }
884
885 /*
886  * XXX almost time to remove this.  DIO uses PBUFSIZE exclusively now.
887  */
888 static __inline
889 size_t
890 hammer2_devblksize(size_t bytes)
891 {
892 #if 0
893         if (bytes <= HAMMER2_LBUFSIZE) {
894                 return(HAMMER2_LBUFSIZE);
895         } else {
896                 KKASSERT(bytes <= HAMMER2_PBUFSIZE &&
897                          (bytes ^ (bytes - 1)) == ((bytes << 1) - 1));
898                 return (HAMMER2_PBUFSIZE);
899         }
900 #endif
901         return (HAMMER2_PBUFSIZE);
902 }
903
904
905 static __inline
906 hammer2_pfs_t *
907 MPTOPMP(struct mount *mp)
908 {
909         return ((hammer2_pfs_t *)mp->mnt_data);
910 }
911
912 #define LOCKSTART       int __nlocks = curthread->td_locks
913 #define LOCKENTER       (++curthread->td_locks)
914 #define LOCKEXIT        (--curthread->td_locks)
915 #define LOCKSTOP        KKASSERT(curthread->td_locks == __nlocks)
916
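/*
 * Informal usage sketch (hammer2_vop_example is a hypothetical function
 * name): these macros are debugging aids asserting that a code path exits
 * with curthread->td_locks unchanged from when it entered.
 *
 *      static int
 *      hammer2_vop_example(struct vop_generic_args *ap)
 *      {
 *              LOCKSTART;
 *              ... acquire and release locks/tokens symmetrically ...
 *              LOCKSTOP;
 *              return (0);
 *      }
 */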
917 extern struct vop_ops hammer2_vnode_vops;
918 extern struct vop_ops hammer2_spec_vops;
919 extern struct vop_ops hammer2_fifo_vops;
920
921 extern int hammer2_debug;
922 extern int hammer2_cluster_enable;
923 extern int hammer2_hardlink_enable;
924 extern int hammer2_flush_pipe;
925 extern int hammer2_synchronous_flush;
926 extern int hammer2_dio_count;
927 extern long hammer2_limit_dirty_chains;
928 extern long hammer2_iod_file_read;
929 extern long hammer2_iod_meta_read;
930 extern long hammer2_iod_indr_read;
931 extern long hammer2_iod_fmap_read;
932 extern long hammer2_iod_volu_read;
933 extern long hammer2_iod_file_write;
934 extern long hammer2_iod_meta_write;
935 extern long hammer2_iod_indr_write;
936 extern long hammer2_iod_fmap_write;
937 extern long hammer2_iod_volu_write;
938 extern long hammer2_ioa_file_read;
939 extern long hammer2_ioa_meta_read;
940 extern long hammer2_ioa_indr_read;
941 extern long hammer2_ioa_fmap_read;
942 extern long hammer2_ioa_volu_read;
943 extern long hammer2_ioa_file_write;
944 extern long hammer2_ioa_meta_write;
945 extern long hammer2_ioa_indr_write;
946 extern long hammer2_ioa_fmap_write;
947 extern long hammer2_ioa_volu_write;
948
949 extern struct objcache *cache_buffer_read;
950 extern struct objcache *cache_buffer_write;
951
952 extern int destroy;
953 extern int write_thread_wakeup;
954
955 /*
956  * hammer2_subr.c
957  */
958 #define hammer2_icrc32(buf, size)       iscsi_crc32((buf), (size))
959 #define hammer2_icrc32c(buf, size, crc) iscsi_crc32_ext((buf), (size), (crc))
960
961 int hammer2_signal_check(time_t *timep);
962 hammer2_cluster_t *hammer2_inode_lock_ex(hammer2_inode_t *ip);
963 hammer2_cluster_t *hammer2_inode_lock_nex(hammer2_inode_t *ip, int how);
964 hammer2_cluster_t *hammer2_inode_lock_sh(hammer2_inode_t *ip);
965 void hammer2_inode_unlock_ex(hammer2_inode_t *ip, hammer2_cluster_t *chain);
966 void hammer2_inode_unlock_sh(hammer2_inode_t *ip, hammer2_cluster_t *chain);
967 hammer2_mtx_state_t hammer2_inode_lock_temp_release(hammer2_inode_t *ip);
968 void hammer2_inode_lock_temp_restore(hammer2_inode_t *ip,
969                         hammer2_mtx_state_t ostate);
970 int hammer2_inode_lock_upgrade(hammer2_inode_t *ip);
971 void hammer2_inode_lock_downgrade(hammer2_inode_t *ip, int);
972
973 void hammer2_dev_exlock(hammer2_dev_t *hmp);
974 void hammer2_dev_shlock(hammer2_dev_t *hmp);
975 void hammer2_dev_unlock(hammer2_dev_t *hmp);
976
977 int hammer2_get_dtype(const hammer2_inode_data_t *ipdata);
978 int hammer2_get_vtype(const hammer2_inode_data_t *ipdata);
979 u_int8_t hammer2_get_obj_type(enum vtype vtype);
980 void hammer2_time_to_timespec(u_int64_t xtime, struct timespec *ts);
981 u_int64_t hammer2_timespec_to_time(const struct timespec *ts);
982 u_int32_t hammer2_to_unix_xid(const uuid_t *uuid);
983 void hammer2_guid_to_uuid(uuid_t *uuid, u_int32_t guid);
984 hammer2_xid_t hammer2_trans_newxid(hammer2_pfs_t *pmp);
985 void hammer2_trans_manage_init(void);
986
987 hammer2_key_t hammer2_dirhash(const unsigned char *name, size_t len);
988 int hammer2_getradix(size_t bytes);
989
990 int hammer2_calc_logical(hammer2_inode_t *ip, hammer2_off_t uoff,
991                         hammer2_key_t *lbasep, hammer2_key_t *leofp);
992 int hammer2_calc_physical(hammer2_inode_t *ip,
993                         const hammer2_inode_data_t *ipdata,
994                         hammer2_key_t lbase);
995 void hammer2_update_time(uint64_t *timep);
996 void hammer2_adjreadcounter(hammer2_blockref_t *bref, size_t bytes);
997
998 /*
999  * hammer2_inode.c
1000  */
1001 struct vnode *hammer2_igetv(hammer2_inode_t *ip, hammer2_cluster_t *cparent,
1002                         int *errorp);
1003 void hammer2_inode_lock_nlinks(hammer2_inode_t *ip);
1004 void hammer2_inode_unlock_nlinks(hammer2_inode_t *ip);
1005 hammer2_inode_t *hammer2_inode_lookup(hammer2_pfs_t *pmp,
1006                         hammer2_tid_t inum);
1007 hammer2_inode_t *hammer2_inode_get(hammer2_pfs_t *pmp,
1008                         hammer2_inode_t *dip, hammer2_cluster_t *cluster);
1009 void hammer2_inode_free(hammer2_inode_t *ip);
1010 void hammer2_inode_ref(hammer2_inode_t *ip);
1011 void hammer2_inode_drop(hammer2_inode_t *ip);
1012 void hammer2_inode_repoint(hammer2_inode_t *ip, hammer2_inode_t *pip,
1013                         hammer2_cluster_t *cluster);
1014 void hammer2_run_unlinkq(hammer2_trans_t *trans, hammer2_pfs_t *pmp);
1015
1016 hammer2_inode_t *hammer2_inode_create(hammer2_trans_t *trans,
1017                         hammer2_inode_t *dip,
1018                         struct vattr *vap, struct ucred *cred,
1019                         const uint8_t *name, size_t name_len,
1020                         hammer2_cluster_t **clusterp,
1021                         int flags, int *errorp);
1022 int hammer2_inode_connect(hammer2_trans_t *trans,
1023                         hammer2_cluster_t **clusterp, int hlink,
1024                         hammer2_inode_t *dip, hammer2_cluster_t *dcluster,
1025                         const uint8_t *name, size_t name_len,
1026                         hammer2_key_t key);
1027 hammer2_inode_t *hammer2_inode_common_parent(hammer2_inode_t *fdip,
1028                         hammer2_inode_t *tdip);
1029 void hammer2_inode_fsync(hammer2_trans_t *trans, hammer2_inode_t *ip,
1030                         hammer2_cluster_t *cparent);
1031 int hammer2_unlink_file(hammer2_trans_t *trans, hammer2_inode_t *dip,
1032                         const uint8_t *name, size_t name_len, int isdir,
1033                         int *hlinkp, struct nchandle *nch, int nlinks);
1034 int hammer2_hardlink_consolidate(hammer2_trans_t *trans,
1035                         hammer2_inode_t *ip, hammer2_cluster_t **clusterp,
1036                         hammer2_inode_t *cdip, hammer2_cluster_t *cdcluster,
1037                         int nlinks);
1038 int hammer2_hardlink_deconsolidate(hammer2_trans_t *trans, hammer2_inode_t *dip,
1039                         hammer2_chain_t **chainp, hammer2_chain_t **ochainp);
1040 int hammer2_hardlink_find(hammer2_inode_t *dip, hammer2_cluster_t **cparentp,
1041                         hammer2_cluster_t *cluster);
1042 int hammer2_parent_find(hammer2_cluster_t **cparentp,
1043                         hammer2_cluster_t *cluster);
1044 void hammer2_inode_install_hidden(hammer2_pfs_t *pmp);
1045
1046 /*
1047  * hammer2_chain.c
1048  */
1049 void hammer2_voldata_lock(hammer2_dev_t *hmp);
1050 void hammer2_voldata_unlock(hammer2_dev_t *hmp);
1051 void hammer2_voldata_modify(hammer2_dev_t *hmp);
1052 hammer2_chain_t *hammer2_chain_alloc(hammer2_dev_t *hmp,
1053                                 hammer2_pfs_t *pmp,
1054                                 hammer2_trans_t *trans,
1055                                 hammer2_blockref_t *bref);
1056 void hammer2_chain_core_alloc(hammer2_trans_t *trans, hammer2_chain_t *chain);
1057 void hammer2_chain_ref(hammer2_chain_t *chain);
1058 void hammer2_chain_drop(hammer2_chain_t *chain);
1059 int hammer2_chain_lock(hammer2_chain_t *chain, int how);
1060 const hammer2_media_data_t *hammer2_chain_rdata(hammer2_chain_t *chain);
1061 hammer2_media_data_t *hammer2_chain_wdata(hammer2_chain_t *chain);
1062
1063 /*
1064  * hammer2_cluster.c
1065  */
1066 int hammer2_cluster_isunlinked(hammer2_cluster_t *cluster);
1067 void hammer2_cluster_load_async(hammer2_cluster_t *cluster,
1068                                 void (*callback)(hammer2_iocb_t *iocb),
1069                                 void *ptr);
1070 void hammer2_chain_moved(hammer2_chain_t *chain);
1071 void hammer2_chain_modify(hammer2_trans_t *trans,
1072                                 hammer2_chain_t *chain, int flags);
1073 void hammer2_chain_resize(hammer2_trans_t *trans, hammer2_inode_t *ip,
1074                                 hammer2_chain_t *parent,
1075                                 hammer2_chain_t *chain,
1076                                 int nradix, int flags);
1077 void hammer2_chain_unlock(hammer2_chain_t *chain);
1078 void hammer2_chain_wait(hammer2_chain_t *chain);
1079 hammer2_chain_t *hammer2_chain_get(hammer2_chain_t *parent, int generation,
1080                                 hammer2_blockref_t *bref);
1081 hammer2_chain_t *hammer2_chain_lookup_init(hammer2_chain_t *parent, int flags);
1082 void hammer2_chain_lookup_done(hammer2_chain_t *parent);
1083 hammer2_chain_t *hammer2_chain_lookup(hammer2_chain_t **parentp,
1084                                 hammer2_key_t *key_nextp,
1085                                 hammer2_key_t key_beg, hammer2_key_t key_end,
1086                                 int *cache_indexp, int flags, int *ddflagp);
1087 hammer2_chain_t *hammer2_chain_next(hammer2_chain_t **parentp,
1088                                 hammer2_chain_t *chain,
1089                                 hammer2_key_t *key_nextp,
1090                                 hammer2_key_t key_beg, hammer2_key_t key_end,
1091                                 int *cache_indexp, int flags);
1092 hammer2_chain_t *hammer2_chain_scan(hammer2_chain_t *parent,
1093                                 hammer2_chain_t *chain,
1094                                 int *cache_indexp, int flags);
1095
1096 int hammer2_chain_create(hammer2_trans_t *trans, hammer2_chain_t **parentp,
1097                                 hammer2_chain_t **chainp,
1098                                 hammer2_pfs_t *pmp,
1099                                 hammer2_key_t key, int keybits,
1100                                 int type, size_t bytes, int flags);
1101 void hammer2_chain_rename(hammer2_trans_t *trans, hammer2_blockref_t *bref,
1102                                 hammer2_chain_t **parentp,
1103                                 hammer2_chain_t *chain, int flags);
1104 int hammer2_chain_snapshot(hammer2_trans_t *trans, hammer2_chain_t **chainp,
1105                                 hammer2_ioc_pfs_t *pfs);
1106 void hammer2_chain_delete(hammer2_trans_t *trans, hammer2_chain_t *parent,
1107                                 hammer2_chain_t *chain, int flags);
1108 void hammer2_chain_delete_duplicate(hammer2_trans_t *trans,
1109                                 hammer2_chain_t **chainp, int flags);
1110 void hammer2_flush(hammer2_trans_t *trans, hammer2_chain_t *chain);
1111 void hammer2_chain_commit(hammer2_trans_t *trans, hammer2_chain_t *chain);
1112 void hammer2_chain_setflush(hammer2_trans_t *trans, hammer2_chain_t *chain);
1113 void hammer2_chain_countbrefs(hammer2_chain_t *chain,
1114                                 hammer2_blockref_t *base, int count);
1115
1116 void hammer2_chain_setcheck(hammer2_chain_t *chain, void *bdata);
1117 int hammer2_chain_testcheck(hammer2_chain_t *chain, void *bdata);
1118
1119
1120 void hammer2_pfs_memory_wait(hammer2_pfs_t *pmp);
1121 void hammer2_pfs_memory_inc(hammer2_pfs_t *pmp);
1122 void hammer2_pfs_memory_wakeup(hammer2_pfs_t *pmp);
1123
1124 void hammer2_base_delete(hammer2_trans_t *trans, hammer2_chain_t *chain,
1125                                 hammer2_blockref_t *base, int count,
1126                                 int *cache_indexp, hammer2_chain_t *child);
1127 void hammer2_base_insert(hammer2_trans_t *trans, hammer2_chain_t *chain,
1128                                 hammer2_blockref_t *base, int count,
1129                                 int *cache_indexp, hammer2_chain_t *child);
1130
1131 /*
1132  * hammer2_trans.c
1133  */
1134 void hammer2_trans_init(hammer2_trans_t *trans, hammer2_pfs_t *pmp,
1135                                 int flags);
1136 void hammer2_trans_spmp(hammer2_trans_t *trans, hammer2_pfs_t *pmp);
1137 void hammer2_trans_done(hammer2_trans_t *trans);
1138
1139 /*
1140  * hammer2_ioctl.c
1141  */
1142 int hammer2_ioctl(hammer2_inode_t *ip, u_long com, void *data,
1143                                 int fflag, struct ucred *cred);
1144
1145 /*
1146  * hammer2_io.c
1147  */
1148 void hammer2_io_putblk(hammer2_io_t **diop);
1149 void hammer2_io_cleanup(hammer2_dev_t *hmp, struct hammer2_io_tree *tree);
1150 char *hammer2_io_data(hammer2_io_t *dio, off_t lbase);
1151 void hammer2_io_getblk(hammer2_dev_t *hmp, off_t lbase, int lsize,
1152                                 hammer2_iocb_t *iocb);
1153 void hammer2_io_complete(hammer2_iocb_t *iocb);
1154 void hammer2_io_callback(struct bio *bio);
1155 void hammer2_iocb_wait(hammer2_iocb_t *iocb);
1156 int hammer2_io_new(hammer2_dev_t *hmp, off_t lbase, int lsize,
1157                                 hammer2_io_t **diop);
1158 int hammer2_io_newnz(hammer2_dev_t *hmp, off_t lbase, int lsize,
1159                                 hammer2_io_t **diop);
1160 int hammer2_io_newq(hammer2_dev_t *hmp, off_t lbase, int lsize,
1161                                 hammer2_io_t **diop);
1162 int hammer2_io_bread(hammer2_dev_t *hmp, off_t lbase, int lsize,
1163                                 hammer2_io_t **diop);
1164 void hammer2_io_bawrite(hammer2_io_t **diop);
1165 void hammer2_io_bdwrite(hammer2_io_t **diop);
1166 int hammer2_io_bwrite(hammer2_io_t **diop);
1167 int hammer2_io_isdirty(hammer2_io_t *dio);
1168 void hammer2_io_setdirty(hammer2_io_t *dio);
1169 void hammer2_io_setinval(hammer2_io_t *dio, u_int bytes);
1170 void hammer2_io_brelse(hammer2_io_t **diop);
1171 void hammer2_io_bqrelse(hammer2_io_t **diop);
1172
1173 /*
1174  * hammer2_msgops.c
1175  */
1176 int hammer2_msg_dbg_rcvmsg(kdmsg_msg_t *msg);
1177 int hammer2_msg_adhoc_input(kdmsg_msg_t *msg);
1178
1179 /*
1180  * hammer2_vfsops.c
1181  */
1182 void hammer2_clusterctl_wakeup(kdmsg_iocom_t *iocom);
1183 void hammer2_volconf_update(hammer2_dev_t *hmp, int index);
1184 void hammer2_dump_chain(hammer2_chain_t *chain, int tab, int *countp, char pfx);
1185 void hammer2_bioq_sync(hammer2_pfs_t *pmp);
1186 int hammer2_vfs_sync(struct mount *mp, int waitflags);
1187 hammer2_pfs_t *hammer2_pfsalloc(hammer2_cluster_t *cluster,
1188                                 const hammer2_inode_data_t *ripdata,
1189                                 hammer2_tid_t alloc_tid);
1190
1191 void hammer2_lwinprog_ref(hammer2_pfs_t *pmp);
1192 void hammer2_lwinprog_drop(hammer2_pfs_t *pmp);
1193 void hammer2_lwinprog_wait(hammer2_pfs_t *pmp);
1194
1195 /*
1196  * hammer2_freemap.c
1197  */
1198 int hammer2_freemap_alloc(hammer2_trans_t *trans, hammer2_chain_t *chain,
1199                                 size_t bytes);
1200 void hammer2_freemap_adjust(hammer2_trans_t *trans, hammer2_dev_t *hmp,
1201                                 hammer2_blockref_t *bref, int how);
1202
1203 /*
1204  * hammer2_cluster.c
1205  */
1206 int hammer2_cluster_need_resize(hammer2_cluster_t *cluster, int bytes);
1207 uint8_t hammer2_cluster_type(hammer2_cluster_t *cluster);
1208 const hammer2_media_data_t *hammer2_cluster_rdata(hammer2_cluster_t *cluster);
1209 hammer2_media_data_t *hammer2_cluster_wdata(hammer2_cluster_t *cluster);
1210 hammer2_cluster_t *hammer2_cluster_from_chain(hammer2_chain_t *chain);
1211 int hammer2_cluster_modified(hammer2_cluster_t *cluster);
1212 int hammer2_cluster_duplicated(hammer2_cluster_t *cluster);
1213 void hammer2_cluster_set_chainflags(hammer2_cluster_t *cluster, uint32_t flags);
1214 void hammer2_cluster_clr_chainflags(hammer2_cluster_t *cluster, uint32_t flags);
1215 void hammer2_cluster_bref(hammer2_cluster_t *cluster, hammer2_blockref_t *bref);
1216 void hammer2_cluster_setflush(hammer2_trans_t *trans,
1217                         hammer2_cluster_t *cluster);
1218 void hammer2_cluster_setmethod_check(hammer2_trans_t *trans,
1219                         hammer2_cluster_t *cluster, int check_algo);
1220 hammer2_cluster_t *hammer2_cluster_alloc(hammer2_pfs_t *pmp,
1221                         hammer2_trans_t *trans,
1222                         hammer2_blockref_t *bref);
1223 void hammer2_cluster_ref(hammer2_cluster_t *cluster);
1224 void hammer2_cluster_drop(hammer2_cluster_t *cluster);
1225 void hammer2_cluster_wait(hammer2_cluster_t *cluster);
1226 int hammer2_cluster_lock(hammer2_cluster_t *cluster, int how);
1227 void hammer2_cluster_replace(hammer2_cluster_t *dst, hammer2_cluster_t *src);
1228 void hammer2_cluster_replace_locked(hammer2_cluster_t *dst,
1229                         hammer2_cluster_t *src);
1230 hammer2_cluster_t *hammer2_cluster_copy(hammer2_cluster_t *ocluster);
1231 void hammer2_cluster_unlock(hammer2_cluster_t *cluster);
1232 void hammer2_cluster_resize(hammer2_trans_t *trans, hammer2_inode_t *ip,
1233                         hammer2_cluster_t *cparent, hammer2_cluster_t *cluster,
1234                         int nradix, int flags);
1235 hammer2_inode_data_t *hammer2_cluster_modify_ip(hammer2_trans_t *trans,
1236                         hammer2_inode_t *ip, hammer2_cluster_t *cluster,
1237                         int flags);
1238 void hammer2_cluster_modify(hammer2_trans_t *trans, hammer2_cluster_t *cluster,
1239                         int flags);
1240 void hammer2_cluster_modsync(hammer2_cluster_t *cluster);
1241 hammer2_cluster_t *hammer2_cluster_lookup_init(hammer2_cluster_t *cparent,
1242                         int flags);
1243 void hammer2_cluster_lookup_done(hammer2_cluster_t *cparent);
1244 hammer2_cluster_t *hammer2_cluster_lookup(hammer2_cluster_t *cparent,
1245                         hammer2_key_t *key_nextp,
1246                         hammer2_key_t key_beg, hammer2_key_t key_end,
1247                         int flags, int *ddflagp);
1248 hammer2_cluster_t *hammer2_cluster_next(hammer2_cluster_t *cparent,
1249                         hammer2_cluster_t *cluster,
1250                         hammer2_key_t *key_nextp,
1251                         hammer2_key_t key_beg, hammer2_key_t key_end,
1252                         int flags);
1253 hammer2_cluster_t *hammer2_cluster_scan(hammer2_cluster_t *cparent,
1254                         hammer2_cluster_t *cluster, int flags);
1255 int hammer2_cluster_create(hammer2_trans_t *trans, hammer2_cluster_t *cparent,
1256                         hammer2_cluster_t **clusterp,
1257                         hammer2_key_t key, int keybits,
1258                         int type, size_t bytes, int flags);
1259 void hammer2_cluster_rename(hammer2_trans_t *trans, hammer2_blockref_t *bref,
1260                         hammer2_cluster_t *cparent, hammer2_cluster_t *cluster,
1261                         int flags);
1262 void hammer2_cluster_delete(hammer2_trans_t *trans, hammer2_cluster_t *pcluster,
1263                         hammer2_cluster_t *cluster, int flags);
1264 int hammer2_cluster_snapshot(hammer2_trans_t *trans,
1265                         hammer2_cluster_t *ocluster, hammer2_ioc_pfs_t *pfs);
1266 hammer2_cluster_t *hammer2_cluster_parent(hammer2_cluster_t *cluster);
1267
1268 int hammer2_bulk_scan(hammer2_trans_t *trans, hammer2_chain_t *parent,
1269                         int (*func)(hammer2_chain_t *chain, void *info),
1270                         void *info);
1271 int hammer2_bulkfree_pass(hammer2_dev_t *hmp,
1272                         struct hammer2_ioc_bulkfree *bfi);
1273
1274 /*
1275  * hammer2_iocom.c
1276  */
1277 void hammer2_iocom_init(hammer2_dev_t *hmp);
1278 void hammer2_iocom_uninit(hammer2_dev_t *hmp);
1279 void hammer2_cluster_reconnect(hammer2_dev_t *hmp, struct file *fp);
1280
1281 /*
1282  * hammer2_syncthr.c
1283  */
1284 void hammer2_syncthr_create(hammer2_syncthr_t *thr, hammer2_pfs_t *pmp,
1285                         void (*func)(void *arg));
1286 void hammer2_syncthr_delete(hammer2_syncthr_t *thr);
1287 void hammer2_syncthr_remaster(hammer2_syncthr_t *thr);
1288 void hammer2_syncthr_freeze(hammer2_syncthr_t *thr);
1289 void hammer2_syncthr_unfreeze(hammer2_syncthr_t *thr);
1290 void hammer2_syncthr_primary(void *arg);
1291
1292 #endif /* _KERNEL */
1293 #endif /* !_VFS_HAMMER2_HAMMER2_H_ */