hammer2 - Cleanup pass, remove unused fields and code
1 /*
2  * Copyright (c) 2011-2016 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@dragonflybsd.org>
6  * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  *
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in
16  *    the documentation and/or other materials provided with the
17  *    distribution.
18  * 3. Neither the name of The DragonFly Project nor the names of its
19  *    contributors may be used to endorse or promote products derived
20  *    from this software without specific, prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
25  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
26  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
27  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
28  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
30  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
31  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
32  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33  * SUCH DAMAGE.
34  */
35
36 /*
37  * HAMMER2 IN-MEMORY CACHE OF MEDIA STRUCTURES
38  *
39  * This header file contains structures used internally by the HAMMER2
40  * implementation.  See hammer2_disk.h for on-disk structures.
41  *
42  * There is an in-memory representation of all on-media data structures.
43  * Almost everything is represented by a hammer2_chain structure in-memory.
44  * Other higher-level structures typically map to chains.
45  *
46  * A great deal of data is accessed simply via its buffer cache buffer,
47  * which is mapped for the duration of the chain's lock.  Hammer2 must
48  * implement its own buffer cache layer on top of the system layer to
49  * allow for different threads to lock different sub-block-sized buffers.
50  *
51  * When modifications are made to a chain a new filesystem block must be
52  * allocated.  Multiple modifications do not typically allocate new blocks
53  * until the current block has been flushed.  Flushes do not block the
54  * front-end unless the front-end operation crosses the current inode being
55  * flushed.
56  *
57  * The in-memory representation may remain cached (for example in order to
58  * placemark clustering locks) even after the related data has been
59  * detached.
60  */
61
62 #ifndef _VFS_HAMMER2_HAMMER2_H_
63 #define _VFS_HAMMER2_HAMMER2_H_
64
65 #include <sys/param.h>
66 #include <sys/types.h>
67 #include <sys/kernel.h>
68 #include <sys/conf.h>
69 #include <sys/systm.h>
70 #include <sys/tree.h>
71 #include <sys/malloc.h>
72 #include <sys/mount.h>
73 #include <sys/vnode.h>
74 #include <sys/proc.h>
75 #include <sys/mountctl.h>
76 #include <sys/priv.h>
77 #include <sys/stat.h>
78 #include <sys/thread.h>
79 #include <sys/globaldata.h>
80 #include <sys/lockf.h>
81 #include <sys/buf.h>
82 #include <sys/queue.h>
83 #include <sys/limits.h>
84 #include <sys/dmsg.h>
85 #include <sys/mutex.h>
86 #include <sys/kern_syscall.h>
87
88 #include <sys/signal2.h>
89 #include <sys/buf2.h>
90 #include <sys/mutex2.h>
91 #include <sys/thread2.h>
92
93 #include "hammer2_xxhash.h"
94 #include "hammer2_disk.h"
95 #include "hammer2_mount.h"
96 #include "hammer2_ioctl.h"
97
98 struct hammer2_io;
99 struct hammer2_iocb;
100 struct hammer2_chain;
101 struct hammer2_cluster;
102 struct hammer2_inode;
103 struct hammer2_dev;
104 struct hammer2_pfs;
105 struct hammer2_span;
106 struct hammer2_state;
107 struct hammer2_msg;
108 struct hammer2_thread;
109 union hammer2_xop;
110
111 /*
112  * Mutex and lock shims.  Hammer2 requires support for asynchronous and
113  * abortable locks, and both exclusive and shared spinlocks.  Normal
114  * synchronous non-abortable locks can be substituted for spinlocks.
115  */
116 typedef mtx_t                           hammer2_mtx_t;
117 typedef mtx_link_t                      hammer2_mtx_link_t;
118 typedef mtx_state_t                     hammer2_mtx_state_t;
119
120 typedef struct spinlock                 hammer2_spin_t;
121
122 #define hammer2_mtx_ex                  mtx_lock_ex_quick
123 #define hammer2_mtx_sh                  mtx_lock_sh_quick
124 #define hammer2_mtx_sh_again            mtx_lock_sh_again
125 #define hammer2_mtx_unlock              mtx_unlock
126 #define hammer2_mtx_downgrade           mtx_downgrade
127 #define hammer2_mtx_owned               mtx_owned
128 #define hammer2_mtx_init                mtx_init
129 #define hammer2_mtx_temp_release        mtx_lock_temp_release
130 #define hammer2_mtx_temp_restore        mtx_lock_temp_restore
131 #define hammer2_mtx_refs                mtx_lockrefs
132
133 #define hammer2_spin_init               spin_init
134 #define hammer2_spin_sh                 spin_lock_shared
135 #define hammer2_spin_ex                 spin_lock
136 #define hammer2_spin_unsh               spin_unlock_shared
137 #define hammer2_spin_unex               spin_unlock
138
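/*
 * Editor's illustrative sketch (not part of the original header): the
 * shims above are used directly on the embedded locks, for example on a
 * chain (see struct hammer2_chain below):
 *
 *      hammer2_mtx_ex(&chain->lock);           exclusive chain lock
 *      ...
 *      hammer2_mtx_unlock(&chain->lock);
 *
 *      hammer2_spin_ex(&chain->core.spin);     protect sub-chain rbtree
 *      ...
 *      hammer2_spin_unex(&chain->core.spin);
 */
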
139 TAILQ_HEAD(hammer2_xop_list, hammer2_xop_head);
140 TAILQ_HEAD(hammer2_chain_list, hammer2_chain);
141
142 typedef struct hammer2_xop_list hammer2_xop_list_t;
143
144
145 /*
146  * General lock support
147  */
148 static __inline
149 int
150 hammer2_mtx_upgrade_try(hammer2_mtx_t *mtx)
151 {
152         return mtx_upgrade_try(mtx);
153 }
154
155 /*
156  * The xid tracks internal transactional updates.
157  *
158  * XXX fix-me, really needs to be 64-bits
159  */
160 typedef uint32_t hammer2_xid_t;
161
162 #define HAMMER2_XID_MIN                 0x00000000U
163 #define HAMMER2_XID_MAX                 0x7FFFFFFFU
164
165 #define HAMMER2_LIMIT_DIRTY_CHAINS      (65536)
166
167 /*
168  * The chain structure tracks a portion of the media topology from the
169  * root (volume) down.  Chains represent volumes, inodes, indirect blocks,
170  * data blocks, and freemap nodes and leafs.
171  *
172  * The chain structure utilizes a simple singly-homed topology and the
173  * chain's in-memory topology will move around as the chains do, due mainly
174  * to renames and indirect block creation.
175  *
176  * Block Table Updates
177  *
178  *      Block table updates for insertions and updates are delayed until the
179  *      flush.  This allows us to avoid having to modify the parent chain
180  *      all the way to the root.
181  *
182  *      Block table deletions are performed immediately (modifying the parent
183  *      in the process) because the flush code uses the chain structure to
184  *      track delayed updates and the chain will be (likely) gone or moved to
185  *      another location in the topology after a deletion.
186  *
187  *      A prior iteration of the code tried to keep the relationship intact
188  *      on deletes by doing a delete-duplicate operation on the chain, but
189  *      it added way too much complexity to the codebase.
190  *
191  * Flush Synchronization
192  *
193  *      The flush code must flush modified chains bottom-up.  Because chain
194  *      structures can shift around and are NOT topologically stable,
195  *      modified chains are independently indexed for the flush.  As the flush
196  *      runs it modifies (or further modifies) and updates the parents,
197  *      propagating the flush all the way to the volume root.
198  *
199  *      Modifying front-end operations can occur during a flush but will block
200  *      in two cases: (1) when the front-end tries to operate on the inode
201  *      currently in the midst of being flushed and (2) if the front-end
202  *      crosses an inode currently being flushed (such as during a rename).
203  *      So, for example, if you rename directory "x" to "a/b/c/d/e/f/g/x" and
204  *      the flusher is currently working on "a/b/c", the rename will block
205  *      temporarily in order to ensure that "x" exists in one place or the
206  *      other.
207  *
208  *      Meta-data statistics are updated by the flusher.  The front-end will
209  *      make estimates but meta-data must be fully synchronized only during a
210  *      flush in order to ensure that it remains correct across a crash.
211  *
212  *      Multiple flush synchronizations can theoretically be in-flight at the
213  *      same time but the implementation is not coded to handle the case and
214  *      currently serializes them.
215  *
216  * Snapshots:
217  *
218  *      Snapshots currently require the subdirectory tree being snapshotted
219  *      to be flushed.  The snapshot then creates a new super-root inode which
220  *      copies the flushed blockdata of the directory or file that was
221  *      snapshotted.
222  *
223  * RBTREE NOTES:
224  *
225  *      - Note that the radix tree runs in powers of 2 only so sub-trees
226  *        cannot straddle edges.
227  */
228 RB_HEAD(hammer2_chain_tree, hammer2_chain);
229 TAILQ_HEAD(h2_flush_list, hammer2_chain);
230 TAILQ_HEAD(h2_core_list, hammer2_chain);
231 TAILQ_HEAD(h2_iocb_list, hammer2_iocb);
232
233 #define CHAIN_CORE_DELETE_BMAP_ENTRIES  \
234         (HAMMER2_PBUFSIZE / sizeof(hammer2_blockref_t) / sizeof(uint32_t))
235
236 /*
237  * Core topology for chain (embedded in chain).  Protected by a spinlock.
238  */
239 struct hammer2_chain_core {
240         hammer2_spin_t  spin;
241         struct hammer2_chain_tree rbtree; /* sub-chains */
242         int             live_zero;      /* blockref array opt */
243         u_int           live_count;     /* live (not deleted) chains in tree */
244         u_int           chain_count;    /* live + deleted chains under core */
245         int             generation;     /* generation number (inserts only) */
246 };
247
248 typedef struct hammer2_chain_core hammer2_chain_core_t;
249
250 RB_HEAD(hammer2_io_tree, hammer2_io);
251
252 /*
253  * IOCB - IO callback (into chain, cluster, or manual request)
254  */
255 struct hammer2_iocb {
256         TAILQ_ENTRY(hammer2_iocb) entry;
257         void (*callback)(struct hammer2_iocb *iocb);
258         struct hammer2_io       *dio;
259         struct hammer2_chain    *chain;
260         void                    *ptr;
261         off_t                   lbase;
262         int                     lsize;
263         uint32_t                flags;
264         int                     error;
265         int                     btype;
266 };
267
268 typedef struct hammer2_iocb hammer2_iocb_t;
269
270 #define HAMMER2_IOCB_INTERLOCK  0x00000001
271 #define HAMMER2_IOCB_ONQ        0x00000002
272 #define HAMMER2_IOCB_DONE       0x00000004
273 #define HAMMER2_IOCB_INPROG     0x00000008
274 #define HAMMER2_IOCB_UNUSED10   0x00000010
275 #define HAMMER2_IOCB_QUICK      0x00010000
276 #define HAMMER2_IOCB_ZERO       0x00020000
277 #define HAMMER2_IOCB_READ       0x00040000
278 #define HAMMER2_IOCB_WAKEUP     0x00080000
279
280 /*
281  * DIO - Management structure wrapping system buffer cache.
282  *
283  *       Used for multiple purposes including concurrent management
284  *       of small requests by chains into larger DIOs.
285  */
286 struct hammer2_io {
287         RB_ENTRY(hammer2_io) rbnode;    /* indexed by device offset */
288         struct h2_iocb_list iocbq;
289         struct spinlock spin;
290         struct hammer2_dev *hmp;
291         struct buf      *bp;
292         off_t           pbase;
293         uint64_t        refs;
294         int             psize;
295         int             act;            /* activity */
296         int             btype;          /* approximate BREF_TYPE_* */
297         int             unused01;
298 };
299
300 typedef struct hammer2_io hammer2_io_t;
301
302 #define HAMMER2_DIO_INPROG      0x8000000000000000LLU   /* bio in progress */
303 #define HAMMER2_DIO_GOOD        0x4000000000000000LLU   /* dio->bp is stable */
304 #define HAMMER2_DIO_WAITING     0x2000000000000000LLU   /* wait on INPROG */
305 #define HAMMER2_DIO_DIRTY       0x1000000000000000LLU   /* flush last drop */
306 #define HAMMER2_DIO_INVALOK     0x0800000000000000LLU   /* ok to inval */
307 #define HAMMER2_DIO_INVAL       0x0400000000000000LLU   /* inval request */
308
309 #define HAMMER2_DIO_MASK        0x00FFFFFFFFFFFFFFLLU
310
311 #define HAMMER2_DIO_INVALBITS   (HAMMER2_DIO_INVAL | HAMMER2_DIO_INVALOK)
312
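/*
 * Editor's sketch (hypothetical helper, not part of the original API):
 * dio->refs packs the state flags above into the high bits and the
 * reference count into the low bits, so the raw count can be recovered
 * by masking.
 */
static __inline
uint64_t
hammer2_dio_refcnt(hammer2_io_t *dio)
{
        return (dio->refs & HAMMER2_DIO_MASK);
}
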
313 /*
314  * Primary chain structure keeps track of the topology in-memory.
315  */
316 struct hammer2_chain {
317         hammer2_mtx_t           lock;
318         hammer2_chain_core_t    core;
319         RB_ENTRY(hammer2_chain) rbnode;         /* live chain(s) */
320         hammer2_blockref_t      bref;
321         struct hammer2_chain    *parent;
322         struct hammer2_state    *state;         /* if active cache msg */
323         struct hammer2_dev      *hmp;
324         struct hammer2_pfs      *pmp;           /* A PFS or super-root (spmp) */
325
326         hammer2_io_t    *dio;                   /* physical data buffer */
327         u_int           bytes;                  /* physical data size */
328         u_int           flags;
329         u_int           refs;
330         u_int           lockcnt;
331         int             error;                  /* on-lock data error state */
332         int             persist_refs;           /* (aka ip->cluster) */
333
334         hammer2_media_data_t *data;             /* data pointer shortcut */
335         TAILQ_ENTRY(hammer2_chain) flush_node;  /* flush list */
336         TAILQ_ENTRY(hammer2_chain) lru_node;    /* 0-refs LRU */
337 };
338
339 typedef struct hammer2_chain hammer2_chain_t;
340
341 int hammer2_chain_cmp(hammer2_chain_t *chain1, hammer2_chain_t *chain2);
342 RB_PROTOTYPE(hammer2_chain_tree, hammer2_chain, rbnode, hammer2_chain_cmp);
343
344 /*
345  * Special notes on flags:
346  *
347  * INITIAL      - This flag allows a chain to be created and for storage to
348  *                be allocated without having to immediately instantiate the
349  *                related buffer.  The data is assumed to be all-zeros.  It
350  *                is primarily used for indirect blocks.
351  *
352  * MODIFIED     - The chain's media data has been modified.  Prevents chain
353  *                free on lastdrop if still in the topology.
354  *
355  * UPDATE       - Chain might not be modified but parent blocktable needs
356  *                an update.  Prevents chain free on lastdrop if still in
357  *                the topology.
358  *
359  * FICTITIOUS   - Faked chain as a placeholder for an error condition.  This
360  *                chain is unsuitable for I/O.
361  *
362  * BMAPPED      - Indicates that the chain is present in the parent blockmap.
363  *
364  * BMAPUPD      - Indicates that the chain is present but needs to be updated
365  *                in the parent blockmap.
366  */
367 #define HAMMER2_CHAIN_MODIFIED          0x00000001      /* dirty chain data */
368 #define HAMMER2_CHAIN_ALLOCATED         0x00000002      /* kmalloc'd chain */
369 #define HAMMER2_CHAIN_DESTROY           0x00000004
370 #define HAMMER2_CHAIN_DEDUP             0x00000008      /* recorded for dedup */
371 #define HAMMER2_CHAIN_DELETED           0x00000010      /* deleted chain */
372 #define HAMMER2_CHAIN_INITIAL           0x00000020      /* initial create */
373 #define HAMMER2_CHAIN_UPDATE            0x00000040      /* need parent update */
374 #define HAMMER2_CHAIN_DEFERRED          0x00000080      /* flush depth defer */
375 #define HAMMER2_CHAIN_TESTEDGOOD        0x00000100      /* crc tested good */
376 #define HAMMER2_CHAIN_ONFLUSH           0x00000200      /* on a flush list */
377 #define HAMMER2_CHAIN_FICTITIOUS        0x00000400      /* unsuitable for I/O */
378 #define HAMMER2_CHAIN_VOLUMESYNC        0x00000800      /* needs volume sync */
379 #define HAMMER2_CHAIN_DELAYED           0x00001000      /* delayed flush */
380 #define HAMMER2_CHAIN_COUNTEDBREFS      0x00002000      /* block table stats */
381 #define HAMMER2_CHAIN_ONRBTREE          0x00004000      /* on parent RB tree */
382 #define HAMMER2_CHAIN_ONLRU             0x00008000      /* on LRU list */
383 #define HAMMER2_CHAIN_EMBEDDED          0x00010000      /* embedded data */
384 #define HAMMER2_CHAIN_RELEASE           0x00020000      /* don't keep around */
385 #define HAMMER2_CHAIN_BMAPPED           0x00040000      /* present in blkmap */
386 #define HAMMER2_CHAIN_BMAPUPD           0x00080000      /* +needs updating */
387 #define HAMMER2_CHAIN_IOINPROG          0x00100000      /* I/O interlock */
388 #define HAMMER2_CHAIN_IOSIGNAL          0x00200000      /* I/O interlock */
389 #define HAMMER2_CHAIN_PFSBOUNDARY       0x00400000      /* super->pfs inode */
390
391 #define HAMMER2_CHAIN_FLUSH_MASK        (HAMMER2_CHAIN_MODIFIED |       \
392                                          HAMMER2_CHAIN_UPDATE |         \
393                                          HAMMER2_CHAIN_ONFLUSH |        \
394                                          HAMMER2_CHAIN_DESTROY)
395
396 /*
397  * Hammer2 error codes, used by chain->error and cluster->error.  The error
398  * code is typically set on-lock unless no I/O was requested, and set on
399  * I/O otherwise.  If set for a cluster it generally means that the cluster
400  * code could not find a valid copy to present.
401  *
402  * IO           - An I/O error occurred
403  * CHECK        - I/O succeeded but did not match the check code
404  * INCOMPLETE   - A cluster is not complete enough to use, or
405  *                a chain cannot be loaded because its parent has an error.
406  *
407  * NOTE: API allows callers to check zero/non-zero to determine if an error
408  *       condition exists.
409  *
410  * NOTE: Chain's data field is usually NULL on an IO error but not necessarily
411  *       NULL on other errors.  Check chain->error, not chain->data.
412  */
413 #define HAMMER2_ERROR_NONE              0
414 #define HAMMER2_ERROR_IO                1       /* device I/O error */
415 #define HAMMER2_ERROR_CHECK             2       /* check code mismatch */
416 #define HAMMER2_ERROR_INCOMPLETE        3       /* incomplete cluster */
417 #define HAMMER2_ERROR_DEPTH             4       /* temporary depth limit */
418
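/*
 * Editor's illustrative sketch: per the NOTE above, callers only test the
 * error field for zero/non-zero after locking and must not assume that
 * chain->data is valid when it is non-zero:
 *
 *      if (chain->error) {
 *              (handle or propagate the error, do not touch chain->data)
 *      }
 */
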
419 /*
420  * Flags passed to hammer2_chain_lookup() and hammer2_chain_next()
421  *
422  * NOTES:
423  *      NOLOCK      - Input and output chains are referenced only and not
424  *                    locked.  Output chain might be temporarily locked
425  *                    internally.
426  *
427  *      NODATA      - Asks that the chain->data not be resolved in order
428  *                    to avoid I/O.
429  *
430  *      NODIRECT    - Prevents a lookup of offset 0 in an inode from returning
431  *                    the inode itself if the inode is in DIRECTDATA mode
432  *                    (i.e. file is <= 512 bytes).  Used by the synchronization
433  *                    code to prevent confusion.
434  *
435  *      SHARED      - The input chain is expected to be locked shared,
436  *                    and the output chain is locked shared.
437  *
438  *      MATCHIND    - Allows an indirect block / freemap node to be returned
439  *                    when the passed key range matches the radix.  Remember
440  *                    that key_end is inclusive (e.g. {0x000,0xFFF},
441  *                    not {0x000,0x1000}).
442  *
443  *                    (Cannot be used for remote or cluster ops).
444  *
445  *      ALLNODES    - Allows NULL focus.
446  *
447  *      ALWAYS      - Always resolve the data.  If ALWAYS and NODATA are both
448  *                    missing, bulk file data is not resolved but inodes and
449  *                    other meta-data will be.
450  *
451  *      NOUNLOCK    - Used by hammer2_chain_next() to leave the lock on
452  *                    the input chain intact.  The chain is still dropped.
453  *                    This allows the caller to add a reference to the chain
454  *                    and retain it in a locked state (used by the
455  *                    XOP/feed/collect code).
456  */
457 #define HAMMER2_LOOKUP_NOLOCK           0x00000001      /* ref only */
458 #define HAMMER2_LOOKUP_NODATA           0x00000002      /* data left NULL */
459 #define HAMMER2_LOOKUP_NODIRECT         0x00000004      /* no offset=0 DD */
460 #define HAMMER2_LOOKUP_SHARED           0x00000100
461 #define HAMMER2_LOOKUP_MATCHIND         0x00000200      /* return all chains */
462 #define HAMMER2_LOOKUP_ALLNODES         0x00000400      /* allow NULL focus */
463 #define HAMMER2_LOOKUP_ALWAYS           0x00000800      /* resolve data */
464 #define HAMMER2_LOOKUP_NOUNLOCK         0x00001000      /* leave lock intact */
465
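/*
 * Editor's illustrative sketch of the iteration pattern these flags are
 * passed to (argument lists approximate; see the prototypes in
 * hammer2_chain.c for the authoritative signatures):
 *
 *      chain = hammer2_chain_lookup(&parent, &key_next, key_beg, key_end,
 *                                   &cache_index, HAMMER2_LOOKUP_SHARED);
 *      while (chain) {
 *              (process chain)
 *              chain = hammer2_chain_next(&parent, chain, &key_next,
 *                                         key_next, key_end,
 *                                         &cache_index,
 *                                         HAMMER2_LOOKUP_SHARED);
 *      }
 */
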
466 /*
467  * Flags passed to hammer2_chain_modify() and hammer2_chain_resize()
468  *
469  * NOTE: OPTDATA allows us to avoid instantiating buffers for INDIRECT
470  *       blocks in the INITIAL-create state.
471  */
472 #define HAMMER2_MODIFY_OPTDATA          0x00000002      /* data can be NULL */
473 #define HAMMER2_MODIFY_NO_MODIFY_TID    0x00000004
474 #define HAMMER2_MODIFY_UNUSED0008       0x00000008
475
476 /*
477  * Flags passed to hammer2_chain_lock()
478  *
479  * NOTE: RDONLY is set to optimize cluster operations when *no* modifications
480  *       will be made to either the cluster being locked or any underlying
481  *       cluster.  It allows the cluster to lock and access data for a subset
482  *       of available nodes instead of all available nodes.
483  */
484 #define HAMMER2_RESOLVE_NEVER           1
485 #define HAMMER2_RESOLVE_MAYBE           2
486 #define HAMMER2_RESOLVE_ALWAYS          3
487 #define HAMMER2_RESOLVE_MASK            0x0F
488
489 #define HAMMER2_RESOLVE_SHARED          0x10    /* request shared lock */
490 #define HAMMER2_RESOLVE_LOCKAGAIN       0x20    /* another shared lock */
491 #define HAMMER2_RESOLVE_RDONLY          0x40    /* higher level op flag */
492
493 /*
494  * Flags passed to hammer2_chain_delete()
495  */
496 #define HAMMER2_DELETE_PERMANENT        0x0001
497
498 /*
499  * Flags passed to hammer2_chain_insert() or hammer2_chain_rename()
500  */
501 #define HAMMER2_INSERT_PFSROOT          0x0004
502
503 /*
504  * Flags passed to hammer2_chain_delete_duplicate()
505  */
506 #define HAMMER2_DELDUP_RECORE           0x0001
507
508 /*
509  * Cluster different types of storage together for allocations
510  */
511 #define HAMMER2_FREECACHE_INODE         0
512 #define HAMMER2_FREECACHE_INDIR         1
513 #define HAMMER2_FREECACHE_DATA          2
514 #define HAMMER2_FREECACHE_UNUSED3       3
515 #define HAMMER2_FREECACHE_TYPES         4
516
517 /*
518  * hammer2_freemap_alloc() block preference
519  */
520 #define HAMMER2_OFF_NOPREF              ((hammer2_off_t)-1)
521
522 /*
523  * BMAP read-ahead maximum parameters
524  */
525 #define HAMMER2_BMAP_COUNT              16      /* max bmap read-ahead */
526 #define HAMMER2_BMAP_BYTES              (HAMMER2_PBUFSIZE * HAMMER2_BMAP_COUNT)
527
528 /*
529  * hammer2_freemap_adjust()
530  */
531 #define HAMMER2_FREEMAP_DORECOVER       1
532 #define HAMMER2_FREEMAP_DOMAYFREE       2
533 #define HAMMER2_FREEMAP_DOREALFREE      3
534
535 /*
536  * HAMMER2 cluster - A set of chains representing the same entity.
537  *
538  * hammer2_cluster typically represents a temporary set of representative
539  * chains.  The one exception is that a hammer2_cluster is embedded in
540  * hammer2_inode.  This embedded cluster is ONLY used to track the
541  * representative chains and cannot be directly locked.
542  *
543  * A cluster is usually temporary (and thus per-thread) for locking purposes,
544  * allowing us to embed the asynchronous storage required for cluster
545  * operations in the cluster itself and adjust the state and status without
546  * having to worry too much about SMP issues.
547  *
548  * The exception is the cluster embedded in the hammer2_inode structure.
549  * This is used to cache the cluster state on an inode-by-inode basis.
550  * Individual hammer2_chain structures not incorporated into clusters might
551  * also stick around to cache miscellaneous elements.
552  *
553  * Because the cluster is a 'working copy' and is usually subject to cluster
554  * quorum rules, it is quite possible for us to end up with an insufficient
555  * number of live chains to execute an operation.  If an insufficient number
556  * of chains remain in a working copy, the operation may have to be
557  * downgraded, retried, stalled until the requisite number of chains is
558  * available, or may even error out, depending on the mount type.
559  *
560  * A cluster's focus is set when it is locked.  The focus can only be set
561  * to a chain still part of the synchronized set.
562  */
563 #define HAMMER2_MAXCLUSTER      8
564 #define HAMMER2_XOPMASK_CLUSTER ((1U << HAMMER2_MAXCLUSTER) - 1)
565 #define HAMMER2_XOPFIFO         16
566 #define HAMMER2_XOPFIFO_MASK    (HAMMER2_XOPFIFO - 1)
567 #define HAMMER2_XOPGROUPS       32
568 #define HAMMER2_XOPGROUPS_MASK  (HAMMER2_XOPGROUPS - 1)
569 #define HAMMER2_XOPMASK_VOP     0x80000000U
570 #define HAMMER2_XOPMASK_FIFOW   0x40000000U
571
572 #define HAMMER2_XOPMASK_ALLDONE (HAMMER2_XOPMASK_VOP | HAMMER2_XOPMASK_CLUSTER)
573
574 #define HAMMER2_SPECTHREADS     1       /* sync */
575
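/*
 * Editor's note (illustrative, derived from the masks above): an XOP's
 * run_mask carries one bit per cluster slot (HAMMER2_XOPMASK_CLUSTER)
 * plus HAMMER2_XOPMASK_VOP for the frontend, so the operation cannot be
 * fully retired until
 *
 *      (xop->head.run_mask & HAMMER2_XOPMASK_ALLDONE) == 0
 */
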
576 struct hammer2_cluster_item {
577         hammer2_chain_t         *chain;
578         int                     cache_index;
579         int                     error;
580         uint32_t                flags;
581 };
582
583 typedef struct hammer2_cluster_item hammer2_cluster_item_t;
584
585 /*
586  * INVALID      - Invalid for focus, i.e. not part of synchronized set.
587  *                Once set, this bit is sticky across operations.
588  *
589  * FEMOD        - Indicates that front-end modifying operations can
590  *                mess with this entry and that a MODSYNC copy will
591  *                also affect it.
592  */
593 #define HAMMER2_CITEM_INVALID   0x00000001
594 #define HAMMER2_CITEM_FEMOD     0x00000002
595 #define HAMMER2_CITEM_NULL      0x00000004
596
597 struct hammer2_cluster {
598         int                     refs;           /* track for deallocation */
599         int                     ddflag;
600         struct hammer2_pfs      *pmp;
601         uint32_t                flags;
602         int                     nchains;
603         int                     error;          /* error code valid on lock */
604         int                     focus_index;
605         hammer2_iocb_t          iocb;
606         hammer2_chain_t         *focus;         /* current focus (or mod) */
607         hammer2_cluster_item_t  array[HAMMER2_MAXCLUSTER];
608 };
609
610 typedef struct hammer2_cluster  hammer2_cluster_t;
611
612 /*
613  * WRHARD       - Hard mounts can write fully synchronized
614  * RDHARD       - Hard mounts can read fully synchronized
615  * UNHARD       - Unsynchronized masters present
616  * NOHARD       - No masters visible
617  * WRSOFT       - Soft mounts can write to at least the SOFT_MASTER
618  * RDSOFT       - Soft mounts can read from at least a SOFT_SLAVE
619  * UNSOFT       - Unsynchronized slaves present
620  * NOSOFT       - No slaves visible
621  * RDSLAVE      - slaves are accessible (possibly unsynchronized or remote).
622  * MSYNCED      - All masters are fully synchronized
623  * SSYNCED      - All known local slaves are fully synchronized to masters
624  *
625  * All available masters are always incorporated.  All PFSs belonging to a
626  * cluster (master, slave, copy, whatever) always try to synchronize the
627  * total number of known masters in the PFSs root inode.
628  *
629  * A cluster might have access to many slaves, copies, or caches, but we
630  * have a limited number of cluster slots.  Any such elements which are
631  * directly mounted from block device(s) will always be incorporated.   Note
632  * that SSYNCED only applies to such elements which are directly mounted,
633  * not to any remote slaves, copies, or caches that could be available.  These
634  * bits are used to monitor and drive our synchronization threads.
635  *
636  * When asking the question 'is any data accessible at all', a simple
637  * test against (RDHARD|RDSOFT|RDSLAVE) gives you the answer.  If any of
638  * these bits are set the object can be read with certain caveats:
639  * RDHARD - no caveats; RDSOFT - authoritative but might not be
640  * synchronized; and RDSLAVE - not authoritative, has some data but it
641  * could be old or incomplete.
642  *
643  * When both soft and hard mounts are available, data will be read and written
644  * via the soft mount only.  But all might be in the cluster because
645  * background synchronization threads still need to do their work.
646  */
647 #define HAMMER2_CLUSTER_INODE   0x00000001      /* embedded in inode struct */
648 #define HAMMER2_CLUSTER_UNUSED2 0x00000002
649 #define HAMMER2_CLUSTER_LOCKED  0x00000004      /* cluster lks not recursive */
650 #define HAMMER2_CLUSTER_WRHARD  0x00000100      /* hard-mount can write */
651 #define HAMMER2_CLUSTER_RDHARD  0x00000200      /* hard-mount can read */
652 #define HAMMER2_CLUSTER_UNHARD  0x00000400      /* unsynchronized masters */
653 #define HAMMER2_CLUSTER_NOHARD  0x00000800      /* no masters visible */
654 #define HAMMER2_CLUSTER_WRSOFT  0x00001000      /* soft-mount can write */
655 #define HAMMER2_CLUSTER_RDSOFT  0x00002000      /* soft-mount can read */
656 #define HAMMER2_CLUSTER_UNSOFT  0x00004000      /* unsynchronized slaves */
657 #define HAMMER2_CLUSTER_NOSOFT  0x00008000      /* no slaves visible */
658 #define HAMMER2_CLUSTER_MSYNCED 0x00010000      /* all masters synchronized */
659 #define HAMMER2_CLUSTER_SSYNCED 0x00020000      /* known slaves synchronized */
660
661 #define HAMMER2_CLUSTER_ANYDATA ( HAMMER2_CLUSTER_RDHARD |      \
662                                   HAMMER2_CLUSTER_RDSOFT |      \
663                                   HAMMER2_CLUSTER_RDSLAVE)
664
665 #define HAMMER2_CLUSTER_RDOK    ( HAMMER2_CLUSTER_RDHARD |      \
666                                   HAMMER2_CLUSTER_RDSOFT)
667
668 #define HAMMER2_CLUSTER_WROK    ( HAMMER2_CLUSTER_WRHARD |      \
669                                   HAMMER2_CLUSTER_WRSOFT)
670
671 #define HAMMER2_CLUSTER_ZFLAGS  ( HAMMER2_CLUSTER_WRHARD |      \
672                                   HAMMER2_CLUSTER_RDHARD |      \
673                                   HAMMER2_CLUSTER_WRSOFT |      \
674                                   HAMMER2_CLUSTER_RDSOFT |      \
675                                   HAMMER2_CLUSTER_MSYNCED |     \
676                                   HAMMER2_CLUSTER_SSYNCED)
677
678 /*
679  * Helper functions (cluster must be locked for flags to be valid).
680  */
681 static __inline
682 int
683 hammer2_cluster_rdok(hammer2_cluster_t *cluster)
684 {
685         return (cluster->flags & HAMMER2_CLUSTER_RDOK);
686 }
687
688 static __inline
689 int
690 hammer2_cluster_wrok(hammer2_cluster_t *cluster)
691 {
692         return (cluster->flags & HAMMER2_CLUSTER_WROK);
693 }
694
695 RB_HEAD(hammer2_inode_tree, hammer2_inode);
696
697 /*
698  * A hammer2 inode.
699  *
700  * NOTE: The inode-embedded cluster is never used directly for I/O (since
701  *       it may be shared).  Instead it will be replicated-in and synchronized
702  *       back out if changed.
703  */
704 struct hammer2_inode {
705         RB_ENTRY(hammer2_inode) rbnode;         /* inumber lookup (HL) */
706         hammer2_mtx_t           lock;           /* inode lock */
707         hammer2_mtx_t           truncate_lock;  /* prevent truncates */
708         struct hammer2_pfs      *pmp;           /* PFS mount */
709         struct vnode            *vp;
710         struct spinlock         cluster_spin;   /* update cluster */
711         hammer2_cluster_t       cluster;
712         struct lockf            advlock;
713         u_int                   flags;
714         u_int                   refs;           /* +vpref, +flushref */
715         uint8_t                 comp_heuristic;
716         hammer2_inode_meta_t    meta;           /* copy of meta-data */
717         hammer2_off_t           osize;
718 };
719
720 typedef struct hammer2_inode hammer2_inode_t;
721
722 /*
723  * MODIFIED     - Inode is in a modified state, ip->meta may have changes.
724  * RESIZED      - Inode truncated (any) or inode extended beyond
725  *                EMBEDDED_BYTES.
726  */
727 #define HAMMER2_INODE_MODIFIED          0x0001
728 #define HAMMER2_INODE_SROOT             0x0002  /* kmalloc special case */
729 #define HAMMER2_INODE_RENAME_INPROG     0x0004
730 #define HAMMER2_INODE_ONRBTREE          0x0008
731 #define HAMMER2_INODE_RESIZED           0x0010  /* requires inode_fsync */
732 #define HAMMER2_INODE_ISDELETED         0x0020  /* deleted */
733 #define HAMMER2_INODE_ISUNLINKED        0x0040
734 #define HAMMER2_INODE_METAGOOD          0x0080  /* inode meta-data good */
735 #define HAMMER2_INODE_ONSIDEQ           0x0100  /* on side processing queue */
736
737 int hammer2_inode_cmp(hammer2_inode_t *ip1, hammer2_inode_t *ip2);
738 RB_PROTOTYPE2(hammer2_inode_tree, hammer2_inode, rbnode, hammer2_inode_cmp,
739                 hammer2_tid_t);
740
741 /*
742  * inode-unlink side-structure
743  */
744 struct hammer2_inode_sideq {
745         TAILQ_ENTRY(hammer2_inode_sideq) entry;
746         hammer2_inode_t *ip;
747 };
748 TAILQ_HEAD(h2_sideq_list, hammer2_inode_sideq);
749
750 typedef struct hammer2_inode_sideq hammer2_inode_sideq_t;
751
752 /*
753  * Transaction management sub-structure under hammer2_pfs
754  */
755 struct hammer2_trans {
756         uint32_t                flags;
757         uint32_t                sync_wait;
758 };
759
760 typedef struct hammer2_trans hammer2_trans_t;
761
762 #define HAMMER2_TRANS_ISFLUSH           0x80000000      /* flush code */
763 #define HAMMER2_TRANS_BUFCACHE          0x40000000      /* bio strategy */
764 #define HAMMER2_TRANS_UNUSED20          0x20000000
765 #define HAMMER2_TRANS_FPENDING          0x10000000      /* flush pending */
766 #define HAMMER2_TRANS_WAITING           0x08000000      /* someone waiting */
767 #define HAMMER2_TRANS_MASK              0x00FFFFFF      /* count mask */
768
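/*
 * Editor's sketch (hypothetical helper, not part of the original API):
 * the low bits of trans.flags count active transactions while the high
 * bits carry flush/wait state.
 */
static __inline
uint32_t
hammer2_trans_count(hammer2_trans_t *trans)
{
        return (trans->flags & HAMMER2_TRANS_MASK);
}
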
769 #define HAMMER2_FREEMAP_HEUR_NRADIX     4       /* pwr 2 PBUFRADIX-MINIORADIX */
770 #define HAMMER2_FREEMAP_HEUR_TYPES      8
771 #define HAMMER2_FREEMAP_HEUR_SIZE       (HAMMER2_FREEMAP_HEUR_NRADIX * \
772                                          HAMMER2_FREEMAP_HEUR_TYPES)
773
774 #define HAMMER2_DEDUP_HEUR_SIZE         65536
775 #define HAMMER2_DEDUP_HEUR_MASK         (HAMMER2_DEDUP_HEUR_SIZE - 1)
776
777 #define HAMMER2_FLUSH_TOP               0x0001
778 #define HAMMER2_FLUSH_ALL               0x0002
779
780
781 /*
782  * Hammer2 support thread element.
783  *
784  * Potentially many support threads can hang off of hammer2, primarily
785  * off the hammer2_pfs structure.  Typically:
786  *
787  * td x Nodes                   A synchronization thread for each node.
788  * td x Nodes x workers         Worker threads for frontend operations.
789  * td x 1                       Bioq thread for logical buffer writes.
790  *
791  * In addition, the synchronization thread(s) associated with the
792  * super-root PFS (spmp) for a node is responsible for automatic bulkfree
793  * and dedup scans.
794  */
795 struct hammer2_thread {
796         struct hammer2_pfs *pmp;
797         hammer2_xop_list_t xopq;
798         thread_t        td;
799         uint32_t        flags;
800         int             depth;
801         int             clindex;        /* cluster element index */
802         int             repidx;
803         char            *scratch;       /* MAXPHYS */
804 };
805
806 typedef struct hammer2_thread hammer2_thread_t;
807
808 #define HAMMER2_THREAD_UNMOUNTING       0x0001  /* unmount request */
809 #define HAMMER2_THREAD_DEV              0x0002  /* related to dev, not pfs */
810 #define HAMMER2_THREAD_WAITING          0x0004  /* thread in idle tsleep */
811 #define HAMMER2_THREAD_REMASTER         0x0008  /* remaster request */
812 #define HAMMER2_THREAD_STOP             0x0010  /* exit request */
813 #define HAMMER2_THREAD_FREEZE           0x0020  /* force idle */
814 #define HAMMER2_THREAD_FROZEN           0x0040  /* thread is frozen */
815 #define HAMMER2_THREAD_XOPQ             0x0080  /* work pending */
816 #define HAMMER2_THREAD_STOPPED          0x0100  /* thread has stopped */
817 #define HAMMER2_THREAD_UNFREEZE         0x0200
818 #define HAMMER2_THREAD_CLIENTWAIT       0x0400
819
820 #define HAMMER2_THREAD_WAKEUP_MASK      (HAMMER2_THREAD_UNMOUNTING |    \
821                                          HAMMER2_THREAD_REMASTER |      \
822                                          HAMMER2_THREAD_STOP |          \
823                                          HAMMER2_THREAD_FREEZE |        \
824                                          HAMMER2_THREAD_XOPQ)
825
826 /*
827  * Support structure for dedup heuristic.
828  */
829 struct hammer2_dedup {
830         hammer2_off_t   data_off;
831         uint64_t        data_crc;
832         uint32_t        ticks;
833         uint32_t        unused03;
834 };
835
836 typedef struct hammer2_dedup hammer2_dedup_t;
837
838 /*
839  * hammer2_xop - container for VOP/XOP operation (allocated, not on stack).
840  *
841  * This structure is used to distribute a VOP operation across multiple
842  * nodes.  It provides a rendezvous for concurrent node execution and
843  * can be detached from the frontend operation to allow the frontend to
844  * return early.
845  *
846  * This structure also sequences operations on up to three inodes.
847  */
848 typedef void (*hammer2_xop_func_t)(hammer2_thread_t *thr,
849                                    union hammer2_xop *xop);
850
851 struct hammer2_xop_fifo {
852         TAILQ_ENTRY(hammer2_xop_head) entry;
853         hammer2_chain_t         *array[HAMMER2_XOPFIFO];
854         int                     errors[HAMMER2_XOPFIFO];
855         int                     ri;
856         int                     wi;
857         int                     flags;
858         hammer2_thread_t        *thr;
859 };
860
861 typedef struct hammer2_xop_fifo hammer2_xop_fifo_t;
862
863 #define HAMMER2_XOP_FIFO_RUN    0x0001
864 #define HAMMER2_XOP_FIFO_STALL  0x0002
865
866 struct hammer2_xop_head {
867         hammer2_xop_func_t      func;
868         hammer2_tid_t           mtid;
869         struct hammer2_inode    *ip1;
870         struct hammer2_inode    *ip2;
871         struct hammer2_inode    *ip3;
872         uint32_t                check_counter;
873         uint32_t                run_mask;
874         uint32_t                chk_mask;
875         int                     flags;
876         int                     state;
877         int                     error;
878         hammer2_key_t           collect_key;
879         char                    *name1;
880         size_t                  name1_len;
881         char                    *name2;
882         size_t                  name2_len;
883         hammer2_xop_fifo_t      collect[HAMMER2_MAXCLUSTER];
884         hammer2_cluster_t       cluster;        /* help collections */
885 };
886
887 typedef struct hammer2_xop_head hammer2_xop_head_t;
888
889 #define HAMMER2_XOP_CHKWAIT     0x00000001U
890 #define HAMMER2_XOP_CHKINC      0x00000002U
891
892 struct hammer2_xop_ipcluster {
893         hammer2_xop_head_t      head;
894 };
895
896 struct hammer2_xop_strategy {
897         hammer2_xop_head_t      head;
898         hammer2_key_t           lbase;
899         int                     finished;
900         hammer2_mtx_t           lock;
901         struct bio              *bio;
902 };
903
904 struct hammer2_xop_readdir {
905         hammer2_xop_head_t      head;
906         hammer2_key_t           lkey;
907 };
908
909 struct hammer2_xop_nresolve {
910         hammer2_xop_head_t      head;
911         hammer2_key_t           lhc;    /* if name is NULL, use lhc */
912 };
913
914 struct hammer2_xop_unlink {
915         hammer2_xop_head_t      head;
916         int                     isdir;
917         int                     dopermanent;
918 };
919
920 struct hammer2_xop_nrename {
921         hammer2_xop_head_t      head;
922         hammer2_tid_t           lhc;
923         int                     ip_key;
924 };
925
926 struct hammer2_xop_scanlhc {
927         hammer2_xop_head_t      head;
928         hammer2_key_t           lhc;
929 };
930
931 struct hammer2_xop_scanall {
932         hammer2_xop_head_t      head;
933         hammer2_key_t           key_beg;        /* inclusive */
934         hammer2_key_t           key_end;        /* inclusive */
935         int                     resolve_flags;
936         int                     lookup_flags;
937 };
938
939 struct hammer2_xop_lookup {
940         hammer2_xop_head_t      head;
941         hammer2_key_t           lhc;
942 };
943
944 struct hammer2_xop_create {
945         hammer2_xop_head_t      head;
946         hammer2_inode_meta_t    meta;           /* initial metadata */
947         hammer2_key_t           lhc;
948         int                     flags;
949 };
950
951 struct hammer2_xop_destroy {
952         hammer2_xop_head_t      head;
953 };
954
955 struct hammer2_xop_fsync {
956         hammer2_xop_head_t      head;
957         hammer2_inode_meta_t    meta;
958         hammer2_off_t           osize;
959         u_int                   ipflags;
960         int                     clear_directdata;
961 };
962
963 struct hammer2_xop_unlinkall {
964         hammer2_xop_head_t      head;
965         hammer2_key_t           key_beg;
966         hammer2_key_t           key_end;
967 };
968
969 struct hammer2_xop_connect {
970         hammer2_xop_head_t      head;
971         hammer2_key_t           lhc;
972 };
973
974 struct hammer2_xop_flush {
975         hammer2_xop_head_t      head;
976 };
977
978 typedef struct hammer2_xop_readdir hammer2_xop_readdir_t;
979 typedef struct hammer2_xop_nresolve hammer2_xop_nresolve_t;
980 typedef struct hammer2_xop_unlink hammer2_xop_unlink_t;
981 typedef struct hammer2_xop_nrename hammer2_xop_nrename_t;
982 typedef struct hammer2_xop_ipcluster hammer2_xop_ipcluster_t;
983 typedef struct hammer2_xop_strategy hammer2_xop_strategy_t;
984 typedef struct hammer2_xop_create hammer2_xop_create_t;
985 typedef struct hammer2_xop_destroy hammer2_xop_destroy_t;
986 typedef struct hammer2_xop_fsync hammer2_xop_fsync_t;
987 typedef struct hammer2_xop_unlinkall hammer2_xop_unlinkall_t;
988 typedef struct hammer2_xop_scanlhc hammer2_xop_scanlhc_t;
989 typedef struct hammer2_xop_scanall hammer2_xop_scanall_t;
990 typedef struct hammer2_xop_lookup hammer2_xop_lookup_t;
991 typedef struct hammer2_xop_connect hammer2_xop_connect_t;
992 typedef struct hammer2_xop_flush hammer2_xop_flush_t;
993
994 union hammer2_xop {
995         hammer2_xop_head_t      head;
996         hammer2_xop_ipcluster_t xop_ipcluster;
997         hammer2_xop_readdir_t   xop_readdir;
998         hammer2_xop_nresolve_t  xop_nresolve;
999         hammer2_xop_unlink_t    xop_unlink;
1000         hammer2_xop_nrename_t   xop_nrename;
1001         hammer2_xop_strategy_t  xop_strategy;
1002         hammer2_xop_create_t    xop_create;
1003         hammer2_xop_destroy_t   xop_destroy;
1004         hammer2_xop_fsync_t     xop_fsync;
1005         hammer2_xop_unlinkall_t xop_unlinkall;
1006         hammer2_xop_scanlhc_t   xop_scanlhc;
1007         hammer2_xop_scanall_t   xop_scanall;
1008         hammer2_xop_lookup_t    xop_lookup;
1009         hammer2_xop_flush_t     xop_flush;
1010         hammer2_xop_connect_t   xop_connect;
1011 };
1012
1013 typedef union hammer2_xop hammer2_xop_t;
1014
1015 /*
1016  * hammer2_xop_group - Manage XOP support threads.
1017  */
1018 struct hammer2_xop_group {
1019         hammer2_thread_t        thrs[HAMMER2_MAXCLUSTER];
1020 };
1021
1022 typedef struct hammer2_xop_group hammer2_xop_group_t;
1023
1024 /*
1025  * flags to hammer2_xop_collect()
1026  */
1027 #define HAMMER2_XOP_COLLECT_NOWAIT      0x00000001
1028 #define HAMMER2_XOP_COLLECT_WAITALL     0x00000002
1029
1030 /*
1031  * flags to hammer2_xop_alloc()
1032  *
1033  * MODIFYING    - This is a modifying transaction, allocate a mtid.
1034  */
1035 #define HAMMER2_XOP_MODIFYING           0x00000001
1036 #define HAMMER2_XOP_STRATEGY            0x00000002
1037
1038 /*
1039  * Global (per partition) management structure, represents a hard block
1040  * device.  Typically referenced by hammer2_chain structures when applicable.
1041  * Typically not used for network-managed elements.
1042  *
1043  * Note that a single hammer2_dev can be indirectly tied to multiple system
1044  * mount points.  There is no direct relationship.  System mounts are
1045  * per-cluster-id, not per-block-device, and a single hard mount might contain
1046  * many PFSs and those PFSs might combine together in various ways to form
1047  * the set of available clusters.
1048  */
1049 struct hammer2_dev {
1050         struct vnode    *devvp;         /* device vnode */
1051         int             ronly;          /* read-only mount */
1052         int             mount_count;    /* number of actively mounted PFSs */
1053         TAILQ_ENTRY(hammer2_dev) mntentry; /* hammer2_mntlist */
1054
1055         struct malloc_type *mchain;
1056         int             nipstacks;
1057         int             maxipstacks;
1058         kdmsg_iocom_t   iocom;          /* volume-level dmsg interface */
1059         struct spinlock io_spin;        /* iotree access */
1060         struct hammer2_io_tree iotree;
1061         int             iofree_count;
1062         hammer2_chain_t vchain;         /* anchor chain (topology) */
1063         hammer2_chain_t fchain;         /* anchor chain (freemap) */
1064         struct spinlock list_spin;
1065         struct h2_flush_list    flushq; /* flush seeds */
1066         struct hammer2_pfs *spmp;       /* super-root pmp for transactions */
1067         struct lock     bulklk;         /* bulkfree lock */
1068         struct lock     vollk;          /* lockmgr lock */
1069         hammer2_off_t   heur_freemap[HAMMER2_FREEMAP_HEUR_SIZE];
1070         hammer2_dedup_t heur_dedup[HAMMER2_DEDUP_HEUR_SIZE];
1071         int             volhdrno;       /* last volhdrno written */
1072         int             hflags;         /* HMNT2 flags applicable to device */
1073         char            devrepname[64]; /* for kprintf */
1074         hammer2_volume_data_t voldata;
1075         hammer2_volume_data_t volsync;  /* synchronized voldata */
1076 };
1077
1078 typedef struct hammer2_dev hammer2_dev_t;
1079
1080 /*
1081  * Helper functions (cluster must be locked for flags to be valid).
1082  */
1083 static __inline
1084 int
1085 hammer2_chain_rdok(hammer2_chain_t *chain)
1086 {
1087         return (chain->error == 0);
1088 }
1089
1090 static __inline
1091 int
1092 hammer2_chain_wrok(hammer2_chain_t *chain)
1093 {
1094         return (chain->error == 0 && chain->hmp->ronly == 0);
1095 }
1096
1097 /*
1098  * Per-cluster management structure.  This structure will be tied to a
1099  * system mount point if the system is mounting the PFS, but is also used
1100  * to manage clusters encountered during the super-root scan or received
1101  * via LNK_SPANs that might not be mounted.
1102  *
1103  * This structure is also used to represent the super-root that hangs off
1104  * of a hard mount point.  The super-root is not really a cluster element.
1105  * In this case the spmp_hmp field will be non-NULL.  It's just easier to do
1106  * this than to special case super-root manipulation in the hammer2_chain*
1107  * code as being only hammer2_dev-related.
1108  *
1109  * pfs_mode and pfs_nmasters are rollup fields which critically describe
1110  * how elements of the cluster act on the cluster.  pfs_mode is only applicable
1111  * when a PFS is mounted by the system.  pfs_nmasters is our best guess as to
1112  * how many masters have been configured for a cluster and is always
1113  * applicable.  pfs_types[] is an array with 1:1 correspondence to the
1114  * iroot cluster and describes the PFS types of the nodes making up the
1115  * cluster.
1116  *
1117  * WARNING! Portions of this structure have deferred initialization.  In
1118  *          particular, if not mounted there will be no wthread.
1119  *          Unmounted network PFSs will also be missing iroot and numerous
1120  *          other fields will not be initialized prior to mount.
1121  *
1122  *          Synchronization threads are chain-specific and only applicable
1123  *          to local hard PFS entries.  A hammer2_pfs structure may contain
1124  *          more than one when multiple hard PFSs are present on the local
1125  *          machine which require synchronization monitoring.  Most PFSs
1126  *          (such as snapshots) are 1xMASTER PFSs which do not need a
1127  *          synchronization thread.
1128  *
1129  * WARNING! The chains making up pfs->iroot's cluster are accounted for in
1130  *          hammer2_dev->mount_count when the pfs is associated with a mount
1131  *          point.
1132  */
1133 struct hammer2_pfs {
1134         struct mount            *mp;
1135         TAILQ_ENTRY(hammer2_pfs) mntentry;      /* hammer2_pfslist */
1136         uuid_t                  pfs_clid;
1137         hammer2_dev_t           *spmp_hmp;      /* only if super-root pmp */
1138         hammer2_dev_t           *force_local;   /* only if 'local' mount */
1139         hammer2_inode_t         *iroot;         /* PFS root inode */
1140         uint8_t                 pfs_types[HAMMER2_MAXCLUSTER];
1141         char                    *pfs_names[HAMMER2_MAXCLUSTER];
1142         hammer2_dev_t           *pfs_hmps[HAMMER2_MAXCLUSTER];
1143         hammer2_trans_t         trans;
1144         struct lock             lock;           /* PFS lock for certain ops */
1145         struct lock             lock_nlink;     /* rename and nlink lock */
1146         struct netexport        export;         /* nfs export */
1147         int                     ronly;          /* read-only mount */
1148         int                     hflags;         /* pfs-specific mount flags */
1149         struct malloc_type      *minode;
1150         struct malloc_type      *mmsg;
1151         struct spinlock         inum_spin;      /* inumber lookup */
1152         struct hammer2_inode_tree inum_tree;    /* (not applicable to spmp) */
1153         struct spinlock         lru_spin;       /* lru_list */
1154         struct hammer2_chain_list lru_list;     /* chains on LRU */
1155         int                     lru_count;      /* #of chains on LRU */
1156         hammer2_tid_t           modify_tid;     /* modify transaction id */
1157         hammer2_tid_t           inode_tid;      /* inode allocator */
1158         uint8_t                 pfs_nmasters;   /* total masters */
1159         uint8_t                 pfs_mode;       /* operating mode PFSMODE */
1160         uint8_t                 unused01;
1161         uint8_t                 unused02;
1162         int                     unused03;
1163         long                    inmem_inodes;
1164         uint32_t                inmem_dirty_chains;
1165         int                     count_lwinprog; /* logical write in prog */
1166         struct spinlock         list_spin;
1167         struct h2_sideq_list    sideq;          /* last-close dirty/unlink */
1168         hammer2_thread_t        sync_thrs[HAMMER2_MAXCLUSTER];
1169         uint32_t                cluster_flags;  /* cached cluster flags */
1170         int                     has_xop_threads;
1171         struct spinlock         xop_spin;       /* xop sequencer */
1172         hammer2_xop_group_t     xop_groups[HAMMER2_XOPGROUPS];
1173 };
1174
1175 typedef struct hammer2_pfs hammer2_pfs_t;
1176
1177 TAILQ_HEAD(hammer2_pfslist, hammer2_pfs);
1178
1179 #define HAMMER2_LRU_LIMIT               1024    /* per pmp lru_list */
1180
1181 #define HAMMER2_DIRTYCHAIN_WAITING      0x80000000
1182 #define HAMMER2_DIRTYCHAIN_MASK         0x7FFFFFFF
1183
1184 #define HAMMER2_LWINPROG_WAITING        0x80000000
1185 #define HAMMER2_LWINPROG_WAITING0       0x40000000
1186 #define HAMMER2_LWINPROG_MASK           0x3FFFFFFF
1187
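/*
 * Editor's note (illustrative): inmem_dirty_chains and count_lwinprog in
 * hammer2_pfs pack a counter into the low (MASK) bits and wait flags into
 * the high bits, e.g. the current number of dirty chains is
 *
 *      pmp->inmem_dirty_chains & HAMMER2_DIRTYCHAIN_MASK
 *
 * while the WAITING bit(s) indicate that a thread is sleeping until the
 * count drops.
 */
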
1188 /*
1189  * hammer2_cluster_check
1190  */
1191 #define HAMMER2_CHECK_NULL      0x00000001
1192
1193 /*
1194  * Bulkscan
1195  */
1196 #define HAMMER2_BULK_ABORT      0x00000001
1197
1198 /*
1199  * Misc
1200  */
1201 #if defined(_KERNEL)
1202
1203 MALLOC_DECLARE(M_HAMMER2);
1204
1205 #define VTOI(vp)        ((hammer2_inode_t *)(vp)->v_data)
1206 #define ITOV(ip)        ((ip)->vp)
1207
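/*
 * Editor's note (illustrative): VTOI/ITOV convert between a vnode and its
 * associated hammer2 inode, e.g. at the top of a VOP:
 *
 *      hammer2_inode_t *ip = VTOI(ap->a_vp);
 */
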
1208 /*
1209  * Currently locked chains retain the locked buffer cache buffer for
1210  * indirect blocks, and indirect blocks can be one of two sizes.  The
1211  * device buffer has to match the case to avoid deadlocking recursive
1212  * chains that might otherwise try to access different offsets within
1213  * the same device buffer.
1214  */
1215 static __inline
1216 int
1217 hammer2_devblkradix(int radix)
1218 {
1219 #if 0
1220         if (radix <= HAMMER2_LBUFRADIX) {
1221                 return (HAMMER2_LBUFRADIX);
1222         } else {
1223                 return (HAMMER2_PBUFRADIX);
1224         }
1225 #endif
1226         return (HAMMER2_PBUFRADIX);
1227 }
1228
1229 /*
1230  * XXX almost time to remove this.  DIO uses PBUFSIZE exclusively now.
1231  */
1232 static __inline
1233 size_t
1234 hammer2_devblksize(size_t bytes)
1235 {
1236 #if 0
1237         if (bytes <= HAMMER2_LBUFSIZE) {
1238                 return(HAMMER2_LBUFSIZE);
1239         } else {
1240                 KKASSERT(bytes <= HAMMER2_PBUFSIZE &&
1241                          (bytes ^ (bytes - 1)) == ((bytes << 1) - 1));
1242                 return (HAMMER2_PBUFSIZE);
1243         }
1244 #endif
1245         return (HAMMER2_PBUFSIZE);
1246 }
1247
1248
1249 static __inline
1250 hammer2_pfs_t *
1251 MPTOPMP(struct mount *mp)
1252 {
1253         return ((hammer2_pfs_t *)mp->mnt_data);
1254 }
1255
1256 #define LOCKSTART       int __nlocks = curthread->td_locks
1257 #define LOCKENTER       (++curthread->td_locks)
1258 #define LOCKEXIT        (--curthread->td_locks)
1259 #define LOCKSTOP        KKASSERT(curthread->td_locks == __nlocks)
1260
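/*
 * Editor's illustrative sketch: the lock-count macros above bracket a
 * function to assert that it does not leak td_locks, e.g.:
 *
 *      int
 *      hammer2_example_op(void)                (hypothetical function)
 *      {
 *              LOCKSTART;
 *              ...
 *              LOCKSTOP;
 *              return (0);
 *      }
 */
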
1261 extern struct vop_ops hammer2_vnode_vops;
1262 extern struct vop_ops hammer2_spec_vops;
1263 extern struct vop_ops hammer2_fifo_vops;
1264 extern struct hammer2_pfslist hammer2_pfslist;
1265 extern struct lock hammer2_mntlk;
1266
1267
1268 extern int hammer2_debug;
1269 extern int hammer2_cluster_read;
1270 extern int hammer2_cluster_write;
1271 extern int hammer2_dedup_enable;
1272 extern int hammer2_inval_enable;
1273 extern int hammer2_flush_pipe;
1274 extern int hammer2_synchronous_flush;
1275 extern int hammer2_dio_count;
1276 extern long hammer2_chain_allocs;
1277 extern long hammer2_chain_frees;
1278 extern long hammer2_limit_dirty_chains;
1279 extern long hammer2_count_modified_chains;
1280 extern long hammer2_iod_invals;
1281 extern long hammer2_iod_file_read;
1282 extern long hammer2_iod_meta_read;
1283 extern long hammer2_iod_indr_read;
1284 extern long hammer2_iod_fmap_read;
1285 extern long hammer2_iod_volu_read;
1286 extern long hammer2_iod_file_write;
1287 extern long hammer2_iod_file_wembed;
1288 extern long hammer2_iod_file_wzero;
1289 extern long hammer2_iod_file_wdedup;
1290 extern long hammer2_iod_meta_write;
1291 extern long hammer2_iod_indr_write;
1292 extern long hammer2_iod_fmap_write;
1293 extern long hammer2_iod_volu_write;
1294
1295 extern long hammer2_check_xxhash64;
1296 extern long hammer2_check_icrc32;
1297
1298 extern struct objcache *cache_buffer_read;
1299 extern struct objcache *cache_buffer_write;
1300 extern struct objcache *cache_xops;
1301
1302 /*
1303  * hammer2_subr.c
1304  */
1305 #define hammer2_icrc32(buf, size)       iscsi_crc32((buf), (size))
1306 #define hammer2_icrc32c(buf, size, crc) iscsi_crc32_ext((buf), (size), (crc))
1307
1308 int hammer2_signal_check(time_t *timep);
1309 const char *hammer2_error_str(int error);
1310
1311 void hammer2_inode_lock(hammer2_inode_t *ip, int how);
1312 void hammer2_inode_unlock(hammer2_inode_t *ip);
1313 hammer2_chain_t *hammer2_inode_chain(hammer2_inode_t *ip, int clindex, int how);
1314 hammer2_chain_t *hammer2_inode_chain_and_parent(hammer2_inode_t *ip,
1315                         int clindex, hammer2_chain_t **parentp, int how);
1316 hammer2_mtx_state_t hammer2_inode_lock_temp_release(hammer2_inode_t *ip);
1317 void hammer2_inode_lock_temp_restore(hammer2_inode_t *ip,
1318                         hammer2_mtx_state_t ostate);
1319 int hammer2_inode_lock_upgrade(hammer2_inode_t *ip);
1320 void hammer2_inode_lock_downgrade(hammer2_inode_t *ip, int);
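
/*
 * Usage sketch (illustrative): temporarily giving up an inode lock
 * around a blocking call and then restoring its prior state.
 *
 *	hammer2_mtx_state_t ostate;
 *
 *	hammer2_inode_lock(ip, 0);
 *	ostate = hammer2_inode_lock_temp_release(ip);
 *	(... blocking operation that must not hold the inode lock ...)
 *	hammer2_inode_lock_temp_restore(ip, ostate);
 *	hammer2_inode_unlock(ip);
 */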
1321
1322 void hammer2_dev_exlock(hammer2_dev_t *hmp);
1323 void hammer2_dev_shlock(hammer2_dev_t *hmp);
1324 void hammer2_dev_unlock(hammer2_dev_t *hmp);
1325
1326 int hammer2_get_dtype(const hammer2_inode_data_t *ipdata);
1327 int hammer2_get_vtype(uint8_t type);
1328 uint8_t hammer2_get_obj_type(enum vtype vtype);
1329 void hammer2_time_to_timespec(uint64_t xtime, struct timespec *ts);
1330 uint64_t hammer2_timespec_to_time(const struct timespec *ts);
1331 uint32_t hammer2_to_unix_xid(const uuid_t *uuid);
1332 void hammer2_guid_to_uuid(uuid_t *uuid, uint32_t guid);
1333 void hammer2_trans_manage_init(hammer2_pfs_t *pmp);
1334
1335 hammer2_key_t hammer2_dirhash(const unsigned char *name, size_t len);
1336 int hammer2_getradix(size_t bytes);
1337
1338 int hammer2_calc_logical(hammer2_inode_t *ip, hammer2_off_t uoff,
1339                         hammer2_key_t *lbasep, hammer2_key_t *leofp);
1340 int hammer2_calc_physical(hammer2_inode_t *ip, hammer2_key_t lbase);
1341 void hammer2_update_time(uint64_t *timep);
1342 void hammer2_adjreadcounter(hammer2_blockref_t *bref, size_t bytes);
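
/*
 * Usage sketch (illustrative; "uio" is a placeholder): hammer2_calc_logical()
 * returns the logical block size covering a file offset and fills in the
 * logical base and end-of-file keys.
 *
 *	hammer2_key_t lbase, leof;
 *	int lblksize;
 *
 *	lblksize = hammer2_calc_logical(ip, uio->uio_offset, &lbase, &leof);
 */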
1343
1344 /*
1345  * hammer2_inode.c
1346  */
1347 struct vnode *hammer2_igetv(hammer2_inode_t *ip, int *errorp);
1348 hammer2_inode_t *hammer2_inode_lookup(hammer2_pfs_t *pmp,
1349                         hammer2_tid_t inum);
1350 hammer2_inode_t *hammer2_inode_get(hammer2_pfs_t *pmp, hammer2_inode_t *dip,
1351                         hammer2_cluster_t *cluster, int idx);
1352 void hammer2_inode_free(hammer2_inode_t *ip);
1353 void hammer2_inode_ref(hammer2_inode_t *ip);
1354 void hammer2_inode_drop(hammer2_inode_t *ip);
1355 void hammer2_inode_repoint(hammer2_inode_t *ip, hammer2_inode_t *pip,
1356                         hammer2_cluster_t *cluster);
1357 void hammer2_inode_repoint_one(hammer2_inode_t *ip, hammer2_cluster_t *cluster,
1358                         int idx);
1359 void hammer2_inode_modify(hammer2_inode_t *ip);
1360 void hammer2_inode_run_sideq(hammer2_pfs_t *pmp);
1361
1362 hammer2_inode_t *hammer2_inode_create(hammer2_inode_t *dip,
1363                         hammer2_inode_t *pip,
1364                         struct vattr *vap, struct ucred *cred,
1365                         const uint8_t *name, size_t name_len, hammer2_key_t lhc,
1366                         hammer2_key_t inum, uint8_t type, uint8_t target_type,
1367                         int flags, int *errorp);
1368 void hammer2_inode_chain_sync(hammer2_inode_t *ip);
1369 int hammer2_inode_unlink_finisher(hammer2_inode_t *ip, int isopen);
1370
1371 /*
1372  * hammer2_chain.c
1373  */
1374 void hammer2_voldata_lock(hammer2_dev_t *hmp);
1375 void hammer2_voldata_unlock(hammer2_dev_t *hmp);
1376 void hammer2_voldata_modify(hammer2_dev_t *hmp);
1377 hammer2_chain_t *hammer2_chain_alloc(hammer2_dev_t *hmp,
1378                                 hammer2_pfs_t *pmp,
1379                                 hammer2_blockref_t *bref);
1380 void hammer2_chain_core_init(hammer2_chain_t *chain);
1381 void hammer2_chain_ref(hammer2_chain_t *chain);
1382 void hammer2_chain_ref_hold(hammer2_chain_t *chain);
1383 void hammer2_chain_drop(hammer2_chain_t *chain);
1384 void hammer2_chain_drop_unhold(hammer2_chain_t *chain);
1385 void hammer2_chain_lock(hammer2_chain_t *chain, int how);
1386 void hammer2_chain_lock_unhold(hammer2_chain_t *chain, int how);
1387 #if 0
1388 void hammer2_chain_push_shared_lock(hammer2_chain_t *chain);
1389 void hammer2_chain_pull_shared_lock(hammer2_chain_t *chain);
1390 #endif
1391 void hammer2_chain_load_data(hammer2_chain_t *chain);
1392 const hammer2_media_data_t *hammer2_chain_rdata(hammer2_chain_t *chain);
1393 hammer2_media_data_t *hammer2_chain_wdata(hammer2_chain_t *chain);
1394 int hammer2_chain_snapshot(hammer2_chain_t *chain, hammer2_ioc_pfs_t *pmp,
1395                                 hammer2_tid_t mtid);
1396
1397 int hammer2_chain_hardlink_find(hammer2_chain_t **parentp,
1398                                 hammer2_chain_t **chainp,
1399                                 int clindex, int flags);
1400 void hammer2_chain_modify(hammer2_chain_t *chain, hammer2_tid_t mtid,
1401                                 hammer2_off_t dedup_off, int flags);
1402 void hammer2_chain_modify_ip(hammer2_inode_t *ip, hammer2_chain_t *chain,
1403                                 hammer2_tid_t mtid, int flags);
1404 void hammer2_chain_resize(hammer2_inode_t *ip, hammer2_chain_t *parent,
1405                                 hammer2_chain_t *chain,
1406                                 hammer2_tid_t mtid, hammer2_off_t dedup_off,
1407                                 int nradix, int flags);
1408 void hammer2_chain_unlock(hammer2_chain_t *chain);
1409 void hammer2_chain_unlock_hold(hammer2_chain_t *chain);
1410 void hammer2_chain_wait(hammer2_chain_t *chain);
1411 hammer2_chain_t *hammer2_chain_get(hammer2_chain_t *parent, int generation,
1412                                 hammer2_blockref_t *bref);
1413 hammer2_chain_t *hammer2_chain_lookup_init(hammer2_chain_t *parent, int flags);
1414 void hammer2_chain_lookup_done(hammer2_chain_t *parent);
1415 hammer2_chain_t *hammer2_chain_getparent(hammer2_chain_t **parentp, int how);
1416 hammer2_chain_t *hammer2_chain_lookup(hammer2_chain_t **parentp,
1417                                 hammer2_key_t *key_nextp,
1418                                 hammer2_key_t key_beg, hammer2_key_t key_end,
1419                                 int *cache_indexp, int flags);
1420 hammer2_chain_t *hammer2_chain_next(hammer2_chain_t **parentp,
1421                                 hammer2_chain_t *chain,
1422                                 hammer2_key_t *key_nextp,
1423                                 hammer2_key_t key_beg, hammer2_key_t key_end,
1424                                 int *cache_indexp, int flags);
1425 hammer2_blockref_t *hammer2_chain_scan(hammer2_chain_t *parent,
1426                                 hammer2_chain_t **chainp,
1427                                 hammer2_blockref_t *bref,
1428                                 int *firstp, int *cache_indexp, int flags);
1429
1430 int hammer2_chain_create(hammer2_chain_t **parentp, hammer2_chain_t **chainp,
1431                                 hammer2_pfs_t *pmp, int methods,
1432                                 hammer2_key_t key, int keybits,
1433                                 int type, size_t bytes, hammer2_tid_t mtid,
1434                                 hammer2_off_t dedup_off, int flags);
1435 void hammer2_chain_rename(hammer2_blockref_t *bref,
1436                                 hammer2_chain_t **parentp,
1437                                 hammer2_chain_t *chain,
1438                                 hammer2_tid_t mtid, int flags);
1439 void hammer2_chain_delete(hammer2_chain_t *parent, hammer2_chain_t *chain,
1440                                 hammer2_tid_t mtid, int flags);
1441 void hammer2_chain_setflush(hammer2_chain_t *chain);
1442 void hammer2_chain_countbrefs(hammer2_chain_t *chain,
1443                                 hammer2_blockref_t *base, int count);
1444 hammer2_chain_t *hammer2_chain_bulksnap(hammer2_chain_t *chain);
1445 void hammer2_chain_bulkdrop(hammer2_chain_t *copy);
1446
1447 void hammer2_chain_setcheck(hammer2_chain_t *chain, void *bdata);
1448 int hammer2_chain_testcheck(hammer2_chain_t *chain, void *bdata);
1449
1450 void hammer2_pfs_memory_wait(hammer2_pfs_t *pmp);
1451 void hammer2_pfs_memory_inc(hammer2_pfs_t *pmp);
1452 void hammer2_pfs_memory_wakeup(hammer2_pfs_t *pmp);
1453
1454 void hammer2_base_delete(hammer2_chain_t *chain,
1455                                 hammer2_blockref_t *base, int count,
1456                                 int *cache_indexp, hammer2_chain_t *child);
1457 void hammer2_base_insert(hammer2_chain_t *chain,
1458                                 hammer2_blockref_t *base, int count,
1459                                 int *cache_indexp, hammer2_chain_t *child);
1460
1461 /*
1462  * hammer2_flush.c
1463  */
1464 void hammer2_flush(hammer2_chain_t *chain, int istop);
1465 void hammer2_flush_quick(hammer2_dev_t *hmp);
1466 void hammer2_delayed_flush(hammer2_chain_t *chain);
1467
1468 /*
1469  * hammer2_trans.c
1470  */
1471 void hammer2_trans_init(hammer2_pfs_t *pmp, uint32_t flags);
1472 hammer2_tid_t hammer2_trans_sub(hammer2_pfs_t *pmp);
1473 void hammer2_trans_done(hammer2_pfs_t *pmp);
1474 hammer2_tid_t hammer2_trans_newinum(hammer2_pfs_t *pmp);
1475 void hammer2_trans_assert_strategy(hammer2_pfs_t *pmp);
1476 void hammer2_dedup_record(hammer2_chain_t *chain, char *data);
1477
1478 /*
1479  * hammer2_ioctl.c
1480  */
1481 int hammer2_ioctl(hammer2_inode_t *ip, u_long com, void *data,
1482                                 int fflag, struct ucred *cred);
1483
1484 /*
1485  * hammer2_io.c
1486  */
1487 void hammer2_io_putblk(hammer2_io_t **diop);
1488 void hammer2_io_cleanup(hammer2_dev_t *hmp, struct hammer2_io_tree *tree);
1489 char *hammer2_io_data(hammer2_io_t *dio, off_t lbase);
1490 hammer2_io_t *hammer2_io_getquick(hammer2_dev_t *hmp, off_t lbase, int lsize);
1491 void hammer2_io_resetinval(hammer2_dev_t *hmp, off_t lbase);
1492 void hammer2_io_getblk(hammer2_dev_t *hmp, off_t lbase, int lsize,
1493                                 hammer2_iocb_t *iocb);
1494 void hammer2_io_complete(hammer2_iocb_t *iocb);
1495 void hammer2_io_callback(struct bio *bio);
1496 void hammer2_iocb_wait(hammer2_iocb_t *iocb);
1497 int hammer2_io_new(hammer2_dev_t *hmp, int btype, off_t lbase, int lsize,
1498                                 hammer2_io_t **diop);
1499 int hammer2_io_newnz(hammer2_dev_t *hmp, int btype, off_t lbase, int lsize,
1500                                 hammer2_io_t **diop);
1501 void hammer2_io_newq(hammer2_dev_t *hmp, int btype, off_t lbase, int lsize);
1502 int hammer2_io_bread(hammer2_dev_t *hmp, int btype, off_t lbase, int lsize,
1503                                 hammer2_io_t **diop);
1504 void hammer2_io_bawrite(hammer2_io_t **diop);
1505 void hammer2_io_bdwrite(hammer2_io_t **diop);
1506 int hammer2_io_bwrite(hammer2_io_t **diop);
1507 int hammer2_io_isdirty(hammer2_io_t *dio);
1508 void hammer2_io_setdirty(hammer2_io_t *dio);
1509 void hammer2_io_setinval(hammer2_io_t *dio, hammer2_off_t off, u_int bytes);
1510 void hammer2_io_brelse(hammer2_io_t **diop);
1511 void hammer2_io_bqrelse(hammer2_io_t **diop);
1512 int hammer2_io_crc_good(hammer2_chain_t *chain, uint64_t *maskp);
1513 void hammer2_io_crc_setmask(hammer2_io_t *dio, uint64_t mask);
1514 void hammer2_io_crc_clrmask(hammer2_io_t *dio, uint64_t mask);
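
/*
 * Usage sketch (illustrative, error path handling elided): the common
 * DIO read pattern is to bread a device buffer, access it through
 * hammer2_io_data(), and release it with hammer2_io_putblk().
 *
 *	hammer2_io_t *dio;
 *	char *data;
 *	int error;
 *
 *	error = hammer2_io_bread(hmp, btype, lbase, lsize, &dio);
 *	if (error == 0) {
 *		data = hammer2_io_data(dio, lbase);
 *		(... consume data ...)
 *		hammer2_io_putblk(&dio);
 *	}
 */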
1515
1516 /*
1517  * hammer2_thread.c
1518  */
1519 void hammer2_thr_signal(hammer2_thread_t *thr, uint32_t flags);
1520 void hammer2_thr_return(hammer2_thread_t *thr, uint32_t flags);
1521 void hammer2_thr_wait(hammer2_thread_t *thr, uint32_t flags);
1522 void hammer2_thr_wait_neg(hammer2_thread_t *thr, uint32_t flags);
1523 void hammer2_thr_create(hammer2_thread_t *thr, hammer2_pfs_t *pmp,
1524                         const char *id, int clindex, int repidx,
1525                         void (*func)(void *arg));
1526 void hammer2_thr_delete(hammer2_thread_t *thr);
1527 void hammer2_thr_remaster(hammer2_thread_t *thr);
1528 void hammer2_thr_freeze_async(hammer2_thread_t *thr);
1529 void hammer2_thr_freeze(hammer2_thread_t *thr);
1530 void hammer2_thr_unfreeze(hammer2_thread_t *thr);
1531 int hammer2_thr_break(hammer2_thread_t *thr);
1532 void hammer2_primary_xops_thread(void *arg);
1533
1534 /*
1535  * hammer2_thread.c (XOP API)
1536  */
1537 void hammer2_xop_group_init(hammer2_pfs_t *pmp, hammer2_xop_group_t *xgrp);
1538 void *hammer2_xop_alloc(hammer2_inode_t *ip, int flags);
1539 void hammer2_xop_setname(hammer2_xop_head_t *xop,
1540                                 const char *name, size_t name_len);
1541 void hammer2_xop_setname2(hammer2_xop_head_t *xop,
1542                                 const char *name, size_t name_len);
1543 size_t hammer2_xop_setname_inum(hammer2_xop_head_t *xop, hammer2_key_t inum);
1544 void hammer2_xop_setip2(hammer2_xop_head_t *xop, hammer2_inode_t *ip2);
1545 void hammer2_xop_setip3(hammer2_xop_head_t *xop, hammer2_inode_t *ip3);
1546 void hammer2_xop_reinit(hammer2_xop_head_t *xop);
1547 void hammer2_xop_helper_create(hammer2_pfs_t *pmp);
1548 void hammer2_xop_helper_cleanup(hammer2_pfs_t *pmp);
1549 void hammer2_xop_start(hammer2_xop_head_t *xop, hammer2_xop_func_t func);
1550 void hammer2_xop_start_except(hammer2_xop_head_t *xop, hammer2_xop_func_t func,
1551                                 int notidx);
1552 int hammer2_xop_collect(hammer2_xop_head_t *xop, int flags);
1553 void hammer2_xop_retire(hammer2_xop_head_t *xop, uint32_t mask);
1554 int hammer2_xop_active(hammer2_xop_head_t *xop);
1555 int hammer2_xop_feed(hammer2_xop_head_t *xop, hammer2_chain_t *chain,
1556                                 int clindex, int error);
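
/*
 * Frontend usage sketch (illustrative; names follow the typical VNOPS
 * nresolve pattern but dip/name/name_len are placeholders): allocate an
 * XOP, queue it to the per-cluster worker threads, collect the
 * consolidated result, then retire it.
 *
 *	hammer2_xop_nresolve_t *xop;
 *	int error;
 *
 *	xop = hammer2_xop_alloc(dip, 0);
 *	hammer2_xop_setname(&xop->head, name, name_len);
 *	hammer2_xop_start(&xop->head, hammer2_xop_nresolve);
 *	error = hammer2_xop_collect(&xop->head, 0);
 *	(... use the collected cluster or copy out the result ...)
 *	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
 */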
1557
1558 /*
1559  * hammer2_synchro.c
1560  */
1561 void hammer2_primary_sync_thread(void *arg);
1562
1563 /*
1564  * XOP backends in hammer2_xops.c, primarily for VNOPS.  Other XOP backends
1565  * may be integrated into other source files.
1566  */
1567 void hammer2_xop_ipcluster(hammer2_thread_t *thr, hammer2_xop_t *xop);
1568 void hammer2_xop_readdir(hammer2_thread_t *thr, hammer2_xop_t *xop);
1569 void hammer2_xop_nresolve(hammer2_thread_t *thr, hammer2_xop_t *xop);
1570 void hammer2_xop_unlink(hammer2_thread_t *thr, hammer2_xop_t *xop);
1571 void hammer2_xop_nrename(hammer2_thread_t *thr, hammer2_xop_t *xop);
1572 void hammer2_xop_scanlhc(hammer2_thread_t *thr, hammer2_xop_t *xop);
1573 void hammer2_xop_scanall(hammer2_thread_t *thr, hammer2_xop_t *xop);
1574 void hammer2_xop_lookup(hammer2_thread_t *thr, hammer2_xop_t *xop);
1575 void hammer2_inode_xop_create(hammer2_thread_t *thr, hammer2_xop_t *xop);
1576 void hammer2_inode_xop_destroy(hammer2_thread_t *thr, hammer2_xop_t *xop);
1577 void hammer2_inode_xop_chain_sync(hammer2_thread_t *thr, hammer2_xop_t *xop);
1578 void hammer2_inode_xop_unlinkall(hammer2_thread_t *thr, hammer2_xop_t *xop);
1579 void hammer2_inode_xop_connect(hammer2_thread_t *thr, hammer2_xop_t *xop);
1580 void hammer2_inode_xop_flush(hammer2_thread_t *thr, hammer2_xop_t *xop);
1581
1582 /*
1583  * hammer2_msgops.c
1584  */
1585 int hammer2_msg_dbg_rcvmsg(kdmsg_msg_t *msg);
1586 int hammer2_msg_adhoc_input(kdmsg_msg_t *msg);
1587
1588 /*
1589  * hammer2_vfsops.c
1590  */
1591 void hammer2_volconf_update(hammer2_dev_t *hmp, int index);
1592 void hammer2_dump_chain(hammer2_chain_t *chain, int tab, int *countp, char pfx);
1593 int hammer2_vfs_sync(struct mount *mp, int waitflags);
1594 hammer2_pfs_t *hammer2_pfsalloc(hammer2_chain_t *chain,
1595                                 const hammer2_inode_data_t *ripdata,
1596                                 hammer2_tid_t modify_tid,
1597                                 hammer2_dev_t *force_local);
1598 void hammer2_pfsdealloc(hammer2_pfs_t *pmp, int clindex, int destroying);
1599 int hammer2_vfs_vget(struct mount *mp, struct vnode *dvp,
1600                                 ino_t ino, struct vnode **vpp);
1601
1602 void hammer2_lwinprog_ref(hammer2_pfs_t *pmp);
1603 void hammer2_lwinprog_drop(hammer2_pfs_t *pmp);
1604 void hammer2_lwinprog_wait(hammer2_pfs_t *pmp, int pipe);
1605
1606 /*
1607  * hammer2_freemap.c
1608  */
1609 int hammer2_freemap_alloc(hammer2_chain_t *chain, size_t bytes);
1610 void hammer2_freemap_adjust(hammer2_dev_t *hmp,
1611                                 hammer2_blockref_t *bref, int how);
1612
1613 /*
1614  * hammer2_cluster.c
1615  */
1616 uint8_t hammer2_cluster_type(hammer2_cluster_t *cluster);
1617 const hammer2_media_data_t *hammer2_cluster_rdata(hammer2_cluster_t *cluster);
1618 hammer2_media_data_t *hammer2_cluster_wdata(hammer2_cluster_t *cluster);
1619 hammer2_cluster_t *hammer2_cluster_from_chain(hammer2_chain_t *chain);
1620 void hammer2_cluster_bref(hammer2_cluster_t *cluster, hammer2_blockref_t *bref);
1621 hammer2_cluster_t *hammer2_cluster_alloc(hammer2_pfs_t *pmp,
1622                                 hammer2_blockref_t *bref);
1623 void hammer2_cluster_ref(hammer2_cluster_t *cluster);
1624 void hammer2_cluster_drop(hammer2_cluster_t *cluster);
1625 void hammer2_cluster_lock(hammer2_cluster_t *cluster, int how);
1626 int hammer2_cluster_check(hammer2_cluster_t *cluster, hammer2_key_t lokey,
1627                         int flags);
1628 void hammer2_cluster_resolve(hammer2_cluster_t *cluster);
1629 void hammer2_cluster_forcegood(hammer2_cluster_t *cluster);
1630 void hammer2_cluster_unlock(hammer2_cluster_t *cluster);
1631
1632 int hammer2_bulkfree_pass(hammer2_dev_t *hmp,
1633                         struct hammer2_ioc_bulkfree *bfi);
1634
1635 /*
1636  * hammer2_iocom.c
1637  */
1638 void hammer2_iocom_init(hammer2_dev_t *hmp);
1639 void hammer2_iocom_uninit(hammer2_dev_t *hmp);
1640 void hammer2_cluster_reconnect(hammer2_dev_t *hmp, struct file *fp);
1641
1642 /*
1643  * hammer2_strategy.c
1644  */
1645 int hammer2_vop_strategy(struct vop_strategy_args *ap);
1646 int hammer2_vop_bmap(struct vop_bmap_args *ap);
1647 void hammer2_write_thread(void *arg);
1648 void hammer2_bioq_sync(hammer2_pfs_t *pmp);
1649 void hammer2_dedup_clear(hammer2_dev_t *hmp);
1650
1651 #endif /* !_KERNEL */
1652 #endif /* !_VFS_HAMMER2_HAMMER2_H_ */