hammer2 - Refactor frontend part 9/many
dragonfly.git: sys/vfs/hammer2/hammer2.h
1 /*
2  * Copyright (c) 2011-2015 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@dragonflybsd.org>
6  * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  *
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in
16  *    the documentation and/or other materials provided with the
17  *    distribution.
18  * 3. Neither the name of The DragonFly Project nor the names of its
19  *    contributors may be used to endorse or promote products derived
20  *    from this software without specific, prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
25  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
26  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
27  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
28  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
30  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
31  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
32  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33  * SUCH DAMAGE.
34  */
35
36 /*
37  * HAMMER2 IN-MEMORY CACHE OF MEDIA STRUCTURES
38  *
39  * This header file contains structures used internally by the HAMMER2
40  * implementation.  See hammer2_disk.h for on-disk structures.
41  *
42  * There is an in-memory representation of all on-media data structures.
43  * Almost everything is represented by a hammer2_chain structure in-memory.
44  * Other higher-level structures typically map to chains.
45  *
46  * A great deal of data is accessed simply via its buffer cache buffer,
47  * which is mapped for the duration of the chain's lock.  Hammer2 must
48  * implement its own buffer cache layer on top of the system layer to
49  * allow different threads to lock different sub-block-sized buffers.
50  *
51  * When modifications are made to a chain a new filesystem block must be
52  * allocated.  Multiple modifications do not typically allocate new blocks
53  * until the current block has been flushed.  Flushes do not block the
54  * front-end unless the front-end operation crosses the current inode being
55  * flushed.
56  *
57  * The in-memory representation may remain cached (for example in order to
58  * placemark clustering locks) even after the related data has been
59  * detached.
60  */
61
62 #ifndef _VFS_HAMMER2_HAMMER2_H_
63 #define _VFS_HAMMER2_HAMMER2_H_
64
65 #include <sys/param.h>
66 #include <sys/types.h>
67 #include <sys/kernel.h>
68 #include <sys/conf.h>
69 #include <sys/systm.h>
70 #include <sys/tree.h>
71 #include <sys/malloc.h>
72 #include <sys/mount.h>
73 #include <sys/vnode.h>
74 #include <sys/proc.h>
75 #include <sys/mountctl.h>
76 #include <sys/priv.h>
77 #include <sys/stat.h>
78 #include <sys/thread.h>
79 #include <sys/globaldata.h>
80 #include <sys/lockf.h>
81 #include <sys/buf.h>
82 #include <sys/queue.h>
83 #include <sys/limits.h>
84 #include <sys/dmsg.h>
85 #include <sys/mutex.h>
86 #include <sys/kern_syscall.h>
87
88 #include <sys/signal2.h>
89 #include <sys/buf2.h>
90 #include <sys/mutex2.h>
91 #include <sys/thread2.h>
92
93 #include "hammer2_disk.h"
94 #include "hammer2_mount.h"
95 #include "hammer2_ioctl.h"
96
97 struct hammer2_io;
98 struct hammer2_iocb;
99 struct hammer2_chain;
100 struct hammer2_cluster;
101 struct hammer2_inode;
102 struct hammer2_dev;
103 struct hammer2_pfs;
104 struct hammer2_span;
105 struct hammer2_state;
106 struct hammer2_msg;
107 struct hammer2_thread;
108 union hammer2_xop;
109
110 /*
111  * Mutex and lock shims.  Hammer2 requires support for asynchronous and
112  * abortable locks, and both exclusive and shared spinlocks.  Normal
113  * synchronous non-abortable locks can be substituted for spinlocks.
114  */
115 typedef mtx_t                           hammer2_mtx_t;
116 typedef mtx_link_t                      hammer2_mtx_link_t;
117 typedef mtx_state_t                     hammer2_mtx_state_t;
118
119 typedef struct spinlock                 hammer2_spin_t;
120
121 #define hammer2_mtx_ex                  mtx_lock_ex_quick
122 #define hammer2_mtx_sh                  mtx_lock_sh_quick
123 #define hammer2_mtx_unlock              mtx_unlock
124 #define hammer2_mtx_owned               mtx_owned
125 #define hammer2_mtx_init                mtx_init
126 #define hammer2_mtx_temp_release        mtx_lock_temp_release
127 #define hammer2_mtx_temp_restore        mtx_lock_temp_restore
128 #define hammer2_mtx_refs                mtx_lockrefs
129
130 #define hammer2_spin_init               spin_init
131 #define hammer2_spin_sh                 spin_lock_shared
132 #define hammer2_spin_ex                 spin_lock
133 #define hammer2_spin_unsh               spin_unlock_shared
134 #define hammer2_spin_unex               spin_unlock
135
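/*
 * Illustrative sketch (not compiled): a minimal example of how the shims
 * above pair up.  The function and the state it protects are hypothetical;
 * only the shim macros themselves come from this header.
 */
#if 0
static __inline
void
hammer2_example_shim_usage(hammer2_mtx_t *mtx, hammer2_spin_t *spin)
{
	hammer2_mtx_ex(mtx);		/* exclusive, sleepable */
	/* ... modify state protected by the mutex ... */
	hammer2_mtx_unlock(mtx);

	hammer2_spin_sh(spin);		/* shared spinlock, short hold only */
	/* ... read state protected by the spinlock ... */
	hammer2_spin_unsh(spin);
}
#endif
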
136 TAILQ_HEAD(hammer2_xop_list, hammer2_xop_head);
137
138 typedef struct hammer2_xop_list hammer2_xop_list_t;
139
140
141 /*
142  * General lock support
143  */
144 static __inline
145 int
146 hammer2_mtx_upgrade(hammer2_mtx_t *mtx)
147 {
148         int wasexclusive;
149
150         if (mtx_islocked_ex(mtx)) {
151                 wasexclusive = 1;
152         } else {
153                 mtx_unlock(mtx);
154                 mtx_lock_ex_quick(mtx);
155                 wasexclusive = 0;
156         }
157         return wasexclusive;
158 }
159
160 /*
161  * Downgrade an inode lock from exclusive to shared only if the inode
162  * lock was previously shared.  If the inode lock was previously exclusive,
163  * this is a NOP.
164  */
165 static __inline
166 void
167 hammer2_mtx_downgrade(hammer2_mtx_t *mtx, int wasexclusive)
168 {
169         if (wasexclusive == 0)
170                 mtx_downgrade(mtx);
171 }
172
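/*
 * Illustrative sketch (not compiled): the intended pairing of the two
 * helpers above.  A caller holding the lock in an unknown shared/exclusive
 * state upgrades to exclusive, does its work, and then restores the
 * original state.  The function and the work in the middle are hypothetical.
 */
#if 0
static __inline
void
hammer2_example_upgrade_pair(hammer2_mtx_t *mtx)
{
	int wasexclusive;

	wasexclusive = hammer2_mtx_upgrade(mtx);
	/* ... modifications requiring the exclusive lock ... */
	hammer2_mtx_downgrade(mtx, wasexclusive);
}
#endif
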
173 /*
174  * The xid tracks internal transactional updates.
175  *
176  * XXX fix-me, really needs to be 64-bits
177  */
178 typedef uint32_t hammer2_xid_t;
179
180 #define HAMMER2_XID_MIN 0x00000000U
181 #define HAMMER2_XID_MAX 0x7FFFFFFFU
182
183 /*
184  * The chain structure tracks a portion of the media topology from the
185  * root (volume) down.  Chains represent volumes, inodes, indirect blocks,
186  * data blocks, and freemap nodes and leafs.
187  *
188  * The chain structure utilizes a simple singly-homed topology and the
189  * chain's in-memory topology will move around as the chains do, due mainly
190  * to renames and indirect block creation.
191  *
192  * Block Table Updates
193  *
194  *      Block table updates for insertions and updates are delayed until the
195  *      flush.  This allows us to avoid having to modify the parent chain
196  *      all the way to the root.
197  *
198  *      Block table deletions are performed immediately (modifying the parent
199  *      in the process) because the flush code uses the chain structure to
200  *      track delayed updates and the chain will be (likely) gone or moved to
201  *      another location in the topology after a deletion.
202  *
203  *      A prior iteration of the code tried to keep the relationship intact
204  *      on deletes by doing a delete-duplicate operation on the chain, but
205  *      it added way too much complexity to the codebase.
206  *
207  * Flush Synchronization
208  *
209  *      The flush code must flush modified chains bottom-up.  Because chain
210  *      structures can shift around and are NOT topologically stable,
211  *      modified chains are independently indexed for the flush.  As the flush
212  *      runs it modifies (or further modifies) and updates the parents,
213  *      propagating the flush all the way to the volume root.
214  *
215  *      Modifying front-end operations can occur during a flush but will block
216  *      in two cases: (1) when the front-end tries to operate on the inode
217  *      currently in the midst of being flushed and (2) if the front-end
218  *      crosses an inode currently being flushed (such as during a rename).
219  *      So, for example, if you rename directory "x" to "a/b/c/d/e/f/g/x" and
220  *      the flusher is currently working on "a/b/c", the rename will block
221  *      temporarily in order to ensure that "x" exists in one place or the
222  *      other.
223  *
224  *      Meta-data statistics are updated by the flusher.  The front-end will
225  *      make estimates but meta-data must be fully synchronized only during a
226  *      flush in order to ensure that it remains correct across a crash.
227  *
228  *      Multiple flush synchronizations can theoretically be in-flight at the
229  *      same time but the implementation is not coded to handle the case and
230  *      currently serializes them.
231  *
232  * Snapshots:
233  *
234  *      Snapshots currently require the subdirectory tree being snapshotted
235  *      to be flushed.  The snapshot then creates a new super-root inode which
236  *      copies the flushed blockdata of the directory or file that was
237  *      snapshotted.
238  *
239  * RBTREE NOTES:
240  *
241  *      - Note that the radix tree runs in powers of 2 only so sub-trees
242  *        cannot straddle edges.
243  */
244 RB_HEAD(hammer2_chain_tree, hammer2_chain);
245 TAILQ_HEAD(h2_flush_list, hammer2_chain);
246 TAILQ_HEAD(h2_core_list, hammer2_chain);
247 TAILQ_HEAD(h2_iocb_list, hammer2_iocb);
248
249 #define CHAIN_CORE_DELETE_BMAP_ENTRIES  \
250         (HAMMER2_PBUFSIZE / sizeof(hammer2_blockref_t) / sizeof(uint32_t))
251
252 /*
253  * Core topology for chain (embedded in chain).  Protected by a spinlock.
254  */
255 struct hammer2_chain_core {
256         hammer2_spin_t  spin;
257         struct hammer2_chain_tree rbtree; /* sub-chains */
258         int             live_zero;      /* blockref array opt */
259         u_int           live_count;     /* live (not deleted) chains in tree */
260         u_int           chain_count;    /* live + deleted chains under core */
261         int             generation;     /* generation number (inserts only) */
262 };
263
264 typedef struct hammer2_chain_core hammer2_chain_core_t;
265
266 RB_HEAD(hammer2_io_tree, hammer2_io);
267
268 /*
269  * IOCB - IO callback (into chain, cluster, or manual request)
270  */
271 struct hammer2_iocb {
272         TAILQ_ENTRY(hammer2_iocb) entry;
273         void (*callback)(struct hammer2_iocb *iocb);
274         struct hammer2_io       *dio;
275         struct hammer2_cluster  *cluster;
276         struct hammer2_chain    *chain;
277         void                    *ptr;
278         off_t                   lbase;
279         int                     lsize;
280         uint32_t                flags;
281         int                     error;
282 };
283
284 typedef struct hammer2_iocb hammer2_iocb_t;
285
286 #define HAMMER2_IOCB_INTERLOCK  0x00000001
287 #define HAMMER2_IOCB_ONQ        0x00000002
288 #define HAMMER2_IOCB_DONE       0x00000004
289 #define HAMMER2_IOCB_INPROG     0x00000008
290 #define HAMMER2_IOCB_UNUSED10   0x00000010
291 #define HAMMER2_IOCB_QUICK      0x00010000
292 #define HAMMER2_IOCB_ZERO       0x00020000
293 #define HAMMER2_IOCB_READ       0x00040000
294 #define HAMMER2_IOCB_WAKEUP     0x00080000
295
296 /*
297  * DIO - Management structure wrapping system buffer cache.
298  *
299  *       Used for multiple purposes, including the concurrent management
300  *       of small requests by chains into larger DIOs.
301  */
302 struct hammer2_io {
303         RB_ENTRY(hammer2_io) rbnode;    /* indexed by device offset */
304         struct h2_iocb_list iocbq;
305         struct spinlock spin;
306         struct hammer2_dev *hmp;
307         struct buf      *bp;
308         off_t           pbase;
309         int             psize;
310         int             refs;
311         int             act;                    /* activity */
312 };
313
314 typedef struct hammer2_io hammer2_io_t;
315
316 #define HAMMER2_DIO_INPROG      0x80000000      /* bio in progress */
317 #define HAMMER2_DIO_GOOD        0x40000000      /* dio->bp is stable */
318 #define HAMMER2_DIO_WAITING     0x20000000      /* (old) */
319 #define HAMMER2_DIO_DIRTY       0x10000000      /* flush on last drop */
320
321 #define HAMMER2_DIO_MASK        0x0FFFFFFF
322
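/*
 * Illustrative sketch (not compiled): the DIO_* bits above appear to share
 * dio->refs with the reference count, with HAMMER2_DIO_MASK isolating the
 * count portion.  This decoding is an assumption made for illustration;
 * the authoritative state machine lives in the DIO management code.
 */
#if 0
static __inline
int
hammer2_example_dio_refcount(hammer2_io_t *dio)
{
	int refs = dio->refs;

	if (refs & HAMMER2_DIO_GOOD) {
		/* dio->bp is stable and may be referenced */
	}
	return (refs & HAMMER2_DIO_MASK);	/* reference count portion */
}
#endif
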
323 /*
324  * Primary chain structure keeps track of the topology in-memory.
325  */
326 struct hammer2_chain {
327         hammer2_mtx_t           lock;
328         hammer2_chain_core_t    core;
329         RB_ENTRY(hammer2_chain) rbnode;         /* live chain(s) */
330         hammer2_blockref_t      bref;
331         struct hammer2_chain    *parent;
332         struct hammer2_state    *state;         /* if active cache msg */
333         struct hammer2_dev      *hmp;
334         struct hammer2_pfs      *pmp;           /* A PFS or super-root (spmp) */
335
336         hammer2_xid_t   flush_xid;              /* flush sequencing */
337         hammer2_io_t    *dio;                   /* physical data buffer */
338         u_int           bytes;                  /* physical data size */
339         u_int           flags;
340         u_int           refs;
341         u_int           lockcnt;
342         int             error;                  /* on-lock data error state */
343
344         hammer2_media_data_t *data;             /* data pointer shortcut */
345         TAILQ_ENTRY(hammer2_chain) flush_node;  /* flush list */
346 };
347
348 typedef struct hammer2_chain hammer2_chain_t;
349
350 int hammer2_chain_cmp(hammer2_chain_t *chain1, hammer2_chain_t *chain2);
351 RB_PROTOTYPE(hammer2_chain_tree, hammer2_chain, rbnode, hammer2_chain_cmp);
352
353 /*
354  * Special notes on flags:
355  *
356  * INITIAL      - This flag allows a chain to be created and for storage to
357  *                be allocated without having to immediately instantiate the
358  *                related buffer.  The data is assumed to be all-zeros.  It
359  *                is primarily used for indirect blocks.
360  *
361  * MODIFIED     - The chain's media data has been modified.
362  *
363  * UPDATE       - Chain might not be modified but parent blocktable needs update
364  *
365  * FICTITIOUS   - Faked chain as a placeholder for an error condition.  This
366  *                chain is unsuitable for I/O.
367  *
368  * BMAPPED      - Indicates that the chain is present in the parent blockmap.
369  *
370  * BMAPUPD      - Indicates that the chain is present but needs to be updated
371  *                in the parent blockmap.
372  */
373 #define HAMMER2_CHAIN_MODIFIED          0x00000001      /* dirty chain data */
374 #define HAMMER2_CHAIN_ALLOCATED         0x00000002      /* kmalloc'd chain */
375 #define HAMMER2_CHAIN_DESTROY           0x00000004
376 #define HAMMER2_CHAIN_UNUSED0008        0x00000008
377 #define HAMMER2_CHAIN_DELETED           0x00000010      /* deleted chain */
378 #define HAMMER2_CHAIN_INITIAL           0x00000020      /* initial create */
379 #define HAMMER2_CHAIN_UPDATE            0x00000040      /* need parent update */
380 #define HAMMER2_CHAIN_DEFERRED          0x00000080      /* flush depth defer */
381 #define HAMMER2_CHAIN_IOFLUSH           0x00000100      /* bawrite on put */
382 #define HAMMER2_CHAIN_ONFLUSH           0x00000200      /* on a flush list */
383 #define HAMMER2_CHAIN_FICTITIOUS        0x00000400      /* unsuitable for I/O */
384 #define HAMMER2_CHAIN_VOLUMESYNC        0x00000800      /* needs volume sync */
385 #define HAMMER2_CHAIN_DELAYED           0x00001000      /* delayed flush */
386 #define HAMMER2_CHAIN_COUNTEDBREFS      0x00002000      /* block table stats */
387 #define HAMMER2_CHAIN_ONRBTREE          0x00004000      /* on parent RB tree */
388 #define HAMMER2_CHAIN_UNUSED00008000    0x00008000
389 #define HAMMER2_CHAIN_EMBEDDED          0x00010000      /* embedded data */
390 #define HAMMER2_CHAIN_RELEASE           0x00020000      /* don't keep around */
391 #define HAMMER2_CHAIN_BMAPPED           0x00040000      /* present in blkmap */
392 #define HAMMER2_CHAIN_BMAPUPD           0x00080000      /* +needs updating */
393 #define HAMMER2_CHAIN_IOINPROG          0x00100000      /* I/O interlock */
394 #define HAMMER2_CHAIN_IOSIGNAL          0x00200000      /* I/O interlock */
395 #define HAMMER2_CHAIN_PFSBOUNDARY       0x00400000      /* super->pfs inode */
396
397 #define HAMMER2_CHAIN_FLUSH_MASK        (HAMMER2_CHAIN_MODIFIED |       \
398                                          HAMMER2_CHAIN_UPDATE |         \
399                                          HAMMER2_CHAIN_ONFLUSH)
400
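/*
 * Illustrative sketch (not compiled): HAMMER2_CHAIN_FLUSH_MASK groups the
 * flags which make a chain a candidate for the flusher.  A hedged example
 * of the test only; the real code updates these flags atomically under the
 * appropriate locks.
 */
#if 0
static __inline
int
hammer2_example_chain_needs_flush(hammer2_chain_t *chain)
{
	return ((chain->flags & HAMMER2_CHAIN_FLUSH_MASK) != 0);
}
#endif
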
401 /*
402  * Hammer2 error codes, used by chain->error and cluster->error.  The error
403  * code is typically set when the chain is locked, or when the I/O occurs
404  * if no I/O was requested at lock time.  If set for a cluster it generally
405  * means that the cluster code could not find a valid copy to present.
406  *
407  * IO           - An I/O error occurred
408  * CHECK        - I/O succeeded but did not match the check code
409  * INCOMPLETE   - A cluster is not complete enough to use, or
410  *                a chain cannot be loaded because its parent has an error.
411  *
412  * NOTE: API allows callers to check zero/non-zero to determine if an error
413  *       condition exists.
414  *
415  * NOTE: Chain's data field is usually NULL on an IO error but not necessarily
416  *       NULL on other errors.  Check chain->error, not chain->data.
417  */
418 #define HAMMER2_ERROR_NONE              0
419 #define HAMMER2_ERROR_IO                1       /* device I/O error */
420 #define HAMMER2_ERROR_CHECK             2       /* check code mismatch */
421 #define HAMMER2_ERROR_INCOMPLETE        3       /* incomplete cluster */
422 #define HAMMER2_ERROR_DEPTH             4       /* temporary depth limit */
423
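/*
 * Illustrative sketch (not compiled): per the NOTEs above, callers only
 * need the zero/non-zero test and should check chain->error rather than
 * chain->data.  The helper below is hypothetical and uses
 * hammer2_chain_lock()/hammer2_chain_unlock() declared later in this file.
 */
#if 0
static __inline
const hammer2_media_data_t *
hammer2_example_lock_and_check(hammer2_chain_t *chain)
{
	hammer2_chain_lock(chain, HAMMER2_RESOLVE_ALWAYS);
	if (chain->error) {
		/* chain->data cannot be trusted and may be NULL */
		hammer2_chain_unlock(chain);
		return (NULL);
	}
	return (chain->data);	/* caller unlocks when done */
}
#endif
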
424 /*
425  * Flags passed to hammer2_chain_lookup() and hammer2_chain_next()
426  *
427  * NOTES:
428  *      NOLOCK      - Input and output chains are referenced only and not
429  *                    locked.  Output chain might be temporarily locked
430  *                    internally.
431  *
432  *      NODATA      - Asks that the chain->data not be resolved in order
433  *                    to avoid I/O.
434  *
435  *      NODIRECT    - Prevents a lookup of offset 0 in an inode from returning
436  *                    the inode itself if the inode is in DIRECTDATA mode
437  *                    (i.e. file is <= 512 bytes).  Used by the synchronization
438  *                    code to prevent confusion.
439  *
440  *      SHARED      - The input chain is expected to be locked shared,
441  *                    and the output chain is locked shared.
442  *
443  *      MATCHIND    - Allows an indirect block / freemap node to be returned
444  *                    when the passed key range matches the radix.  Remember
445  *                    that key_end is inclusive (e.g. {0x000,0xFFF},
446  *                    not {0x000,0x1000}).
447  *
448  *                    (Cannot be used for remote or cluster ops).
449  *
450  *      ALLNODES    - Allows NULL focus.
451  *
452  *      ALWAYS      - Always resolve the data.  If ALWAYS and NODATA are both
453  *                    missing, bulk file data is not resolved but inodes and
454  *                    other meta-data will be.
455  *
456  *      NOUNLOCK    - Used by hammer2_chain_next() to leave the lock on
457  *                    the input chain intact.  The chain is still dropped.
458  *                    This allows the caller to add a reference to the chain
459  *                    and retain it in a locked state (used by the
460  *                    XOP/feed/collect code).
461  */
462 #define HAMMER2_LOOKUP_NOLOCK           0x00000001      /* ref only */
463 #define HAMMER2_LOOKUP_NODATA           0x00000002      /* data left NULL */
464 #define HAMMER2_LOOKUP_NODIRECT         0x00000004      /* no offset=0 DD */
465 #define HAMMER2_LOOKUP_SHARED           0x00000100
466 #define HAMMER2_LOOKUP_MATCHIND         0x00000200      /* return all chains */
467 #define HAMMER2_LOOKUP_ALLNODES         0x00000400      /* allow NULL focus */
468 #define HAMMER2_LOOKUP_ALWAYS           0x00000800      /* resolve data */
469 #define HAMMER2_LOOKUP_NOUNLOCK         0x00001000      /* leave lock intact */
470
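/*
 * Illustrative sketch (not compiled): the iteration pattern these lookup
 * flags apply to, using hammer2_chain_lookup() and hammer2_chain_next()
 * as declared later in this file.  The key range, the flag choice, and the
 * processing body are hypothetical.
 */
#if 0
static __inline
void
hammer2_example_scan_range(hammer2_chain_t *parent,
			   hammer2_key_t key_beg, hammer2_key_t key_end)
{
	hammer2_chain_t *chain;
	hammer2_key_t key_next;
	int cache_index = -1;

	chain = hammer2_chain_lookup(&parent, &key_next, key_beg, key_end,
				     &cache_index, HAMMER2_LOOKUP_NODATA);
	while (chain) {
		/* ... inspect chain->bref here ... */
		chain = hammer2_chain_next(&parent, chain, &key_next,
					   key_next, key_end,
					   &cache_index,
					   HAMMER2_LOOKUP_NODATA);
	}
}
#endif
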
471 /*
472  * Flags passed to hammer2_chain_modify() and hammer2_chain_resize()
473  *
474  * NOTE: OPTDATA allows us to avoid instantiating buffers for INDIRECT
475  *       blocks in the INITIAL-create state.
476  */
477 #define HAMMER2_MODIFY_OPTDATA          0x00000002      /* data can be NULL */
478 #define HAMMER2_MODIFY_NO_MODIFY_TID    0x00000004
479 #define HAMMER2_MODIFY_UNUSED0008       0x00000008
480 #define HAMMER2_MODIFY_NOREALLOC        0x00000010
481
482 /*
483  * Flags passed to hammer2_chain_lock()
484  *
485  * NOTE: RDONLY is set to optimize cluster operations when *no* modifications
486  *       will be made to either the cluster being locked or any underlying
487  *       cluster.  It allows the cluster to lock and access data for a subset
488  *       of available nodes instead of all available nodes.
489  */
490 #define HAMMER2_RESOLVE_NEVER           1
491 #define HAMMER2_RESOLVE_MAYBE           2
492 #define HAMMER2_RESOLVE_ALWAYS          3
493 #define HAMMER2_RESOLVE_MASK            0x0F
494
495 #define HAMMER2_RESOLVE_SHARED          0x10    /* request shared lock */
496 #define HAMMER2_RESOLVE_UNUSED20        0x20
497 #define HAMMER2_RESOLVE_RDONLY          0x40    /* higher level op flag */
498
499 /*
500  * Flags passed to hammer2_chain_delete()
501  */
502 #define HAMMER2_DELETE_PERMANENT        0x0001
503 #define HAMMER2_DELETE_NOSTATS          0x0002
504
505 #define HAMMER2_INSERT_NOSTATS          0x0002
506 #define HAMMER2_INSERT_PFSROOT          0x0004
507
508 /*
509  * Flags passed to hammer2_chain_delete_duplicate()
510  */
511 #define HAMMER2_DELDUP_RECORE           0x0001
512
513 /*
514  * Cluster different types of storage together for allocations
515  */
516 #define HAMMER2_FREECACHE_INODE         0
517 #define HAMMER2_FREECACHE_INDIR         1
518 #define HAMMER2_FREECACHE_DATA          2
519 #define HAMMER2_FREECACHE_UNUSED3       3
520 #define HAMMER2_FREECACHE_TYPES         4
521
522 /*
523  * hammer2_freemap_alloc() block preference
524  */
525 #define HAMMER2_OFF_NOPREF              ((hammer2_off_t)-1)
526
527 /*
528  * BMAP read-ahead maximum parameters
529  */
530 #define HAMMER2_BMAP_COUNT              16      /* max bmap read-ahead */
531 #define HAMMER2_BMAP_BYTES              (HAMMER2_PBUFSIZE * HAMMER2_BMAP_COUNT)
532
533 /*
534  * hammer2_freemap_adjust()
535  */
536 #define HAMMER2_FREEMAP_DORECOVER       1
537 #define HAMMER2_FREEMAP_DOMAYFREE       2
538 #define HAMMER2_FREEMAP_DOREALFREE      3
539
540 /*
541  * HAMMER2 cluster - A set of chains representing the same entity.
542  *
543  * hammer2_cluster typically represents a temporary set of representative
544  * chains.  The one exception is that a hammer2_cluster is embedded in
545  * hammer2_inode.  This embedded cluster is ONLY used to track the
546  * representative chains and cannot be directly locked.
547  *
548  * A cluster is usually temporary (and thus per-thread) for locking purposes,
549  * allowing us to embed the asynchronous storage required for cluster
550  * operations in the cluster itself and adjust the state and status without
551  * having to worry too much about SMP issues.
552  *
553  * The exception is the cluster embedded in the hammer2_inode structure.
554  * This is used to cache the cluster state on an inode-by-inode basis.
555  * Individual hammer2_chain structures not incorporated into clusters might
556  * also stick around to cache miscellaneous elements.
557  *
558  * Because the cluster is a 'working copy' and is usually subject to cluster
559  * quorum rules, it is quite possible for us to end up with an insufficient
560  * number of live chains to execute an operation.  If an insufficient number
561  * of chains remain in a working copy, the operation may have to be
562  * downgraded, retried, or stalled until the requisite number of chains is
563  * available, or may even error out depending on the mount type.
564  *
565  * A cluster's focus is set when it is locked.  The focus can only be set
566  * to a chain still part of the synchronized set.
567  */
568 #define HAMMER2_MAXCLUSTER      8
569 #define HAMMER2_XOPFIFO         16
570 #define HAMMER2_XOPFIFO_MASK    (HAMMER2_XOPFIFO - 1)
571 #define HAMMER2_XOPGROUPS       16
572 #define HAMMER2_XOPGROUPS_MASK  (HAMMER2_XOPGROUPS - 1)
573 #define HAMMER2_XOPMASK_VOP     0x80000000U
574
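/*
 * Illustrative sketch (not compiled): HAMMER2_XOPGROUPS_MASK can be used
 * to spread operations across the xop_groups[] array embedded in
 * hammer2_pfs.  The simple round-robin shown here is an assumption made
 * for illustration only; the real dispatch policy lives in the XOP code.
 */
#if 0
static __inline
hammer2_xop_group_t *
hammer2_example_pick_xop_group(hammer2_pfs_t *pmp)
{
	int idx = pmp->xop_iterator++ & HAMMER2_XOPGROUPS_MASK;

	return (&pmp->xop_groups[idx]);
}
#endif
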
575 struct hammer2_cluster_item {
576 #if 0
577         hammer2_mtx_link_t      async_link;
578 #endif
579         hammer2_chain_t         *chain;
580 #if 0
581         struct hammer2_cluster  *cluster;       /* link back to cluster */
582 #endif
583         int                     cache_index;
584         uint32_t                flags;
585 };
586
587 typedef struct hammer2_cluster_item hammer2_cluster_item_t;
588
589 /*
590  * INVALID      - Invalid for focus, i.e. not part of synchronized set.
591  *                Once set, this bit is sticky across operations.
592  *
593  * FEMOD        - Indicates that front-end modifying operations can
594  *                mess with this entry and that a MODSYNC copy will
595  *                also affect it.
596  */
597 #define HAMMER2_CITEM_INVALID   0x00000001
598 #define HAMMER2_CITEM_FEMOD     0x00000002
599 #define HAMMER2_CITEM_NULL      0x00000004
600
601 struct hammer2_cluster {
602         int                     refs;           /* track for deallocation */
603         int                     ddflag;
604         struct hammer2_pfs      *pmp;
605         uint32_t                flags;
606         int                     nchains;
607         int                     error;          /* error code valid on lock */
608         int                     focus_index;
609         hammer2_iocb_t          iocb;
610         hammer2_chain_t         *focus;         /* current focus (or mod) */
611         hammer2_cluster_item_t  array[HAMMER2_MAXCLUSTER];
612 };
613
614 typedef struct hammer2_cluster  hammer2_cluster_t;
615
616 /*
617  * WRHARD       - Hard mounts can write fully synchronized
618  * RDHARD       - Hard mounts can read fully synchronized
619  * UNHARD       - Unsynchronized masters present
620  * NOHARD       - No masters visible
621  * WRSOFT       - Soft mounts can write to at least the SOFT_MASTER
622  * RDSOFT       - Soft mounts can read from at least a SOFT_SLAVE
623  * UNSOFT       - Unsynchronized slaves present
624  * NOSOFT       - No slaves visible
625  * RDSLAVE      - slaves are accessible (possibly unsynchronized or remote).
626  * MSYNCED      - All masters are fully synchronized
627  * SSYNCED      - All known local slaves are fully synchronized to masters
628  *
629  * All available masters are always incorporated.  All PFSs belonging to a
630  * cluster (master, slave, copy, whatever) always try to synchronize the
631  * total number of known masters in the PFS's root inode.
632  *
633  * A cluster might have access to many slaves, copies, or caches, but we
634  * have a limited number of cluster slots.  Any such elements which are
635  * directly mounted from block device(s) will always be incorporated.   Note
636  * that SSYNCED only applies to such elements which are directly mounted,
637  * not to any remote slaves, copies, or caches that could be available.  These
638  * bits are used to monitor and drive our synchronization threads.
639  *
640  * When asking the question 'is any data accessible at all', then a simple
641  * test against (RDHARD|RDSOFT|RDSLAVE) gives you the answer.  If any of
642  * these bits are set the object can be read with certain caveats:
643  * RDHARD - no caveats.  RDSOFT - authoritative but might not be synchronized.
644  * RDSLAVE - not authoritative; has some data but it could be old or
645  * incomplete.
646  *
647  * When both soft and hard mounts are available, data will be read and written
648  * via the soft mount only.  But all might be in the cluster because
649  * background synchronization threads still need to do their work.
650  */
651 #define HAMMER2_CLUSTER_INODE   0x00000001      /* embedded in inode struct */
652 #define HAMMER2_CLUSTER_UNUSED2 0x00000002
653 #define HAMMER2_CLUSTER_LOCKED  0x00000004      /* cluster lks not recursive */
654 #define HAMMER2_CLUSTER_WRHARD  0x00000100      /* hard-mount can write */
655 #define HAMMER2_CLUSTER_RDHARD  0x00000200      /* hard-mount can read */
656 #define HAMMER2_CLUSTER_UNHARD  0x00000400      /* unsynchronized masters */
657 #define HAMMER2_CLUSTER_NOHARD  0x00000800      /* no masters visible */
658 #define HAMMER2_CLUSTER_WRSOFT  0x00001000      /* soft-mount can write */
659 #define HAMMER2_CLUSTER_RDSOFT  0x00002000      /* soft-mount can read */
660 #define HAMMER2_CLUSTER_UNSOFT  0x00004000      /* unsynchronized slaves */
661 #define HAMMER2_CLUSTER_NOSOFT  0x00008000      /* no slaves visible */
662 #define HAMMER2_CLUSTER_MSYNCED 0x00010000      /* all masters synchronized */
663 #define HAMMER2_CLUSTER_SSYNCED 0x00020000      /* known slaves synchronized */
664
665 #define HAMMER2_CLUSTER_ANYDATA ( HAMMER2_CLUSTER_RDHARD |      \
666                                   HAMMER2_CLUSTER_RDSOFT |      \
667                                   HAMMER2_CLUSTER_RDSLAVE)
668
669 #define HAMMER2_CLUSTER_RDOK    ( HAMMER2_CLUSTER_RDHARD |      \
670                                   HAMMER2_CLUSTER_RDSOFT)
671
672 #define HAMMER2_CLUSTER_WROK    ( HAMMER2_CLUSTER_WRHARD |      \
673                                   HAMMER2_CLUSTER_WRSOFT)
674
675 #define HAMMER2_CLUSTER_ZFLAGS  ( HAMMER2_CLUSTER_WRHARD |      \
676                                   HAMMER2_CLUSTER_RDHARD |      \
677                                   HAMMER2_CLUSTER_WRSOFT |      \
678                                   HAMMER2_CLUSTER_RDSOFT |      \
679                                   HAMMER2_CLUSTER_MSYNCED |     \
680                                   HAMMER2_CLUSTER_SSYNCED)
681
682 /*
683  * Helper functions (cluster must be locked for flags to be valid).
684  */
685 static __inline
686 int
687 hammer2_cluster_rdok(hammer2_cluster_t *cluster)
688 {
689         return (cluster->flags & HAMMER2_CLUSTER_RDOK);
690 }
691
692 static __inline
693 int
694 hammer2_cluster_wrok(hammer2_cluster_t *cluster)
695 {
696         return (cluster->flags & HAMMER2_CLUSTER_WROK);
697 }
698
699 RB_HEAD(hammer2_inode_tree, hammer2_inode);
700
701 /*
702  * A hammer2 inode.
703  *
704  * NOTE: The inode-embedded cluster is never used directly for I/O (since
705  *       it may be shared).  Instead it will be replicated-in and synchronized
706  *       back out if changed.
707  */
708 struct hammer2_inode {
709         RB_ENTRY(hammer2_inode) rbnode;         /* inumber lookup (HL) */
710         hammer2_mtx_t           lock;           /* inode lock */
711         struct hammer2_pfs      *pmp;           /* PFS mount */
712         struct hammer2_inode    *pip;           /* parent inode */
713         struct vnode            *vp;
714         struct spinlock         cluster_spin;   /* update cluster */
715         hammer2_cluster_t       cluster;
716         struct lockf            advlock;
717         u_int                   flags;
718         u_int                   refs;           /* +vpref, +flushref */
719         uint8_t                 comp_heuristic;
720         hammer2_inode_meta_t    meta;           /* copy of meta-data */
721         hammer2_blockref_t      bref;           /* copy of bref statistics */
722         hammer2_off_t           osize;
723 };
724
725 typedef struct hammer2_inode hammer2_inode_t;
726
727 /*
728  * MODIFIED     - Inode is in a modified state, ip->meta may have changes.
729  * RESIZED      - Inode truncated (any) or inode extended beyond
730  *                EMBEDDED_BYTES.
731  */
732 #define HAMMER2_INODE_MODIFIED          0x0001
733 #define HAMMER2_INODE_SROOT             0x0002  /* kmalloc special case */
734 #define HAMMER2_INODE_RENAME_INPROG     0x0004
735 #define HAMMER2_INODE_ONRBTREE          0x0008
736 #define HAMMER2_INODE_RESIZED           0x0010  /* requires inode_fsync */
737 #define HAMMER2_INODE_UNUSED0020        0x0020
738 #define HAMMER2_INODE_ISUNLINKED        0x0040
739 #define HAMMER2_INODE_METAGOOD          0x0080  /* inode meta-data good */
740
741 int hammer2_inode_cmp(hammer2_inode_t *ip1, hammer2_inode_t *ip2);
742 RB_PROTOTYPE2(hammer2_inode_tree, hammer2_inode, rbnode, hammer2_inode_cmp,
743                 hammer2_tid_t);
744
745 /*
746  * inode-unlink side-structure
747  */
748 struct hammer2_inode_unlink {
749         TAILQ_ENTRY(hammer2_inode_unlink) entry;
750         hammer2_inode_t *ip;
751 };
752 TAILQ_HEAD(h2_unlk_list, hammer2_inode_unlink);
753
754 typedef struct hammer2_inode_unlink hammer2_inode_unlink_t;
755
756 /*
757  * A hammer2 transaction and flush sequencing structure.
758  *
759  * This global structure is tied into hammer2_dev and is used
760  * to sequence modifying operations and flushes.  These operations
761  * run on whole cluster PFSs, not individual nodes (at this level),
762  * so we do not record mirror_tid here.
763  */
764 struct hammer2_trans {
765         TAILQ_ENTRY(hammer2_trans) entry;
766         struct hammer2_pfs      *pmp;
767         hammer2_xid_t           sync_xid;       /* transaction sequencer */
768         hammer2_tid_t           inode_tid;      /* inode number assignment */
769         hammer2_tid_t           modify_tid;     /* modify transaction id */
770         thread_t                td;             /* pointer */
771         int                     flags;
772         int                     blocked;
773         uint8_t                 inodes_created;
774         uint8_t                 dummy[7];
775 };
776
777 typedef struct hammer2_trans hammer2_trans_t;
778
779 #define HAMMER2_TRANS_ISFLUSH           0x0001  /* formal flush */
780 #define HAMMER2_TRANS_CONCURRENT        0x0002  /* concurrent w/flush */
781 #define HAMMER2_TRANS_BUFCACHE          0x0004  /* from bioq strategy write */
782 #define HAMMER2_TRANS_NEWINODE          0x0008  /* caller allocating inode */
783 #define HAMMER2_TRANS_KEEPMODIFY        0x0010  /* do not change bref.modify */
784 #define HAMMER2_TRANS_PREFLUSH          0x0020  /* preflush state */
785
786 #define HAMMER2_FREEMAP_HEUR_NRADIX     4       /* pwr 2 PBUFRADIX-MINIORADIX */
787 #define HAMMER2_FREEMAP_HEUR_TYPES      8
788 #define HAMMER2_FREEMAP_HEUR            (HAMMER2_FREEMAP_HEUR_NRADIX * \
789                                          HAMMER2_FREEMAP_HEUR_TYPES)
790
791 /*
792  * Transaction Rendezvous
793  */
794 TAILQ_HEAD(hammer2_trans_queue, hammer2_trans);
795
796 struct hammer2_trans_manage {
797         hammer2_xid_t           flush_xid;      /* last flush transaction */
798         hammer2_xid_t           alloc_xid;
799         struct lock             translk;        /* lockmgr lock */
800         struct hammer2_trans_queue transq;      /* modifying transactions */
801         int                     flushcnt;       /* track flush trans */
802 };
803
804 typedef struct hammer2_trans_manage hammer2_trans_manage_t;
805
806 /*
807  * Hammer2 support thread element.
808  *
809  * Potentially many support threads can hang off of hammer2, primarily
810  * off the hammer2_pfs structure.  Typically:
811  *
812  * td x Nodes                   A synchronization thread for each node.
813  * td x Nodes x workers         Worker threads for frontend operations.
814  * td x 1                       Bioq thread for logical buffer writes.
815  *
816  * In addition, the synchronization thread(s) associated with the
817  * super-root PFS (spmp) for a node is responsible for automatic bulkfree
818  * and dedup scans.
819  */
820 struct hammer2_thread {
821         struct hammer2_pfs *pmp;
822         thread_t        td;
823         uint32_t        flags;
824         int             depth;
825         int             clindex;        /* cluster element index */
826         int             repidx;
827         hammer2_trans_t trans;
828         struct lock     lk;             /* thread control lock */
829         hammer2_xop_list_t xopq;
830 };
831
832 typedef struct hammer2_thread hammer2_thread_t;
833
834 #define HAMMER2_THREAD_UNMOUNTING       0x0001  /* unmount request */
835 #define HAMMER2_THREAD_DEV              0x0002  /* related to dev, not pfs */
836 #define HAMMER2_THREAD_UNUSED04         0x0004
837 #define HAMMER2_THREAD_REMASTER         0x0008  /* remaster request */
838 #define HAMMER2_THREAD_STOP             0x0010  /* exit request */
839 #define HAMMER2_THREAD_FREEZE           0x0020  /* force idle */
840 #define HAMMER2_THREAD_FROZEN           0x0040  /* restart */
841
842
843 /*
844  * hammer2_xop - container for VOP/XOP operation (allocated, not on stack).
845  *
846  * This structure is used to distribute a VOP operation across multiple
847  * nodes.  It provides a rendezvous for concurrent node execution and
848  * can be detached from the frontend operation to allow the frontend to
849  * return early.
850  */
851 typedef void (*hammer2_xop_func_t)(union hammer2_xop *xop, int clidx);
852
853 typedef struct hammer2_xop_fifo {
854         TAILQ_ENTRY(hammer2_xop_head) entry;
855         hammer2_chain_t         *array[HAMMER2_XOPFIFO];
856         int                     errors[HAMMER2_XOPFIFO];
857         int                     ri;
858         int                     wi;
859         int                     unused03;
860 } hammer2_xop_fifo_t;
861
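/*
 * Illustrative sketch (not compiled): ri/wi are free-running indices and
 * HAMMER2_XOPFIFO_MASK (defined earlier) wraps them into array[].  A
 * hedged sketch of the producer side only; the real feed/collect code also
 * handles locking, stalls, and error propagation.
 */
#if 0
static __inline
int
hammer2_example_fifo_push(hammer2_xop_fifo_t *fifo, hammer2_chain_t *chain,
			  int error)
{
	if (fifo->wi - fifo->ri >= HAMMER2_XOPFIFO)
		return (ENOSPC);	/* full, caller must wait or fail */
	fifo->array[fifo->wi & HAMMER2_XOPFIFO_MASK] = chain;
	fifo->errors[fifo->wi & HAMMER2_XOPFIFO_MASK] = error;
	++fifo->wi;
	return (0);
}
#endif
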
862 struct hammer2_xop_head {
863         hammer2_xop_func_t      func;
864         struct hammer2_inode    *ip;
865         struct hammer2_xop_group *xgrp;
866         uint32_t                check_counter;
867         uint32_t                run_mask;
868         uint32_t                chk_mask;
869         int                     state;
870         int                     error;
871         hammer2_key_t           lkey;
872         hammer2_key_t           nkey;
873         hammer2_xop_fifo_t      collect[HAMMER2_MAXCLUSTER];
874         hammer2_cluster_t       cluster;        /* help collections */
875 };
876
877 typedef struct hammer2_xop_head hammer2_xop_head_t;
878
879 struct hammer2_xop_readdir {
880         hammer2_xop_head_t      head;
881 };
882
883 typedef struct hammer2_xop_readdir hammer2_xop_readdir_t;
884
885 union hammer2_xop {
886         hammer2_xop_head_t      head;
887         hammer2_xop_readdir_t   xop_readdir;
888 };
889
890 typedef union hammer2_xop hammer2_xop_t;
891
892 /*
893  * hammer2_xop_group - Manage XOP support threads.
894  */
895 struct hammer2_xop_group {
896         hammer2_thread_t        thrs[HAMMER2_MAXCLUSTER];
897         hammer2_mtx_t           mtx;
898 };
899
900 typedef struct hammer2_xop_group hammer2_xop_group_t;
901
902 /*
903  * Global (per partition) management structure, represents a hard block
904  * device.  Typically referenced by hammer2_chain structures when applicable.
905  * Typically not used for network-managed elements.
906  *
907  * Note that a single hammer2_dev can be indirectly tied to multiple system
908  * mount points.  There is no direct relationship.  System mounts are
909  * per-cluster-id, not per-block-device, and a single hard mount might contain
910  * many PFSs and those PFSs might combine together in various ways to form
911  * the set of available clusters.
912  */
913 struct hammer2_dev {
914         struct vnode    *devvp;         /* device vnode */
915         int             ronly;          /* read-only mount */
916         int             mount_count;    /* number of actively mounted PFSs */
917         TAILQ_ENTRY(hammer2_dev) mntentry; /* hammer2_mntlist */
918
919         struct malloc_type *mchain;
920         int             nipstacks;
921         int             maxipstacks;
922         kdmsg_iocom_t   iocom;          /* volume-level dmsg interface */
923         struct spinlock io_spin;        /* iotree access */
924         struct hammer2_io_tree iotree;
925         int             iofree_count;
926         hammer2_chain_t vchain;         /* anchor chain (topology) */
927         hammer2_chain_t fchain;         /* anchor chain (freemap) */
928         struct spinlock list_spin;
929         struct h2_flush_list    flushq; /* flush seeds */
930         struct hammer2_pfs *spmp;       /* super-root pmp for transactions */
931         struct lock     vollk;          /* lockmgr lock */
932         hammer2_off_t   heur_freemap[HAMMER2_FREEMAP_HEUR];
933         int             volhdrno;       /* last volhdrno written */
934         char            devrepname[64]; /* for kprintf */
935         hammer2_volume_data_t voldata;
936         hammer2_volume_data_t volsync;  /* synchronized voldata */
937 };
938
939 typedef struct hammer2_dev hammer2_dev_t;
940
941 /*
942  * Helper functions (cluster must be locked for flags to be valid).
943  */
944 static __inline
945 int
946 hammer2_chain_rdok(hammer2_chain_t *chain)
947 {
948         return (chain->error == 0);
949 }
950
951 static __inline
952 int
953 hammer2_chain_wrok(hammer2_chain_t *chain)
954 {
955         return (chain->error == 0 && chain->hmp->ronly == 0);
956 }
957
958 /*
959  * Per-cluster management structure.  This structure will be tied to a
960  * system mount point if the system is mounting the PFS, but is also used
961  * to manage clusters encountered during the super-root scan or received
962  * via LNK_SPANs that might not be mounted.
963  *
964  * This structure is also used to represent the super-root that hangs off
965  * of a hard mount point.  The super-root is not really a cluster element.
966  * In this case the spmp_hmp field will be non-NULL.  It's just easier to do
967  * this than to special case super-root manipulation in the hammer2_chain*
968  * code as being only hammer2_dev-related.
969  *
970  * pfs_mode and pfs_nmasters are rollup fields which critically describe
971  * how elements of the cluster act on the cluster.  pfs_mode is only applicable
972  * when a PFS is mounted by the system.  pfs_nmasters is our best guess as to
973  * how many masters have been configured for a cluster and is always
974  * applicable.  pfs_types[] is an array with 1:1 correspondence to the
975  * iroot cluster and describes the PFS types of the nodes making up the
976  * cluster.
977  *
978  * WARNING! Portions of this structure have deferred initialization.  In
979  *          particular, if not mounted there will be no ihidden or wthread.
980  *          Unmounted network PFSs will also be missing iroot, and numerous
981  *          other fields will not be initialized prior to mount.
982  *
983  *          Synchronization threads are chain-specific and only applicable
984  *          to local hard PFS entries.  A hammer2_pfs structure may contain
985  *          more than one when multiple hard PFSs are present on the local
986  *          machine which require synchronization monitoring.  Most PFSs
987  *          (such as snapshots) are 1xMASTER PFSs which do not need a
988  *          synchronization thread.
989  *
990  * WARNING! The chains making up pfs->iroot's cluster are accounted for in
991  *          hammer2_dev->mount_count when the pfs is associated with a mount
992  *          point.
993  */
994 struct hammer2_pfs {
995         struct mount            *mp;
996         TAILQ_ENTRY(hammer2_pfs) mntentry;      /* hammer2_pfslist */
997         uuid_t                  pfs_clid;
998         hammer2_dev_t           *spmp_hmp;      /* only if super-root pmp */
999         hammer2_inode_t         *iroot;         /* PFS root inode */
1000         hammer2_inode_t         *ihidden;       /* PFS hidden directory */
1001         uint8_t                 pfs_types[HAMMER2_MAXCLUSTER];
1002         char                    *pfs_names[HAMMER2_MAXCLUSTER];
1003         hammer2_trans_manage_t  tmanage;        /* transaction management */
1004         struct lock             lock;           /* PFS lock for certain ops */
1005         struct netexport        export;         /* nfs export */
1006         int                     ronly;          /* read-only mount */
1007         struct malloc_type      *minode;
1008         struct malloc_type      *mmsg;
1009         struct spinlock         inum_spin;      /* inumber lookup */
1010         struct hammer2_inode_tree inum_tree;    /* (not applicable to spmp) */
1011         hammer2_tid_t           modify_tid;     /* modify transaction id */
1012         hammer2_tid_t           inode_tid;      /* inode allocator */
1013         uint8_t                 pfs_nmasters;   /* total masters */
1014         uint8_t                 pfs_mode;       /* operating mode PFSMODE */
1015         uint8_t                 unused01;
1016         uint8_t                 unused02;
1017         int                     xop_iterator;
1018         long                    inmem_inodes;
1019         uint32_t                inmem_dirty_chains;
1020         int                     count_lwinprog; /* logical write in prog */
1021         struct spinlock         list_spin;
1022         struct h2_unlk_list     unlinkq;        /* last-close unlink */
1023         hammer2_thread_t        sync_thrs[HAMMER2_MAXCLUSTER];
1024         thread_t                wthread_td;     /* write thread td */
1025         struct bio_queue_head   wthread_bioq;   /* logical buffer bioq */
1026         hammer2_mtx_t           wthread_mtx;    /* interlock */
1027         int                     wthread_destroy;/* termination sequencing */
1028         uint32_t                flags;          /* cached cluster flags */
1029         hammer2_xop_group_t     xop_groups[HAMMER2_XOPGROUPS];
1030 };
1031
1032 typedef struct hammer2_pfs hammer2_pfs_t;
1033
1034 #define HAMMER2_DIRTYCHAIN_WAITING      0x80000000
1035 #define HAMMER2_DIRTYCHAIN_MASK         0x7FFFFFFF
1036
1037 #define HAMMER2_LWINPROG_WAITING        0x80000000
1038 #define HAMMER2_LWINPROG_MASK           0x7FFFFFFF
1039
1040 /*
1041  * hammer2_cluster_check
1042  */
1043 #define HAMMER2_CHECK_NULL      0x00000001
1044
1045 /*
1046  * Bulkscan
1047  */
1048 #define HAMMER2_BULK_ABORT      0x00000001
1049
1050 /*
1051  * Misc
1052  */
1053 #if defined(_KERNEL)
1054
1055 MALLOC_DECLARE(M_HAMMER2);
1056
1057 #define VTOI(vp)        ((hammer2_inode_t *)(vp)->v_data)
1058 #define ITOV(ip)        ((ip)->vp)
1059
1060 /*
1061  * Currently locked chains retain the locked buffer cache buffer for
1062  * indirect blocks, and indirect blocks can be one of two sizes.  The
1063  * device buffer has to match the case to avoid deadlocking recursive
1064  * chains that might otherwise try to access different offsets within
1065  * the same device buffer.
1066  */
1067 static __inline
1068 int
1069 hammer2_devblkradix(int radix)
1070 {
1071 #if 0
1072         if (radix <= HAMMER2_LBUFRADIX) {
1073                 return (HAMMER2_LBUFRADIX);
1074         } else {
1075                 return (HAMMER2_PBUFRADIX);
1076         }
1077 #endif
1078         return (HAMMER2_PBUFRADIX);
1079 }
1080
1081 /*
1082  * XXX almost time to remove this.  DIO uses PBUFSIZE exclusively now.
1083  */
1084 static __inline
1085 size_t
1086 hammer2_devblksize(size_t bytes)
1087 {
1088 #if 0
1089         if (bytes <= HAMMER2_LBUFSIZE) {
1090                 return(HAMMER2_LBUFSIZE);
1091         } else {
1092                 KKASSERT(bytes <= HAMMER2_PBUFSIZE &&
1093                          (bytes ^ (bytes - 1)) == ((bytes << 1) - 1));
1094                 return (HAMMER2_PBUFSIZE);
1095         }
1096 #endif
1097         return (HAMMER2_PBUFSIZE);
1098 }
1099
1100
1101 static __inline
1102 hammer2_pfs_t *
1103 MPTOPMP(struct mount *mp)
1104 {
1105         return ((hammer2_pfs_t *)mp->mnt_data);
1106 }
1107
1108 #define LOCKSTART       int __nlocks = curthread->td_locks
1109 #define LOCKENTER       (++curthread->td_locks)
1110 #define LOCKEXIT        (--curthread->td_locks)
1111 #define LOCKSTOP        KKASSERT(curthread->td_locks == __nlocks)
1112
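/*
 * Illustrative sketch (not compiled): LOCKSTART/LOCKSTOP bracket a function
 * to assert that it does not leak tracked locks; LOCKENTER/LOCKEXIT bump
 * the tracked count up or down by hand when that is intended.  The function
 * below is hypothetical.
 */
#if 0
static
int
hammer2_example_vop(void)
{
	LOCKSTART;
	/* ... acquire and release locks in a balanced fashion ... */
	LOCKSTOP;
	return (0);
}
#endif
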
1113 extern struct vop_ops hammer2_vnode_vops;
1114 extern struct vop_ops hammer2_spec_vops;
1115 extern struct vop_ops hammer2_fifo_vops;
1116
1117 extern int hammer2_debug;
1118 extern int hammer2_cluster_enable;
1119 extern int hammer2_hardlink_enable;
1120 extern int hammer2_flush_pipe;
1121 extern int hammer2_synchronous_flush;
1122 extern int hammer2_dio_count;
1123 extern long hammer2_limit_dirty_chains;
1124 extern long hammer2_iod_file_read;
1125 extern long hammer2_iod_meta_read;
1126 extern long hammer2_iod_indr_read;
1127 extern long hammer2_iod_fmap_read;
1128 extern long hammer2_iod_volu_read;
1129 extern long hammer2_iod_file_write;
1130 extern long hammer2_iod_meta_write;
1131 extern long hammer2_iod_indr_write;
1132 extern long hammer2_iod_fmap_write;
1133 extern long hammer2_iod_volu_write;
1134 extern long hammer2_ioa_file_read;
1135 extern long hammer2_ioa_meta_read;
1136 extern long hammer2_ioa_indr_read;
1137 extern long hammer2_ioa_fmap_read;
1138 extern long hammer2_ioa_volu_read;
1139 extern long hammer2_ioa_file_write;
1140 extern long hammer2_ioa_meta_write;
1141 extern long hammer2_ioa_indr_write;
1142 extern long hammer2_ioa_fmap_write;
1143 extern long hammer2_ioa_volu_write;
1144
1145 extern struct objcache *cache_buffer_read;
1146 extern struct objcache *cache_buffer_write;
1147 extern struct objcache *cache_xops;
1148
1149 extern int destroy;
1150 extern int write_thread_wakeup;
1151
1152 /*
1153  * hammer2_subr.c
1154  */
1155 #define hammer2_icrc32(buf, size)       iscsi_crc32((buf), (size))
1156 #define hammer2_icrc32c(buf, size, crc) iscsi_crc32_ext((buf), (size), (crc))
1157
1158 int hammer2_signal_check(time_t *timep);
1159 const char *hammer2_error_str(int error);
1160
1161 void hammer2_inode_lock(hammer2_inode_t *ip, int how);
1162 void hammer2_inode_unlock(hammer2_inode_t *ip, hammer2_cluster_t *cluster);
1163 hammer2_cluster_t *hammer2_inode_cluster(hammer2_inode_t *ip, int how);
1164 hammer2_chain_t *hammer2_inode_chain(hammer2_inode_t *ip, int clindex, int how);
1165 hammer2_mtx_state_t hammer2_inode_lock_temp_release(hammer2_inode_t *ip);
1166 void hammer2_inode_lock_temp_restore(hammer2_inode_t *ip,
1167                         hammer2_mtx_state_t ostate);
1168 int hammer2_inode_lock_upgrade(hammer2_inode_t *ip);
1169 void hammer2_inode_lock_downgrade(hammer2_inode_t *ip, int);
1170
1171 void hammer2_dev_exlock(hammer2_dev_t *hmp);
1172 void hammer2_dev_shlock(hammer2_dev_t *hmp);
1173 void hammer2_dev_unlock(hammer2_dev_t *hmp);
1174
1175 int hammer2_get_dtype(const hammer2_inode_data_t *ipdata);
1176 int hammer2_get_vtype(uint8_t type);
1177 u_int8_t hammer2_get_obj_type(enum vtype vtype);
1178 void hammer2_time_to_timespec(u_int64_t xtime, struct timespec *ts);
1179 u_int64_t hammer2_timespec_to_time(const struct timespec *ts);
1180 u_int32_t hammer2_to_unix_xid(const uuid_t *uuid);
1181 void hammer2_guid_to_uuid(uuid_t *uuid, u_int32_t guid);
1182 hammer2_xid_t hammer2_trans_newxid(hammer2_pfs_t *pmp);
1183 void hammer2_trans_manage_init(hammer2_trans_manage_t *tman);
1184
1185 hammer2_key_t hammer2_dirhash(const unsigned char *name, size_t len);
1186 int hammer2_getradix(size_t bytes);
1187
1188 int hammer2_calc_logical(hammer2_inode_t *ip, hammer2_off_t uoff,
1189                         hammer2_key_t *lbasep, hammer2_key_t *leofp);
1190 int hammer2_calc_physical(hammer2_inode_t *ip, hammer2_key_t lbase);
1191 void hammer2_update_time(uint64_t *timep);
1192 void hammer2_adjreadcounter(hammer2_blockref_t *bref, size_t bytes);
1193
1194 /*
1195  * hammer2_inode.c
1196  */
1197 struct vnode *hammer2_igetv(hammer2_inode_t *ip, hammer2_cluster_t *cparent,
1198                         int *errorp);
1199 hammer2_inode_t *hammer2_inode_lookup(hammer2_pfs_t *pmp,
1200                         hammer2_tid_t inum);
1201 hammer2_inode_t *hammer2_inode_get(hammer2_pfs_t *pmp,
1202                         hammer2_inode_t *dip, hammer2_cluster_t *cluster);
1203 void hammer2_inode_free(hammer2_inode_t *ip);
1204 void hammer2_inode_ref(hammer2_inode_t *ip);
1205 void hammer2_inode_drop(hammer2_inode_t *ip);
1206 void hammer2_inode_repoint(hammer2_inode_t *ip, hammer2_inode_t *pip,
1207                         hammer2_cluster_t *cluster);
1208 void hammer2_inode_repoint_one(hammer2_inode_t *ip, hammer2_cluster_t *cluster,
1209                         int idx);
1210 void hammer2_inode_modify(hammer2_trans_t *trans, hammer2_inode_t *ip);
1211 void hammer2_run_unlinkq(hammer2_trans_t *trans, hammer2_pfs_t *pmp);
1212
1213 hammer2_inode_t *hammer2_inode_create(hammer2_trans_t *trans,
1214                         hammer2_inode_t *dip,
1215                         struct vattr *vap, struct ucred *cred,
1216                         const uint8_t *name, size_t name_len,
1217                         hammer2_cluster_t **clusterp,
1218                         int flags, int *errorp);
1219 int hammer2_inode_connect(hammer2_trans_t *trans,
1220                         hammer2_inode_t *ip, hammer2_cluster_t **clusterp,
1221                         int hlink,
1222                         hammer2_inode_t *dip, hammer2_cluster_t *dcluster,
1223                         const uint8_t *name, size_t name_len,
1224                         hammer2_key_t key);
1225 hammer2_inode_t *hammer2_inode_common_parent(hammer2_inode_t *fdip,
1226                         hammer2_inode_t *tdip);
1227 void hammer2_inode_fsync(hammer2_trans_t *trans, hammer2_inode_t *ip,
1228                         hammer2_cluster_t *cparent);
1229 int hammer2_unlink_file(hammer2_trans_t *trans,
1230                         hammer2_inode_t *dip, hammer2_inode_t *ip,
1231                         const uint8_t *name, size_t name_len, int isdir,
1232                         int *hlinkp, struct nchandle *nch, int nlinks);
1233 int hammer2_hardlink_consolidate(hammer2_trans_t *trans,
1234                         hammer2_inode_t *ip, hammer2_cluster_t **clusterp,
1235                         hammer2_inode_t *cdip, hammer2_cluster_t *cdcluster,
1236                         int nlinks);
1237 int hammer2_hardlink_deconsolidate(hammer2_trans_t *trans, hammer2_inode_t *dip,
1238                         hammer2_chain_t **chainp, hammer2_chain_t **ochainp);
1239 int hammer2_hardlink_find(hammer2_inode_t *dip, hammer2_cluster_t **cparentp,
1240                         hammer2_cluster_t **clusterp);
1241 int hammer2_parent_find(hammer2_cluster_t **cparentp,
1242                         hammer2_cluster_t *cluster);
1243 void hammer2_inode_install_hidden(hammer2_pfs_t *pmp);
1244
1245 /*
1246  * hammer2_chain.c
1247  */
1248 void hammer2_voldata_lock(hammer2_dev_t *hmp);
1249 void hammer2_voldata_unlock(hammer2_dev_t *hmp);
1250 void hammer2_voldata_modify(hammer2_dev_t *hmp);
1251 hammer2_chain_t *hammer2_chain_alloc(hammer2_dev_t *hmp,
1252                                 hammer2_pfs_t *pmp,
1253                                 hammer2_trans_t *trans,
1254                                 hammer2_blockref_t *bref);
1255 void hammer2_chain_core_init(hammer2_chain_t *chain);
1256 void hammer2_chain_ref(hammer2_chain_t *chain);
1257 void hammer2_chain_drop(hammer2_chain_t *chain);
1258 void hammer2_chain_lock(hammer2_chain_t *chain, int how);
1259 void hammer2_chain_load_data(hammer2_chain_t *chain);
1260 const hammer2_media_data_t *hammer2_chain_rdata(hammer2_chain_t *chain);
1261 hammer2_media_data_t *hammer2_chain_wdata(hammer2_chain_t *chain);
1262
1263 /*
1264  * hammer2_cluster.c
1265  */
1266 void hammer2_cluster_load_async(hammer2_cluster_t *cluster,
1267                                 void (*callback)(hammer2_iocb_t *iocb),
1268                                 void *ptr);
1269 void hammer2_chain_moved(hammer2_chain_t *chain);
1270 void hammer2_chain_modify(hammer2_trans_t *trans,
1271                                 hammer2_chain_t *chain, int flags);
1272 void hammer2_chain_resize(hammer2_trans_t *trans, hammer2_inode_t *ip,
1273                                 hammer2_chain_t *parent,
1274                                 hammer2_chain_t *chain,
1275                                 int nradix, int flags);
1276 void hammer2_chain_unlock(hammer2_chain_t *chain);
1277 void hammer2_chain_wait(hammer2_chain_t *chain);
1278 hammer2_chain_t *hammer2_chain_get(hammer2_chain_t *parent, int generation,
1279                                 hammer2_blockref_t *bref);
1280 hammer2_chain_t *hammer2_chain_lookup_init(hammer2_chain_t *parent, int flags);
1281 void hammer2_chain_lookup_done(hammer2_chain_t *parent);
1282 hammer2_chain_t *hammer2_chain_lookup(hammer2_chain_t **parentp,
1283                                 hammer2_key_t *key_nextp,
1284                                 hammer2_key_t key_beg, hammer2_key_t key_end,
1285                                 int *cache_indexp, int flags);
1286 hammer2_chain_t *hammer2_chain_next(hammer2_chain_t **parentp,
1287                                 hammer2_chain_t *chain,
1288                                 hammer2_key_t *key_nextp,
1289                                 hammer2_key_t key_beg, hammer2_key_t key_end,
1290                                 int *cache_indexp, int flags);
1291 hammer2_chain_t *hammer2_chain_scan(hammer2_chain_t *parent,
1292                                 hammer2_chain_t *chain,
1293                                 int *cache_indexp, int flags);
1294
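/*
 * The lookup/next iterators above are normally driven as a loop, feeding
 * key_next back in as the next key_beg (illustrative sketch; variable
 * names are hypothetical and flags would typically be 0 or
 * HAMMER2_LOOKUP_SHARED):
 *
 *	hammer2_chain_t *parent;
 *	hammer2_chain_t *chain;
 *	hammer2_key_t key_next;
 *	int cache_index = -1;
 *
 *	parent = hammer2_chain_lookup_init(base, 0);
 *	chain = hammer2_chain_lookup(&parent, &key_next, key_beg, key_end,
 *				     &cache_index, flags);
 *	while (chain) {
 *		...	(consume chain)
 *		chain = hammer2_chain_next(&parent, chain, &key_next,
 *					   key_next, key_end,
 *					   &cache_index, flags);
 *	}
 *	hammer2_chain_lookup_done(parent);
 */
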
1295 int hammer2_chain_create(hammer2_trans_t *trans, hammer2_chain_t **parentp,
1296                                 hammer2_chain_t **chainp,
1297                                 hammer2_pfs_t *pmp,
1298                                 hammer2_key_t key, int keybits,
1299                                 int type, size_t bytes, int flags);
1300 void hammer2_chain_rename(hammer2_trans_t *trans, hammer2_blockref_t *bref,
1301                                 hammer2_chain_t **parentp,
1302                                 hammer2_chain_t *chain, int flags);
1303 int hammer2_chain_snapshot(hammer2_trans_t *trans, hammer2_chain_t **chainp,
1304                                 hammer2_ioc_pfs_t *pmp);
1305 void hammer2_chain_delete(hammer2_trans_t *trans, hammer2_chain_t *parent,
1306                                 hammer2_chain_t *chain, int flags);
1307 void hammer2_chain_delete_duplicate(hammer2_trans_t *trans,
1308                                 hammer2_chain_t **chainp, int flags);
1309 void hammer2_flush(hammer2_trans_t *trans, hammer2_chain_t *chain, int istop);
1310 void hammer2_delayed_flush(hammer2_trans_t *trans, hammer2_chain_t *chain);
1311 void hammer2_chain_commit(hammer2_trans_t *trans, hammer2_chain_t *chain);
1312 void hammer2_chain_setflush(hammer2_trans_t *trans, hammer2_chain_t *chain);
1313 void hammer2_chain_countbrefs(hammer2_chain_t *chain,
1314                                 hammer2_blockref_t *base, int count);
1315
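/*
 * hammer2_chain_create() allocates a fresh chain when *chainp is passed
 * in as NULL (illustrative sketch under an open transaction; the key,
 * type, and size values are hypothetical):
 *
 *	hammer2_chain_t *chain = NULL;
 *	int error;
 *
 *	error = hammer2_chain_create(&trans, &parent, &chain, pmp,
 *				     lbase, 0, HAMMER2_BREF_TYPE_DATA,
 *				     lblksize, 0);
 *	if (error == 0)
 *		hammer2_chain_modify(&trans, chain, 0);
 */
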
1316 void hammer2_chain_setcheck(hammer2_chain_t *chain, void *bdata);
1317 int hammer2_chain_testcheck(hammer2_chain_t *chain, void *bdata);
1318
1319
1320 void hammer2_pfs_memory_wait(hammer2_pfs_t *pmp);
1321 void hammer2_pfs_memory_inc(hammer2_pfs_t *pmp);
1322 void hammer2_pfs_memory_wakeup(hammer2_pfs_t *pmp);
1323
1324 void hammer2_base_delete(hammer2_trans_t *trans, hammer2_chain_t *chain,
1325                                 hammer2_blockref_t *base, int count,
1326                                 int *cache_indexp, hammer2_chain_t *child);
1327 void hammer2_base_insert(hammer2_trans_t *trans, hammer2_chain_t *chain,
1328                                 hammer2_blockref_t *base, int count,
1329                                 int *cache_indexp, hammer2_chain_t *child);
1330
1331 /*
1332  * hammer2_trans.c
1333  */
1334 void hammer2_trans_init(hammer2_trans_t *trans, hammer2_pfs_t *pmp,
1335                                 int flags);
1336 void hammer2_trans_done(hammer2_trans_t *trans);
1337 void hammer2_trans_assert_strategy(hammer2_pfs_t *pmp);
1338
1339 /*
1340  * hammer2_ioctl.c
1341  */
1342 int hammer2_ioctl(hammer2_inode_t *ip, u_long com, void *data,
1343                                 int fflag, struct ucred *cred);
1344
1345 /*
1346  * hammer2_io.c
1347  */
1348 void hammer2_io_putblk(hammer2_io_t **diop);
1349 void hammer2_io_cleanup(hammer2_dev_t *hmp, struct hammer2_io_tree *tree);
1350 char *hammer2_io_data(hammer2_io_t *dio, off_t lbase);
1351 void hammer2_io_getblk(hammer2_dev_t *hmp, off_t lbase, int lsize,
1352                                 hammer2_iocb_t *iocb);
1353 void hammer2_io_complete(hammer2_iocb_t *iocb);
1354 void hammer2_io_callback(struct bio *bio);
1355 void hammer2_iocb_wait(hammer2_iocb_t *iocb);
1356 int hammer2_io_new(hammer2_dev_t *hmp, off_t lbase, int lsize,
1357                                 hammer2_io_t **diop);
1358 int hammer2_io_newnz(hammer2_dev_t *hmp, off_t lbase, int lsize,
1359                                 hammer2_io_t **diop);
1360 int hammer2_io_newq(hammer2_dev_t *hmp, off_t lbase, int lsize,
1361                                 hammer2_io_t **diop);
1362 int hammer2_io_bread(hammer2_dev_t *hmp, off_t lbase, int lsize,
1363                                 hammer2_io_t **diop);
1364 void hammer2_io_bawrite(hammer2_io_t **diop);
1365 void hammer2_io_bdwrite(hammer2_io_t **diop);
1366 int hammer2_io_bwrite(hammer2_io_t **diop);
1367 int hammer2_io_isdirty(hammer2_io_t *dio);
1368 void hammer2_io_setdirty(hammer2_io_t *dio);
1369 void hammer2_io_setinval(hammer2_io_t *dio, u_int bytes);
1370 void hammer2_io_brelse(hammer2_io_t **diop);
1371 void hammer2_io_bqrelse(hammer2_io_t **diop);
1372
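/*
 * Typical synchronous read through the DIO layer (illustrative sketch;
 * names are hypothetical and error handling is simplified):
 *
 *	hammer2_io_t *dio;
 *	char *data;
 *	int error;
 *
 *	error = hammer2_io_bread(hmp, lbase, lsize, &dio);
 *	if (error == 0) {
 *		data = hammer2_io_data(dio, lbase);
 *		...	(consume data while the dio reference is held)
 *		hammer2_io_bqrelse(&dio);
 *	}
 */
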
1373 /*
1374  * hammer2_xops.c
1375  */
1376 void hammer2_xop_group_init(hammer2_pfs_t *pmp, hammer2_xop_group_t *xgrp);
1377 hammer2_xop_t *hammer2_xop_alloc(hammer2_inode_t *ip, hammer2_xop_func_t func);
1378 void hammer2_xop_helper_create(hammer2_pfs_t *pmp);
1379 void hammer2_xop_helper_cleanup(hammer2_pfs_t *pmp);
1380 void hammer2_xop_start(hammer2_xop_head_t *xop);
1381 int hammer2_xop_collect(hammer2_xop_head_t *xop);
1382 void hammer2_xop_retire(hammer2_xop_head_t *xop, uint32_t mask);
1383 int hammer2_xop_active(hammer2_xop_head_t *xop);
1384 int hammer2_xop_feed(hammer2_xop_head_t *xop, hammer2_chain_t *chain,
1385                                 int clindex, int error);
1386
1387
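/*
 * Front-end dispatch of an XOP to the per-node worker threads follows an
 * alloc/start/collect/retire sequence (illustrative sketch; the ->head
 * member and the HAMMER2_XOPMASK_VOP retire mask are assumptions here,
 * and the xop-specific input fields are elided):
 *
 *	hammer2_xop_t *xop;
 *	int error;
 *
 *	xop = hammer2_xop_alloc(ip, hammer2_xop_readdir);
 *	...	(fill in the xop-specific input fields)
 *	hammer2_xop_start(&xop->head);
 *	for (;;) {
 *		error = hammer2_xop_collect(&xop->head);
 *		if (error)
 *			break;
 *		...	(consume the collected cluster element)
 *	}
 *	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
 */
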
1388 void hammer2_xop_readdir(hammer2_xop_t *xop, int clidx);
1389 int hammer2_xop_readlink(struct vop_readlink_args *ap);
1390 int hammer2_xop_nresolve(struct vop_nresolve_args *ap);
1391 int hammer2_xop_nlookupdotdot(struct vop_nlookupdotdot_args *ap);
1392 int hammer2_xop_nmkdir(struct vop_nmkdir_args *ap);
1393 int hammer2_xop_advlock(struct vop_advlock_args *ap);
1394 int hammer2_xop_nlink(struct vop_nlink_args *ap);
1395 int hammer2_xop_ncreate(struct vop_ncreate_args *ap);
1396 int hammer2_xop_nmknod(struct vop_nmknod_args *ap);
1397 int hammer2_xop_nsymlink(struct vop_nsymlink_args *ap);
1398 int hammer2_xop_nremove(struct vop_nremove_args *ap);
1399 int hammer2_xop_nrmdir(struct vop_nrmdir_args *ap);
1400 int hammer2_xop_nrename(struct vop_nrename_args *ap);
1401
1402 /*
1403  * hammer2_msgops.c
1404  */
1405 int hammer2_msg_dbg_rcvmsg(kdmsg_msg_t *msg);
1406 int hammer2_msg_adhoc_input(kdmsg_msg_t *msg);
1407
1408 /*
1409  * hammer2_vfsops.c
1410  */
1411 void hammer2_clusterctl_wakeup(kdmsg_iocom_t *iocom);
1412 void hammer2_volconf_update(hammer2_dev_t *hmp, int index);
1413 void hammer2_dump_chain(hammer2_chain_t *chain, int tab, int *countp, char pfx);
1414 int hammer2_vfs_sync(struct mount *mp, int waitflags);
1415 hammer2_pfs_t *hammer2_pfsalloc(hammer2_cluster_t *cluster,
1416                                 const hammer2_inode_data_t *ripdata,
1417                                 hammer2_tid_t modify_tid);
1418
1419 void hammer2_lwinprog_ref(hammer2_pfs_t *pmp);
1420 void hammer2_lwinprog_drop(hammer2_pfs_t *pmp);
1421 void hammer2_lwinprog_wait(hammer2_pfs_t *pmp);
1422
1423 /*
1424  * hammer2_freemap.c
1425  */
1426 int hammer2_freemap_alloc(hammer2_trans_t *trans, hammer2_chain_t *chain,
1427                                 size_t bytes);
1428 void hammer2_freemap_adjust(hammer2_trans_t *trans, hammer2_dev_t *hmp,
1429                                 hammer2_blockref_t *bref, int how);
1430
1431 /*
1432  * hammer2_cluster.c
1433  */
1434 int hammer2_cluster_need_resize(hammer2_cluster_t *cluster, int bytes);
1435 uint8_t hammer2_cluster_type(hammer2_cluster_t *cluster);
1436 const hammer2_media_data_t *hammer2_cluster_rdata(hammer2_cluster_t *cluster);
1437 const hammer2_media_data_t *hammer2_cluster_rdata_bytes(
1438                                 hammer2_cluster_t *cluster, size_t *bytesp);
1439 hammer2_media_data_t *hammer2_cluster_wdata(hammer2_cluster_t *cluster);
1440 hammer2_cluster_t *hammer2_cluster_from_chain(hammer2_chain_t *chain);
1441 int hammer2_cluster_modified(hammer2_cluster_t *cluster);
1442 int hammer2_cluster_duplicated(hammer2_cluster_t *cluster);
1443 void hammer2_cluster_bref(hammer2_cluster_t *cluster, hammer2_blockref_t *bref);
1444 void hammer2_cluster_setflush(hammer2_trans_t *trans,
1445                         hammer2_cluster_t *cluster);
1446 void hammer2_cluster_setmethod_check(hammer2_trans_t *trans,
1447                         hammer2_cluster_t *cluster, int check_algo);
1448 hammer2_cluster_t *hammer2_cluster_alloc(hammer2_pfs_t *pmp,
1449                         hammer2_trans_t *trans,
1450                         hammer2_blockref_t *bref);
1451 void hammer2_cluster_ref(hammer2_cluster_t *cluster);
1452 void hammer2_cluster_drop(hammer2_cluster_t *cluster);
1453 void hammer2_cluster_wait(hammer2_cluster_t *cluster);
1454 void hammer2_cluster_lock(hammer2_cluster_t *cluster, int how);
1455 void hammer2_cluster_lock_except(hammer2_cluster_t *cluster, int idx, int how);
1456 int hammer2_cluster_check(hammer2_cluster_t *cluster, hammer2_key_t lokey,
1457                         int flags);
1458 void hammer2_cluster_resolve(hammer2_cluster_t *cluster);
1459 void hammer2_cluster_forcegood(hammer2_cluster_t *cluster);
1460 hammer2_cluster_t *hammer2_cluster_copy(hammer2_cluster_t *ocluster);
1461 void hammer2_cluster_unlock(hammer2_cluster_t *cluster);
1462 void hammer2_cluster_unlock_except(hammer2_cluster_t *cluster, int idx);
1463 void hammer2_cluster_resize(hammer2_trans_t *trans, hammer2_inode_t *ip,
1464                         hammer2_cluster_t *cparent, hammer2_cluster_t *cluster,
1465                         int nradix, int flags);
1466 void hammer2_cluster_modify(hammer2_trans_t *trans, hammer2_cluster_t *cluster,
1467                         int flags);
1468 hammer2_inode_data_t *hammer2_cluster_modify_ip(hammer2_trans_t *trans,
1469                         hammer2_inode_t *ip, hammer2_cluster_t *cluster,
1470                         int flags);
1471 void hammer2_cluster_modsync(hammer2_cluster_t *cluster);
1472 hammer2_cluster_t *hammer2_cluster_lookup_init(hammer2_cluster_t *cparent,
1473                         int flags);
1474 void hammer2_cluster_lookup_done(hammer2_cluster_t *cparent);
1475 hammer2_cluster_t *hammer2_cluster_lookup(hammer2_cluster_t *cparent,
1476                         hammer2_key_t *key_nextp,
1477                         hammer2_key_t key_beg, hammer2_key_t key_end,
1478                         int flags);
1479 hammer2_cluster_t *hammer2_cluster_next(hammer2_cluster_t *cparent,
1480                         hammer2_cluster_t *cluster,
1481                         hammer2_key_t *key_nextp,
1482                         hammer2_key_t key_beg, hammer2_key_t key_end,
1483                         int flags);
1484 void hammer2_cluster_next_single_chain(hammer2_cluster_t *cparent,
1485                         hammer2_cluster_t *cluster,
1486                         hammer2_key_t *key_nextp,
1487                         hammer2_key_t key_beg,
1488                         hammer2_key_t key_end,
1489                         int i, int flags);
1490 hammer2_cluster_t *hammer2_cluster_scan(hammer2_cluster_t *cparent,
1491                         hammer2_cluster_t *cluster, int flags);
1492 int hammer2_cluster_create(hammer2_trans_t *trans, hammer2_cluster_t *cparent,
1493                         hammer2_cluster_t **clusterp,
1494                         hammer2_key_t key, int keybits,
1495                         int type, size_t bytes, int flags);
1496 void hammer2_cluster_rename(hammer2_trans_t *trans, hammer2_blockref_t *bref,
1497                         hammer2_cluster_t *cparent, hammer2_cluster_t *cluster,
1498                         int flags);
1499 void hammer2_cluster_delete(hammer2_trans_t *trans, hammer2_cluster_t *pcluster,
1500                         hammer2_cluster_t *cluster, int flags);
1501 int hammer2_cluster_snapshot(hammer2_trans_t *trans,
1502                         hammer2_cluster_t *ocluster, hammer2_ioc_pfs_t *pmp);
1503 hammer2_cluster_t *hammer2_cluster_parent(hammer2_cluster_t *cluster);
1504
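/*
 * Inode meta-data updates through a locked cluster typically pair
 * modify_ip with modsync (illustrative sketch; the inode/cluster locking
 * and the actual field updates are elided):
 *
 *	hammer2_inode_data_t *wipdata;
 *
 *	wipdata = hammer2_cluster_modify_ip(&trans, ip, cluster, 0);
 *	...	(update wipdata fields)
 *	hammer2_cluster_modsync(cluster);
 */
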
1505 int hammer2_bulk_scan(hammer2_trans_t *trans, hammer2_chain_t *parent,
1506                         int (*func)(hammer2_chain_t *chain, void *info),
1507                         void *info);
1508 int hammer2_bulkfree_pass(hammer2_dev_t *hmp,
1509                         struct hammer2_ioc_bulkfree *bfi);
1510
1511 /*
1512  * hammer2_iocom.c
1513  */
1514 void hammer2_iocom_init(hammer2_dev_t *hmp);
1515 void hammer2_iocom_uninit(hammer2_dev_t *hmp);
1516 void hammer2_cluster_reconnect(hammer2_dev_t *hmp, struct file *fp);
1517
1518 /*
1519  * hammer2_thread.c
1520  */
1521 void hammer2_thr_create(hammer2_thread_t *thr, hammer2_pfs_t *pmp,
1522                         const char *id, int clindex, int repidx,
1523                         void (*func)(void *arg));
1524 void hammer2_thr_delete(hammer2_thread_t *thr);
1525 void hammer2_thr_remaster(hammer2_thread_t *thr);
1526 void hammer2_thr_freeze_async(hammer2_thread_t *thr);
1527 void hammer2_thr_freeze(hammer2_thread_t *thr);
1528 void hammer2_thr_unfreeze(hammer2_thread_t *thr);
1529 void hammer2_primary_sync_thread(void *arg);
1530 void hammer2_primary_xops_thread(void *arg);
1531
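/*
 * Per-node worker threads are created at PFS mount time and torn down at
 * unmount (illustrative sketch; the pmp->sync_thrs[] array name and the
 * "h2nod" id string are assumptions):
 *
 *	hammer2_thr_create(&pmp->sync_thrs[i], pmp, "h2nod", i, -1,
 *			   hammer2_primary_sync_thread);
 *	...
 *	hammer2_thr_delete(&pmp->sync_thrs[i]);
 */
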
1532 /*
1533  * hammer2_strategy.c
1534  */
1535 int hammer2_vop_strategy(struct vop_strategy_args *ap);
1536 int hammer2_vop_bmap(struct vop_bmap_args *ap);
1537 void hammer2_write_thread(void *arg);
1538 void hammer2_bioq_sync(hammer2_pfs_t *pmp);
1539
1540 #endif /* !_KERNEL */
1541 #endif /* !_VFS_HAMMER2_HAMMER2_H_ */