hammer2 - Cluster API cleanup
dragonfly.git: sys/vfs/hammer2/hammer2.h
1 /*
2  * Copyright (c) 2011-2014 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@dragonflybsd.org>
6  * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  *
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in
16  *    the documentation and/or other materials provided with the
17  *    distribution.
18  * 3. Neither the name of The DragonFly Project nor the names of its
19  *    contributors may be used to endorse or promote products derived
20  *    from this software without specific, prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
25  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
26  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
27  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
28  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
30  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
31  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
32  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33  * SUCH DAMAGE.
34  */
35
36 /*
37  * HAMMER2 IN-MEMORY CACHE OF MEDIA STRUCTURES
38  *
39  * This header file contains structures used internally by the HAMMER2
40  * implementation.  See hammer2_disk.h for on-disk structures.
41  *
42  * There is an in-memory representation of all on-media data structures.
43  * Almost everything is represented by a hammer2_chain structure in-memory.
44  * Other higher-level structures typically map to chains.
45  *
46  * A great deal of data is accessed simply via its buffer cache buffer,
47  * which is mapped for the duration of the chain's lock.  Hammer2 must
48  * implement its own buffer cache layer on top of the system layer to
49  * allow different threads to lock different sub-block-sized buffers.
50  *
51  * When modifications are made to a chain a new filesystem block must be
52  * allocated.  Multiple modifications do not typically allocate new blocks
53  * until the current block has been flushed.  Flushes do not block the
54  * front-end unless the front-end operation crosses the current inode being
55  * flushed.
56  *
57  * The in-memory representation may remain cached (for example in order to
58  * placemark clustering locks) even after the related data has been
59  * detached.
60  */
61
62 #ifndef _VFS_HAMMER2_HAMMER2_H_
63 #define _VFS_HAMMER2_HAMMER2_H_
64
65 #include <sys/param.h>
66 #include <sys/types.h>
67 #include <sys/kernel.h>
68 #include <sys/conf.h>
69 #include <sys/systm.h>
70 #include <sys/tree.h>
71 #include <sys/malloc.h>
72 #include <sys/mount.h>
73 #include <sys/vnode.h>
74 #include <sys/proc.h>
75 #include <sys/mountctl.h>
76 #include <sys/priv.h>
77 #include <sys/stat.h>
78 #include <sys/thread.h>
79 #include <sys/globaldata.h>
80 #include <sys/lockf.h>
81 #include <sys/buf.h>
82 #include <sys/queue.h>
83 #include <sys/limits.h>
84 #include <sys/dmsg.h>
85 #include <sys/mutex.h>
86 #include <sys/kern_syscall.h>
87
88 #include <sys/signal2.h>
89 #include <sys/buf2.h>
90 #include <sys/mutex2.h>
91
92 #include "hammer2_disk.h"
93 #include "hammer2_mount.h"
94 #include "hammer2_ioctl.h"
95
96 struct hammer2_io;
97 struct hammer2_iocb;
98 struct hammer2_chain;
99 struct hammer2_cluster;
100 struct hammer2_inode;
101 struct hammer2_mount;
102 struct hammer2_pfsmount;
103 struct hammer2_span;
104 struct hammer2_state;
105 struct hammer2_msg;
106
107 /*
108  * Mutex and lock shims.  Hammer2 requires support for asynchronous and
109  * abortable locks, and both exclusive and shared spinlocks.  Normal
110  * synchronous non-abortable locks can be substituted for spinlocks.
111  */
112 typedef mtx_t                           hammer2_mtx_t;
113 typedef mtx_link_t                      hammer2_mtx_link_t;
114 typedef mtx_state_t                     hammer2_mtx_state_t;
115
116 typedef struct spinlock                 hammer2_spin_t;
117
118 #define hammer2_mtx_ex                  mtx_lock_ex_quick
119 #define hammer2_mtx_sh                  mtx_lock_sh_quick
120 #define hammer2_mtx_unlock              mtx_unlock
121 #define hammer2_mtx_owned               mtx_owned
122 #define hammer2_mtx_init                mtx_init
123 #define hammer2_mtx_temp_release        mtx_lock_temp_release
124 #define hammer2_mtx_temp_restore        mtx_lock_temp_restore
125 #define hammer2_mtx_refs                mtx_lockrefs
126
127 #define hammer2_spin_init               spin_init
128 #define hammer2_spin_sh                 spin_lock_shared
129 #define hammer2_spin_ex                 spin_lock
130 #define hammer2_spin_unsh               spin_unlock_shared
131 #define hammer2_spin_unex               spin_unlock
132
133 /*
134  * General lock support
135  */
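/*
 * hammer2_mtx_upgrade() - acquire the mtx exclusively, returning non-zero
 * if it was already held exclusively.
 *
 * NOTE: If the mtx was held shared it is released and then reacquired
 *       exclusively, so the upgrade is not atomic.  The return value is
 *       handed to hammer2_mtx_downgrade() to restore the original state.
 */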
136 static __inline
137 int
138 hammer2_mtx_upgrade(hammer2_mtx_t *mtx)
139 {
140         int wasexclusive;
141
142         if (mtx_islocked_ex(mtx)) {
143                 wasexclusive = 1;
144         } else {
145                 mtx_unlock(mtx);
146                 mtx_lock_ex_quick(mtx);
147                 wasexclusive = 0;
148         }
149         return wasexclusive;
150 }
151
152 /*
153  * Downgrade a mtx from exclusive back to shared, but only if it was held
154  * shared prior to the matching hammer2_mtx_upgrade().  If the lock was
155  * already exclusive at upgrade time this is a NOP.
156  */
157 static __inline
158 void
159 hammer2_mtx_downgrade(hammer2_mtx_t *mtx, int wasexclusive)
160 {
161         if (wasexclusive == 0)
162                 mtx_downgrade(mtx);
163 }
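
/*
 * Illustrative usage sketch (not part of the API; "mtx" and the surrounding
 * code are hypothetical): the upgrade/downgrade pair brackets a code path
 * that temporarily requires exclusivity while a shared lock is held.
 *
 *      int wasexclusive;
 *
 *      wasexclusive = hammer2_mtx_upgrade(mtx);
 *      ... operate with the mtx held exclusively ...
 *      hammer2_mtx_downgrade(mtx, wasexclusive);
 */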
164
165 /*
166  * The xid tracks internal transactional updates.
167  *
168  * XXX fix-me, really needs to be 64-bits
169  */
170 typedef uint32_t hammer2_xid_t;
171
172 #define HAMMER2_XID_MIN 0x00000000U
173 #define HAMMER2_XID_MAX 0x7FFFFFFFU
174
175 /*
176  * The chain structure tracks a portion of the media topology from the
177  * root (volume) down.  Chains represent volumes, inodes, indirect blocks,
178  * data blocks, and freemap nodes and leaves.
179  *
180  * The chain structure utilizes a simple singly-homed topology and the
181  * chain's in-memory topology will move around as the chains do, due mainly
182  * to renames and indirect block creation.
183  *
184  * Block Table Updates
185  *
186  *      Block table updates for insertions and updates are delayed until the
187  *      flush.  This allows us to avoid having to modify the parent chain
188  *      all the way to the root.
189  *
190  *      Block table deletions are performed immediately (modifying the parent
191  *      in the process) because the flush code uses the chain structure to
192  *      track delayed updates and the chain will (likely) be gone or moved to
193  *      another location in the topology after a deletion.
194  *
195  *      A prior iteration of the code tried to keep the relationship intact
196  *      on deletes by doing a delete-duplicate operation on the chain, but
197  *      it added way too much complexity to the codebase.
198  *
199  * Flush Synchronization
200  *
201  *      The flush code must flush modified chains bottom-up.  Because chain
202  *      structures can shift around and are NOT topologically stable,
203  *      modified chains are independently indexed for the flush.  As the flush
204  *      runs it modifies (or further modifies) and updates the parents,
205  *      propagating the flush all the way to the volume root.
206  *
207  *      Modifying front-end operations can occur during a flush but will block
208  *      in two cases: (1) when the front-end tries to operate on the inode
209  *      currently in the midst of being flushed and (2) if the front-end
210  *      crosses an inode currently being flushed (such as during a rename).
211  *      So, for example, if you rename directory "x" to "a/b/c/d/e/f/g/x" and
212  *      the flusher is currently working on "a/b/c", the rename will block
213  *      temporarily in order to ensure that "x" exists in one place or the
214  *      other.
215  *
216  *      Meta-data statistics are updated by the flusher.  The front-end will
217  *      make estimates but meta-data must be fully synchronized only during a
218  *      flush in order to ensure that it remains correct across a crash.
219  *
220  *      Multiple flush synchronizations can theoretically be in-flight at the
221  *      same time but the implementation is not coded to handle the case and
222  *      currently serializes them.
223  *
224  * Snapshots:
225  *
226  *      Snapshots currently require the subdirectory tree being snapshotted
227  *      to be flushed.  The snapshot then creates a new super-root inode which
228  *      copies the flushed blockdata of the directory or file that was
229  *      snapshotted.
230  *
231  * RBTREE NOTES:
232  *
233  *      - Note that the radix tree runs in powers of 2 only so sub-trees
234  *        cannot straddle edges.
235  */
236 RB_HEAD(hammer2_chain_tree, hammer2_chain);
237 TAILQ_HEAD(h2_flush_list, hammer2_chain);
238 TAILQ_HEAD(h2_core_list, hammer2_chain);
239 TAILQ_HEAD(h2_iocb_list, hammer2_iocb);
240
241 #define CHAIN_CORE_DELETE_BMAP_ENTRIES  \
242         (HAMMER2_PBUFSIZE / sizeof(hammer2_blockref_t) / sizeof(uint32_t))
243
244 struct hammer2_chain_core {
245         hammer2_mtx_t   lock;
246         hammer2_spin_t  spin;
247         struct hammer2_chain_tree rbtree; /* sub-chains */
248         int             live_zero;      /* blockref array opt */
249         u_int           flags;
250         u_int           live_count;     /* live (not deleted) chains in tree */
251         u_int           chain_count;    /* live + deleted chains under core */
252         int             generation;     /* generation number (inserts only) */
253 };
254
255 typedef struct hammer2_chain_core hammer2_chain_core_t;
256
257 #define HAMMER2_CORE_UNUSED0001         0x0001
258 #define HAMMER2_CORE_COUNTEDBREFS       0x0002
259
260 RB_HEAD(hammer2_io_tree, hammer2_io);
261
262 /*
263  * IOCB - IO callback (into chain, cluster, or manual request)
264  */
265 struct hammer2_iocb {
266         TAILQ_ENTRY(hammer2_iocb) entry;
267         void (*callback)(struct hammer2_iocb *iocb);
268         struct hammer2_io       *dio;
269         struct hammer2_cluster  *cluster;
270         struct hammer2_chain    *chain;
271         void                    *ptr;
272         off_t                   lbase;
273         int                     lsize;
274         uint32_t                flags;
275         int                     error;
276 };
277
278 typedef struct hammer2_iocb hammer2_iocb_t;
279
280 #define HAMMER2_IOCB_INTERLOCK  0x00000001
281 #define HAMMER2_IOCB_ONQ        0x00000002
282 #define HAMMER2_IOCB_DONE       0x00000004
283 #define HAMMER2_IOCB_INPROG     0x00000008
284 #define HAMMER2_IOCB_UNUSED10   0x00000010
285 #define HAMMER2_IOCB_QUICK      0x00010000
286 #define HAMMER2_IOCB_ZERO       0x00020000
287 #define HAMMER2_IOCB_READ       0x00040000
288 #define HAMMER2_IOCB_WAKEUP     0x00080000
289
290 /*
291  * DIO - Management structure wrapping system buffer cache.
292  *
293  *       Used for multiple purposes including concurrent management
294  *       of small requests by chains into larger DIOs.
295  */
296 struct hammer2_io {
297         RB_ENTRY(hammer2_io) rbnode;    /* indexed by device offset */
298         struct h2_iocb_list iocbq;
299         struct spinlock spin;
300         struct hammer2_mount *hmp;
301         struct buf      *bp;
302         off_t           pbase;
303         int             psize;
304         int             refs;
305         int             act;                    /* activity */
306 };
307
308 typedef struct hammer2_io hammer2_io_t;
309
310 #define HAMMER2_DIO_INPROG      0x80000000      /* bio in progress */
311 #define HAMMER2_DIO_GOOD        0x40000000      /* dio->bp is stable */
312 #define HAMMER2_DIO_WAITING     0x20000000      /* (old) */
313 #define HAMMER2_DIO_DIRTY       0x10000000      /* flush on last drop */
314
315 #define HAMMER2_DIO_MASK        0x0FFFFFFF
316
317 /*
318  * Primary chain structure keeps track of the topology in-memory.
319  */
320 struct hammer2_chain {
321         hammer2_chain_core_t    core;
322         RB_ENTRY(hammer2_chain) rbnode;         /* live chain(s) */
323         hammer2_blockref_t      bref;
324         struct hammer2_chain    *parent;
325         struct hammer2_state    *state;         /* if active cache msg */
326         struct hammer2_mount    *hmp;
327         struct hammer2_pfsmount *pmp;           /* (pfs-cluster pmp or spmp) */
328
329         hammer2_xid_t   flush_xid;              /* flush sequencing */
330         hammer2_key_t   data_count;             /* deltas to apply */
331         hammer2_key_t   inode_count;            /* deltas to apply */
332         hammer2_key_t   data_count_up;          /* deltas to apply */
333         hammer2_key_t   inode_count_up;         /* deltas to apply */
334         hammer2_io_t    *dio;                   /* physical data buffer */
335         u_int           bytes;                  /* physical data size */
336         u_int           flags;
337         u_int           refs;
338         u_int           lockcnt;
339         hammer2_media_data_t *data;             /* data pointer shortcut */
340         TAILQ_ENTRY(hammer2_chain) flush_node;  /* flush list */
341 };
342
343 typedef struct hammer2_chain hammer2_chain_t;
344
345 int hammer2_chain_cmp(hammer2_chain_t *chain1, hammer2_chain_t *chain2);
346 RB_PROTOTYPE(hammer2_chain_tree, hammer2_chain, rbnode, hammer2_chain_cmp);
347
348 /*
349  * Special notes on flags:
350  *
351  * INITIAL - This flag allows a chain to be created and for storage to
352  *           be allocated without having to immediately instantiate the
353  *           related buffer.  The data is assumed to be all-zeros.  It
354  *           is primarily used for indirect blocks.
355  *
356  * MODIFIED- The chain's media data has been modified.
357  * UPDATE  - Chain might not be modified but parent blocktable needs update
358  *
359  * BMAPPED - Indicates that the chain is present in the parent blockmap.
360  * BMAPUPD - Indicates that the chain is present but needs to be updated
361  *           in the parent blockmap.
362  */
363 #define HAMMER2_CHAIN_MODIFIED          0x00000001      /* dirty chain data */
364 #define HAMMER2_CHAIN_ALLOCATED         0x00000002      /* kmalloc'd chain */
365 #define HAMMER2_CHAIN_DESTROY           0x00000004
366 #define HAMMER2_CHAIN_UNLINKED          0x00000008      /* unlinked file */
367 #define HAMMER2_CHAIN_DELETED           0x00000010      /* deleted chain */
368 #define HAMMER2_CHAIN_INITIAL           0x00000020      /* initial create */
369 #define HAMMER2_CHAIN_UPDATE            0x00000040      /* need parent update */
370 #define HAMMER2_CHAIN_DEFERRED          0x00000080      /* flush depth defer */
371 #define HAMMER2_CHAIN_IOFLUSH           0x00000100      /* bawrite on put */
372 #define HAMMER2_CHAIN_ONFLUSH           0x00000200      /* on a flush list */
373 #define HAMMER2_CHAIN_UNUSED00000400    0x00000400
374 #define HAMMER2_CHAIN_VOLUMESYNC        0x00000800      /* needs volume sync */
375 #define HAMMER2_CHAIN_UNUSED00001000    0x00001000
376 #define HAMMER2_CHAIN_MOUNTED           0x00002000      /* PFS is mounted */
377 #define HAMMER2_CHAIN_ONRBTREE          0x00004000      /* on parent RB tree */
378 #define HAMMER2_CHAIN_SNAPSHOT          0x00008000      /* snapshot special */
379 #define HAMMER2_CHAIN_EMBEDDED          0x00010000      /* embedded data */
380 #define HAMMER2_CHAIN_RELEASE           0x00020000      /* don't keep around */
381 #define HAMMER2_CHAIN_BMAPPED           0x00040000      /* present in blkmap */
382 #define HAMMER2_CHAIN_BMAPUPD           0x00080000      /* +needs updating */
383 #define HAMMER2_CHAIN_UNUSED00100000    0x00100000
384 #define HAMMER2_CHAIN_UNUSED00200000    0x00200000
385 #define HAMMER2_CHAIN_PFSBOUNDARY       0x00400000      /* super->pfs inode */
386
387 #define HAMMER2_CHAIN_FLUSH_MASK        (HAMMER2_CHAIN_MODIFIED |       \
388                                          HAMMER2_CHAIN_UPDATE |         \
389                                          HAMMER2_CHAIN_ONFLUSH)
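
/*
 * Illustrative sketch (hypothetical usage, not part of the API): a chain
 * participates in the next flush when any flag in the mask is set, e.g.
 *
 *      if (chain->flags & HAMMER2_CHAIN_FLUSH_MASK) {
 *              ... chain is dirty or needs a parent blocktable update ...
 *      }
 */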
390
391 /*
392  * Flags passed to hammer2_chain_lookup() and hammer2_chain_next()
393  *
394  * NOTE: MATCHIND allows an indirect block / freemap node to be returned
395  *       when the passed key range matches the radix.  Remember that key_end
396  *       is inclusive (e.g. {0x000,0xFFF}, not {0x000,0x1000}).
397  */
398 #define HAMMER2_LOOKUP_NOLOCK           0x00000001      /* ref only */
399 #define HAMMER2_LOOKUP_NODATA           0x00000002      /* data left NULL */
400 #define HAMMER2_LOOKUP_SHARED           0x00000100
401 #define HAMMER2_LOOKUP_MATCHIND         0x00000200      /* return all chains */
402 #define HAMMER2_LOOKUP_UNUSED0400       0x00000400
403 #define HAMMER2_LOOKUP_ALWAYS           0x00000800      /* resolve data */
404
405 /*
406  * Flags passed to hammer2_chain_modify() and hammer2_chain_resize()
407  *
408  * NOTE: OPTDATA allows us to avoid instantiating buffers for INDIRECT
409  *       blocks in the INITIAL-create state.
410  */
411 #define HAMMER2_MODIFY_OPTDATA          0x00000002      /* data can be NULL */
412 #define HAMMER2_MODIFY_NO_MODIFY_TID    0x00000004
413 #define HAMMER2_MODIFY_UNUSED0008       0x00000008
414 #define HAMMER2_MODIFY_NOREALLOC        0x00000010
415
416 /*
417  * Flags passed to hammer2_chain_lock()
418  */
419 #define HAMMER2_RESOLVE_NEVER           1
420 #define HAMMER2_RESOLVE_MAYBE           2
421 #define HAMMER2_RESOLVE_ALWAYS          3
422 #define HAMMER2_RESOLVE_MASK            0x0F
423
424 #define HAMMER2_RESOLVE_SHARED          0x10    /* request shared lock */
425 #define HAMMER2_RESOLVE_NOREF           0x20    /* already ref'd on lock */
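
/*
 * Illustrative sketch (hypothetical usage): chains are typically referenced
 * and then locked with one of the resolve modes above, e.g. for read-only
 * access to the media data:
 *
 *      hammer2_chain_ref(chain);
 *      hammer2_chain_lock(chain, HAMMER2_RESOLVE_ALWAYS |
 *                                HAMMER2_RESOLVE_SHARED);
 *      ... inspect chain->data ...
 *      hammer2_chain_unlock(chain);
 *      hammer2_chain_drop(chain);
 */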
426
427 /*
428  * Flags passed to hammer2_chain_delete()
429  */
430 #define HAMMER2_DELETE_PERMANENT        0x0001
431 #define HAMMER2_DELETE_NOSTATS          0x0002
432
433 #define HAMMER2_INSERT_NOSTATS          0x0002
434
435 /*
436  * Flags passed to hammer2_chain_delete_duplicate()
437  */
438 #define HAMMER2_DELDUP_RECORE           0x0001
439
440 /*
441  * Cluster different types of storage together for allocations
442  */
443 #define HAMMER2_FREECACHE_INODE         0
444 #define HAMMER2_FREECACHE_INDIR         1
445 #define HAMMER2_FREECACHE_DATA          2
446 #define HAMMER2_FREECACHE_UNUSED3       3
447 #define HAMMER2_FREECACHE_TYPES         4
448
449 /*
450  * hammer2_freemap_alloc() block preference
451  */
452 #define HAMMER2_OFF_NOPREF              ((hammer2_off_t)-1)
453
454 /*
455  * BMAP read-ahead maximum parameters
456  */
457 #define HAMMER2_BMAP_COUNT              16      /* max bmap read-ahead */
458 #define HAMMER2_BMAP_BYTES              (HAMMER2_PBUFSIZE * HAMMER2_BMAP_COUNT)
459
460 /*
461  * hammer2_freemap_adjust()
462  */
463 #define HAMMER2_FREEMAP_DORECOVER       1
464 #define HAMMER2_FREEMAP_DOMAYFREE       2
465 #define HAMMER2_FREEMAP_DOREALFREE      3
466
467 /*
468  * HAMMER2 cluster - A set of chains representing the same entity.
469  *
470  * hammer2_cluster typically represents a temporary set of representative
471  * chains.  The one exception is that a hammer2_cluster is embedded in
472  * hammer2_inode.  This embedded cluster is ONLY used to track the
473  * representative chains and cannot be directly locked.
474  *
475  * A cluster is temporary (and thus per-thread) for locking purposes,
476  * allowing us to embed the asynchronous storage required for
477  * cluster operations in the cluster itself.  That is, except for the
478  * embedding hammer2_inode, the cluster structure will always represent
479  * a 'working copy'.
480  *
481  * Because the cluster is a 'working copy' and is usually subject to cluster
482  * quorum rules, it is quite possible for us to end up with an insufficient
483  * number of live chains to execute an operation.  If an insufficient number
484  * of chains remain in a working copy, the operation may have to be
485  *      downgraded, retried, or stalled until the requisite number of chains
486  *      is available.
487  */
488 #define HAMMER2_MAXCLUSTER      8
489
490 struct hammer2_cluster_item {
491         hammer2_mtx_link_t      async_link;
492         hammer2_chain_t         *chain;
493         struct hammer2_cluster  *cluster;       /* link back to cluster */
494         int                     cache_index;
495         int                     unused01;
496 };
497
498 typedef struct hammer2_cluster_item hammer2_cluster_item_t;
499
500 struct hammer2_cluster {
501         int                     status;         /* operational status */
502         int                     refs;           /* track for deallocation */
503         struct hammer2_pfsmount *pmp;
504         uint32_t                flags;
505         int                     nchains;
506         hammer2_iocb_t          iocb;
507         hammer2_chain_t         *focus;         /* current focus (or mod) */
508         hammer2_cluster_item_t  array[HAMMER2_MAXCLUSTER];
509 };
510
511 typedef struct hammer2_cluster  hammer2_cluster_t;
512
513 #define HAMMER2_CLUSTER_INODE   0x00000001      /* embedded in inode */
514 #define HAMMER2_CLUSTER_NOSYNC  0x00000002      /* not in sync (cumulative) */
515
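/*
 * Illustrative sketch (hypothetical usage): the per-node chains backing a
 * cluster are addressed through the item array; empty slots are NULL.
 *
 *      for (i = 0; i < cluster->nchains; ++i) {
 *              chain = cluster->array[i].chain;
 *              if (chain == NULL)
 *                      continue;
 *              ... per-node operation on chain ...
 *      }
 */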
516
517 RB_HEAD(hammer2_inode_tree, hammer2_inode);
518
519 /*
520  * A hammer2 inode.
521  *
522  * NOTE: The inode-embedded cluster is never used directly for I/O (since
523  *       it may be shared).  Instead it will be replicated-in and synchronized
524  *       back out if changed.
525  */
526 struct hammer2_inode {
527         RB_ENTRY(hammer2_inode) rbnode;         /* inumber lookup (HL) */
528         hammer2_mtx_t           lock;           /* inode lock */
529         struct hammer2_pfsmount *pmp;           /* PFS mount */
530         struct hammer2_inode    *pip;           /* parent inode */
531         struct vnode            *vp;
532         hammer2_cluster_t       cluster;
533         struct lockf            advlock;
534         hammer2_tid_t           inum;
535         u_int                   flags;
536         u_int                   refs;           /* +vpref, +flushref */
537         uint8_t                 comp_heuristic;
538         hammer2_off_t           size;
539         uint64_t                mtime;
540 };
541
542 typedef struct hammer2_inode hammer2_inode_t;
543
544 #define HAMMER2_INODE_MODIFIED          0x0001
545 #define HAMMER2_INODE_SROOT             0x0002  /* kmalloc special case */
546 #define HAMMER2_INODE_RENAME_INPROG     0x0004
547 #define HAMMER2_INODE_ONRBTREE          0x0008
548 #define HAMMER2_INODE_RESIZED           0x0010
549 #define HAMMER2_INODE_MTIME             0x0020
550
551 int hammer2_inode_cmp(hammer2_inode_t *ip1, hammer2_inode_t *ip2);
552 RB_PROTOTYPE2(hammer2_inode_tree, hammer2_inode, rbnode, hammer2_inode_cmp,
553                 hammer2_tid_t);
554
555 /*
556  * inode-unlink side-structure
557  */
558 struct hammer2_inode_unlink {
559         TAILQ_ENTRY(hammer2_inode_unlink) entry;
560         hammer2_inode_t *ip;
561 };
562 TAILQ_HEAD(h2_unlk_list, hammer2_inode_unlink);
563
564 typedef struct hammer2_inode_unlink hammer2_inode_unlink_t;
565
566 /*
567  * A hammer2 transaction and flush sequencing structure.
568  *
569  * This global structure is tied into hammer2_mount and is used
570  * to sequence modifying operations and flushes.
571  *
572  * (a) Any modifying operations with sync_tid >= flush_tid will stall until
573  *     all modifying operations with sync_tid < flush_tid complete.
574  *
575  *     The flush related to flush_tid stalls until all modifying operations
576  *     with sync_tid < flush_tid complete.
577  *
578  * (b) Once unstalled, modifying operations with sync_tid > flush_tid are
579  *     allowed to run.  All modifications cause modify/duplicate operations
580  *     to occur on the related chains.  Note that most INDIRECT blocks will
581  *     be unaffected because the modifications just overload the RBTREE
582  *     structurally instead of actually modifying the indirect blocks.
583  *
584  * (c) The actual flush unstalls and RUNS CONCURRENTLY with (b), but only
585  *     utilizes the chain structures with sync_tid <= flush_tid.  The
586  *     flush will modify related indirect blocks and inodes in-place
587  *     (rather than duplicate) since the adjustments are compatible with
588  *     (b)'s RBTREE overloading.
589  *
590  *     SPECIAL NOTE:  Inode modifications have to also propagate along any
591  *                    modify/duplicate chains.  File writes detect the flush
592  *                    and force out the conflicting buffer cache buffer(s)
593  *                    before reusing them.
594  *
595  * (d) Snapshots can be made instantly but must be flushed and disconnected
596  *     from their duplicative source before they can be mounted.  This is
597  *     because while H2's on-media structure supports forks, its in-memory
598  *     structure only supports very simple forking for background flushing
599  *     purposes.
600  *
601  * TODO: Flush merging.  When fsync() is called on multiple discrete files
602  *       concurrently there is no reason to stall the second fsync.
603  *       The final flush that reaches the root can cover both fsync()s.
604  *
605  *     The chains typically terminate as they fly onto the disk.  The flush
606  *     ultimately reaches the volume header.
607  */
608 struct hammer2_trans {
609         TAILQ_ENTRY(hammer2_trans) entry;
610         struct hammer2_pfsmount *pmp;
611         hammer2_xid_t           sync_xid;
612         hammer2_tid_t           inode_tid;      /* inode number assignment */
613         thread_t                td;             /* pointer */
614         int                     flags;
615         int                     blocked;
616         uint8_t                 inodes_created;
617         uint8_t                 dummy[7];
618 };
619
620 typedef struct hammer2_trans hammer2_trans_t;
621
622 #define HAMMER2_TRANS_ISFLUSH           0x0001  /* formal flush */
623 #define HAMMER2_TRANS_CONCURRENT        0x0002  /* concurrent w/flush */
624 #define HAMMER2_TRANS_BUFCACHE          0x0004  /* from bioq strategy write */
625 #define HAMMER2_TRANS_NEWINODE          0x0008  /* caller allocating inode */
626 #define HAMMER2_TRANS_UNUSED0010        0x0010
627 #define HAMMER2_TRANS_PREFLUSH          0x0020  /* preflush state */
628
629 #define HAMMER2_FREEMAP_HEUR_NRADIX     4       /* pwr 2 PBUFRADIX-MINIORADIX */
630 #define HAMMER2_FREEMAP_HEUR_TYPES      8
631 #define HAMMER2_FREEMAP_HEUR            (HAMMER2_FREEMAP_HEUR_NRADIX * \
632                                          HAMMER2_FREEMAP_HEUR_TYPES)
633
634 /*
635  * Transaction Rendezvous
636  */
637 TAILQ_HEAD(hammer2_trans_queue, hammer2_trans);
638
639 struct hammer2_trans_manage {
640         hammer2_xid_t           flush_xid;      /* last flush transaction */
641         hammer2_xid_t           alloc_xid;
642         struct lock             translk;        /* lockmgr lock */
643         struct hammer2_trans_queue transq;      /* modifying transactions */
644         int                     flushcnt;       /* track flush trans */
645 };
646
647 typedef struct hammer2_trans_manage hammer2_trans_manage_t;
648
649 /*
650  * Global (per device) mount structure for device (aka vp->v_mount->hmp)
651  */
652 struct hammer2_mount {
653         struct vnode    *devvp;         /* device vnode */
654         int             ronly;          /* read-only mount */
655         int             pmp_count;      /* PFS mounts backed by us */
656         TAILQ_ENTRY(hammer2_mount) mntentry; /* hammer2_mntlist */
657
658         struct malloc_type *mchain;
659         int             nipstacks;
660         int             maxipstacks;
661         kdmsg_iocom_t   iocom;          /* volume-level dmsg interface */
662         struct spinlock io_spin;        /* iotree access */
663         struct hammer2_io_tree iotree;
664         int             iofree_count;
665         hammer2_chain_t vchain;         /* anchor chain (topology) */
666         hammer2_chain_t fchain;         /* anchor chain (freemap) */
667         struct spinlock list_spin;
668         struct h2_flush_list    flushq; /* flush seeds */
669         struct hammer2_pfsmount *spmp;  /* super-root pmp for transactions */
670         struct lock     vollk;          /* lockmgr lock */
671         hammer2_off_t   heur_freemap[HAMMER2_FREEMAP_HEUR];
672         int             volhdrno;       /* last volhdrno written */
673         hammer2_volume_data_t voldata;
674         hammer2_volume_data_t volsync;  /* synchronized voldata */
675 };
676
677 typedef struct hammer2_mount hammer2_mount_t;
678
679 /*
680  * HAMMER2 PFS mount point structure (aka vp->v_mount->mnt_data).
681  * This has a 1:1 correspondence to struct mount (note that the
682  * hammer2_mount structure has a N:1 correspondence).
683  *
684  * This structure represents a cluster mount and not necessarily a
685  * PFS under a specific device mount (HMP).  The distinction is important
686  * because the elements backing a cluster mount can change on the fly.
687  *
688  * Usually the first element under the cluster represents the original
689  * user-requested mount that bootstraps the whole mess.  In significant
690  * setups the original is usually just a read-only media image (or
691  * representative file) that simply contains a bootstrap volume header
692  * listing the configuration.
693  */
694 struct hammer2_pfsmount {
695         struct mount            *mp;
696         TAILQ_ENTRY(hammer2_pfsmount) mntentry; /* hammer2_pfslist */
697         uuid_t                  pfs_clid;
698         uuid_t                  pfs_fsid;
699         hammer2_mount_t         *spmp_hmp;      /* (spmp only) */
700         hammer2_inode_t         *iroot;         /* PFS root inode */
701         hammer2_inode_t         *ihidden;       /* PFS hidden directory */
702         struct lock             lock;           /* PFS lock for certain ops */
703         hammer2_off_t           inode_count;    /* copy of inode_count */
704         struct netexport        export;         /* nfs export */
705         int                     ronly;          /* read-only mount */
706         struct malloc_type      *minode;
707         struct malloc_type      *mmsg;
708         struct spinlock         inum_spin;      /* inumber lookup */
709         struct hammer2_inode_tree inum_tree;    /* (not applicable to spmp) */
710         hammer2_tid_t           alloc_tid;
711         hammer2_tid_t           flush_tid;
712         hammer2_tid_t           inode_tid;
713         long                    inmem_inodes;
714         uint32_t                inmem_dirty_chains;
715         int                     count_lwinprog; /* logical write in prog */
716         struct spinlock         list_spin;
717         struct h2_unlk_list     unlinkq;        /* last-close unlink */
718         thread_t                wthread_td;     /* write thread td */
719         struct bio_queue_head   wthread_bioq;   /* logical buffer bioq */
720         hammer2_mtx_t           wthread_mtx;    /* interlock */
721         int                     wthread_destroy;/* termination sequencing */
722 };
723
724 typedef struct hammer2_pfsmount hammer2_pfsmount_t;
725
726 #define HAMMER2_DIRTYCHAIN_WAITING      0x80000000
727 #define HAMMER2_DIRTYCHAIN_MASK         0x7FFFFFFF
728
729 #define HAMMER2_LWINPROG_WAITING        0x80000000
730 #define HAMMER2_LWINPROG_MASK           0x7FFFFFFF
731
732 /*
733  * Bulkscan
734  */
735 #define HAMMER2_BULK_ABORT      0x00000001
736
737 /*
738  * Misc
739  */
740 #if defined(_KERNEL)
741
742 MALLOC_DECLARE(M_HAMMER2);
743
744 #define VTOI(vp)        ((hammer2_inode_t *)(vp)->v_data)
745 #define ITOV(ip)        ((ip)->vp)
746
747 /*
748  * Currently locked chains retain the locked buffer cache buffer for
749  * indirect blocks, and indirect blocks can be one of two sizes.  The
750  * device buffer has to match the case to avoid deadlocking recursive
751  * chains that might otherwise try to access different offsets within
752  * the same device buffer.
753  */
754 static __inline
755 int
756 hammer2_devblkradix(int radix)
757 {
758 #if 0
759         if (radix <= HAMMER2_LBUFRADIX) {
760                 return (HAMMER2_LBUFRADIX);
761         } else {
762                 return (HAMMER2_PBUFRADIX);
763         }
764 #endif
765         return (HAMMER2_PBUFRADIX);
766 }
767
768 /*
769  * XXX almost time to remove this.  DIO uses PBUFSIZE exclusively now.
770  */
771 static __inline
772 size_t
773 hammer2_devblksize(size_t bytes)
774 {
775 #if 0
776         if (bytes <= HAMMER2_LBUFSIZE) {
777                 return(HAMMER2_LBUFSIZE);
778         } else {
779                 KKASSERT(bytes <= HAMMER2_PBUFSIZE &&
780                          (bytes ^ (bytes - 1)) == ((bytes << 1) - 1));
781                 return (HAMMER2_PBUFSIZE);
782         }
783 #endif
784         return (HAMMER2_PBUFSIZE);
785 }
786
787
788 static __inline
789 hammer2_pfsmount_t *
790 MPTOPMP(struct mount *mp)
791 {
792         return ((hammer2_pfsmount_t *)mp->mnt_data);
793 }
794
795 #define LOCKSTART       int __nlocks = curthread->td_locks
796 #define LOCKENTER       (++curthread->td_locks)
797 #define LOCKEXIT        (--curthread->td_locks)
798 #define LOCKSTOP        KKASSERT(curthread->td_locks == __nlocks)
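
/*
 * Illustrative sketch ("hammer2_example_vop" is hypothetical): LOCKSTART and
 * LOCKSTOP bracket a function to assert that it does not leak tracked locks.
 *
 *      static int
 *      hammer2_example_vop(struct vop_generic_args *ap)
 *      {
 *              LOCKSTART;
 *              ... acquire and release locks ...
 *              LOCKSTOP;
 *              return (0);
 *      }
 */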
799
800 extern struct vop_ops hammer2_vnode_vops;
801 extern struct vop_ops hammer2_spec_vops;
802 extern struct vop_ops hammer2_fifo_vops;
803
804 extern int hammer2_debug;
805 extern int hammer2_cluster_enable;
806 extern int hammer2_hardlink_enable;
807 extern int hammer2_flush_pipe;
808 extern int hammer2_synchronous_flush;
809 extern int hammer2_dio_count;
810 extern long hammer2_limit_dirty_chains;
811 extern long hammer2_iod_file_read;
812 extern long hammer2_iod_meta_read;
813 extern long hammer2_iod_indr_read;
814 extern long hammer2_iod_fmap_read;
815 extern long hammer2_iod_volu_read;
816 extern long hammer2_iod_file_write;
817 extern long hammer2_iod_meta_write;
818 extern long hammer2_iod_indr_write;
819 extern long hammer2_iod_fmap_write;
820 extern long hammer2_iod_volu_write;
821 extern long hammer2_ioa_file_read;
822 extern long hammer2_ioa_meta_read;
823 extern long hammer2_ioa_indr_read;
824 extern long hammer2_ioa_fmap_read;
825 extern long hammer2_ioa_volu_read;
826 extern long hammer2_ioa_file_write;
827 extern long hammer2_ioa_meta_write;
828 extern long hammer2_ioa_indr_write;
829 extern long hammer2_ioa_fmap_write;
830 extern long hammer2_ioa_volu_write;
831
832 extern struct objcache *cache_buffer_read;
833 extern struct objcache *cache_buffer_write;
834
835 extern int destroy;
836 extern int write_thread_wakeup;
837
838 /*
839  * hammer2_subr.c
840  */
841 #define hammer2_icrc32(buf, size)       iscsi_crc32((buf), (size))
842 #define hammer2_icrc32c(buf, size, crc) iscsi_crc32_ext((buf), (size), (crc))
843
844 int hammer2_signal_check(time_t *timep);
845 hammer2_cluster_t *hammer2_inode_lock_ex(hammer2_inode_t *ip);
846 hammer2_cluster_t *hammer2_inode_lock_nex(hammer2_inode_t *ip, int how);
847 hammer2_cluster_t *hammer2_inode_lock_sh(hammer2_inode_t *ip);
848 void hammer2_inode_unlock_ex(hammer2_inode_t *ip, hammer2_cluster_t *chain);
849 void hammer2_inode_unlock_sh(hammer2_inode_t *ip, hammer2_cluster_t *chain);
850 hammer2_mtx_state_t hammer2_inode_lock_temp_release(hammer2_inode_t *ip);
851 void hammer2_inode_lock_temp_restore(hammer2_inode_t *ip,
852                         hammer2_mtx_state_t ostate);
853 int hammer2_inode_lock_upgrade(hammer2_inode_t *ip);
854 void hammer2_inode_lock_downgrade(hammer2_inode_t *ip, int);
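
/*
 * Illustrative sketch (hypothetical usage): the exclusive inode lock returns
 * a locked cluster representing the inode, which must be handed back to the
 * matching unlock:
 *
 *      hammer2_cluster_t *cparent;
 *
 *      cparent = hammer2_inode_lock_ex(ip);
 *      ... operate on the inode and its cluster ...
 *      hammer2_inode_unlock_ex(ip, cparent);
 */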
855
856 void hammer2_mount_exlock(hammer2_mount_t *hmp);
857 void hammer2_mount_shlock(hammer2_mount_t *hmp);
858 void hammer2_mount_unlock(hammer2_mount_t *hmp);
859
860 int hammer2_get_dtype(const hammer2_inode_data_t *ipdata);
861 int hammer2_get_vtype(const hammer2_inode_data_t *ipdata);
862 u_int8_t hammer2_get_obj_type(enum vtype vtype);
863 void hammer2_time_to_timespec(u_int64_t xtime, struct timespec *ts);
864 u_int64_t hammer2_timespec_to_time(const struct timespec *ts);
865 u_int32_t hammer2_to_unix_xid(const uuid_t *uuid);
866 void hammer2_guid_to_uuid(uuid_t *uuid, u_int32_t guid);
867 hammer2_xid_t hammer2_trans_newxid(hammer2_pfsmount_t *pmp);
868 void hammer2_trans_manage_init(void);
869
870 hammer2_key_t hammer2_dirhash(const unsigned char *name, size_t len);
871 int hammer2_getradix(size_t bytes);
872
873 int hammer2_calc_logical(hammer2_inode_t *ip, hammer2_off_t uoff,
874                         hammer2_key_t *lbasep, hammer2_key_t *leofp);
875 int hammer2_calc_physical(hammer2_inode_t *ip,
876                         const hammer2_inode_data_t *ipdata,
877                         hammer2_key_t lbase);
878 void hammer2_update_time(uint64_t *timep);
879 void hammer2_adjreadcounter(hammer2_blockref_t *bref, size_t bytes);
880
881 /*
882  * hammer2_inode.c
883  */
884 struct vnode *hammer2_igetv(hammer2_inode_t *ip, hammer2_cluster_t *cparent,
885                         int *errorp);
886 void hammer2_inode_lock_nlinks(hammer2_inode_t *ip);
887 void hammer2_inode_unlock_nlinks(hammer2_inode_t *ip);
888 hammer2_inode_t *hammer2_inode_lookup(hammer2_pfsmount_t *pmp,
889                         hammer2_tid_t inum);
890 hammer2_inode_t *hammer2_inode_get(hammer2_pfsmount_t *pmp,
891                         hammer2_inode_t *dip, hammer2_cluster_t *cluster);
892 void hammer2_inode_free(hammer2_inode_t *ip);
893 void hammer2_inode_ref(hammer2_inode_t *ip);
894 void hammer2_inode_drop(hammer2_inode_t *ip);
895 void hammer2_inode_repoint(hammer2_inode_t *ip, hammer2_inode_t *pip,
896                         hammer2_cluster_t *cluster);
897 void hammer2_run_unlinkq(hammer2_trans_t *trans, hammer2_pfsmount_t *pmp);
898
899 hammer2_inode_t *hammer2_inode_create(hammer2_trans_t *trans,
900                         hammer2_inode_t *dip,
901                         struct vattr *vap, struct ucred *cred,
902                         const uint8_t *name, size_t name_len,
903                         hammer2_cluster_t **clusterp, int *errorp);
904 int hammer2_inode_connect(hammer2_trans_t *trans,
905                         hammer2_cluster_t **clusterp, int hlink,
906                         hammer2_inode_t *dip, hammer2_cluster_t *dcluster,
907                         const uint8_t *name, size_t name_len,
908                         hammer2_key_t key);
909 hammer2_inode_t *hammer2_inode_common_parent(hammer2_inode_t *fdip,
910                         hammer2_inode_t *tdip);
911 void hammer2_inode_fsync(hammer2_trans_t *trans, hammer2_inode_t *ip,
912                         hammer2_cluster_t *cparent);
913 int hammer2_unlink_file(hammer2_trans_t *trans, hammer2_inode_t *dip,
914                         const uint8_t *name, size_t name_len, int isdir,
915                         int *hlinkp, struct nchandle *nch, int nlinks);
916 int hammer2_hardlink_consolidate(hammer2_trans_t *trans,
917                         hammer2_inode_t *ip, hammer2_cluster_t **clusterp,
918                         hammer2_inode_t *cdip, hammer2_cluster_t *cdcluster,
919                         int nlinks);
920 int hammer2_hardlink_deconsolidate(hammer2_trans_t *trans, hammer2_inode_t *dip,
921                         hammer2_chain_t **chainp, hammer2_chain_t **ochainp);
922 int hammer2_hardlink_find(hammer2_inode_t *dip, hammer2_cluster_t **cparentp,
923                         hammer2_cluster_t *cluster);
924 int hammer2_parent_find(hammer2_cluster_t **cparentp,
925                         hammer2_cluster_t *cluster);
926 void hammer2_inode_install_hidden(hammer2_pfsmount_t *pmp);
927
928 /*
929  * hammer2_chain.c
930  */
931 void hammer2_voldata_lock(hammer2_mount_t *hmp);
932 void hammer2_voldata_unlock(hammer2_mount_t *hmp);
933 void hammer2_voldata_modify(hammer2_mount_t *hmp);
934 hammer2_chain_t *hammer2_chain_alloc(hammer2_mount_t *hmp,
935                                 hammer2_pfsmount_t *pmp,
936                                 hammer2_trans_t *trans,
937                                 hammer2_blockref_t *bref);
938 void hammer2_chain_core_alloc(hammer2_trans_t *trans, hammer2_chain_t *chain);
939 void hammer2_chain_ref(hammer2_chain_t *chain);
940 void hammer2_chain_drop(hammer2_chain_t *chain);
941 int hammer2_chain_lock(hammer2_chain_t *chain, int how);
942 const hammer2_media_data_t *hammer2_chain_rdata(hammer2_chain_t *chain);
943 hammer2_media_data_t *hammer2_chain_wdata(hammer2_chain_t *chain);
944
945 /*
946  * hammer2_cluster.c
947  */
948 int hammer2_cluster_isunlinked(hammer2_cluster_t *cluster);
949 void hammer2_cluster_load_async(hammer2_cluster_t *cluster,
950                                 void (*callback)(hammer2_iocb_t *iocb),
951                                 void *ptr);
952 void hammer2_chain_moved(hammer2_chain_t *chain);
953 void hammer2_chain_modify(hammer2_trans_t *trans,
954                                 hammer2_chain_t *chain, int flags);
955 void hammer2_chain_resize(hammer2_trans_t *trans, hammer2_inode_t *ip,
956                                 hammer2_chain_t *parent,
957                                 hammer2_chain_t *chain,
958                                 int nradix, int flags);
959 void hammer2_chain_unlock(hammer2_chain_t *chain);
960 void hammer2_chain_wait(hammer2_chain_t *chain);
961 hammer2_chain_t *hammer2_chain_get(hammer2_chain_t *parent, int generation,
962                                 hammer2_blockref_t *bref);
963 hammer2_chain_t *hammer2_chain_lookup_init(hammer2_chain_t *parent, int flags);
964 void hammer2_chain_lookup_done(hammer2_chain_t *parent);
965 hammer2_chain_t *hammer2_chain_lookup(hammer2_chain_t **parentp,
966                                 hammer2_key_t *key_nextp,
967                                 hammer2_key_t key_beg, hammer2_key_t key_end,
968                                 int *cache_indexp, int flags, int *ddflagp);
969 hammer2_chain_t *hammer2_chain_next(hammer2_chain_t **parentp,
970                                 hammer2_chain_t *chain,
971                                 hammer2_key_t *key_nextp,
972                                 hammer2_key_t key_beg, hammer2_key_t key_end,
973                                 int *cache_indexp, int flags);
974 hammer2_chain_t *hammer2_chain_scan(hammer2_chain_t *parent,
975                                 hammer2_chain_t *chain,
976                                 int *cache_indexp, int flags);
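
/*
 * Illustrative sketch (hypothetical usage; "base", the key range, and the
 * locals are made up): a ranged scan acquires the parent via
 * lookup_init/lookup_done and iterates with lookup/next.  Remember that
 * key_end is inclusive.
 *
 *      parent = hammer2_chain_lookup_init(base, 0);
 *      chain = hammer2_chain_lookup(&parent, &key_next, key_beg, key_end,
 *                                   &cache_index, 0, &ddflag);
 *      while (chain) {
 *              ... process chain ...
 *              chain = hammer2_chain_next(&parent, chain, &key_next,
 *                                         key_next, key_end,
 *                                         &cache_index, 0);
 *      }
 *      hammer2_chain_lookup_done(parent);
 */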
977
978 int hammer2_chain_create(hammer2_trans_t *trans, hammer2_chain_t **parentp,
979                                 hammer2_chain_t **chainp,
980                                 hammer2_pfsmount_t *pmp,
981                                 hammer2_key_t key, int keybits,
982                                 int type, size_t bytes, int flags);
983 void hammer2_chain_rename(hammer2_trans_t *trans, hammer2_blockref_t *bref,
984                                 hammer2_chain_t **parentp,
985                                 hammer2_chain_t *chain, int flags);
986 int hammer2_chain_snapshot(hammer2_trans_t *trans, hammer2_chain_t **chainp,
987                                 hammer2_ioc_pfs_t *pfs);
988 void hammer2_chain_delete(hammer2_trans_t *trans, hammer2_chain_t *parent,
989                                 hammer2_chain_t *chain, int flags);
990 void hammer2_chain_delete_duplicate(hammer2_trans_t *trans,
991                                 hammer2_chain_t **chainp, int flags);
992 void hammer2_flush(hammer2_trans_t *trans, hammer2_chain_t *chain);
993 void hammer2_chain_commit(hammer2_trans_t *trans, hammer2_chain_t *chain);
994 void hammer2_chain_setflush(hammer2_trans_t *trans, hammer2_chain_t *chain);
995 void hammer2_chain_countbrefs(hammer2_chain_t *chain,
996                                 hammer2_blockref_t *base, int count);
997
998 void hammer2_chain_setcheck(hammer2_chain_t *chain, void *bdata);
999 int hammer2_chain_testcheck(hammer2_chain_t *chain, void *bdata);
1000
1001
1002 void hammer2_pfs_memory_wait(hammer2_pfsmount_t *pmp);
1003 void hammer2_pfs_memory_inc(hammer2_pfsmount_t *pmp);
1004 void hammer2_pfs_memory_wakeup(hammer2_pfsmount_t *pmp);
1005
1006 void hammer2_base_delete(hammer2_trans_t *trans, hammer2_chain_t *chain,
1007                                 hammer2_blockref_t *base, int count,
1008                                 int *cache_indexp, hammer2_chain_t *child);
1009 void hammer2_base_insert(hammer2_trans_t *trans, hammer2_chain_t *chain,
1010                                 hammer2_blockref_t *base, int count,
1011                                 int *cache_indexp, hammer2_chain_t *child);
1012
1013 /*
1014  * hammer2_trans.c
1015  */
1016 void hammer2_trans_init(hammer2_trans_t *trans, hammer2_pfsmount_t *pmp,
1017                                 int flags);
1018 void hammer2_trans_spmp(hammer2_trans_t *trans, hammer2_pfsmount_t *pmp);
1019 void hammer2_trans_done(hammer2_trans_t *trans);
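
/*
 * Illustrative sketch (hypothetical usage): modifying operations are
 * bracketed by a transaction:
 *
 *      hammer2_trans_t trans;
 *
 *      hammer2_trans_init(&trans, ip->pmp, 0);
 *      ... perform modifications ...
 *      hammer2_trans_done(&trans);
 */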
1020
1021 /*
1022  * hammer2_ioctl.c
1023  */
1024 int hammer2_ioctl(hammer2_inode_t *ip, u_long com, void *data,
1025                                 int fflag, struct ucred *cred);
1026
1027 /*
1028  * hammer2_io.c
1029  */
1030 void hammer2_io_putblk(hammer2_io_t **diop);
1031 void hammer2_io_cleanup(hammer2_mount_t *hmp, struct hammer2_io_tree *tree);
1032 char *hammer2_io_data(hammer2_io_t *dio, off_t lbase);
1033 void hammer2_io_getblk(hammer2_mount_t *hmp, off_t lbase, int lsize,
1034                                 hammer2_iocb_t *iocb);
1035 void hammer2_io_complete(hammer2_iocb_t *iocb);
1036 void hammer2_io_callback(struct bio *bio);
1037 void hammer2_iocb_wait(hammer2_iocb_t *iocb);
1038 int hammer2_io_new(hammer2_mount_t *hmp, off_t lbase, int lsize,
1039                                 hammer2_io_t **diop);
1040 int hammer2_io_newnz(hammer2_mount_t *hmp, off_t lbase, int lsize,
1041                                 hammer2_io_t **diop);
1042 int hammer2_io_newq(hammer2_mount_t *hmp, off_t lbase, int lsize,
1043                                 hammer2_io_t **diop);
1044 int hammer2_io_bread(hammer2_mount_t *hmp, off_t lbase, int lsize,
1045                                 hammer2_io_t **diop);
1046 void hammer2_io_bawrite(hammer2_io_t **diop);
1047 void hammer2_io_bdwrite(hammer2_io_t **diop);
1048 int hammer2_io_bwrite(hammer2_io_t **diop);
1049 int hammer2_io_isdirty(hammer2_io_t *dio);
1050 void hammer2_io_setdirty(hammer2_io_t *dio);
1051 void hammer2_io_setinval(hammer2_io_t *dio, u_int bytes);
1052 void hammer2_io_brelse(hammer2_io_t **diop);
1053 void hammer2_io_bqrelse(hammer2_io_t **diop);
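
/*
 * Illustrative sketch (hypothetical usage, error path elided): a synchronous
 * media read through the DIO layer brings the buffer in, accesses the data
 * at the logical base offset, and then drops the DIO reference.
 *
 *      error = hammer2_io_bread(hmp, lbase, lsize, &dio);
 *      if (error == 0) {
 *              data = hammer2_io_data(dio, lbase);
 *              ... consume up to lsize bytes at data ...
 *              hammer2_io_putblk(&dio);
 *      }
 */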
1054
1055 /*
1056  * hammer2_msgops.c
1057  */
1058 int hammer2_msg_dbg_rcvmsg(kdmsg_msg_t *msg);
1059 int hammer2_msg_adhoc_input(kdmsg_msg_t *msg);
1060
1061 /*
1062  * hammer2_vfsops.c
1063  */
1064 void hammer2_clusterctl_wakeup(kdmsg_iocom_t *iocom);
1065 void hammer2_volconf_update(hammer2_mount_t *hmp, int index);
1066 void hammer2_dump_chain(hammer2_chain_t *chain, int tab, int *countp, char pfx);
1067 void hammer2_bioq_sync(hammer2_pfsmount_t *pmp);
1068 int hammer2_vfs_sync(struct mount *mp, int waitflags);
1069 void hammer2_lwinprog_ref(hammer2_pfsmount_t *pmp);
1070 void hammer2_lwinprog_drop(hammer2_pfsmount_t *pmp);
1071 void hammer2_lwinprog_wait(hammer2_pfsmount_t *pmp);
1072
1073 /*
1074  * hammer2_freemap.c
1075  */
1076 int hammer2_freemap_alloc(hammer2_trans_t *trans, hammer2_chain_t *chain,
1077                                 size_t bytes);
1078 void hammer2_freemap_adjust(hammer2_trans_t *trans, hammer2_mount_t *hmp,
1079                                 hammer2_blockref_t *bref, int how);
1080
1081 /*
1082  * hammer2_cluster.c
1083  */
1084 int hammer2_cluster_need_resize(hammer2_cluster_t *cluster, int bytes);
1085 uint8_t hammer2_cluster_type(hammer2_cluster_t *cluster);
1086 const hammer2_media_data_t *hammer2_cluster_rdata(hammer2_cluster_t *cluster);
1087 hammer2_media_data_t *hammer2_cluster_wdata(hammer2_cluster_t *cluster);
1088 hammer2_cluster_t *hammer2_cluster_from_chain(hammer2_chain_t *chain);
1089 int hammer2_cluster_modified(hammer2_cluster_t *cluster);
1090 int hammer2_cluster_duplicated(hammer2_cluster_t *cluster);
1091 void hammer2_cluster_set_chainflags(hammer2_cluster_t *cluster, uint32_t flags);
1092 void hammer2_cluster_clr_chainflags(hammer2_cluster_t *cluster, uint32_t flags);
1093 void hammer2_cluster_bref(hammer2_cluster_t *cluster, hammer2_blockref_t *bref);
1094 void hammer2_cluster_setflush(hammer2_trans_t *trans,
1095                         hammer2_cluster_t *cluster);
1096 void hammer2_cluster_setmethod_check(hammer2_trans_t *trans,
1097                         hammer2_cluster_t *cluster, int check_algo);
1098 hammer2_cluster_t *hammer2_cluster_alloc(hammer2_pfsmount_t *pmp,
1099                         hammer2_trans_t *trans,
1100                         hammer2_blockref_t *bref);
1101 void hammer2_cluster_ref(hammer2_cluster_t *cluster);
1102 void hammer2_cluster_drop(hammer2_cluster_t *cluster);
1103 void hammer2_cluster_wait(hammer2_cluster_t *cluster);
1104 int hammer2_cluster_lock(hammer2_cluster_t *cluster, int how);
1105 void hammer2_cluster_replace(hammer2_cluster_t *dst, hammer2_cluster_t *src);
1106 void hammer2_cluster_replace_locked(hammer2_cluster_t *dst,
1107                         hammer2_cluster_t *src);
1108 hammer2_cluster_t *hammer2_cluster_copy(hammer2_cluster_t *ocluster);
1109 void hammer2_cluster_unlock(hammer2_cluster_t *cluster);
1110 void hammer2_cluster_resize(hammer2_trans_t *trans, hammer2_inode_t *ip,
1111                         hammer2_cluster_t *cparent, hammer2_cluster_t *cluster,
1112                         int nradix, int flags);
1113 hammer2_inode_data_t *hammer2_cluster_modify_ip(hammer2_trans_t *trans,
1114                         hammer2_inode_t *ip, hammer2_cluster_t *cluster,
1115                         int flags);
1116 void hammer2_cluster_modify(hammer2_trans_t *trans, hammer2_cluster_t *cluster,
1117                         int flags);
1118 void hammer2_cluster_modsync(hammer2_cluster_t *cluster);
1119 hammer2_cluster_t *hammer2_cluster_lookup_init(hammer2_cluster_t *cparent,
1120                         int flags);
1121 void hammer2_cluster_lookup_done(hammer2_cluster_t *cparent);
1122 hammer2_cluster_t *hammer2_cluster_lookup(hammer2_cluster_t *cparent,
1123                         hammer2_key_t *key_nextp,
1124                         hammer2_key_t key_beg, hammer2_key_t key_end,
1125                         int flags, int *ddflagp);
1126 hammer2_cluster_t *hammer2_cluster_next(hammer2_cluster_t *cparent,
1127                         hammer2_cluster_t *cluster,
1128                         hammer2_key_t *key_nextp,
1129                         hammer2_key_t key_beg, hammer2_key_t key_end,
1130                         int flags);
1131 hammer2_cluster_t *hammer2_cluster_scan(hammer2_cluster_t *cparent,
1132                         hammer2_cluster_t *cluster, int flags);
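
/*
 * Illustrative sketch (hypothetical usage): the cluster API mirrors the
 * chain API; a shared ranged scan under an inode lock might look like:
 *
 *      cparent = hammer2_inode_lock_sh(ip);
 *      cluster = hammer2_cluster_lookup(cparent, &key_next, key_beg, key_end,
 *                                       HAMMER2_LOOKUP_SHARED, &ddflag);
 *      while (cluster) {
 *              ... process cluster ...
 *              cluster = hammer2_cluster_next(cparent, cluster, &key_next,
 *                                             key_next, key_end,
 *                                             HAMMER2_LOOKUP_SHARED);
 *      }
 *      hammer2_inode_unlock_sh(ip, cparent);
 */
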
1133 int hammer2_cluster_create(hammer2_trans_t *trans, hammer2_cluster_t *cparent,
1134                         hammer2_cluster_t **clusterp,
1135                         hammer2_key_t key, int keybits,
1136                         int type, size_t bytes, int flags);
1137 void hammer2_cluster_rename(hammer2_trans_t *trans, hammer2_blockref_t *bref,
1138                         hammer2_cluster_t *cparent, hammer2_cluster_t *cluster,
1139                         int flags);
1140 void hammer2_cluster_delete(hammer2_trans_t *trans, hammer2_cluster_t *pcluster,
1141                         hammer2_cluster_t *cluster, int flags);
1142 int hammer2_cluster_snapshot(hammer2_trans_t *trans,
1143                         hammer2_cluster_t *ocluster, hammer2_ioc_pfs_t *pfs);
1144 hammer2_cluster_t *hammer2_cluster_parent(hammer2_cluster_t *cluster);
1145
1146 int hammer2_bulk_scan(hammer2_trans_t *trans, hammer2_chain_t *parent,
1147                         int (*func)(hammer2_chain_t *chain, void *info),
1148                         void *info);
1149 int hammer2_bulkfree_pass(hammer2_mount_t *hmp,
1150                         struct hammer2_ioc_bulkfree *bfi);
1151
1152 /*
1153  * hammer2_iocom.c
1154  */
1155 void hammer2_iocom_init(hammer2_mount_t *hmp);
1156 void hammer2_iocom_uninit(hammer2_mount_t *hmp);
1157 void hammer2_cluster_reconnect(hammer2_mount_t *hmp, struct file *fp);
1158
1159 #endif /* _KERNEL */
1160 #endif /* !_VFS_HAMMER2_HAMMER2_H_ */