Commit | Line | Data |
---|---|---|
8750964d | 1 | /* |
1f07f686 | 2 | * Copyright (c) 2007-2008 The DragonFly Project. All rights reserved. |
8750964d MD |
3 | * |
4 | * This code is derived from software contributed to The DragonFly Project | |
5 | * by Matthew Dillon <dillon@backplane.com> | |
6 | * | |
7 | * Redistribution and use in source and binary forms, with or without | |
8 | * modification, are permitted provided that the following conditions | |
9 | * are met: | |
10 | * | |
11 | * 1. Redistributions of source code must retain the above copyright | |
12 | * notice, this list of conditions and the following disclaimer. | |
13 | * 2. Redistributions in binary form must reproduce the above copyright | |
14 | * notice, this list of conditions and the following disclaimer in | |
15 | * the documentation and/or other materials provided with the | |
16 | * distribution. | |
17 | * 3. Neither the name of The DragonFly Project nor the names of its | |
18 | * contributors may be used to endorse or promote products derived | |
19 | * from this software without specific, prior written permission. | |
20 | * | |
21 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | |
22 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | |
23 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS | |
24 | * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE | |
25 | * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, | |
26 | * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, | |
27 | * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; | |
28 | * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED | |
29 | * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, | |
30 | * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT | |
31 | * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | |
32 | * SUCH DAMAGE. | |
33 | * | |
44a83111 | 34 | * $DragonFly: src/sys/vfs/hammer/hammer.h,v 1.130 2008/11/13 02:18:43 dillon Exp $ |
8750964d MD |
35 | */ |
36 | /* | |
37 | * This header file contains structures used internally by the HAMMERFS | |
c60bb2c5 | 38 | * implementation. See hammer_disk.h for on-disk structures. |
8750964d MD |
39 | */ |
40 | ||
427e5fc6 MD |
41 | #include <sys/param.h> |
42 | #include <sys/types.h> | |
43 | #include <sys/kernel.h> | |
42c7d26b | 44 | #include <sys/conf.h> |
427e5fc6 | 45 | #include <sys/systm.h> |
8750964d MD |
46 | #include <sys/tree.h> |
47 | #include <sys/malloc.h> | |
427e5fc6 | 48 | #include <sys/mount.h> |
513ca7d7 | 49 | #include <sys/mountctl.h> |
427e5fc6 | 50 | #include <sys/vnode.h> |
42c7d26b | 51 | #include <sys/proc.h> |
895c1f85 | 52 | #include <sys/priv.h> |
e63644f0 | 53 | #include <sys/stat.h> |
427e5fc6 | 54 | #include <sys/globaldata.h> |
66325755 MD |
55 | #include <sys/lockf.h> |
56 | #include <sys/buf.h> | |
8cd0a023 | 57 | #include <sys/queue.h> |
bcac4bbb | 58 | #include <sys/ktr.h> |
66325755 | 59 | #include <sys/globaldata.h> |
9192654c | 60 | #include <sys/limits.h> |
c652be54 | 61 | #include <vm/vm_extern.h> |
66325755 MD |
62 | |
63 | #include <sys/buf2.h> | |
855942b6 | 64 | #include <sys/signal2.h> |
427e5fc6 | 65 | #include "hammer_disk.h" |
8750964d | 66 | #include "hammer_mount.h" |
7dc57964 | 67 | #include "hammer_ioctl.h" |
8750964d MD |
68 | |
69 | #if defined(_KERNEL) || defined(_KERNEL_STRUCTURES) | |
70 | ||
71 | MALLOC_DECLARE(M_HAMMER); | |
72 | ||
bcac4bbb MD |
73 | /* |
74 | * Kernel trace | |
75 | */ | |
76 | #if !defined(KTR_HAMMER) | |
77 | #define KTR_HAMMER KTR_ALL | |
78 | #endif | |
79 | KTR_INFO_MASTER_EXTERN(hammer); | |
80 | ||
81 | /* | |
82 | * Misc structures | |
83 | */ | |
66325755 MD |
84 | struct hammer_mount; |
85 | ||
8750964d MD |
86 | /* |
87 | * Key structure used for custom RB tree inode lookups. This prototypes | |
88 | * the function hammer_ino_rb_tree_RB_LOOKUP_INFO(root, info). | |
89 | */ | |
90 | typedef struct hammer_inode_info { | |
513ca7d7 | 91 | int64_t obj_id; /* (key) object identifier */ |
8750964d | 92 | hammer_tid_t obj_asof; /* (key) snapshot transid or 0 */ |
ddfdf542 | 93 | u_int32_t obj_localization; /* (key) pseudo-fs */ |
43c665ae MD |
94 | union { |
95 | struct hammer_btree_leaf_elm *leaf; | |
96 | } u; | |
8750964d MD |
97 | } *hammer_inode_info_t; |
98 | ||
b84de5af MD |
/* Transaction classes used by struct hammer_transaction. */
typedef enum hammer_transaction_type {
        HAMMER_TRANS_RO,
        HAMMER_TRANS_STD,
        HAMMER_TRANS_FLS
} hammer_transaction_type_t;
104 | ||
66325755 MD |
105 | /* |
106 | * HAMMER Transaction tracking | |
107 | */ | |
108 | struct hammer_transaction { | |
b84de5af | 109 | hammer_transaction_type_t type; |
66325755 MD |
110 | struct hammer_mount *hmp; |
111 | hammer_tid_t tid; | |
ddfdf542 | 112 | u_int64_t time; |
dd94f1b1 | 113 | u_int32_t time32; |
2f85fa4d | 114 | int sync_lock_refs; |
21fde338 | 115 | int flags; |
a89aec1b | 116 | struct hammer_volume *rootvol; |
66325755 MD |
117 | }; |
118 | ||
8cd0a023 MD |
119 | typedef struct hammer_transaction *hammer_transaction_t; |
120 | ||
21fde338 | 121 | #define HAMMER_TRANSF_NEWINODE 0x0001 |
82010f9f | 122 | #define HAMMER_TRANSF_DIDIO 0x0002 |
4c286c36 | 123 | #define HAMMER_TRANSF_CRCDOM 0x0004 /* EDOM on CRC error, less critical */ |
21fde338 | 124 | |
66325755 MD |
125 | /* |
126 | * HAMMER locks | |
127 | */ | |
427e5fc6 | 128 | struct hammer_lock { |
250aec18 | 129 | volatile u_int refs; /* active references */ |
899eb297 | 130 | volatile u_int lockval; /* lock count and control bits */ |
250aec18 MD |
131 | struct thread *lowner; /* owner if exclusively held */ |
132 | struct thread *rowner; /* owner if exclusively held */ | |
8750964d MD |
133 | }; |
134 | ||
250aec18 MD |
135 | #define HAMMER_REFS_LOCKED 0x40000000 /* transition check */ |
136 | #define HAMMER_REFS_WANTED 0x20000000 /* transition check */ | |
137 | #define HAMMER_REFS_CHECK 0x10000000 /* transition check */ | |
138 | ||
139 | #define HAMMER_REFS_FLAGS (HAMMER_REFS_LOCKED | \ | |
140 | HAMMER_REFS_WANTED | \ | |
141 | HAMMER_REFS_CHECK) | |
142 | ||
899eb297 | 143 | #define HAMMER_LOCKF_EXCLUSIVE 0x40000000 |
250aec18 | 144 | #define HAMMER_LOCKF_WANTED 0x20000000 |
899eb297 MD |
145 | |
146 | static __inline int | |
147 | hammer_notlocked(struct hammer_lock *lock) | |
148 | { | |
149 | return(lock->lockval == 0); | |
150 | } | |
151 | ||
427e5fc6 MD |
152 | static __inline int |
153 | hammer_islocked(struct hammer_lock *lock) | |
154 | { | |
899eb297 | 155 | return(lock->lockval != 0); |
427e5fc6 MD |
156 | } |
157 | ||
250aec18 MD |
158 | /* |
159 | * Returns the number of refs on the object. | |
160 | */ | |
0b075555 MD |
161 | static __inline int |
162 | hammer_isactive(struct hammer_lock *lock) | |
163 | { | |
250aec18 MD |
164 | return(lock->refs & ~HAMMER_REFS_FLAGS); |
165 | } | |
166 | ||
167 | static __inline int | |
168 | hammer_oneref(struct hammer_lock *lock) | |
169 | { | |
170 | return((lock->refs & ~HAMMER_REFS_FLAGS) == 1); | |
0b075555 MD |
171 | } |
172 | ||
427e5fc6 | 173 | static __inline int |
250aec18 | 174 | hammer_norefs(struct hammer_lock *lock) |
427e5fc6 | 175 | { |
250aec18 MD |
176 | return((lock->refs & ~HAMMER_REFS_FLAGS) == 0); |
177 | } | |
178 | ||
179 | static __inline int | |
180 | hammer_norefsorlock(struct hammer_lock *lock) | |
181 | { | |
182 | return(lock->refs == 0); | |
183 | } | |
184 | ||
185 | static __inline int | |
186 | hammer_refsorlock(struct hammer_lock *lock) | |
187 | { | |
188 | return(lock->refs != 0); | |
427e5fc6 | 189 | } |
c60bb2c5 | 190 | |
6a37e7e4 | 191 | /* |
7aa3b8a6 | 192 | * Return if we specifically own the lock exclusively. |
6a37e7e4 MD |
193 | */ |
194 | static __inline int | |
7aa3b8a6 | 195 | hammer_lock_excl_owned(struct hammer_lock *lock, thread_t td) |
6a37e7e4 | 196 | { |
899eb297 | 197 | if ((lock->lockval & HAMMER_LOCKF_EXCLUSIVE) && |
250aec18 | 198 | lock->lowner == td) { |
7aa3b8a6 | 199 | return(1); |
899eb297 | 200 | } |
7aa3b8a6 | 201 | return(0); |
6a37e7e4 MD |
202 | } |
203 | ||
ec4e8497 | 204 | /* |
1f07f686 | 205 | * Flush state, used by various structures |
ec4e8497 | 206 | */ |
1f07f686 MD |
/* Flush state, used by both inodes and records (see target_list). */
typedef enum hammer_inode_state {
        HAMMER_FST_IDLE,
        HAMMER_FST_SETUP,
        HAMMER_FST_FLUSH
} hammer_inode_state_t;
ec4e8497 | 212 | |
1f07f686 | 213 | TAILQ_HEAD(hammer_record_list, hammer_record); |
ec4e8497 | 214 | |
5fa5c92f MD |
215 | /* |
216 | * Pseudo-filesystem extended data tracking | |
217 | */ | |
218 | struct hammer_pfs_rb_tree; | |
219 | struct hammer_pseudofs_inmem; | |
220 | RB_HEAD(hammer_pfs_rb_tree, hammer_pseudofs_inmem); | |
221 | RB_PROTOTYPE2(hammer_pfs_rb_tree, hammer_pseudofs_inmem, rb_node, | |
222 | hammer_pfs_rb_compare, u_int32_t); | |
223 | ||
224 | struct hammer_pseudofs_inmem { | |
225 | RB_ENTRY(hammer_pseudofs_inmem) rb_node; | |
226 | struct hammer_lock lock; | |
227 | u_int32_t localization; | |
ea434b6f | 228 | hammer_tid_t create_tid; |
842e7a70 | 229 | int flags; |
a56cb012 | 230 | udev_t fsid_udev; |
5fa5c92f MD |
231 | struct hammer_pseudofs_data pfsd; |
232 | }; | |
233 | ||
234 | typedef struct hammer_pseudofs_inmem *hammer_pseudofs_inmem_t; | |
235 | ||
842e7a70 MD |
236 | #define HAMMER_PFSM_DELETED 0x0001 |
237 | ||
0729c8c8 MD |
238 | /* |
239 | * Cache object ids. A fixed number of objid cache structures are | |
240 | * created to reserve object id's for newly created files in multiples | |
241 | * of 100,000, localized to a particular directory, and recycled as | |
242 | * needed. This allows parallel create operations in different | |
243 | * directories to retain fairly localized object ids which in turn | |
244 | * improves reblocking performance and layout. | |
245 | */ | |
7d29aec0 | 246 | #define OBJID_CACHE_SIZE 2048 |
5a64efa1 MD |
247 | #define OBJID_CACHE_BULK_BITS 10 /* 10 bits (1024) */ |
248 | #define OBJID_CACHE_BULK (32 * 32) /* two level (1024) */ | |
249 | #define OBJID_CACHE_BULK_MASK (OBJID_CACHE_BULK - 1) | |
250 | #define OBJID_CACHE_BULK_MASK64 ((u_int64_t)(OBJID_CACHE_BULK - 1)) | |
0729c8c8 MD |
251 | |
252 | typedef struct hammer_objid_cache { | |
253 | TAILQ_ENTRY(hammer_objid_cache) entry; | |
254 | struct hammer_inode *dip; | |
5a64efa1 | 255 | hammer_tid_t base_tid; |
0729c8c8 | 256 | int count; |
5a64efa1 MD |
257 | u_int32_t bm0; |
258 | u_int32_t bm1[32]; | |
0729c8c8 MD |
259 | } *hammer_objid_cache_t; |
260 | ||
bcac4bbb MD |
261 | /* |
262 | * Associate an inode with a B-Tree node to cache search start positions | |
263 | */ | |
264 | typedef struct hammer_node_cache { | |
7a61b85d | 265 | TAILQ_ENTRY(hammer_node_cache) entry; |
bcac4bbb MD |
266 | struct hammer_node *node; |
267 | struct hammer_inode *ip; | |
268 | } *hammer_node_cache_t; | |
269 | ||
270 | TAILQ_HEAD(hammer_node_cache_list, hammer_node_cache); | |
271 | ||
507df98a ID |
272 | /* |
273 | * Live dedup cache | |
274 | */ | |
275 | struct hammer_dedup_crc_rb_tree; | |
276 | RB_HEAD(hammer_dedup_crc_rb_tree, hammer_dedup_cache); | |
277 | RB_PROTOTYPE2(hammer_dedup_crc_rb_tree, hammer_dedup_cache, crc_entry, | |
278 | hammer_dedup_crc_rb_compare, hammer_crc_t); | |
279 | ||
280 | struct hammer_dedup_off_rb_tree; | |
281 | RB_HEAD(hammer_dedup_off_rb_tree, hammer_dedup_cache); | |
282 | RB_PROTOTYPE2(hammer_dedup_off_rb_tree, hammer_dedup_cache, off_entry, | |
283 | hammer_dedup_off_rb_compare, hammer_off_t); | |
284 | ||
285 | #define DEDUP_CACHE_SIZE 4096 /* XXX make it a dynamic tunable */ | |
286 | ||
287 | typedef struct hammer_dedup_cache { | |
288 | RB_ENTRY(hammer_dedup_cache) crc_entry; | |
289 | RB_ENTRY(hammer_dedup_cache) off_entry; | |
290 | TAILQ_ENTRY(hammer_dedup_cache) lru_entry; | |
291 | struct hammer_mount *hmp; | |
292 | int64_t obj_id; | |
293 | u_int32_t localization; | |
294 | off_t file_offset; | |
295 | int bytes; | |
296 | hammer_off_t data_offset; | |
297 | hammer_crc_t crc; | |
298 | } *hammer_dedup_cache_t; | |
299 | ||
7a61b85d MD |
300 | /* |
301 | * Structure used to organize flush groups. Flush groups must be | |
302 | * organized into chunks in order to avoid blowing out the UNDO FIFO. | |
303 | * Without this a 'sync' could end up flushing 50,000 inodes in a single | |
304 | * transaction. | |
305 | */ | |
ff003b11 MD |
306 | struct hammer_fls_rb_tree; |
307 | RB_HEAD(hammer_fls_rb_tree, hammer_inode); | |
308 | RB_PROTOTYPE(hammer_fls_rb_tree, hammer_inode, rb_flsnode, | |
309 | hammer_ino_rb_compare); | |
310 | ||
7a61b85d MD |
311 | struct hammer_flush_group { |
312 | TAILQ_ENTRY(hammer_flush_group) flush_entry; | |
ff003b11 | 313 | struct hammer_fls_rb_tree flush_tree; |
37646115 | 314 | int seq; /* our seq no */ |
7a61b85d MD |
315 | int total_count; /* record load */ |
316 | int running; /* group is running */ | |
317 | int closed; | |
318 | int refs; | |
319 | }; | |
320 | ||
321 | typedef struct hammer_flush_group *hammer_flush_group_t; | |
322 | ||
323 | TAILQ_HEAD(hammer_flush_group_list, hammer_flush_group); | |
324 | ||
8750964d | 325 | /* |
8cd0a023 MD |
326 | * Structure used to represent an inode in-memory. |
327 | * | |
328 | * The record and data associated with an inode may be out of sync with | |
329 | * the disk (xDIRTY flags), or not even on the disk at all (ONDISK flag | |
330 | * clear). | |
331 | * | |
332 | * An inode may also hold a cache of unsynchronized records, used for | |
333 | * database and directories only. Unsynchronized regular file data is | |
334 | * stored in the buffer cache. | |
335 | * | |
336 | * NOTE: A file which is created and destroyed within the initial | |
337 | * synchronization period can wind up not doing any disk I/O at all. | |
338 | * | |
339 | * Finally, an inode may cache numerous disk-referencing B-Tree cursors. | |
8750964d MD |
340 | */ |
341 | struct hammer_ino_rb_tree; | |
342 | struct hammer_inode; | |
343 | RB_HEAD(hammer_ino_rb_tree, hammer_inode); | |
344 | RB_PROTOTYPEX(hammer_ino_rb_tree, INFO, hammer_inode, rb_node, | |
8cd0a023 MD |
345 | hammer_ino_rb_compare, hammer_inode_info_t); |
346 | ||
73896937 MD |
347 | struct hammer_redo_rb_tree; |
348 | RB_HEAD(hammer_redo_rb_tree, hammer_inode); | |
349 | RB_PROTOTYPE2(hammer_redo_rb_tree, hammer_inode, rb_redonode, | |
350 | hammer_redo_rb_compare, hammer_off_t); | |
351 | ||
8cd0a023 MD |
352 | struct hammer_rec_rb_tree; |
353 | struct hammer_record; | |
354 | RB_HEAD(hammer_rec_rb_tree, hammer_record); | |
355 | RB_PROTOTYPEX(hammer_rec_rb_tree, INFO, hammer_record, rb_node, | |
0832c9bb | 356 | hammer_rec_rb_compare, hammer_btree_leaf_elm_t); |
8cd0a023 MD |
357 | |
358 | TAILQ_HEAD(hammer_node_list, hammer_node); | |
8750964d MD |
359 | |
360 | struct hammer_inode { | |
1f07f686 MD |
361 | RB_ENTRY(hammer_inode) rb_node; |
362 | hammer_inode_state_t flush_state; | |
7a61b85d | 363 | hammer_flush_group_t flush_group; |
ff003b11 | 364 | RB_ENTRY(hammer_inode) rb_flsnode; /* when on flush list */ |
73896937 | 365 | RB_ENTRY(hammer_inode) rb_redonode; /* when INODE_RDIRTY is set */ |
1f07f686 | 366 | struct hammer_record_list target_list; /* target of dependant recs */ |
adf01747 | 367 | int64_t obj_id; /* (key) object identifier */ |
b84de5af | 368 | hammer_tid_t obj_asof; /* (key) snapshot or 0 */ |
ddfdf542 | 369 | u_int32_t obj_localization; /* (key) pseudo-fs */ |
b84de5af | 370 | struct hammer_mount *hmp; |
0729c8c8 | 371 | hammer_objid_cache_t objid_cache; |
b84de5af MD |
372 | int flags; |
373 | int error; /* flush error */ | |
374 | int cursor_ip_refs; /* sanity */ | |
47637bff | 375 | int rsv_recs; |
b84de5af | 376 | struct vnode *vp; |
5fa5c92f | 377 | hammer_pseudofs_inmem_t pfsm; |
b84de5af MD |
378 | struct lockf advlock; |
379 | struct hammer_lock lock; /* sync copy interlock */ | |
b84de5af | 380 | off_t trunc_off; |
11ad5ade | 381 | struct hammer_btree_leaf_elm ino_leaf; /* in-memory cache */ |
b84de5af MD |
382 | struct hammer_inode_data ino_data; /* in-memory cache */ |
383 | struct hammer_rec_rb_tree rec_tree; /* in-memory cache */ | |
3214ade6 | 384 | int rec_generation; |
39d8fd63 | 385 | struct hammer_node_cache cache[4]; /* search initiate cache */ |
b84de5af MD |
386 | |
387 | /* | |
388 | * When a demark is created to synchronize an inode to | |
389 | * disk, certain fields are copied so the front-end VOPs | |
390 | * can continue to run in parallel with the synchronization | |
391 | * occuring in the background. | |
392 | */ | |
393 | int sync_flags; /* to-sync flags cache */ | |
394 | off_t sync_trunc_off; /* to-sync truncation */ | |
a9d52b76 | 395 | off_t save_trunc_off; /* write optimization */ |
11ad5ade | 396 | struct hammer_btree_leaf_elm sync_ino_leaf; /* to-sync cache */ |
b84de5af | 397 | struct hammer_inode_data sync_ino_data; /* to-sync cache */ |
9192654c | 398 | size_t redo_count; |
73896937 MD |
399 | |
400 | /* | |
401 | * Track the earliest offset in the UNDO/REDO FIFO containing | |
402 | * REDO records. This is staged to the backend during flush | |
403 | * sequences. While the inode is staged redo_fifo_next is used | |
404 | * to track the earliest offset for rotation into redo_fifo_start | |
405 | * on completion of the flush. | |
406 | */ | |
407 | hammer_off_t redo_fifo_start; | |
408 | hammer_off_t redo_fifo_next; | |
8750964d MD |
409 | }; |
410 | ||
8cd0a023 MD |
411 | typedef struct hammer_inode *hammer_inode_t; |
412 | ||
66325755 MD |
413 | #define VTOI(vp) ((struct hammer_inode *)(vp)->v_data) |
414 | ||
9192654c MD |
415 | /* |
416 | * NOTE: DDIRTY does not include atime or mtime and does not include | |
417 | * write-append size changes. SDIRTY handles write-append size | |
418 | * changes. | |
47f363f1 MD |
419 | * |
420 | * REDO indicates that REDO logging is active, creating a definitive | |
421 | * stream of REDO records in the UNDO/REDO log for writes and | |
422 | * truncations, including boundary records when/if REDO is turned off. | |
423 | * REDO is typically enabled by fsync() and turned off if excessive | |
424 | * writes without an fsync() occurs. | |
425 | * | |
426 | * RDIRTY indicates that REDO records were laid down in the UNDO/REDO | |
427 | * FIFO (even if REDO is turned off some might still be active) and | |
428 | * still being tracked for this inode. See hammer_redo.c | |
9192654c | 429 | */ |
ddfdf542 | 430 | /* (not including atime/mtime) */ |
9192654c | 431 | #define HAMMER_INODE_DDIRTY 0x0001 /* in-memory ino_data is dirty */ |
e63644f0 | 432 | #define HAMMER_INODE_RSV_INODES 0x0002 /* hmp->rsv_inodes bumped */ |
7b6ccb11 | 433 | #define HAMMER_INODE_CONN_DOWN 0x0004 /* include in downward recursion */ |
1f07f686 | 434 | #define HAMMER_INODE_XDIRTY 0x0008 /* in-memory records */ |
8cd0a023 | 435 | #define HAMMER_INODE_ONDISK 0x0010 /* inode is on-disk (else not yet) */ |
a89aec1b | 436 | #define HAMMER_INODE_FLUSH 0x0020 /* flush on last ref */ |
869e8f55 | 437 | #define HAMMER_INODE_DELETED 0x0080 /* inode delete (backend) */ |
76376933 | 438 | #define HAMMER_INODE_DELONDISK 0x0100 /* delete synchronized to disk */ |
d113fda1 | 439 | #define HAMMER_INODE_RO 0x0200 /* read-only (because of as-of) */ |
a117fbeb | 440 | #define HAMMER_INODE_RECSW 0x0400 /* waiting on data record flush */ |
0a72edae | 441 | #define HAMMER_INODE_DONDISK 0x0800 /* data records may be on disk */ |
f3b0f382 | 442 | #define HAMMER_INODE_BUFS 0x1000 /* dirty high level bps present */ |
f153644d | 443 | #define HAMMER_INODE_REFLUSH 0x2000 /* flush on dependency / reflush */
9f5097dc | 444 | #define HAMMER_INODE_RECLAIM 0x4000 /* trying to reclaim */ |
059819e3 | 445 | #define HAMMER_INODE_FLUSHW 0x8000 /* Someone waiting for flush */ |
8cd0a023 | 446 | |
b84de5af | 447 | #define HAMMER_INODE_TRUNCATED 0x00010000 |
869e8f55 | 448 | #define HAMMER_INODE_DELETING 0x00020000 /* inode delete request (frontend)*/ |
4e17f465 | 449 | #define HAMMER_INODE_RESIGNAL 0x00040000 /* re-signal on re-flush */ |
ddfdf542 | 450 | #define HAMMER_INODE_ATIME 0x00100000 /* in-memory atime modified */ |
06ad81ff MD |
451 | #define HAMMER_INODE_MTIME 0x00200000 /* in-memory mtime modified */ |
452 | #define HAMMER_INODE_WOULDBLOCK 0x00400000 /* re-issue to new flush group */ | |
4c286c36 | 453 | #define HAMMER_INODE_DUMMY 0x00800000 /* dummy inode covering bad file */ |
9192654c | 454 | #define HAMMER_INODE_SDIRTY 0x01000000 /* in-memory ino_data.size is dirty*/ |
47f363f1 MD |
455 | #define HAMMER_INODE_REDO 0x02000000 /* REDO logging active */ |
456 | #define HAMMER_INODE_RDIRTY 0x04000000 /* REDO records active in fifo */ | |
b84de5af | 457 | |
9192654c | 458 | #define HAMMER_INODE_MODMASK (HAMMER_INODE_DDIRTY|HAMMER_INODE_SDIRTY| \ |
11ad5ade | 459 | HAMMER_INODE_XDIRTY|HAMMER_INODE_BUFS| \ |
ddfdf542 MD |
460 | HAMMER_INODE_ATIME|HAMMER_INODE_MTIME| \ |
461 | HAMMER_INODE_TRUNCATED|HAMMER_INODE_DELETING) | |
1f07f686 | 462 | |
9192654c | 463 | #define HAMMER_INODE_MODMASK_NOXDIRTY \ |
1f07f686 | 464 | (HAMMER_INODE_MODMASK & ~HAMMER_INODE_XDIRTY) |
0b075555 | 465 | |
9192654c MD |
466 | #define HAMMER_INODE_MODMASK_NOREDO \ |
467 | (HAMMER_INODE_DDIRTY| \ | |
468 | HAMMER_INODE_XDIRTY| \ | |
469 | HAMMER_INODE_TRUNCATED|HAMMER_INODE_DELETING) | |
470 | ||
f90dde4c | 471 | #define HAMMER_FLUSH_SIGNAL 0x0001 |
4e17f465 | 472 | #define HAMMER_FLUSH_RECURSION 0x0002 |
f90dde4c | 473 | |
7bc5b8c2 MD |
474 | /* |
475 | * Used by the inode reclaim code to pipeline reclaims and avoid | |
476 | * blowing out kernel memory or letting the flusher get too far | |
82010f9f MD |
477 | * behind. The reclaim wakes up when count reaches 0 or the |
478 | * timer expires. | |
7bc5b8c2 MD |
479 | */ |
480 | struct hammer_reclaim { | |
481 | TAILQ_ENTRY(hammer_reclaim) entry; | |
82010f9f | 482 | int count; |
7bc5b8c2 MD |
483 | }; |
484 | ||
e98f1b96 MD |
485 | /* |
486 | * Track who is creating the greatest burden on the | |
487 | * inode cache. | |
488 | */ | |
489 | struct hammer_inostats { | |
490 | pid_t pid; /* track user process */ | |
491 | int ltick; /* last tick */ | |
492 | int count; /* count (degenerates) */ | |
493 | }; | |
494 | ||
495 | #define HAMMER_INOSTATS_HSIZE 32 | |
496 | #define HAMMER_INOSTATS_HMASK (HAMMER_INOSTATS_HSIZE - 1) | |
497 | ||
8750964d | 498 | /* |
1f07f686 MD |
499 | * Structure used to represent an unsynchronized record in-memory. These |
500 | * records typically represent directory entries. Only non-historical | |
501 | * records are kept in-memory. | |
502 | * | |
503 | * Records are organized as a per-inode RB-Tree. If the inode is not | |
8cd0a023 MD |
504 | * on disk then neither are any records and the in-memory record tree |
505 | * represents the entire contents of the inode. If the inode is on disk | |
506 | * then the on-disk B-Tree is scanned in parallel with the in-memory | |
507 | * RB-Tree to synthesize the current state of the file. | |
508 | * | |
1f07f686 MD |
509 | * Records are also used to enforce the ordering of directory create/delete |
510 | * operations. A new inode will not be flushed to disk unless its related | |
511 | * directory entry is also being flushed at the same time. A directory entry | |
512 | * will not be removed unless its related inode is also being removed at the | |
513 | * same time. | |
8750964d | 514 | */ |
typedef enum hammer_record_type {
        HAMMER_MEM_RECORD_GENERAL,      /* misc record */
        HAMMER_MEM_RECORD_INODE,        /* inode record */
        HAMMER_MEM_RECORD_ADD,          /* positive memory cache record */
        HAMMER_MEM_RECORD_DEL,          /* negative delete-on-disk record */
        HAMMER_MEM_RECORD_DATA          /* bulk-data record w/on-disk ref */
} hammer_record_type_t;
522 | ||
8cd0a023 MD |
523 | struct hammer_record { |
524 | RB_ENTRY(hammer_record) rb_node; | |
1f07f686 MD |
525 | TAILQ_ENTRY(hammer_record) target_entry; |
526 | hammer_inode_state_t flush_state; | |
7a61b85d | 527 | hammer_flush_group_t flush_group; |
1f07f686 | 528 | hammer_record_type_t type; |
a89aec1b | 529 | struct hammer_lock lock; |
0832c9bb | 530 | struct hammer_reserve *resv; |
8cd0a023 | 531 | struct hammer_inode *ip; |
1f07f686 | 532 | struct hammer_inode *target_ip; |
11ad5ade | 533 | struct hammer_btree_leaf_elm leaf; |
8cd0a023 | 534 | union hammer_data_ondisk *data; |
8cd0a023 | 535 | int flags; |
77912481 | 536 | int gflags; |
e469566b | 537 | hammer_off_t zone2_offset; /* direct-write only */ |
8cd0a023 | 538 | }; |
8750964d | 539 | |
8cd0a023 MD |
540 | typedef struct hammer_record *hammer_record_t; |
541 | ||
d36ec43b MD |
542 | /* |
543 | * Record flags. Note that FE can only be set by the frontend if the | |
544 | * record has not been interlocked by the backend w/ BE. | |
545 | */ | |
8cd0a023 MD |
546 | #define HAMMER_RECF_ALLOCDATA 0x0001 |
547 | #define HAMMER_RECF_ONRBTREE 0x0002 | |
b84de5af | 548 | #define HAMMER_RECF_DELETED_FE 0x0004 /* deleted (frontend) */ |
d36ec43b | 549 | #define HAMMER_RECF_DELETED_BE 0x0008 /* deleted (backend) */ |
1b0ab2c3 | 550 | #define HAMMER_RECF_COMMITTED 0x0010 /* committed to the B-Tree */ |
d36ec43b | 551 | #define HAMMER_RECF_INTERLOCK_BE 0x0020 /* backend interlock */ |
0832c9bb | 552 | #define HAMMER_RECF_WANTED 0x0040 /* wanted by the frontend */ |
507df98a | 553 | #define HAMMER_RECF_DEDUPED 0x0080 /* will be live-dedup'ed */ |
1b0ab2c3 | 554 | #define HAMMER_RECF_CONVERT_DELETE 0x0100 /* special case */ |
47f363f1 | 555 | #define HAMMER_RECF_REDO 0x1000 /* REDO was laid down */ |
8cd0a023 | 556 | |
77912481 MD |
557 | /* |
558 | * These flags must be separate to deal with SMP races | |
559 | */ | |
560 | #define HAMMER_RECG_DIRECT_IO 0x0001 /* related direct I/O running*/ | |
561 | #define HAMMER_RECG_DIRECT_WAIT 0x0002 /* related direct I/O running*/ | |
562 | #define HAMMER_RECG_DIRECT_INVAL 0x0004 /* buffer alias invalidation */ | |
602c6cb8 | 563 | /* |
83f2a3aa | 564 | * hammer_create_at_cursor() and hammer_delete_at_cursor() flags. |
602c6cb8 | 565 | */ |
83f2a3aa MD |
566 | #define HAMMER_CREATE_MODE_UMIRROR 0x0001 |
567 | #define HAMMER_CREATE_MODE_SYS 0x0002 | |
568 | ||
602c6cb8 MD |
569 | #define HAMMER_DELETE_ADJUST 0x0001 |
570 | #define HAMMER_DELETE_DESTROY 0x0002 | |
571 | ||
8cd0a023 | 572 | /* |
47197d71 | 573 | * In-memory structures representing on-disk structures. |
8cd0a023 | 574 | */ |
8750964d | 575 | struct hammer_volume; |
427e5fc6 | 576 | struct hammer_buffer; |
8cd0a023 | 577 | struct hammer_node; |
e8599db1 | 578 | struct hammer_undo; |
0832c9bb MD |
579 | struct hammer_reserve; |
580 | ||
8750964d | 581 | RB_HEAD(hammer_vol_rb_tree, hammer_volume); |
427e5fc6 | 582 | RB_HEAD(hammer_buf_rb_tree, hammer_buffer); |
8cd0a023 | 583 | RB_HEAD(hammer_nod_rb_tree, hammer_node); |
e8599db1 | 584 | RB_HEAD(hammer_und_rb_tree, hammer_undo); |
0832c9bb | 585 | RB_HEAD(hammer_res_rb_tree, hammer_reserve); |
1afb73cf | 586 | RB_HEAD(hammer_mod_rb_tree, hammer_io); |
8750964d MD |
587 | |
588 | RB_PROTOTYPE2(hammer_vol_rb_tree, hammer_volume, rb_node, | |
589 | hammer_vol_rb_compare, int32_t); | |
427e5fc6 | 590 | RB_PROTOTYPE2(hammer_buf_rb_tree, hammer_buffer, rb_node, |
47197d71 | 591 | hammer_buf_rb_compare, hammer_off_t); |
8cd0a023 | 592 | RB_PROTOTYPE2(hammer_nod_rb_tree, hammer_node, rb_node, |
47197d71 | 593 | hammer_nod_rb_compare, hammer_off_t); |
e8599db1 MD |
594 | RB_PROTOTYPE2(hammer_und_rb_tree, hammer_undo, rb_node, |
595 | hammer_und_rb_compare, hammer_off_t); | |
0832c9bb MD |
596 | RB_PROTOTYPE2(hammer_res_rb_tree, hammer_reserve, rb_node, |
597 | hammer_res_rb_compare, hammer_off_t); | |
1afb73cf MD |
598 | RB_PROTOTYPE2(hammer_mod_rb_tree, hammer_io, rb_node, |
599 | hammer_mod_rb_compare, hammer_off_t); | |
8750964d | 600 | |
66325755 MD |
601 | /* |
602 | * IO management - embedded at the head of various in-memory structures | |
10a5d1ba MD |
603 | * |
604 | * VOLUME - hammer_volume containing meta-data | |
605 | * META_BUFFER - hammer_buffer containing meta-data | |
606 | * DATA_BUFFER - hammer_buffer containing pure-data | |
607 | * | |
608 | * Dirty volume headers and dirty meta-data buffers are locked until the | |
609 | * flusher can sequence them out. Dirty pure-data buffers can be written. | |
610 | * Clean buffers can be passively released. | |
66325755 | 611 | */ |
10a5d1ba MD |
typedef enum hammer_io_type {
        HAMMER_STRUCTURE_VOLUME,
        HAMMER_STRUCTURE_META_BUFFER,
        HAMMER_STRUCTURE_UNDO_BUFFER,
        HAMMER_STRUCTURE_DATA_BUFFER,
        HAMMER_STRUCTURE_DUMMY
} hammer_io_type_t;
66325755 MD |
619 | |
620 | union hammer_io_structure; | |
055f5ff8 | 621 | struct hammer_io; |
66325755 MD |
622 | |
623 | struct worklist { | |
624 | LIST_ENTRY(worklist) node; | |
625 | }; | |
626 | ||
10a5d1ba MD |
627 | TAILQ_HEAD(hammer_io_list, hammer_io); |
628 | typedef struct hammer_io_list *hammer_io_list_t; | |
055f5ff8 | 629 | |
66325755 | 630 | struct hammer_io { |
10a5d1ba MD |
631 | struct worklist worklist; |
632 | struct hammer_lock lock; | |
633 | enum hammer_io_type type; | |
634 | struct hammer_mount *hmp; | |
748efb59 | 635 | struct hammer_volume *volume; |
1afb73cf | 636 | RB_ENTRY(hammer_io) rb_node; /* if modified */ |
eddadaee | 637 | TAILQ_ENTRY(hammer_io) iorun_entry; /* iorun_list */ |
1afb73cf | 638 | struct hammer_mod_rb_tree *mod_root; |
10a5d1ba | 639 | struct buf *bp; |
4a2796f3 MD |
640 | int64_t offset; /* zone-2 offset */ |
641 | int bytes; /* buffer cache buffer size */ | |
10a5d1ba MD |
642 | int modify_refs; |
643 | ||
77912481 MD |
644 | /* |
645 | * These can be modified at any time by the backend while holding | |
646 | * io_token, due to bio_done and hammer_io_complete() callbacks. | |
647 | */ | |
055f5ff8 MD |
648 | u_int running : 1; /* bp write IO in progress */ |
649 | u_int waiting : 1; /* someone is waiting on us */ | |
77912481 MD |
650 | u_int ioerror : 1; /* abort on io-error */ |
651 | u_int unusedA : 29; | |
652 | ||
653 | /* | |
654 | * These can only be modified by the frontend while holding | |
655 | * fs_token, or by the backend while holding the io interlocked | |
656 | * with no references (which will block the frontend when it | |
657 | * tries to reference it). | |
658 | * | |
659 | * WARNING! SMP RACES will create havoc if the callbacks ever tried | |
660 | * to modify any of these outside the above restrictions. | |
661 | */ | |
662 | u_int modified : 1; /* bp's data was modified */ | |
663 | u_int released : 1; /* bp released (w/ B_LOCKED set) */ | |
b33e2cc0 | 664 | u_int validated : 1; /* ondisk has been validated */ |
b58c6388 | 665 | u_int waitdep : 1; /* flush waits for dependancies */ |
51c35492 | 666 | u_int recovered : 1; /* has recovery ref */ |
9f5097dc | 667 | u_int waitmod : 1; /* waiting for modify_refs */ |
cebe9493 | 668 | u_int reclaim : 1; /* reclaim requested */ |
bcac4bbb | 669 | u_int gencrc : 1; /* crc needs to be generated */ |
77912481 | 670 | u_int unusedB : 24; |
66325755 MD |
671 | }; |
672 | ||
8cd0a023 MD |
673 | typedef struct hammer_io *hammer_io_t; |
674 | ||
af209b0f MD |
675 | #define HAMMER_CLUSTER_SIZE (64 * 1024) |
676 | #if HAMMER_CLUSTER_SIZE > MAXBSIZE | |
677 | #undef HAMMER_CLUSTER_SIZE | |
678 | #define HAMMER_CLUSTER_SIZE MAXBSIZE | |
679 | #endif | |
680 | #define HAMMER_CLUSTER_BUFS (HAMMER_CLUSTER_SIZE / HAMMER_BUFSIZE) | |
681 | ||
/*
 * In-memory volume, representing the on-disk volume header.
 * Backed by the embedded hammer_io (first member, so a volume can be
 * treated as a hammer_io) and indexed by vol_no in an RB tree.
 */
struct hammer_volume {
	struct hammer_io io;		/* must be first: I/O state/backing */
	RB_ENTRY(hammer_volume) rb_node;
	struct hammer_volume_ondisk *ondisk;	/* mapped on-disk header */
	int32_t	vol_no;			/* volume number (RB key) */
	int64_t nblocks;	/* note: special calculation for statfs */
	int64_t buffer_base;	/* base offset of buffer 0 */
	hammer_off_t maxbuf_off; /* Maximum buffer offset (zone-2) */
	hammer_off_t maxraw_off; /* Maximum raw offset for device */
	char	*vol_name;	/* device path name */
	struct vnode *devvp;	/* backing device vnode */
	int	vol_flags;
};

typedef struct hammer_volume *hammer_volume_t;
700 | ||
/*
 * In-memory buffer (other than volume, super-cluster, or cluster),
 * representing an on-disk buffer.  Embedded hammer_io must be first so
 * the buffer can be treated as a hammer_io.
 */
struct hammer_buffer {
	struct hammer_io io;		/* must be first: I/O state/backing */
	RB_ENTRY(hammer_buffer) rb_node;
	void *ondisk;			/* mapped buffer data */
	hammer_off_t zoneX_offset;	/* zone-X (blockmap) offset */
	hammer_off_t zone2_offset;	/* translated zone-2 (raw) offset */
	struct hammer_reserve *resv;	/* blockmap reservation, if any */
	struct hammer_node_list clist;	/* B-Tree nodes backed by this buf */
};

typedef struct hammer_buffer *hammer_buffer_t;
716 | ||
/*
 * In-memory B-Tree node, representing an on-disk B-Tree node.
 *
 * This is a hang-on structure which is backed by a hammer_buffer,
 * and used for fine-grained locking of B-Tree nodes in order to
 * properly control lock ordering.  A hammer_buffer can contain
 * multiple nodes representing wildly disassociated portions of the
 * B-Tree so locking cannot be done on a buffer-by-buffer basis.
 *
 * NOTE(review): the original comment described cluster-relative
 * indexing; node_offset below is a full offset spec, so the cluster
 * terminology appears historical — confirm against hammer_btree.c.
 */
struct hammer_node {
	struct hammer_lock	lock;		/* node-by-node lock */
	TAILQ_ENTRY(hammer_node) entry;		/* per-buffer linkage */
	RB_ENTRY(hammer_node)	rb_node;	/* per-cluster linkage */
	hammer_off_t		node_offset;	/* full offset spec */
	struct hammer_mount	*hmp;
	struct hammer_buffer	*buffer;	/* backing buffer */
	hammer_node_ondisk_t	ondisk;		/* ptr to on-disk structure */
	TAILQ_HEAD(, hammer_cursor) cursor_list;  /* deadlock recovery */
	struct hammer_node_cache_list cache_list; /* passive caches */
	int			flags;		/* HAMMER_NODE_* flags */
};

#define HAMMER_NODE_DELETED	0x0001	/* node deleted from the B-Tree */
#define HAMMER_NODE_FLUSH	0x0002
#define HAMMER_NODE_CRCGOOD	0x0004	/* ondisk CRC verified good */
#define HAMMER_NODE_NEEDSCRC	0x0008	/* CRC must be (re)generated */
#define HAMMER_NODE_NEEDSMIRROR	0x0010
#define HAMMER_NODE_CRCBAD	0x0020	/* ondisk CRC verified bad */
#define HAMMER_NODE_NONLINEAR	0x0040	/* linear heuristic */

/* CRC state has been determined, one way or the other */
#define HAMMER_NODE_CRCANY	(HAMMER_NODE_CRCGOOD | HAMMER_NODE_CRCBAD)

typedef struct hammer_node *hammer_node_t;
754 | ||
/*
 * List of locked nodes.  This structure is used to lock potentially large
 * numbers of nodes as an aid for complex B-Tree operations; each element
 * links its locked children through 'list', forming a tree of locks.
 */
struct hammer_node_lock;
TAILQ_HEAD(hammer_node_lock_list, hammer_node_lock);

struct hammer_node_lock {
	TAILQ_ENTRY(hammer_node_lock) entry;	/* linkage in parent's list */
	struct hammer_node_lock_list  list;	/* locked children */
	struct hammer_node_lock	*parent;	/* NULL at the root of the set */
	hammer_node_t	node;			/* node being held locked */
	hammer_node_ondisk_t copy;	/* copy of on-disk data */
	int		index;		/* index of this node in parent */
	int		count;		/* count children */
	int		flags;		/* HAMMER_NODE_LOCK_* */
};

typedef struct hammer_node_lock *hammer_node_lock_t;

#define HAMMER_NODE_LOCK_UPDATED	0x0001	/* 'copy' modified, sync back */
#define HAMMER_NODE_LOCK_LCACHE		0x0002	/* allocated from lcache */
b33e2cc0 | 777 | |
8cd0a023 MD |
778 | /* |
779 | * Common I/O management structure - embedded in in-memory structures | |
780 | * which are backed by filesystem buffers. | |
781 | */ | |
66325755 MD |
782 | union hammer_io_structure { |
783 | struct hammer_io io; | |
784 | struct hammer_volume volume; | |
66325755 | 785 | struct hammer_buffer buffer; |
8750964d MD |
786 | }; |
787 | ||
055f5ff8 MD |
788 | typedef union hammer_io_structure *hammer_io_structure_t; |
789 | ||
cebe9493 MD |
790 | /* |
791 | * The reserve structure prevents the blockmap from allocating | |
792 | * out of a reserved bigblock. Such reservations are used by | |
793 | * the direct-write mechanism. | |
794 | * | |
795 | * The structure is also used to hold off on reallocations of | |
796 | * big blocks from the freemap until flush dependancies have | |
797 | * been dealt with. | |
798 | */ | |
0832c9bb MD |
799 | struct hammer_reserve { |
800 | RB_ENTRY(hammer_reserve) rb_node; | |
cebe9493 MD |
801 | TAILQ_ENTRY(hammer_reserve) delay_entry; |
802 | int flush_group; | |
cb51be26 | 803 | int flags; |
0832c9bb | 804 | int refs; |
cb51be26 | 805 | int zone; |
df301614 | 806 | int append_off; |
507df98a | 807 | int32_t bytes_free; |
cebe9493 | 808 | hammer_off_t zone_offset; |
0832c9bb MD |
809 | }; |
810 | ||
811 | typedef struct hammer_reserve *hammer_reserve_t; | |
812 | ||
cb51be26 | 813 | #define HAMMER_RESF_ONDELAY 0x0001 |
5e435c92 | 814 | #define HAMMER_RESF_LAYER2FREE 0x0002 |
cb51be26 | 815 | |
8cd0a023 MD |
816 | #include "hammer_cursor.h" |
817 | ||
/*
 * The undo structure tracks recent undos to avoid laying down duplicate
 * undos within a flush group, saving us a significant amount of overhead.
 *
 * This is strictly a heuristic.
 */
#define HAMMER_MAX_UNDOS		1024	/* size of the undo history */
#define HAMMER_MAX_FLUSHERS		4	/* max concurrent flusher threads */

struct hammer_undo {
	RB_ENTRY(hammer_undo)	rb_node;	/* lookup by offset */
	TAILQ_ENTRY(hammer_undo) lru_entry;	/* LRU for recycling entries */
	hammer_off_t		offset;		/* media offset the undo covers */
	int			bytes;		/* length covered at offset */
};

typedef struct hammer_undo *hammer_undo_t;
835 | ||
/*
 * Flusher state: one master thread coordinating a pool of slave
 * flusher threads (run_list/ready_list).
 */
struct hammer_flusher_info;
TAILQ_HEAD(hammer_flusher_info_list, hammer_flusher_info);

struct hammer_flusher {
	int		signal;		/* flusher thread sequencer */
	int		done;		/* last completed flush group */
	int		next;		/* next unallocated flg seqno */
	int		group_lock;	/* lock sequencing of the next flush */
	int		exiting;	/* request master exit */
	thread_t	td;		/* master flusher thread */
	hammer_tid_t	tid;		/* last flushed transaction id */
	int		finalize_want;		/* serialize finalization */
	struct hammer_lock finalize_lock;	/* serialize finalization */
	struct hammer_transaction trans;	/* shared transaction */
	struct hammer_flusher_info_list run_list;   /* slaves running */
	struct hammer_flusher_info_list ready_list; /* slaves idle/ready */
};

/* Policies for flushing pending UNDOs (hammer_flusher_flush_undos arg) */
#define HAMMER_FLUSH_UNDOS_RELAXED	0
#define HAMMER_FLUSH_UNDOS_FORCED	1
#define HAMMER_FLUSH_UNDOS_AUTO		2
8750964d MD |
/*
 * Internal hammer mount data structure - one per mounted HAMMER
 * filesystem.  Frontend fields are protected by fs_token; fields
 * marked io_token are manipulated from the low-level I/O callbacks.
 */
struct hammer_mount {
	struct mount *mp;
	/*struct vnode *rootvp;*/

	/* Red-black indexes of in-memory structures */
	struct hammer_ino_rb_tree rb_inos_root;		/* inodes */
	struct hammer_redo_rb_tree rb_redo_root;	/* REDO tracking */
	struct hammer_vol_rb_tree rb_vols_root;		/* volumes */
	struct hammer_nod_rb_tree rb_nods_root;		/* B-Tree nodes */
	struct hammer_und_rb_tree rb_undo_root;		/* undo history */
	struct hammer_res_rb_tree rb_resv_root;		/* blockmap reserves */
	struct hammer_buf_rb_tree rb_bufs_root;		/* buffers */
	struct hammer_pfs_rb_tree rb_pfsm_root;		/* pseudo-filesystems */

	struct hammer_dedup_crc_rb_tree rb_dedup_crc_root;
	struct hammer_dedup_off_rb_tree rb_dedup_off_root;

	struct hammer_volume *rootvol;		/* root volume */
	struct hammer_base_elm root_btree_beg;	/* B-Tree key space begin */
	struct hammer_base_elm root_btree_end;	/* B-Tree key space end */

	struct malloc_type	*m_misc;	/* per-mount malloc zones */
	struct malloc_type	*m_inodes;

	int	flags;		/* HAMMER_MOUNT_xxx flags */
	int	hflags;		/* mount-time HMNT_* flags */
	int	ronly;		/* mounted read-only */
	int	nvolumes;	/* number of volumes in this FS */
	int	volume_iterator;
	int	master_id;	/* -1 or 0-15 - clustering and mirroring */
	int	version;	/* hammer filesystem version to use */
	int	rsv_inodes;	/* reserved space due to dirty inodes */
	int64_t	rsv_databytes;	/* reserved space due to record data */
	int	rsv_recs;	/* reserved space due to dirty records */
	int	rsv_fromdelay;	/* bigblocks reserved due to flush delay */
	int	undo_rec_limit;	/* based on size of undo area */
	int	last_newrecords;
	int	count_newrecords;

	int	volume_to_remove; /* volume that is currently being removed */

	int	inode_reclaims; /* inodes pending reclaim by flusher */
	int	count_inodes;	/* total number of inodes */
	int	count_iqueued;	/* inodes queued to flusher */

	struct hammer_flusher flusher;	/* flusher thread state */

	u_int	check_interrupt;
	u_int	check_yield;
	uuid_t	fsid;		/* filesystem id */

	/* Dirty/loose buffer trees, keyed per modification class */
	struct hammer_mod_rb_tree volu_root;	/* dirty volume buffers */
	struct hammer_mod_rb_tree undo_root;	/* dirty undo buffers */
	struct hammer_mod_rb_tree data_root;	/* dirty data buffers */
	struct hammer_mod_rb_tree meta_root;	/* dirty meta bufs */
	struct hammer_mod_rb_tree lose_root;	/* loose buffers */
	int	locked_dirty_space;		/* meta/volu count */
	int	io_running_space;		/* io_token */
	int	io_running_wakeup;		/* io_token */
	int	objid_cache_count;
	int	dedup_cache_count;
	int	error;				/* critical I/O error */
	struct krate	krate;			/* rate limited kprintf */
	hammer_tid_t	asof;			/* snapshot mount */
	hammer_tid_t	next_tid;
	hammer_tid_t	flush_tid1;		/* flusher tid sequencing */
	hammer_tid_t	flush_tid2;		/* flusher tid sequencing */
	int64_t copy_stat_freebigblocks;	/* number of free bigblocks */
	u_int32_t	undo_seqno;		/* UNDO/REDO FIFO seqno */
	u_int32_t	recover_stage2_seqno;	/* REDO recovery seqno */
	hammer_off_t	recover_stage2_offset;	/* REDO recovery offset */

	struct netexport export;		/* NFS export state */
	struct hammer_lock sync_lock;
	struct hammer_lock free_lock;
	struct hammer_lock undo_lock;
	struct hammer_lock blkmap_lock;
	struct hammer_lock snapshot_lock;
	struct hammer_lock volume_lock;
	struct hammer_blockmap  blockmap[HAMMER_MAX_ZONES];
	struct hammer_undo	undos[HAMMER_MAX_UNDOS]; /* undo heuristic */
	int	undo_alloc;
	TAILQ_HEAD(, hammer_undo)  undo_lru_list;
	TAILQ_HEAD(, hammer_reserve) delay_list;
	struct hammer_flush_group_list	flush_group_list;
	hammer_flush_group_t	fill_flush_group;
	hammer_flush_group_t	next_flush_group;
	TAILQ_HEAD(, hammer_objid_cache) objid_cache_list;
	TAILQ_HEAD(, hammer_dedup_cache) dedup_lru_list;
	hammer_dedup_cache_t	dedup_free_cache;
	TAILQ_HEAD(, hammer_reclaim) reclaim_list;
	TAILQ_HEAD(, hammer_io) iorun_list;

	struct lwkt_token	fs_token;	/* high level */
	struct lwkt_token	io_token;	/* low level (IO callback) */

	struct hammer_inostats	inostats[HAMMER_INOSTATS_HSIZE];
};

typedef struct hammer_mount	*hammer_mount_t;
8750964d | 957 | |
/* hammer_mount->flags */
#define HAMMER_MOUNT_CRITICAL_ERROR	0x0001	/* hit a critical I/O error */
#define HAMMER_MOUNT_FLUSH_RECOVERY	0x0002
#define HAMMER_MOUNT_REDO_SYNC		0x0004
#define HAMMER_MOUNT_REDO_RECOVERY_REQ	0x0008	/* REDO recovery requested */
#define HAMMER_MOUNT_REDO_RECOVERY_RUN	0x0010	/* REDO recovery running */
9f5097dc | 963 | |
fbc6e32a MD |
/* Per-sync scan state: accumulated error and the MNT_WAIT/NOWAIT mode */
struct hammer_sync_info {
	int error;	/* first/accumulated error during the sync scan */
	int waitfor;	/* MNT_WAIT / MNT_NOWAIT style wait flag */
};
968 | ||
f6468e9e MD |
/*
 * Minimum buffer cache bufs required to rebalance the B-Tree.
 * This is because we must hold the children and the children's children
 * locked.  Even this might not be enough if things are horribly out
 * of balance.
 */
#define HAMMER_REBALANCE_MIN_BUFS	\
	(HAMMER_BTREE_LEAF_ELMS * HAMMER_BTREE_LEAF_ELMS)
978 | ||
8750964d MD |
979 | #endif |
980 | ||
a7e9bef1 MD |
/*
 * checkspace slop (8MB chunks), higher numbers are more conservative.
 * Per-operation thresholds for refusing work when free space runs low.
 */
#define HAMMER_CHKSPC_REBLOCK	25
#define HAMMER_CHKSPC_MIRROR	20
#define HAMMER_CHKSPC_WRITE	20
#define HAMMER_CHKSPC_CREATE	20
#define HAMMER_CHKSPC_REMOVE	10
#define HAMMER_CHKSPC_EMERGENCY	0	/* e.g. flusher must always proceed */
a7e9bef1 | 990 | |
8750964d MD |
991 | #if defined(_KERNEL) |
992 | ||
993 | extern struct vop_ops hammer_vnode_vops; | |
7a04d74f MD |
994 | extern struct vop_ops hammer_spec_vops; |
995 | extern struct vop_ops hammer_fifo_vops; | |
66325755 | 996 | extern struct bio_ops hammer_bioops; |
427e5fc6 | 997 | |
2f85fa4d | 998 | extern int hammer_debug_io; |
d5ef456e | 999 | extern int hammer_debug_general; |
77062c8a | 1000 | extern int hammer_debug_debug; |
e8599db1 | 1001 | extern int hammer_debug_inode; |
7d683b0f | 1002 | extern int hammer_debug_locks; |
b3deaf57 | 1003 | extern int hammer_debug_btree; |
d113fda1 | 1004 | extern int hammer_debug_tid; |
b33e2cc0 | 1005 | extern int hammer_debug_recover; |
46fe7ae1 | 1006 | extern int hammer_debug_recover_faults; |
fc73edd8 | 1007 | extern int hammer_debug_critical; |
1b0ab2c3 | 1008 | extern int hammer_cluster_enable; |
507df98a | 1009 | extern int hammer_live_dedup; |
7a61b85d | 1010 | extern int hammer_count_fsyncs; |
b3deaf57 | 1011 | extern int hammer_count_inodes; |
af209b0f | 1012 | extern int hammer_count_iqueued; |
9f5097dc | 1013 | extern int hammer_count_reclaiming; |
b3deaf57 MD |
1014 | extern int hammer_count_records; |
1015 | extern int hammer_count_record_datas; | |
1016 | extern int hammer_count_volumes; | |
b3deaf57 MD |
1017 | extern int hammer_count_buffers; |
1018 | extern int hammer_count_nodes; | |
a7e9bef1 | 1019 | extern int64_t hammer_count_extra_space_used; |
cb51be26 MD |
1020 | extern int64_t hammer_stats_btree_lookups; |
1021 | extern int64_t hammer_stats_btree_searches; | |
1022 | extern int64_t hammer_stats_btree_inserts; | |
1023 | extern int64_t hammer_stats_btree_deletes; | |
1024 | extern int64_t hammer_stats_btree_elements; | |
1025 | extern int64_t hammer_stats_btree_splits; | |
1026 | extern int64_t hammer_stats_btree_iterations; | |
39d8fd63 | 1027 | extern int64_t hammer_stats_btree_root_iterations; |
cb51be26 | 1028 | extern int64_t hammer_stats_record_iterations; |
ce0138a6 MD |
1029 | extern int64_t hammer_stats_file_read; |
1030 | extern int64_t hammer_stats_file_write; | |
1031 | extern int64_t hammer_stats_file_iopsr; | |
1032 | extern int64_t hammer_stats_file_iopsw; | |
1033 | extern int64_t hammer_stats_disk_read; | |
1034 | extern int64_t hammer_stats_disk_write; | |
1035 | extern int64_t hammer_stats_inode_flushes; | |
1036 | extern int64_t hammer_stats_commits; | |
89e744ce | 1037 | extern int64_t hammer_stats_undo; |
6048b411 | 1038 | extern int64_t hammer_stats_redo; |
f5a07a7a | 1039 | extern int hammer_count_dirtybufspace; |
a99b9ea2 | 1040 | extern int hammer_count_refedbufs; |
0832c9bb | 1041 | extern int hammer_count_reservations; |
a99b9ea2 MD |
1042 | extern int hammer_count_io_running_read; |
1043 | extern int hammer_count_io_running_write; | |
1044 | extern int hammer_count_io_locked; | |
f5a07a7a | 1045 | extern int hammer_limit_dirtybufspace; |
ba298df1 | 1046 | extern int hammer_limit_running_io; |
47637bff | 1047 | extern int hammer_limit_recs; |
de996e86 | 1048 | extern int hammer_limit_inode_recs; |
ff003b11 | 1049 | extern int hammer_limit_reclaim; |
e2ef7a95 | 1050 | extern int hammer_live_dedup_cache_size; |
9192654c | 1051 | extern int hammer_limit_redo; |
1f07f686 | 1052 | extern int hammer_bio_count; |
cb51be26 | 1053 | extern int hammer_verify_zone; |
1b0ab2c3 | 1054 | extern int hammer_verify_data; |
cb51be26 | 1055 | extern int hammer_write_mode; |
b4f86ea3 | 1056 | extern int hammer_double_buffer; |
3e583440 | 1057 | extern int hammer_yield_check; |
6f3d87c0 | 1058 | extern int hammer_fsync_mode; |
21fde338 | 1059 | extern int hammer_autoflush; |
7d683b0f | 1060 | extern int64_t hammer_contention_count; |
b3deaf57 | 1061 | |
507df98a ID |
1062 | extern int64_t hammer_live_dedup_vnode_bcmps; |
1063 | extern int64_t hammer_live_dedup_device_bcmps; | |
1064 | extern int64_t hammer_live_dedup_findblk_failures; | |
1065 | extern int64_t hammer_live_dedup_bmap_saves; | |
1066 | ||
cdb6e4e6 MD |
1067 | void hammer_critical_error(hammer_mount_t hmp, hammer_inode_t ip, |
1068 | int error, const char *msg); | |
8750964d MD |
1069 | int hammer_vop_inactive(struct vop_inactive_args *); |
1070 | int hammer_vop_reclaim(struct vop_reclaim_args *); | |
e8599db1 | 1071 | int hammer_get_vnode(struct hammer_inode *ip, struct vnode **vpp); |
36f82b23 | 1072 | struct hammer_inode *hammer_get_inode(hammer_transaction_t trans, |
adf01747 | 1073 | hammer_inode_t dip, int64_t obj_id, |
ddfdf542 MD |
1074 | hammer_tid_t asof, u_int32_t localization, |
1075 | int flags, int *errorp); | |
4c286c36 MD |
1076 | struct hammer_inode *hammer_get_dummy_inode(hammer_transaction_t trans, |
1077 | hammer_inode_t dip, int64_t obj_id, | |
1078 | hammer_tid_t asof, u_int32_t localization, | |
1079 | int flags, int *errorp); | |
39d8fd63 MD |
1080 | struct hammer_inode *hammer_find_inode(hammer_transaction_t trans, |
1081 | int64_t obj_id, hammer_tid_t asof, | |
1082 | u_int32_t localization); | |
43c665ae MD |
1083 | void hammer_scan_inode_snapshots(hammer_mount_t hmp, |
1084 | hammer_inode_info_t iinfo, | |
1085 | int (*callback)(hammer_inode_t ip, void *data), | |
1086 | void *data); | |
66325755 MD |
1087 | void hammer_put_inode(struct hammer_inode *ip); |
1088 | void hammer_put_inode_ref(struct hammer_inode *ip); | |
e98f1b96 | 1089 | void hammer_inode_waitreclaims(hammer_transaction_t trans); |
66325755 | 1090 | |
8cd0a023 | 1091 | int hammer_unload_volume(hammer_volume_t volume, void *data __unused); |
51c35492 MD |
1092 | int hammer_adjust_volume_mode(hammer_volume_t volume, void *data __unused); |
1093 | ||
07be83b8 | 1094 | int hammer_unload_buffer(hammer_buffer_t buffer, void *data); |
7c19b529 MN |
1095 | int hammer_install_volume(hammer_mount_t hmp, const char *volname, |
1096 | struct vnode *devvp); | |
1b0ab2c3 | 1097 | int hammer_mountcheck_volumes(hammer_mount_t hmp); |
8cd0a023 | 1098 | |
e469566b | 1099 | int hammer_mem_add(hammer_record_t record); |
45a014dc | 1100 | int hammer_ip_lookup(hammer_cursor_t cursor); |
4e17f465 | 1101 | int hammer_ip_first(hammer_cursor_t cursor); |
a89aec1b | 1102 | int hammer_ip_next(hammer_cursor_t cursor); |
8cd0a023 | 1103 | int hammer_ip_resolve_data(hammer_cursor_t cursor); |
e63644f0 MD |
1104 | int hammer_ip_delete_record(hammer_cursor_t cursor, hammer_inode_t ip, |
1105 | hammer_tid_t tid); | |
83f2a3aa MD |
1106 | int hammer_create_at_cursor(hammer_cursor_t cursor, |
1107 | hammer_btree_leaf_elm_t leaf, void *udata, int mode); | |
602c6cb8 | 1108 | int hammer_delete_at_cursor(hammer_cursor_t cursor, int delete_flags, |
842e7a70 MD |
1109 | hammer_tid_t delete_tid, u_int32_t delete_ts, |
1110 | int track, int64_t *stat_bytes); | |
b3deaf57 | 1111 | int hammer_ip_check_directory_empty(hammer_transaction_t trans, |
98f7132d | 1112 | hammer_inode_t ip); |
fbc6e32a | 1113 | int hammer_sync_hmp(hammer_mount_t hmp, int waitfor); |
f36a9737 MD |
1114 | int hammer_queue_inodes_flusher(hammer_mount_t hmp, int waitfor); |
1115 | ||
8cd0a023 | 1116 | hammer_record_t |
11ad5ade | 1117 | hammer_alloc_mem_record(hammer_inode_t ip, int data_len); |
d36ec43b | 1118 | void hammer_flush_record_done(hammer_record_t record, int error); |
af209b0f | 1119 | void hammer_wait_mem_record_ident(hammer_record_t record, const char *ident); |
b3deaf57 | 1120 | void hammer_rel_mem_record(hammer_record_t record); |
b84de5af | 1121 | |
6a37e7e4 | 1122 | int hammer_cursor_up(hammer_cursor_t cursor); |
f36a9737 | 1123 | int hammer_cursor_up_locked(hammer_cursor_t cursor); |
8cd0a023 | 1124 | int hammer_cursor_down(hammer_cursor_t cursor); |
6a37e7e4 | 1125 | int hammer_cursor_upgrade(hammer_cursor_t cursor); |
7bc5b8c2 | 1126 | int hammer_cursor_upgrade_node(hammer_cursor_t cursor); |
6a37e7e4 | 1127 | void hammer_cursor_downgrade(hammer_cursor_t cursor); |
bb29b5d8 MD |
1128 | int hammer_cursor_upgrade2(hammer_cursor_t c1, hammer_cursor_t c2); |
1129 | void hammer_cursor_downgrade2(hammer_cursor_t c1, hammer_cursor_t c2); | |
32c90105 MD |
1130 | int hammer_cursor_seek(hammer_cursor_t cursor, hammer_node_t node, |
1131 | int index); | |
af209b0f | 1132 | void hammer_lock_ex_ident(struct hammer_lock *lock, const char *ident); |
8cd0a023 MD |
1133 | int hammer_lock_ex_try(struct hammer_lock *lock); |
1134 | void hammer_lock_sh(struct hammer_lock *lock); | |
47637bff | 1135 | int hammer_lock_sh_try(struct hammer_lock *lock); |
bb29b5d8 MD |
1136 | int hammer_lock_upgrade(struct hammer_lock *lock, int shcount); |
1137 | void hammer_lock_downgrade(struct hammer_lock *lock, int shcount); | |
b3bad96f | 1138 | int hammer_lock_status(struct hammer_lock *lock); |
427e5fc6 | 1139 | void hammer_unlock(struct hammer_lock *lock); |
66325755 | 1140 | void hammer_ref(struct hammer_lock *lock); |
250aec18 MD |
1141 | int hammer_ref_interlock(struct hammer_lock *lock); |
1142 | int hammer_ref_interlock_true(struct hammer_lock *lock); | |
1143 | void hammer_ref_interlock_done(struct hammer_lock *lock); | |
1144 | void hammer_rel(struct hammer_lock *lock); | |
1145 | int hammer_rel_interlock(struct hammer_lock *lock, int locked); | |
1146 | void hammer_rel_interlock_done(struct hammer_lock *lock, int orig_locked); | |
1147 | int hammer_get_interlock(struct hammer_lock *lock); | |
1148 | int hammer_try_interlock_norefs(struct hammer_lock *lock); | |
1149 | void hammer_put_interlock(struct hammer_lock *lock, int error); | |
8cd0a023 | 1150 | |
2f85fa4d MD |
1151 | void hammer_sync_lock_ex(hammer_transaction_t trans); |
1152 | void hammer_sync_lock_sh(hammer_transaction_t trans); | |
47637bff | 1153 | int hammer_sync_lock_sh_try(hammer_transaction_t trans); |
2f85fa4d MD |
1154 | void hammer_sync_unlock(hammer_transaction_t trans); |
1155 | ||
66325755 | 1156 | u_int32_t hammer_to_unix_xid(uuid_t *uuid); |
8cd0a023 | 1157 | void hammer_guid_to_uuid(uuid_t *uuid, u_int32_t guid); |
ddfdf542 MD |
1158 | void hammer_time_to_timespec(u_int64_t xtime, struct timespec *ts); |
1159 | u_int64_t hammer_timespec_to_time(struct timespec *ts); | |
bc6c1f13 MD |
1160 | int hammer_str_to_tid(const char *str, int *ispfsp, |
1161 | hammer_tid_t *tidp, u_int32_t *localizationp); | |
1162 | int hammer_is_atatext(const char *name, int len); | |
5a64efa1 MD |
1163 | hammer_tid_t hammer_alloc_objid(hammer_mount_t hmp, hammer_inode_t dip, |
1164 | int64_t namekey); | |
0729c8c8 MD |
1165 | void hammer_clear_objid(hammer_inode_t dip); |
1166 | void hammer_destroy_objid_cache(hammer_mount_t hmp); | |
8cd0a023 | 1167 | |
507df98a ID |
1168 | int hammer_dedup_crc_rb_compare(hammer_dedup_cache_t dc1, |
1169 | hammer_dedup_cache_t dc2); | |
1170 | int hammer_dedup_off_rb_compare(hammer_dedup_cache_t dc1, | |
1171 | hammer_dedup_cache_t dc2); | |
1172 | hammer_dedup_cache_t hammer_dedup_cache_add(hammer_inode_t ip, | |
1173 | hammer_btree_leaf_elm_t leaf); | |
1174 | hammer_dedup_cache_t hammer_dedup_cache_lookup(hammer_mount_t hmp, | |
1175 | hammer_crc_t crc); | |
1176 | void hammer_dedup_cache_inval(hammer_mount_t hmp, hammer_off_t base_offset); | |
1177 | void hammer_destroy_dedup_cache(hammer_mount_t hmp); | |
1178 | void hammer_dump_dedup_cache(hammer_mount_t hmp); | |
1179 | int hammer_dedup_validate(hammer_dedup_cache_t dcp, int zone, int bytes, | |
1180 | void *data); | |
1181 | ||
e8599db1 | 1182 | int hammer_enter_undo_history(hammer_mount_t hmp, hammer_off_t offset, |
b3bad96f | 1183 | int bytes); |
e8599db1 | 1184 | void hammer_clear_undo_history(hammer_mount_t hmp); |
66325755 | 1185 | enum vtype hammer_get_vnode_type(u_int8_t obj_type); |
6b4f890b | 1186 | int hammer_get_dtype(u_int8_t obj_type); |
66325755 | 1187 | u_int8_t hammer_get_obj_type(enum vtype vtype); |
5e435c92 MD |
1188 | int64_t hammer_directory_namekey(hammer_inode_t dip, const void *name, int len, |
1189 | u_int32_t *max_iterationsp); | |
e63644f0 | 1190 | int hammer_nohistory(hammer_inode_t ip); |
427e5fc6 | 1191 | |
36f82b23 | 1192 | int hammer_init_cursor(hammer_transaction_t trans, hammer_cursor_t cursor, |
b3bad96f | 1193 | hammer_node_cache_t cache, hammer_inode_t ip); |
4e17f465 | 1194 | void hammer_normalize_cursor(hammer_cursor_t cursor); |
8cd0a023 | 1195 | void hammer_done_cursor(hammer_cursor_t cursor); |
b3bad96f | 1196 | int hammer_recover_cursor(hammer_cursor_t cursor); |
982be4bf MD |
1197 | void hammer_unlock_cursor(hammer_cursor_t cursor); |
1198 | int hammer_lock_cursor(hammer_cursor_t cursor); | |
3f43fb33 MD |
1199 | hammer_cursor_t hammer_push_cursor(hammer_cursor_t ocursor); |
1200 | void hammer_pop_cursor(hammer_cursor_t ocursor, hammer_cursor_t ncursor); | |
b3bad96f MD |
1201 | |
1202 | void hammer_cursor_replaced_node(hammer_node_t onode, hammer_node_t nnode); | |
1203 | void hammer_cursor_removed_node(hammer_node_t onode, hammer_node_t parent, | |
1204 | int index); | |
1205 | void hammer_cursor_split_node(hammer_node_t onode, hammer_node_t nnode, | |
1206 | int index); | |
bbb01e14 MD |
1207 | void hammer_cursor_moved_element(hammer_node_t oparent, int pindex, |
1208 | hammer_node_t onode, int oindex, | |
1209 | hammer_node_t nnode, int nindex); | |
1775b6a0 MD |
1210 | void hammer_cursor_parent_changed(hammer_node_t node, hammer_node_t oparent, |
1211 | hammer_node_t nparent, int nindex); | |
b3bad96f MD |
1212 | void hammer_cursor_inserted_element(hammer_node_t node, int index); |
1213 | void hammer_cursor_deleted_element(hammer_node_t node, int index); | |
b9107f58 | 1214 | void hammer_cursor_invalidate_cache(hammer_cursor_t cursor); |
8cd0a023 MD |
1215 | |
1216 | int hammer_btree_lookup(hammer_cursor_t cursor); | |
d26d0ae9 | 1217 | int hammer_btree_first(hammer_cursor_t cursor); |
32c90105 | 1218 | int hammer_btree_last(hammer_cursor_t cursor); |
8cd0a023 MD |
1219 | int hammer_btree_extract(hammer_cursor_t cursor, int flags); |
1220 | int hammer_btree_iterate(hammer_cursor_t cursor); | |
32c90105 | 1221 | int hammer_btree_iterate_reverse(hammer_cursor_t cursor); |
11ad5ade | 1222 | int hammer_btree_insert(hammer_cursor_t cursor, |
602c6cb8 | 1223 | hammer_btree_leaf_elm_t elm, int *doprop); |
8cd0a023 | 1224 | int hammer_btree_delete(hammer_cursor_t cursor); |
4c038e17 MD |
1225 | void hammer_btree_do_propagation(hammer_cursor_t cursor, |
1226 | hammer_pseudofs_inmem_t pfsm, | |
602c6cb8 | 1227 | hammer_btree_leaf_elm_t leaf); |
8cd0a023 | 1228 | int hammer_btree_cmp(hammer_base_elm_t key1, hammer_base_elm_t key2); |
d26d0ae9 | 1229 | int hammer_btree_chkts(hammer_tid_t ts, hammer_base_elm_t key); |
32c90105 MD |
1230 | int hammer_btree_correct_rhb(hammer_cursor_t cursor, hammer_tid_t tid); |
1231 | int hammer_btree_correct_lhb(hammer_cursor_t cursor, hammer_tid_t tid); | |
1232 | ||
2f85fa4d MD |
1233 | int btree_set_parent(hammer_transaction_t trans, hammer_node_t node, |
1234 | hammer_btree_elm_t elm); | |
1775b6a0 | 1235 | void hammer_node_lock_init(hammer_node_lock_t parent, hammer_node_t node); |
24cf83d2 MD |
1236 | void hammer_btree_lcache_init(hammer_mount_t hmp, hammer_node_lock_t lcache, |
1237 | int depth); | |
1238 | void hammer_btree_lcache_free(hammer_mount_t hmp, hammer_node_lock_t lcache); | |
1775b6a0 | 1239 | int hammer_btree_lock_children(hammer_cursor_t cursor, int depth, |
24cf83d2 MD |
1240 | hammer_node_lock_t parent, |
1241 | hammer_node_lock_t lcache); | |
1775b6a0 MD |
1242 | void hammer_btree_lock_copy(hammer_cursor_t cursor, |
1243 | hammer_node_lock_t parent); | |
7ddc70d1 | 1244 | int hammer_btree_sync_copy(hammer_cursor_t cursor, |
1775b6a0 | 1245 | hammer_node_lock_t parent); |
24cf83d2 MD |
1246 | void hammer_btree_unlock_children(hammer_mount_t hmp, |
1247 | hammer_node_lock_t parent, | |
1248 | hammer_node_lock_t lcache); | |
bcac4bbb | 1249 | int hammer_btree_search_node(hammer_base_elm_t elm, hammer_node_ondisk_t node); |
82010f9f MD |
1250 | hammer_node_t hammer_btree_get_parent(hammer_transaction_t trans, |
1251 | hammer_node_t node, int *parent_indexp, | |
c82af904 | 1252 | int *errorp, int try_exclusive); |
b33e2cc0 | 1253 | |
c0ade690 MD |
1254 | void hammer_print_btree_node(hammer_node_ondisk_t ondisk); |
1255 | void hammer_print_btree_elm(hammer_btree_elm_t elm, u_int8_t type, int i); | |
8750964d | 1256 | |
47197d71 MD |
1257 | void *hammer_bread(struct hammer_mount *hmp, hammer_off_t off, |
1258 | int *errorp, struct hammer_buffer **bufferp); | |
1259 | void *hammer_bnew(struct hammer_mount *hmp, hammer_off_t off, | |
1260 | int *errorp, struct hammer_buffer **bufferp); | |
4a2796f3 MD |
1261 | void *hammer_bread_ext(struct hammer_mount *hmp, hammer_off_t off, int bytes, |
1262 | int *errorp, struct hammer_buffer **bufferp); | |
1263 | void *hammer_bnew_ext(struct hammer_mount *hmp, hammer_off_t off, int bytes, | |
1264 | int *errorp, struct hammer_buffer **bufferp); | |
8cd0a023 MD |
1265 | |
1266 | hammer_volume_t hammer_get_root_volume(hammer_mount_t hmp, int *errorp); | |
427e5fc6 | 1267 | |
8cd0a023 | 1268 | hammer_volume_t hammer_get_volume(hammer_mount_t hmp, |
427e5fc6 | 1269 | int32_t vol_no, int *errorp); |
4a2796f3 MD |
1270 | hammer_buffer_t hammer_get_buffer(hammer_mount_t hmp, hammer_off_t buf_offset, |
1271 | int bytes, int isnew, int *errorp); | |
1b0ab2c3 MD |
1272 | void hammer_sync_buffers(hammer_mount_t hmp, |
1273 | hammer_off_t base_offset, int bytes); | |
362ec2dc | 1274 | int hammer_del_buffers(hammer_mount_t hmp, |
1b0ab2c3 | 1275 | hammer_off_t base_offset, |
362ec2dc MD |
1276 | hammer_off_t zone2_offset, int bytes, |
1277 | int report_conflicts); | |
8cd0a023 | 1278 | |
fbc6e32a | 1279 | int hammer_ref_volume(hammer_volume_t volume); |
8cd0a023 MD |
1280 | int hammer_ref_buffer(hammer_buffer_t buffer); |
1281 | void hammer_flush_buffer_nodes(hammer_buffer_t buffer); | |
1282 | ||
250aec18 MD |
1283 | void hammer_rel_volume(hammer_volume_t volume, int locked); |
1284 | void hammer_rel_buffer(hammer_buffer_t buffer, int locked); | |
8cd0a023 | 1285 | |
513ca7d7 MD |
1286 | int hammer_vfs_export(struct mount *mp, int op, |
1287 | const struct export_args *export); | |
82010f9f MD |
1288 | hammer_node_t hammer_get_node(hammer_transaction_t trans, |
1289 | hammer_off_t node_offset, int isnew, int *errorp); | |
740d8317 | 1290 | void hammer_ref_node(hammer_node_t node); |
4c286c36 | 1291 | hammer_node_t hammer_ref_node_safe(hammer_transaction_t trans, |
bcac4bbb | 1292 | hammer_node_cache_t cache, int *errorp); |
8cd0a023 | 1293 | void hammer_rel_node(hammer_node_t node); |
36f82b23 MD |
1294 | void hammer_delete_node(hammer_transaction_t trans, |
1295 | hammer_node_t node); | |
bcac4bbb MD |
1296 | void hammer_cache_node(hammer_node_cache_t cache, |
1297 | hammer_node_t node); | |
1298 | void hammer_uncache_node(hammer_node_cache_t cache); | |
250aec18 | 1299 | void hammer_flush_node(hammer_node_t node, int locked); |
8cd0a023 | 1300 | |
427e5fc6 MD |
1301 | void hammer_dup_buffer(struct hammer_buffer **bufferp, |
1302 | struct hammer_buffer *buffer); | |
df2ccbac MD |
1303 | hammer_node_t hammer_alloc_btree(hammer_transaction_t trans, |
1304 | hammer_off_t hint, int *errorp); | |
36f82b23 | 1305 | void *hammer_alloc_data(hammer_transaction_t trans, int32_t data_len, |
bf3b416b | 1306 | u_int16_t rec_type, hammer_off_t *data_offsetp, |
df2ccbac MD |
1307 | struct hammer_buffer **data_bufferp, |
1308 | hammer_off_t hint, int *errorp); | |
bf686dbe | 1309 | |
02428fb6 | 1310 | int hammer_generate_undo(hammer_transaction_t trans, |
059819e3 | 1311 | hammer_off_t zone1_offset, void *base, int len); |
6048b411 | 1312 | int hammer_generate_redo(hammer_transaction_t trans, hammer_inode_t ip, |
47f363f1 MD |
1313 | hammer_off_t file_offset, u_int32_t flags, |
1314 | void *base, int len); | |
1315 | void hammer_generate_redo_sync(hammer_transaction_t trans); | |
73896937 MD |
1316 | void hammer_redo_fifo_start_flush(hammer_inode_t ip); |
1317 | void hammer_redo_fifo_end_flush(hammer_inode_t ip); | |
1318 | ||
47f363f1 | 1319 | void hammer_format_undo(void *base, u_int32_t seqno); |
02428fb6 | 1320 | int hammer_upgrade_undo_4(hammer_transaction_t trans); |
427e5fc6 | 1321 | |
66325755 | 1322 | void hammer_put_volume(struct hammer_volume *volume, int flush); |
66325755 | 1323 | void hammer_put_buffer(struct hammer_buffer *buffer, int flush); |
427e5fc6 | 1324 | |
36f82b23 MD |
1325 | hammer_off_t hammer_freemap_alloc(hammer_transaction_t trans, |
1326 | hammer_off_t owner, int *errorp); | |
1327 | void hammer_freemap_free(hammer_transaction_t trans, hammer_off_t phys_offset, | |
c3be93f2 | 1328 | hammer_off_t owner, int *errorp); |
0f65be10 | 1329 | int _hammer_checkspace(hammer_mount_t hmp, int slop, int64_t *resp); |
36f82b23 | 1330 | hammer_off_t hammer_blockmap_alloc(hammer_transaction_t trans, int zone, |
df2ccbac | 1331 | int bytes, hammer_off_t hint, int *errorp); |
0832c9bb MD |
1332 | hammer_reserve_t hammer_blockmap_reserve(hammer_mount_t hmp, int zone, |
1333 | int bytes, hammer_off_t *zone_offp, int *errorp); | |
507df98a ID |
1334 | hammer_reserve_t hammer_blockmap_reserve_dedup(hammer_mount_t hmp, int zone, |
1335 | int bytes, hammer_off_t zone_offset, int *errorp); | |
0832c9bb MD |
1336 | void hammer_blockmap_reserve_complete(hammer_mount_t hmp, |
1337 | hammer_reserve_t resv); | |
cb51be26 | 1338 | void hammer_reserve_clrdelay(hammer_mount_t hmp, hammer_reserve_t resv); |
36f82b23 MD |
1339 | void hammer_blockmap_free(hammer_transaction_t trans, |
1340 | hammer_off_t bmap_off, int bytes); | |
bb29b5d8 MD |
1341 | int hammer_blockmap_dedup(hammer_transaction_t trans, |
1342 | hammer_off_t bmap_off, int bytes); | |
cdb6e4e6 | 1343 | int hammer_blockmap_finalize(hammer_transaction_t trans, |
5e435c92 | 1344 | hammer_reserve_t resv, |
4a2796f3 | 1345 | hammer_off_t bmap_off, int bytes); |
bf686dbe MD |
1346 | int hammer_blockmap_getfree(hammer_mount_t hmp, hammer_off_t bmap_off, |
1347 | int *curp, int *errorp); | |
40043e7f MD |
1348 | hammer_off_t hammer_blockmap_lookup(hammer_mount_t hmp, hammer_off_t bmap_off, |
1349 | int *errorp); | |
bf686dbe MD |
1350 | hammer_off_t hammer_undo_lookup(hammer_mount_t hmp, hammer_off_t bmap_off, |
1351 | int *errorp); | |
06ad81ff MD |
1352 | int64_t hammer_undo_used(hammer_transaction_t trans); |
1353 | int64_t hammer_undo_space(hammer_transaction_t trans); | |
1f07f686 | 1354 | int64_t hammer_undo_max(hammer_mount_t hmp); |
710733a6 | 1355 | int hammer_undo_reclaim(hammer_io_t io); |
1f07f686 | 1356 | |
8cd0a023 MD |
1357 | void hammer_start_transaction(struct hammer_transaction *trans, |
1358 | struct hammer_mount *hmp); | |
36f82b23 MD |
1359 | void hammer_simple_transaction(struct hammer_transaction *trans, |
1360 | struct hammer_mount *hmp); | |
b84de5af MD |
1361 | void hammer_start_transaction_fls(struct hammer_transaction *trans, |
1362 | struct hammer_mount *hmp); | |
1363 | void hammer_done_transaction(struct hammer_transaction *trans); | |
83f2a3aa | 1364 | hammer_tid_t hammer_alloc_tid(hammer_mount_t hmp, int count); |
66325755 | 1365 | |
e98f1b96 | 1366 | void hammer_modify_inode(hammer_transaction_t trans, hammer_inode_t ip, int flags); |
f90dde4c | 1367 | void hammer_flush_inode(hammer_inode_t ip, int flags); |
cdb6e4e6 | 1368 | void hammer_flush_inode_done(hammer_inode_t ip, int error); |
b84de5af MD |
1369 | void hammer_wait_inode(hammer_inode_t ip); |
1370 | ||
8cd0a023 MD |
1371 | int hammer_create_inode(struct hammer_transaction *trans, struct vattr *vap, |
1372 | struct ucred *cred, struct hammer_inode *dip, | |
5a64efa1 | 1373 | const char *name, int namelen, |
ea434b6f MD |
1374 | hammer_pseudofs_inmem_t pfsm, |
1375 | struct hammer_inode **ipp); | |
a89aec1b | 1376 | void hammer_rel_inode(hammer_inode_t ip, int flush); |
51c35492 | 1377 | int hammer_reload_inode(hammer_inode_t ip, void *arg __unused); |
af209b0f | 1378 | int hammer_ino_rb_compare(hammer_inode_t ip1, hammer_inode_t ip2); |
73896937 | 1379 | int hammer_redo_rb_compare(hammer_inode_t ip1, hammer_inode_t ip2); |
cdb6e4e6 | 1380 | int hammer_destroy_inode_callback(hammer_inode_t ip, void *data __unused); |
51c35492 | 1381 | |
02325004 | 1382 | int hammer_sync_inode(hammer_transaction_t trans, hammer_inode_t ip); |
7b6ccb11 | 1383 | void hammer_test_inode(hammer_inode_t dip); |
e8599db1 | 1384 | void hammer_inode_unloadable_check(hammer_inode_t ip, int getvp); |
8cd0a023 | 1385 | |
a89aec1b | 1386 | int hammer_ip_add_directory(struct hammer_transaction *trans, |
5a930e66 | 1387 | hammer_inode_t dip, const char *name, int bytes, |
8cd0a023 | 1388 | hammer_inode_t nip); |
a89aec1b | 1389 | int hammer_ip_del_directory(struct hammer_transaction *trans, |
8cd0a023 MD |
1390 | hammer_cursor_t cursor, hammer_inode_t dip, |
1391 | hammer_inode_t ip); | |
6362a262 | 1392 | void hammer_ip_replace_bulk(hammer_mount_t hmp, hammer_record_t record); |
47637bff | 1393 | hammer_record_t hammer_ip_add_bulk(hammer_inode_t ip, off_t file_offset, |
0832c9bb | 1394 | void *data, int bytes, int *errorp); |
47637bff | 1395 | int hammer_ip_frontend_trunc(struct hammer_inode *ip, off_t file_size); |
7a04d74f MD |
1396 | int hammer_ip_add_record(struct hammer_transaction *trans, |
1397 | hammer_record_t record); | |
4e17f465 | 1398 | int hammer_ip_delete_range(hammer_cursor_t cursor, hammer_inode_t ip, |
47637bff | 1399 | int64_t ran_beg, int64_t ran_end, int truncating); |
a9d52b76 | 1400 | int hammer_ip_delete_clean(hammer_cursor_t cursor, hammer_inode_t ip, |
4e17f465 MD |
1401 | int *countp); |
1402 | int hammer_ip_sync_data(hammer_cursor_t cursor, hammer_inode_t ip, | |
1403 | int64_t offset, void *data, int bytes); | |
36f82b23 | 1404 | int hammer_ip_sync_record(hammer_transaction_t trans, hammer_record_t rec); |
4e17f465 | 1405 | int hammer_ip_sync_record_cursor(hammer_cursor_t cursor, hammer_record_t rec); |
ea434b6f MD |
1406 | hammer_pseudofs_inmem_t hammer_load_pseudofs(hammer_transaction_t trans, |
1407 | u_int32_t localization, int *errorp); | |
1408 | int hammer_mkroot_pseudofs(hammer_transaction_t trans, struct ucred *cred, | |
1409 | hammer_pseudofs_inmem_t pfsm); | |
1410 | int hammer_save_pseudofs(hammer_transaction_t trans, | |
1411 | hammer_pseudofs_inmem_t pfsm); | |
842e7a70 | 1412 | int hammer_unload_pseudofs(hammer_transaction_t trans, u_int32_t localization); |
5fa5c92f | 1413 | void hammer_rel_pseudofs(hammer_mount_t hmp, hammer_pseudofs_inmem_t pfsm); |
7dc57964 MD |
1414 | int hammer_ioctl(hammer_inode_t ip, u_long com, caddr_t data, int fflag, |
1415 | struct ucred *cred); | |
1416 | ||
748efb59 | 1417 | void hammer_io_init(hammer_io_t io, hammer_volume_t volume, |
10a5d1ba | 1418 | enum hammer_io_type type); |
b7de8aa5 | 1419 | int hammer_io_read(struct vnode *devvp, struct hammer_io *io, int limit); |
0e8bd897 | 1420 | void hammer_io_advance(struct hammer_io *io); |
66325755 | 1421 | int hammer_io_new(struct vnode *devvp, struct hammer_io *io); |
362ec2dc | 1422 | int hammer_io_inval(hammer_volume_t volume, hammer_off_t zone2_offset); |
ecca949a | 1423 | struct buf *hammer_io_release(struct hammer_io *io, int flush); |
710733a6 | 1424 | void hammer_io_flush(struct hammer_io *io, int reclaim); |
1b0ab2c3 | 1425 | void hammer_io_wait(struct hammer_io *io); |
055f5ff8 | 1426 | void hammer_io_waitdep(struct hammer_io *io); |
eddadaee | 1427 | void hammer_io_wait_all(hammer_mount_t hmp, const char *ident, int doflush); |
1b0ab2c3 MD |
1428 | int hammer_io_direct_read(hammer_mount_t hmp, struct bio *bio, |
1429 | hammer_btree_leaf_elm_t leaf); | |
9a98f3cc MD |
1430 | int hammer_io_indirect_read(hammer_mount_t hmp, struct bio *bio, |
1431 | hammer_btree_leaf_elm_t leaf); | |
6362a262 MD |
1432 | int hammer_io_direct_write(hammer_mount_t hmp, struct bio *bio, |
1433 | hammer_record_t record); | |
1b0ab2c3 | 1434 | void hammer_io_direct_wait(hammer_record_t record); |
43c665ae | 1435 | void hammer_io_direct_uncache(hammer_mount_t hmp, hammer_btree_leaf_elm_t leaf); |
9f5097dc MD |
1436 | void hammer_io_write_interlock(hammer_io_t io); |
1437 | void hammer_io_done_interlock(hammer_io_t io); | |
4a2796f3 | 1438 | void hammer_io_clear_modify(struct hammer_io *io, int inval); |
cebe9493 | 1439 | void hammer_io_clear_modlist(struct hammer_io *io); |
748efb59 | 1440 | void hammer_io_flush_sync(hammer_mount_t hmp); |
2faf0737 | 1441 | void hammer_io_clear_error(struct hammer_io *io); |
77912481 | 1442 | void hammer_io_clear_error_noassert(struct hammer_io *io); |
b8a41159 | 1443 | void hammer_io_notmeta(hammer_buffer_t buffer); |
ba298df1 | 1444 | void hammer_io_limit_backlog(hammer_mount_t hmp); |
748efb59 | 1445 | |
36f82b23 MD |
1446 | void hammer_modify_volume(hammer_transaction_t trans, hammer_volume_t volume, |
1447 | void *base, int len); | |
1448 | void hammer_modify_buffer(hammer_transaction_t trans, hammer_buffer_t buffer, | |
1449 | void *base, int len); | |
10a5d1ba MD |
1450 | void hammer_modify_volume_done(hammer_volume_t volume); |
1451 | void hammer_modify_buffer_done(hammer_buffer_t buffer); | |
0b075555 | 1452 | |
36f82b23 MD |
1453 | int hammer_ioc_reblock(hammer_transaction_t trans, hammer_inode_t ip, |
1454 | struct hammer_ioc_reblock *reblock); | |
1775b6a0 MD |
1455 | int hammer_ioc_rebalance(hammer_transaction_t trans, hammer_inode_t ip, |
1456 | struct hammer_ioc_rebalance *rebal); | |
11ad5ade MD |
1457 | int hammer_ioc_prune(hammer_transaction_t trans, hammer_inode_t ip, |
1458 | struct hammer_ioc_prune *prune); | |
c82af904 MD |
1459 | int hammer_ioc_mirror_read(hammer_transaction_t trans, hammer_inode_t ip, |
1460 | struct hammer_ioc_mirror_rw *mirror); | |
1461 | int hammer_ioc_mirror_write(hammer_transaction_t trans, hammer_inode_t ip, | |
1462 | struct hammer_ioc_mirror_rw *mirror); | |
5fa5c92f | 1463 | int hammer_ioc_set_pseudofs(hammer_transaction_t trans, hammer_inode_t ip, |
ea434b6f | 1464 | struct ucred *cred, struct hammer_ioc_pseudofs_rw *pfs); |
5fa5c92f MD |
1465 | int hammer_ioc_get_pseudofs(hammer_transaction_t trans, hammer_inode_t ip, |
1466 | struct hammer_ioc_pseudofs_rw *pfs); | |
842e7a70 MD |
1467 | int hammer_ioc_destroy_pseudofs(hammer_transaction_t trans, hammer_inode_t ip, |
1468 | struct hammer_ioc_pseudofs_rw *pfs); | |
1469 | int hammer_ioc_downgrade_pseudofs(hammer_transaction_t trans, hammer_inode_t ip, | |
1470 | struct hammer_ioc_pseudofs_rw *pfs); | |
1471 | int hammer_ioc_upgrade_pseudofs(hammer_transaction_t trans, hammer_inode_t ip, | |
1472 | struct hammer_ioc_pseudofs_rw *pfs); | |
4889cbd4 MD |
1473 | int hammer_ioc_wait_pseudofs(hammer_transaction_t trans, hammer_inode_t ip, |
1474 | struct hammer_ioc_pseudofs_rw *pfs); | |
d121f61c | 1475 | int hammer_ioc_volume_add(hammer_transaction_t trans, hammer_inode_t ip, |
865c9609 MN |
1476 | struct hammer_ioc_volume *ioc); |
1477 | int hammer_ioc_volume_del(hammer_transaction_t trans, hammer_inode_t ip, | |
1478 | struct hammer_ioc_volume *ioc); | |
e914c91d SK |
1479 | int hammer_ioc_volume_list(hammer_transaction_t trans, hammer_inode_t ip, |
1480 | struct hammer_ioc_volume_list *ioc); | |
bb29b5d8 MD |
1481 | int hammer_ioc_dedup(hammer_transaction_t trans, hammer_inode_t ip, |
1482 | struct hammer_ioc_dedup *dedup); | |
bf686dbe | 1483 | |
855942b6 | 1484 | int hammer_signal_check(hammer_mount_t hmp); |
bf686dbe | 1485 | |
059819e3 MD |
1486 | void hammer_flusher_create(hammer_mount_t hmp); |
1487 | void hammer_flusher_destroy(hammer_mount_t hmp); | |
1488 | void hammer_flusher_sync(hammer_mount_t hmp); | |
7a61b85d | 1489 | int hammer_flusher_async(hammer_mount_t hmp, hammer_flush_group_t flg); |
15e75dab | 1490 | int hammer_flusher_async_one(hammer_mount_t hmp); |
93291532 | 1491 | void hammer_flusher_wait(hammer_mount_t hmp, int seq); |
82010f9f | 1492 | void hammer_flusher_wait_next(hammer_mount_t hmp); |
06ad81ff | 1493 | int hammer_flusher_meta_limit(hammer_mount_t hmp); |
93291532 | 1494 | int hammer_flusher_meta_halflimit(hammer_mount_t hmp); |
06ad81ff | 1495 | int hammer_flusher_undo_exhausted(hammer_transaction_t trans, int quarter); |
525aad3a | 1496 | void hammer_flusher_clean_loose_ios(hammer_mount_t hmp); |
6c1f89f4 | 1497 | void hammer_flusher_finalize(hammer_transaction_t trans, int final); |
1b0ab2c3 | 1498 | int hammer_flusher_haswork(hammer_mount_t hmp); |
6048b411 | 1499 | void hammer_flusher_flush_undos(hammer_mount_t hmp, int already_flushed); |
059819e3 | 1500 | |
02428fb6 MD |
1501 | int hammer_recover_stage1(hammer_mount_t hmp, hammer_volume_t rootvol); |
1502 | int hammer_recover_stage2(hammer_mount_t hmp, hammer_volume_t rootvol); | |
51c35492 | 1503 | void hammer_recover_flush_buffers(hammer_mount_t hmp, |
06ad81ff | 1504 | hammer_volume_t root_volume, int final); |
f90dde4c | 1505 | |
19619882 MD |
1506 | void hammer_crc_set_blockmap(hammer_blockmap_t blockmap); |
1507 | void hammer_crc_set_volume(hammer_volume_ondisk_t ondisk); | |
ddfdf542 | 1508 | void hammer_crc_set_leaf(void *data, hammer_btree_leaf_elm_t leaf); |
19619882 MD |
1509 | |
1510 | int hammer_crc_test_blockmap(hammer_blockmap_t blockmap); | |
1511 | int hammer_crc_test_volume(hammer_volume_ondisk_t ondisk); | |
19619882 | 1512 | int hammer_crc_test_btree(hammer_node_ondisk_t ondisk); |
ddfdf542 | 1513 | int hammer_crc_test_leaf(void *data, hammer_btree_leaf_elm_t leaf); |
c6b20585 | 1514 | void hkprintf(const char *ctl, ...) __printflike(1, 2); |
a56cb012 MD |
1515 | udev_t hammer_fsid_to_udev(uuid_t *uuid); |
1516 | ||
19619882 | 1517 | |
4a2796f3 | 1518 | int hammer_blocksize(int64_t file_offset); |
6362a262 | 1519 | int hammer_blockoff(int64_t file_offset); |
4a2796f3 MD |
1520 | int64_t hammer_blockdemarc(int64_t file_offset1, int64_t file_offset2); |
1521 | ||
0f65be10 MD |
1522 | /* |
1523 | * Shortcut for _hammer_checkspace(), used all over the code. | |
1524 | */ | |
1525 | static __inline int | |
1526 | hammer_checkspace(hammer_mount_t hmp, int slop) | |
1527 | { | |
1528 | return(_hammer_checkspace(hmp, slop, NULL)); | |
1529 | } | |
1530 | ||
055f5ff8 | 1531 | #endif |
427e5fc6 | 1532 | |
af209b0f MD |
1533 | static __inline void |
1534 | hammer_wait_mem_record(hammer_record_t record) | |
1535 | { | |
1536 | hammer_wait_mem_record_ident(record, "hmmwai"); | |
1537 | } | |
1538 | ||
/*
 * Acquire an exclusive lock using the default wmesg ident.
 */
static __inline void
hammer_lock_ex(struct hammer_lock *lock)
{
	hammer_lock_ex_ident(lock, "hmrlck");
}
1544 | ||
bcac4bbb MD |
1545 | /* |
1546 | * Indicate that a B-Tree node is being modified. | |
1547 | */ | |
8cd0a023 | 1548 | static __inline void |
36f82b23 | 1549 | hammer_modify_node_noundo(hammer_transaction_t trans, hammer_node_t node) |
bf686dbe | 1550 | { |
4c286c36 | 1551 | KKASSERT((node->flags & HAMMER_NODE_CRCBAD) == 0); |
36f82b23 | 1552 | hammer_modify_buffer(trans, node->buffer, NULL, 0); |
bf686dbe MD |
1553 | } |
1554 | ||
1555 | static __inline void | |
36f82b23 | 1556 | hammer_modify_node_all(hammer_transaction_t trans, struct hammer_node *node) |
8cd0a023 | 1557 | { |
4c286c36 | 1558 | KKASSERT((node->flags & HAMMER_NODE_CRCBAD) == 0); |
36f82b23 MD |
1559 | hammer_modify_buffer(trans, node->buffer, |
1560 | node->ondisk, sizeof(*node->ondisk)); | |
427e5fc6 MD |
1561 | } |
1562 | ||
bf686dbe | 1563 | static __inline void |
36f82b23 MD |
1564 | hammer_modify_node(hammer_transaction_t trans, hammer_node_t node, |
1565 | void *base, int len) | |
bf686dbe | 1566 | { |
19619882 MD |
1567 | hammer_crc_t *crcptr; |
1568 | ||
bf686dbe MD |
1569 | KKASSERT((char *)base >= (char *)node->ondisk && |
1570 | (char *)base + len <= | |
1571 | (char *)node->ondisk + sizeof(*node->ondisk)); | |
4c286c36 | 1572 | KKASSERT((node->flags & HAMMER_NODE_CRCBAD) == 0); |
36f82b23 | 1573 | hammer_modify_buffer(trans, node->buffer, base, len); |
19619882 MD |
1574 | crcptr = &node->ondisk->crc; |
1575 | hammer_modify_buffer(trans, node->buffer, crcptr, sizeof(hammer_crc_t)); | |
1576 | --node->buffer->io.modify_refs; /* only want one ref */ | |
bf686dbe MD |
1577 | } |
1578 | ||
bcac4bbb MD |
1579 | /* |
1580 | * Indicate that the specified modifications have been completed. | |
1581 | * | |
1582 | * Do not try to generate the crc here, it's very expensive to do and a | |
1583 | * sequence of insertions or deletions can result in many calls to this | |
1584 | * function on the same node. | |
1585 | */ | |
10a5d1ba MD |
1586 | static __inline void |
1587 | hammer_modify_node_done(hammer_node_t node) | |
1588 | { | |
bcac4bbb MD |
1589 | node->flags |= HAMMER_NODE_CRCGOOD; |
1590 | if ((node->flags & HAMMER_NODE_NEEDSCRC) == 0) { | |
1591 | node->flags |= HAMMER_NODE_NEEDSCRC; | |
1592 | node->buffer->io.gencrc = 1; | |
1593 | hammer_ref_node(node); | |
1594 | } | |
10a5d1ba MD |
1595 | hammer_modify_buffer_done(node->buffer); |
1596 | } | |
1597 | ||
/*
 * Bracket modification of a single named field of a volume or node
 * ondisk structure, generating UNDO for exactly that field.
 */
#define hammer_modify_volume_field(trans, vol, field)		\
	hammer_modify_volume(trans, vol, &(vol)->ondisk->field,	\
			     sizeof((vol)->ondisk->field))

#define hammer_modify_node_field(trans, node, field)		\
	hammer_modify_node(trans, node, &(node)->ondisk->field,	\
			   sizeof((node)->ondisk->field))
1605 | ||
beec5dc4 MD |
1606 | /* |
1607 | * The HAMMER_INODE_CAP_DIR_LOCAL_INO capability is set on newly | |
1608 | * created directories for HAMMER version 2 or greater and causes | |
1609 | * directory entries to be placed the inode localization zone in | |
1610 | * the B-Tree instead of the misc zone. | |
1611 | * | |
1612 | * This greatly improves localization between directory entries and | |
1613 | * inodes | |
1614 | */ | |
1615 | static __inline u_int32_t | |
1616 | hammer_dir_localization(hammer_inode_t dip) | |
1617 | { | |
1618 | if (dip->ino_data.cap_flags & HAMMER_INODE_CAP_DIR_LOCAL_INO) | |
1619 | return(HAMMER_LOCALIZE_INODE); | |
1620 | else | |
1621 | return(HAMMER_LOCALIZE_MISC); | |
1622 | } |