Commit | Line | Data |
---|---|---|
8750964d | 1 | /* |
1f07f686 | 2 | * Copyright (c) 2007-2008 The DragonFly Project. All rights reserved. |
745703c7 | 3 | * |
8750964d MD |
4 | * This code is derived from software contributed to The DragonFly Project |
5 | * by Matthew Dillon <dillon@backplane.com> | |
745703c7 | 6 | * |
8750964d MD |
7 | * Redistribution and use in source and binary forms, with or without |
8 | * modification, are permitted provided that the following conditions | |
9 | * are met: | |
745703c7 | 10 | * |
8750964d MD |
11 | * 1. Redistributions of source code must retain the above copyright |
12 | * notice, this list of conditions and the following disclaimer. | |
13 | * 2. Redistributions in binary form must reproduce the above copyright | |
14 | * notice, this list of conditions and the following disclaimer in | |
15 | * the documentation and/or other materials provided with the | |
16 | * distribution. | |
17 | * 3. Neither the name of The DragonFly Project nor the names of its | |
18 | * contributors may be used to endorse or promote products derived | |
19 | * from this software without specific, prior written permission. | |
745703c7 | 20 | * |
8750964d MD |
21 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
22 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | |
23 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS | |
24 | * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE | |
25 | * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, | |
26 | * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, | |
27 | * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; | |
28 | * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED | |
29 | * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, | |
30 | * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT | |
31 | * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | |
32 | * SUCH DAMAGE. | |
8750964d | 33 | */ |
964cb30d TK |
34 | |
35 | #ifndef VFS_HAMMER_HAMMER_H_ | |
36 | #define VFS_HAMMER_HAMMER_H_ | |
37 | ||
8750964d MD |
38 | /* |
39 | * This header file contains structures used internally by the HAMMERFS | |
c60bb2c5 | 40 | * implementation. See hammer_disk.h for on-disk structures. |
8750964d MD |
41 | */ |
42 | ||
427e5fc6 | 43 | #include <sys/param.h> |
872a7eee | 44 | #ifdef _KERNEL |
427e5fc6 MD |
45 | #include <sys/kernel.h> |
46 | #include <sys/systm.h> | |
13dd34d8 | 47 | #include <sys/uio.h> |
872a7eee | 48 | #endif |
23e66b3b | 49 | #include <sys/conf.h> |
8750964d MD |
50 | #include <sys/tree.h> |
51 | #include <sys/malloc.h> | |
427e5fc6 MD |
52 | #include <sys/mount.h> |
53 | #include <sys/vnode.h> | |
42c7d26b | 54 | #include <sys/proc.h> |
2b3f93ea | 55 | #include <sys/caps.h> |
7bb4ec32 | 56 | #include <sys/dirent.h> |
e63644f0 | 57 | #include <sys/stat.h> |
7bb4ec32 | 58 | #include <sys/fcntl.h> |
66325755 | 59 | #include <sys/lockf.h> |
7bb4ec32 TK |
60 | #include <sys/file.h> |
61 | #include <sys/event.h> | |
66325755 | 62 | #include <sys/buf.h> |
8cd0a023 | 63 | #include <sys/queue.h> |
bcac4bbb | 64 | #include <sys/ktr.h> |
9192654c | 65 | #include <sys/limits.h> |
7bb4ec32 TK |
66 | #include <sys/sysctl.h> |
67 | #include <vm/swap_pager.h> | |
c652be54 | 68 | #include <vm/vm_extern.h> |
66325755 | 69 | |
427e5fc6 | 70 | #include "hammer_disk.h" |
8750964d | 71 | #include "hammer_mount.h" |
7dc57964 | 72 | #include "hammer_ioctl.h" |
8856c8bd | 73 | #include "hammer_crc.h" |
8750964d MD |
74 | |
75 | #if defined(_KERNEL) || defined(_KERNEL_STRUCTURES) | |
76 | ||
7ea34faa | 77 | #ifdef MALLOC_DECLARE |
8750964d | 78 | MALLOC_DECLARE(M_HAMMER); |
7ea34faa | 79 | #endif |
8750964d | 80 | |
bcac4bbb MD |
81 | /* |
82 | * Kernel trace | |
83 | */ | |
84 | #if !defined(KTR_HAMMER) | |
85 | #define KTR_HAMMER KTR_ALL | |
86 | #endif | |
879a1b60 | 87 | /* KTR_INFO_MASTER_EXTERN(hammer); */ |
bcac4bbb MD |
88 | |
89 | /* | |
90 | * Misc structures | |
91 | */ | |
66325755 | 92 | struct hammer_mount; |
2275141b TK |
93 | struct hammer_inode; |
94 | struct hammer_volume; | |
95 | struct hammer_buffer; | |
96 | struct hammer_node; | |
97 | struct hammer_undo; | |
98 | struct hammer_reserve; | |
99 | struct hammer_io; | |
66325755 | 100 | |
8750964d MD |
101 | /* |
102 | * Key structure used for custom RB tree inode lookups. This prototypes | |
103 | * the function hammer_ino_rb_tree_RB_LOOKUP_INFO(root, info). | |
104 | */ | |
/*
 * Composite lookup key for the custom inode RB tree (see the comment above
 * and hammer_ino_rb_tree_RB_LOOKUP_INFO()).
 */
typedef struct hammer_inode_info {
	int64_t obj_id;			/* (key) object identifier */
	hammer_tid_t obj_asof;		/* (key) snapshot transid or 0 */
	uint32_t obj_localization;	/* (key) pseudo-fs id for upper 16 bits */
	union {
		/* optional B-Tree leaf element carried with the lookup */
		hammer_btree_leaf_elm_t leaf;
	} u;
} *hammer_inode_info_t;
113 | ||
b84de5af MD |
/*
 * Transaction classes carried in hammer_transaction::type.
 */
typedef enum hammer_transaction_type {
	HAMMER_TRANS_RO,	/* read-only */
	HAMMER_TRANS_STD,	/* standard */
	HAMMER_TRANS_FLS	/* flush */
} hammer_transaction_type_t;
119 | ||
66325755 MD |
120 | /* |
121 | * HAMMER Transaction tracking | |
122 | */ | |
typedef struct hammer_transaction {
	hammer_transaction_type_t type;	/* RO / STD / FLS */
	struct hammer_mount *hmp;	/* mount the transaction runs against */
	hammer_tid_t tid;		/* transaction id */
	uint64_t time;			/* transaction timestamp */
	uint32_t time32;		/* 32 bit transaction timestamp */
	int sync_lock_refs;		/* sync-lock reference count held */
	int flags;			/* HAMMER_TRANSF_* flags */
	struct hammer_volume *rootvol;	/* root volume of the mount */
} *hammer_transaction_t;
8cd0a023 | 133 | |
21fde338 | 134 | #define HAMMER_TRANSF_NEWINODE 0x0001 |
4c286c36 | 135 | #define HAMMER_TRANSF_CRCDOM 0x0004 /* EDOM on CRC error, less critical */ |
21fde338 | 136 | |
66325755 MD |
137 | /* |
138 | * HAMMER locks | |
139 | */ | |
struct hammer_lock {
	volatile u_int refs;	/* active references */
	volatile u_int lockval;	/* lock count and control bits */
	struct thread *lowner;	/* owner if exclusively held */
	struct thread *rowner;	/* owner if exclusively held */
				/* NOTE(review): same comment as lowner in the
				 * original; presumably tracks a shared-lock
				 * owner for debugging — confirm */
};
146 | ||
250aec18 MD |
147 | #define HAMMER_REFS_LOCKED 0x40000000 /* transition check */ |
148 | #define HAMMER_REFS_WANTED 0x20000000 /* transition check */ | |
149 | #define HAMMER_REFS_CHECK 0x10000000 /* transition check */ | |
150 | ||
151 | #define HAMMER_REFS_FLAGS (HAMMER_REFS_LOCKED | \ | |
152 | HAMMER_REFS_WANTED | \ | |
153 | HAMMER_REFS_CHECK) | |
154 | ||
899eb297 | 155 | #define HAMMER_LOCKF_EXCLUSIVE 0x40000000 |
250aec18 | 156 | #define HAMMER_LOCKF_WANTED 0x20000000 |
899eb297 | 157 | |
0fe46dc6 MD |
158 | #define HAMMER_LIMIT_RECLAIMS 16384 /* maximum reclaims in-prog */ |
159 | ||
899eb297 MD |
160 | static __inline int |
161 | hammer_notlocked(struct hammer_lock *lock) | |
162 | { | |
163 | return(lock->lockval == 0); | |
164 | } | |
165 | ||
427e5fc6 MD |
166 | static __inline int |
167 | hammer_islocked(struct hammer_lock *lock) | |
168 | { | |
899eb297 | 169 | return(lock->lockval != 0); |
427e5fc6 MD |
170 | } |
171 | ||
250aec18 MD |
172 | /* |
173 | * Returns the number of refs on the object. | |
174 | */ | |
0b075555 MD |
175 | static __inline int |
176 | hammer_isactive(struct hammer_lock *lock) | |
177 | { | |
250aec18 MD |
178 | return(lock->refs & ~HAMMER_REFS_FLAGS); |
179 | } | |
180 | ||
181 | static __inline int | |
182 | hammer_oneref(struct hammer_lock *lock) | |
183 | { | |
184 | return((lock->refs & ~HAMMER_REFS_FLAGS) == 1); | |
0b075555 MD |
185 | } |
186 | ||
427e5fc6 | 187 | static __inline int |
250aec18 | 188 | hammer_norefs(struct hammer_lock *lock) |
427e5fc6 | 189 | { |
250aec18 MD |
190 | return((lock->refs & ~HAMMER_REFS_FLAGS) == 0); |
191 | } | |
192 | ||
193 | static __inline int | |
194 | hammer_norefsorlock(struct hammer_lock *lock) | |
195 | { | |
196 | return(lock->refs == 0); | |
197 | } | |
198 | ||
199 | static __inline int | |
200 | hammer_refsorlock(struct hammer_lock *lock) | |
201 | { | |
202 | return(lock->refs != 0); | |
427e5fc6 | 203 | } |
c60bb2c5 | 204 | |
6a37e7e4 | 205 | /* |
7aa3b8a6 | 206 | * Return if we specifically own the lock exclusively. |
6a37e7e4 MD |
207 | */ |
208 | static __inline int | |
7aa3b8a6 | 209 | hammer_lock_excl_owned(struct hammer_lock *lock, thread_t td) |
6a37e7e4 | 210 | { |
899eb297 | 211 | if ((lock->lockval & HAMMER_LOCKF_EXCLUSIVE) && |
250aec18 | 212 | lock->lowner == td) { |
7aa3b8a6 | 213 | return(1); |
899eb297 | 214 | } |
7aa3b8a6 | 215 | return(0); |
6a37e7e4 MD |
216 | } |
217 | ||
ec4e8497 | 218 | /* |
1f07f686 | 219 | * Flush state, used by various structures |
ec4e8497 | 220 | */ |
1f07f686 MD |
typedef enum hammer_inode_state {
	HAMMER_FST_IDLE,	/* not participating in a flush */
	HAMMER_FST_SETUP,	/* being set up for a flush */
	HAMMER_FST_FLUSH	/* flush in progress */
} hammer_inode_state_t;
ec4e8497 | 226 | |
5fa5c92f MD |
227 | /* |
228 | * Pseudo-filesystem extended data tracking | |
229 | */ | |
5fa5c92f MD |
230 | struct hammer_pseudofs_inmem; |
231 | RB_HEAD(hammer_pfs_rb_tree, hammer_pseudofs_inmem); | |
232 | RB_PROTOTYPE2(hammer_pfs_rb_tree, hammer_pseudofs_inmem, rb_node, | |
46137e17 | 233 | hammer_pfs_rb_compare, uint32_t); |
5fa5c92f | 234 | |
typedef struct hammer_pseudofs_inmem {
	RB_ENTRY(hammer_pseudofs_inmem) rb_node; /* hammer_pfs_rb_tree linkage */
	struct hammer_lock lock;
	uint32_t localization;		/* RB-tree key (see hammer_pfs_rb_compare) */
	hammer_tid_t create_tid;
	int flags;
	dev_t fsid_udev;
	struct hammer_pseudofs_data pfsd; /* in-memory copy of on-disk PFS data */
} *hammer_pseudofs_inmem_t;
5fa5c92f | 244 | |
0729c8c8 MD |
245 | /* |
246 | * Cache object ids. A fixed number of objid cache structures are | |
247 | * created to reserve object id's for newly created files in multiples | |
248 | * of 100,000, localized to a particular directory, and recycled as | |
249 | * needed. This allows parallel create operations in different | |
250 | * directories to retain fairly localized object ids which in turn | |
251 | * improves reblocking performance and layout. | |
252 | */ | |
7d29aec0 | 253 | #define OBJID_CACHE_SIZE 2048 |
5a64efa1 MD |
254 | #define OBJID_CACHE_BULK_BITS 10 /* 10 bits (1024) */ |
255 | #define OBJID_CACHE_BULK (32 * 32) /* two level (1024) */ | |
256 | #define OBJID_CACHE_BULK_MASK (OBJID_CACHE_BULK - 1) | |
46137e17 | 257 | #define OBJID_CACHE_BULK_MASK64 ((uint64_t)(OBJID_CACHE_BULK - 1)) |
0729c8c8 MD |
258 | |
typedef struct hammer_objid_cache {
	TAILQ_ENTRY(hammer_objid_cache) entry;
	struct hammer_inode *dip;	/* directory the cache is localized to */
	hammer_tid_t base_tid;		/* base of the reserved object-id range */
	int count;			/* allocation count */
	uint32_t bm0;			/* two-level allocation bitmap, */
	uint32_t bm1[32];		/* 32 x 32 = OBJID_CACHE_BULK (1024) ids */
} *hammer_objid_cache_t;
267 | ||
bcac4bbb MD |
268 | /* |
269 | * Associate an inode with a B-Tree node to cache search start positions | |
270 | */ | |
typedef struct hammer_node_cache {
	TAILQ_ENTRY(hammer_node_cache) entry;	/* node's cache_list linkage */
	struct hammer_node *node;	/* cached B-Tree node */
	struct hammer_inode *ip;	/* inode holding this cache entry */
} *hammer_node_cache_t;
276 | ||
277 | TAILQ_HEAD(hammer_node_cache_list, hammer_node_cache); | |
278 | ||
7a61b85d MD |
279 | /* |
280 | * Structure used to organize flush groups. Flush groups must be | |
281 | * organized into chunks in order to avoid blowing out the UNDO FIFO. | |
282 | * Without this a 'sync' could end up flushing 50,000 inodes in a single | |
283 | * transaction. | |
284 | */ | |
ff003b11 MD |
285 | RB_HEAD(hammer_fls_rb_tree, hammer_inode); |
286 | RB_PROTOTYPE(hammer_fls_rb_tree, hammer_inode, rb_flsnode, | |
287 | hammer_ino_rb_compare); | |
288 | ||
typedef struct hammer_flush_group {
	TAILQ_ENTRY(hammer_flush_group) flush_entry; /* flush_group_list linkage */
	struct hammer_fls_rb_tree flush_tree;	/* inodes in this flush group */
	int seq;		/* our seq no */
	int total_count;	/* record load */
	int running;		/* group is running */
	int closed;		/* no longer accepts new inodes (presumably) */
	int refs;		/* active references */
} *hammer_flush_group_t;
7a61b85d MD |
298 | |
299 | TAILQ_HEAD(hammer_flush_group_list, hammer_flush_group); | |
300 | ||
8750964d | 301 | /* |
8cd0a023 MD |
302 | * Structure used to represent an inode in-memory. |
303 | * | |
304 | * The record and data associated with an inode may be out of sync with | |
305 | * the disk (xDIRTY flags), or not even on the disk at all (ONDISK flag | |
306 | * clear). | |
307 | * | |
308 | * An inode may also hold a cache of unsynchronized records, used for | |
309 | * database and directories only. Unsynchronized regular file data is | |
310 | * stored in the buffer cache. | |
311 | * | |
312 | * NOTE: A file which is created and destroyed within the initial | |
313 | * synchronization period can wind up not doing any disk I/O at all. | |
314 | * | |
315 | * Finally, an inode may cache numerous disk-referencing B-Tree cursors. | |
8750964d | 316 | */ |
8750964d MD |
317 | RB_HEAD(hammer_ino_rb_tree, hammer_inode); |
318 | RB_PROTOTYPEX(hammer_ino_rb_tree, INFO, hammer_inode, rb_node, | |
8cd0a023 MD |
319 | hammer_ino_rb_compare, hammer_inode_info_t); |
320 | ||
73896937 MD |
321 | RB_HEAD(hammer_redo_rb_tree, hammer_inode); |
322 | RB_PROTOTYPE2(hammer_redo_rb_tree, hammer_inode, rb_redonode, | |
323 | hammer_redo_rb_compare, hammer_off_t); | |
324 | ||
8cd0a023 MD |
325 | struct hammer_record; |
326 | RB_HEAD(hammer_rec_rb_tree, hammer_record); | |
327 | RB_PROTOTYPEX(hammer_rec_rb_tree, INFO, hammer_record, rb_node, | |
0832c9bb | 328 | hammer_rec_rb_compare, hammer_btree_leaf_elm_t); |
8cd0a023 | 329 | |
2275141b | 330 | TAILQ_HEAD(hammer_record_list, hammer_record); |
8cd0a023 | 331 | TAILQ_HEAD(hammer_node_list, hammer_node); |
8750964d | 332 | |
typedef struct hammer_inode {
	RB_ENTRY(hammer_inode) rb_node;		/* hmp inode tree linkage */
	hammer_inode_state_t flush_state;	/* IDLE/SETUP/FLUSH */
	hammer_flush_group_t flush_group;	/* group when flush_state != IDLE */
	RB_ENTRY(hammer_inode) rb_flsnode;	/* when on flush list */
	RB_ENTRY(hammer_inode) rb_redonode;	/* when INODE_RDIRTY is set */
	struct hammer_record_list target_list;	/* target of dependant recs */
	int64_t obj_id;			/* (key) object identifier */
	hammer_tid_t obj_asof;		/* (key) snapshot or 0 */
	uint32_t obj_localization;	/* (key) pseudo-fs id for upper 16 bits */
	struct hammer_mount *hmp;
	hammer_objid_cache_t objid_cache;	/* object-id reservation cache */
	int flags;			/* HAMMER_INODE_* flags */
	int error;			/* flush error */
	int cursor_ip_refs;		/* sanity */
#if 0
	int cursor_exclreq_count;
#endif
	int rsv_recs;			/* reserved record count */
	struct vnode *vp;
	hammer_pseudofs_inmem_t pfsm;	/* pseudo-fs this inode belongs to */
	struct lockf advlock;		/* advisory (POSIX) lock state */
	struct hammer_lock lock;	/* sync copy interlock */
	off_t trunc_off;		/* pending truncation offset */
	struct hammer_btree_leaf_elm ino_leaf; /* in-memory cache */
	struct hammer_inode_data ino_data;	/* in-memory cache */
	struct hammer_rec_rb_tree rec_tree;	/* in-memory cache */
	int rec_generation;		/* bumped to invalidate rec_tree scans */

	/*
	 * search initiate cache
	 * cache[0] - this inode
	 * cache[1] - related data, the content depends on situations
	 * cache[2] - for dip to cache ip to shortcut B-Tree search
	 * cache[3] - related data copied from dip to a new ip's cache[1]
	 */
	struct hammer_node_cache cache[4];

	/*
	 * When a demark is created to synchronize an inode to
	 * disk, certain fields are copied so the front-end VOPs
	 * can continue to run in parallel with the synchronization
	 * occuring in the background.
	 */
	int sync_flags;			/* to-sync flags cache */
	off_t sync_trunc_off;		/* to-sync truncation */
	off_t save_trunc_off;		/* write optimization */
	struct hammer_btree_leaf_elm sync_ino_leaf; /* to-sync cache */
	struct hammer_inode_data sync_ino_data; /* to-sync cache */
	size_t redo_count;		/* unsynced REDO bytes laid down */

	/*
	 * Track the earliest offset in the UNDO/REDO FIFO containing
	 * REDO records.  This is staged to the backend during flush
	 * sequences.  While the inode is staged redo_fifo_next is used
	 * to track the earliest offset for rotation into redo_fifo_start
	 * on completion of the flush.
	 */
	hammer_off_t redo_fifo_start;
	hammer_off_t redo_fifo_next;
} *hammer_inode_t;
8cd0a023 | 394 | |
e1067862 | 395 | #define VTOI(vp) ((hammer_inode_t)(vp)->v_data) |
66325755 | 396 | |
9192654c MD |
397 | /* |
398 | * NOTE: DDIRTY does not include atime or mtime and does not include | |
399 | * write-append size changes. SDIRTY handles write-append size | |
400 | * changes. | |
47f363f1 MD |
401 | * |
402 | * REDO indicates that REDO logging is active, creating a definitive | |
403 | * stream of REDO records in the UNDO/REDO log for writes and | |
404 | * truncations, including boundary records when/if REDO is turned off. | |
405 | * REDO is typically enabled by fsync() and turned off if excessive | |
406 | * writes without an fsync() occurs. | |
407 | * | |
408 | * RDIRTY indicates that REDO records were laid down in the UNDO/REDO | |
409 | * FIFO (even if REDO is turned off some might still be active) and | |
410 | * still being tracked for this inode. See hammer_redo.c | |
9192654c | 411 | */ |
9192654c | 412 | #define HAMMER_INODE_DDIRTY 0x0001 /* in-memory ino_data is dirty */ |
d6571938 | 413 | /* (not including atime/mtime) */ |
e63644f0 | 414 | #define HAMMER_INODE_RSV_INODES 0x0002 /* hmp->rsv_inodes bumped */ |
7b6ccb11 | 415 | #define HAMMER_INODE_CONN_DOWN 0x0004 /* include in downward recursion */ |
1f07f686 | 416 | #define HAMMER_INODE_XDIRTY 0x0008 /* in-memory records */ |
8cd0a023 | 417 | #define HAMMER_INODE_ONDISK 0x0010 /* inode is on-disk (else not yet) */ |
a89aec1b | 418 | #define HAMMER_INODE_FLUSH 0x0020 /* flush on last ref */ |
869e8f55 | 419 | #define HAMMER_INODE_DELETED 0x0080 /* inode delete (backend) */ |
76376933 | 420 | #define HAMMER_INODE_DELONDISK 0x0100 /* delete synchronized to disk */ |
d113fda1 | 421 | #define HAMMER_INODE_RO 0x0200 /* read-only (because of as-of) */ |
a117fbeb | 422 | #define HAMMER_INODE_RECSW 0x0400 /* waiting on data record flush */ |
0a72edae | 423 | #define HAMMER_INODE_DONDISK 0x0800 /* data records may be on disk */ |
f3b0f382 | 424 | #define HAMMER_INODE_BUFS 0x1000 /* dirty high level bps present */ |
f153644d | 425 | #define HAMMER_INODE_REFLUSH 0x2000 /* flush on dependancy / reflush */ |
9f5097dc | 426 | #define HAMMER_INODE_RECLAIM 0x4000 /* trying to reclaim */ |
d6571938 | 427 | #define HAMMER_INODE_FLUSHW 0x8000 /* someone waiting for flush */ |
8cd0a023 | 428 | |
b84de5af | 429 | #define HAMMER_INODE_TRUNCATED 0x00010000 |
869e8f55 | 430 | #define HAMMER_INODE_DELETING 0x00020000 /* inode delete request (frontend)*/ |
4e17f465 | 431 | #define HAMMER_INODE_RESIGNAL 0x00040000 /* re-signal on re-flush */ |
ddfdf542 | 432 | #define HAMMER_INODE_ATIME 0x00100000 /* in-memory atime modified */ |
06ad81ff MD |
433 | #define HAMMER_INODE_MTIME 0x00200000 /* in-memory mtime modified */ |
434 | #define HAMMER_INODE_WOULDBLOCK 0x00400000 /* re-issue to new flush group */ | |
7866ea2a | 435 | #define HAMMER_INODE_DUMMY 0x00800000 /* dummy inode covering bad file */ |
9192654c | 436 | #define HAMMER_INODE_SDIRTY 0x01000000 /* in-memory ino_data.size is dirty*/ |
47f363f1 MD |
437 | #define HAMMER_INODE_REDO 0x02000000 /* REDO logging active */ |
438 | #define HAMMER_INODE_RDIRTY 0x04000000 /* REDO records active in fifo */ | |
e2a02b72 | 439 | #define HAMMER_INODE_SLAVEFLUSH 0x08000000 /* being flushed by slave */ |
b84de5af | 440 | |
9192654c | 441 | #define HAMMER_INODE_MODMASK (HAMMER_INODE_DDIRTY|HAMMER_INODE_SDIRTY| \ |
11ad5ade | 442 | HAMMER_INODE_XDIRTY|HAMMER_INODE_BUFS| \ |
ddfdf542 MD |
443 | HAMMER_INODE_ATIME|HAMMER_INODE_MTIME| \ |
444 | HAMMER_INODE_TRUNCATED|HAMMER_INODE_DELETING) | |
1f07f686 | 445 | |
9192654c | 446 | #define HAMMER_INODE_MODMASK_NOXDIRTY \ |
1f07f686 | 447 | (HAMMER_INODE_MODMASK & ~HAMMER_INODE_XDIRTY) |
0b075555 | 448 | |
9192654c MD |
449 | #define HAMMER_INODE_MODMASK_NOREDO \ |
450 | (HAMMER_INODE_DDIRTY| \ | |
451 | HAMMER_INODE_XDIRTY| \ | |
452 | HAMMER_INODE_TRUNCATED|HAMMER_INODE_DELETING) | |
453 | ||
f90dde4c | 454 | #define HAMMER_FLUSH_SIGNAL 0x0001 |
4e17f465 | 455 | #define HAMMER_FLUSH_RECURSION 0x0002 |
f90dde4c | 456 | |
7bc5b8c2 MD |
457 | /* |
458 | * Used by the inode reclaim code to pipeline reclaims and avoid | |
459 | * blowing out kernel memory or letting the flusher get too far | |
82010f9f MD |
460 | * behind. The reclaim wakes up when count reaches 0 or the |
461 | * timer expires. | |
7bc5b8c2 MD |
462 | */ |
struct hammer_reclaim {
	TAILQ_ENTRY(hammer_reclaim) entry;
	int count;	/* waiter wakes when this reaches 0 or a timer expires */
};
467 | ||
e98f1b96 MD |
468 | /* |
469 | * Track who is creating the greatest burden on the | |
470 | * inode cache. | |
471 | */ | |
/*
 * One slot of the per-process inode-load statistics table
 * (HAMMER_INOSTATS_HSIZE slots; presumably hashed by pid via
 * HAMMER_INOSTATS_HMASK — confirm against the lookup code).
 */
struct hammer_inostats {
	pid_t pid;	/* track user process */
	int ltick;	/* last tick */
	int count;	/* count (degenerates) */
};
477 | ||
478 | #define HAMMER_INOSTATS_HSIZE 32 | |
479 | #define HAMMER_INOSTATS_HMASK (HAMMER_INOSTATS_HSIZE - 1) | |
480 | ||
8750964d | 481 | /* |
1f07f686 MD |
482 | * Structure used to represent an unsynchronized record in-memory. These |
483 | * records typically represent directory entries. Only non-historical | |
484 | * records are kept in-memory. | |
485 | * | |
486 | * Records are organized as a per-inode RB-Tree. If the inode is not | |
8cd0a023 MD |
487 | * on disk then neither are any records and the in-memory record tree |
488 | * represents the entire contents of the inode. If the inode is on disk | |
489 | * then the on-disk B-Tree is scanned in parallel with the in-memory | |
490 | * RB-Tree to synthesize the current state of the file. | |
491 | * | |
1f07f686 MD |
492 | * Records are also used to enforce the ordering of directory create/delete |
493 | * operations. A new inode will not be flushed to disk unless its related | |
494 | * directory entry is also being flushed at the same time. A directory entry | |
495 | * will not be removed unless its related inode is also being removed at the | |
496 | * same time. | |
8750964d | 497 | */ |
/*
 * Classification of an in-memory record (see the structure comment above).
 */
typedef enum hammer_record_type {
	HAMMER_MEM_RECORD_GENERAL,	/* misc record */
	HAMMER_MEM_RECORD_INODE,	/* inode record */
	HAMMER_MEM_RECORD_ADD,		/* positive memory cache record */
	HAMMER_MEM_RECORD_DEL,		/* negative delete-on-disk record */
	HAMMER_MEM_RECORD_DATA		/* bulk-data record w/on-disk ref */
} hammer_record_type_t;
505 | ||
typedef struct hammer_record {
	RB_ENTRY(hammer_record) rb_node;	/* inode rec_tree linkage */
	TAILQ_ENTRY(hammer_record) target_entry; /* dependency linkage (see
						  * hammer_inode target_list) */
	hammer_inode_state_t flush_state;
	hammer_flush_group_t flush_group;
	hammer_record_type_t type;		/* HAMMER_MEM_RECORD_* */
	struct hammer_lock lock;
	struct hammer_reserve *resv;		/* space reservation, if any */
	hammer_inode_t ip;			/* inode owning this record */
	hammer_inode_t target_ip;		/* inode this record targets */
	struct hammer_btree_leaf_elm leaf;	/* B-Tree element for the record */
	hammer_data_ondisk_t data;
	int flags;				/* HAMMER_RECF_* */
	int gflags;				/* HAMMER_RECG_* (SMP-separate) */
	hammer_off_t zone2_offset;	/* direct-write only */
} *hammer_record_t;
8cd0a023 | 522 | |
d36ec43b MD |
523 | /* |
524 | * Record flags. Note that FE can only be set by the frontend if the | |
525 | * record has not been interlocked by the backend w/ BE. | |
526 | */ | |
8cd0a023 MD |
527 | #define HAMMER_RECF_ALLOCDATA 0x0001 |
528 | #define HAMMER_RECF_ONRBTREE 0x0002 | |
b84de5af | 529 | #define HAMMER_RECF_DELETED_FE 0x0004 /* deleted (frontend) */ |
d36ec43b | 530 | #define HAMMER_RECF_DELETED_BE 0x0008 /* deleted (backend) */ |
1b0ab2c3 | 531 | #define HAMMER_RECF_COMMITTED 0x0010 /* committed to the B-Tree */ |
d36ec43b | 532 | #define HAMMER_RECF_INTERLOCK_BE 0x0020 /* backend interlock */ |
0832c9bb | 533 | #define HAMMER_RECF_WANTED 0x0040 /* wanted by the frontend */ |
7866ea2a | 534 | #define HAMMER_RECF_CONVERT_DELETE 0x0100 /* special case */ |
47f363f1 | 535 | #define HAMMER_RECF_REDO 0x1000 /* REDO was laid down */ |
8cd0a023 | 536 | |
77912481 MD |
537 | /* |
538 | * These flags must be separate to deal with SMP races | |
539 | */ | |
540 | #define HAMMER_RECG_DIRECT_IO 0x0001 /* related direct I/O running*/ | |
541 | #define HAMMER_RECG_DIRECT_WAIT 0x0002 /* related direct I/O running*/ | |
542 | #define HAMMER_RECG_DIRECT_INVAL 0x0004 /* buffer alias invalidation */ | |
602c6cb8 | 543 | /* |
83f2a3aa | 544 | * hammer_create_at_cursor() and hammer_delete_at_cursor() flags. |
602c6cb8 | 545 | */ |
83f2a3aa MD |
546 | #define HAMMER_CREATE_MODE_UMIRROR 0x0001 |
547 | #define HAMMER_CREATE_MODE_SYS 0x0002 | |
548 | ||
602c6cb8 MD |
549 | #define HAMMER_DELETE_ADJUST 0x0001 |
550 | #define HAMMER_DELETE_DESTROY 0x0002 | |
551 | ||
8cd0a023 | 552 | /* |
47197d71 | 553 | * In-memory structures representing on-disk structures. |
8cd0a023 | 554 | */ |
8750964d | 555 | RB_HEAD(hammer_vol_rb_tree, hammer_volume); |
427e5fc6 | 556 | RB_HEAD(hammer_buf_rb_tree, hammer_buffer); |
8cd0a023 | 557 | RB_HEAD(hammer_nod_rb_tree, hammer_node); |
e8599db1 | 558 | RB_HEAD(hammer_und_rb_tree, hammer_undo); |
0832c9bb | 559 | RB_HEAD(hammer_res_rb_tree, hammer_reserve); |
1afb73cf | 560 | RB_HEAD(hammer_mod_rb_tree, hammer_io); |
8750964d MD |
561 | |
562 | RB_PROTOTYPE2(hammer_vol_rb_tree, hammer_volume, rb_node, | |
563 | hammer_vol_rb_compare, int32_t); | |
427e5fc6 | 564 | RB_PROTOTYPE2(hammer_buf_rb_tree, hammer_buffer, rb_node, |
47197d71 | 565 | hammer_buf_rb_compare, hammer_off_t); |
8cd0a023 | 566 | RB_PROTOTYPE2(hammer_nod_rb_tree, hammer_node, rb_node, |
47197d71 | 567 | hammer_nod_rb_compare, hammer_off_t); |
e8599db1 MD |
568 | RB_PROTOTYPE2(hammer_und_rb_tree, hammer_undo, rb_node, |
569 | hammer_und_rb_compare, hammer_off_t); | |
0832c9bb MD |
570 | RB_PROTOTYPE2(hammer_res_rb_tree, hammer_reserve, rb_node, |
571 | hammer_res_rb_compare, hammer_off_t); | |
1afb73cf MD |
572 | RB_PROTOTYPE2(hammer_mod_rb_tree, hammer_io, rb_node, |
573 | hammer_mod_rb_compare, hammer_off_t); | |
8750964d | 574 | |
66325755 MD |
575 | /* |
576 | * IO management - embedded at the head of various in-memory structures | |
10a5d1ba MD |
577 | * |
578 | * VOLUME - hammer_volume containing meta-data | |
579 | * META_BUFFER - hammer_buffer containing meta-data | |
88cecadc | 580 | * UNDO_BUFFER - hammer_buffer containing undo-data |
10a5d1ba | 581 | * DATA_BUFFER - hammer_buffer containing pure-data |
88cecadc | 582 | * DUMMY - hammer_buffer not containing valid data |
10a5d1ba MD |
583 | * |
584 | * Dirty volume headers and dirty meta-data buffers are locked until the | |
585 | * flusher can sequence them out. Dirty pure-data buffers can be written. | |
586 | * Clean buffers can be passively released. | |
66325755 | 587 | */ |
typedef enum hammer_io_type {
	HAMMER_IOTYPE_VOLUME,		/* hammer_volume (meta-data) */
	HAMMER_IOTYPE_META_BUFFER,	/* hammer_buffer containing meta-data */
	HAMMER_IOTYPE_UNDO_BUFFER,	/* hammer_buffer containing undo-data */
	HAMMER_IOTYPE_DATA_BUFFER,	/* hammer_buffer containing pure-data */
	HAMMER_IOTYPE_DUMMY		/* hammer_buffer without valid data */
} hammer_io_type_t;
66325755 | 595 | |
typedef struct hammer_io {
	struct hammer_lock lock;
	hammer_io_type_t type;		/* HAMMER_IOTYPE_* */
	struct hammer_mount *hmp;
	struct hammer_volume *volume;	/* volume backing this io */
	RB_ENTRY(hammer_io) rb_node;	/* if modified */
	TAILQ_ENTRY(hammer_io) iorun_entry; /* iorun_list */
	struct hammer_mod_rb_tree *mod_root; /* tree rb_node is on, if modified */
	struct buf *bp;			/* backing buffer cache buffer */
	int64_t offset;			/* volume offset */
	int bytes;			/* buffer cache buffer size */
	int modify_refs;

	/*
	 * These can be modified at any time by the backend while holding
	 * io_token, due to bio_done and hammer_io_complete() callbacks.
	 */
	u_int running : 1;	/* bp write IO in progress */
	u_int waiting : 1;	/* someone is waiting on us */
	u_int ioerror : 1;	/* abort on io-error */
	u_int unusedA : 29;

	/*
	 * These can only be modified by the frontend while holding
	 * fs_token, or by the backend while holding the io interlocked
	 * with no references (which will block the frontend when it
	 * tries to reference it).
	 *
	 * WARNING! SMP RACES will create havoc if the callbacks ever tried
	 *	    to modify any of these outside the above restrictions.
	 */
	u_int modified : 1;	/* bp's data was modified */
	u_int released : 1;	/* bp released (w/ B_LOCKED set) */
	u_int waitdep : 1;	/* flush waits for dependancies */
	u_int recovered : 1;	/* has recovery ref */
	u_int waitmod : 1;	/* waiting for modify_refs */
	u_int reclaim : 1;	/* reclaim requested */
	u_int gencrc : 1;	/* crc needs to be generated */
	u_int unusedB : 25;
} *hammer_io_t;
8cd0a023 | 636 | |
af209b0f MD |
637 | #define HAMMER_CLUSTER_SIZE (64 * 1024) |
638 | #if HAMMER_CLUSTER_SIZE > MAXBSIZE | |
639 | #undef HAMMER_CLUSTER_SIZE | |
640 | #define HAMMER_CLUSTER_SIZE MAXBSIZE | |
641 | #endif | |
af209b0f | 642 | |
66325755 | 643 | /* |
8cd0a023 | 644 | * In-memory volume representing on-disk buffer |
66325755 | 645 | */ |
typedef struct hammer_volume {
	struct hammer_io io;	/* must be at offset 0 (HAMMER_ITOV casts) */
	RB_ENTRY(hammer_volume) rb_node;	/* hammer_vol_rb_tree linkage */
	hammer_volume_ondisk_t ondisk;	/* in-memory copy of volume header */
	int32_t vol_no;			/* volume number (RB-tree key) */
	hammer_off_t maxbuf_off;	/* Maximum buffer offset (zone-2) */
	char *vol_name;			/* volume name */
	struct vnode *devvp;		/* underlying device vnode */
	int vol_flags;
} *hammer_volume_t;
8cd0a023 | 656 | |
ff66f880 TK |
657 | #define HAMMER_ITOV(iop) ((hammer_volume_t)(iop)) |
658 | ||
66325755 | 659 | /* |
f176517c | 660 | * In-memory buffer representing an on-disk buffer. |
66325755 | 661 | */ |
typedef struct hammer_buffer {
	struct hammer_io io;	/* must be at offset 0 (HAMMER_ITOB casts) */
	RB_ENTRY(hammer_buffer) rb_node;	/* hammer_buf_rb_tree linkage */
	void *ondisk;			/* mapped on-disk buffer data */
	hammer_off_t zoneX_offset;	/* zone-X (logical) offset */
	hammer_off_t zone2_offset;	/* zone-2 (raw buffer) offset */
	struct hammer_reserve *resv;	/* space reservation, if any */
	struct hammer_node_list node_list; /* B-Tree nodes backed by buffer */
} *hammer_buffer_t;
8cd0a023 | 671 | |
195f6076 TK |
672 | #define HAMMER_ITOB(iop) ((hammer_buffer_t)(iop)) |
673 | ||
8cd0a023 MD |
674 | /* |
675 | * In-memory B-Tree node, representing an on-disk B-Tree node. | |
676 | * | |
677 | * This is a hang-on structure which is backed by a hammer_buffer, | |
f176517c TK |
678 | * and used for fine-grained locking of B-Tree nodes in order to |
679 | * properly control lock ordering. | |
8cd0a023 | 680 | */ |
22a0040d | 681 | typedef struct hammer_node { |
8cd0a023 MD |
682 | struct hammer_lock lock; /* node-by-node lock */ |
683 | TAILQ_ENTRY(hammer_node) entry; /* per-buffer linkage */ | |
f176517c | 684 | RB_ENTRY(hammer_node) rb_node; /* per-mount linkage */ |
47197d71 | 685 | hammer_off_t node_offset; /* full offset spec */ |
40043e7f | 686 | struct hammer_mount *hmp; |
562d34c2 | 687 | hammer_buffer_t buffer; /* backing buffer */ |
8cd0a023 | 688 | hammer_node_ondisk_t ondisk; /* ptr to on-disk structure */ |
b3bad96f | 689 | TAILQ_HEAD(, hammer_cursor) cursor_list; /* deadlock recovery */ |
bcac4bbb | 690 | struct hammer_node_cache_list cache_list; /* passive caches */ |
b3deaf57 | 691 | int flags; |
102e7dca | 692 | #if 0 |
e2a02b72 | 693 | int cursor_exclreq_count; |
102e7dca | 694 | #endif |
22a0040d | 695 | } *hammer_node_t; |
8cd0a023 | 696 | |
b3deaf57 MD |
697 | #define HAMMER_NODE_DELETED 0x0001 |
698 | #define HAMMER_NODE_FLUSH 0x0002 | |
bcac4bbb MD |
699 | #define HAMMER_NODE_CRCGOOD 0x0004 |
700 | #define HAMMER_NODE_NEEDSCRC 0x0008 | |
c82af904 | 701 | #define HAMMER_NODE_NEEDSMIRROR 0x0010 |
4c286c36 | 702 | #define HAMMER_NODE_CRCBAD 0x0020 |
fa2b9a03 | 703 | #define HAMMER_NODE_NONLINEAR 0x0040 /* linear heuristic */ |
4c286c36 MD |
704 | |
705 | #define HAMMER_NODE_CRCANY (HAMMER_NODE_CRCGOOD | HAMMER_NODE_CRCBAD) | |
b3deaf57 | 706 | |
b33e2cc0 | 707 | /* |
1775b6a0 MD |
708 | * List of locked nodes. This structure is used to lock potentially large |
709 | * numbers of nodes as an aid for complex B-Tree operations. | |
b33e2cc0 | 710 | */ |
1775b6a0 MD |
711 | struct hammer_node_lock; |
712 | TAILQ_HEAD(hammer_node_lock_list, hammer_node_lock); | |
713 | ||
22a0040d | 714 | typedef struct hammer_node_lock { |
1775b6a0 MD |
715 | TAILQ_ENTRY(hammer_node_lock) entry; |
716 | struct hammer_node_lock_list list; | |
717 | struct hammer_node_lock *parent; | |
b33e2cc0 | 718 | hammer_node_t node; |
1775b6a0 MD |
719 | hammer_node_ondisk_t copy; /* copy of on-disk data */ |
720 | int index; /* index of this node in parent */ | |
721 | int count; /* count children */ | |
722 | int flags; | |
22a0040d | 723 | } *hammer_node_lock_t; |
b33e2cc0 | 724 | |
1775b6a0 | 725 | #define HAMMER_NODE_LOCK_UPDATED 0x0001 |
24cf83d2 | 726 | #define HAMMER_NODE_LOCK_LCACHE 0x0002 |
b33e2cc0 | 727 | |
cebe9493 MD |
728 | /* |
729 | * The reserve structure prevents the blockmap from allocating | |
a981af19 | 730 | * out of a reserved big-block. Such reservations are used by |
cebe9493 MD |
731 | * the direct-write mechanism. |
732 | * | |
733 | * The structure is also used to hold off on reallocations of | |
d165c90a | 734 | * big-blocks from the freemap until flush dependancies have |
cebe9493 MD |
735 | * been dealt with. |
736 | */ | |
22a0040d | 737 | typedef struct hammer_reserve { |
0832c9bb | 738 | RB_ENTRY(hammer_reserve) rb_node; |
cebe9493 | 739 | TAILQ_ENTRY(hammer_reserve) delay_entry; |
f8a7a900 | 740 | int flg_no; |
cb51be26 | 741 | int flags; |
0832c9bb | 742 | int refs; |
cb51be26 | 743 | int zone; |
df301614 | 744 | int append_off; |
cebe9493 | 745 | hammer_off_t zone_offset; |
22a0040d | 746 | } *hammer_reserve_t; |
0832c9bb | 747 | |
cb51be26 | 748 | #define HAMMER_RESF_ONDELAY 0x0001 |
5e435c92 | 749 | #define HAMMER_RESF_LAYER2FREE 0x0002 |
cb51be26 | 750 | |
8cd0a023 MD |
751 | #include "hammer_cursor.h" |
752 | ||
e8599db1 | 753 | /* |
cebe9493 MD |
754 | * The undo structure tracks recent undos to avoid laying down duplicate |
755 | * undos within a flush group, saving us a significant amount of overhead. | |
756 | * | |
757 | * This is strictly a heuristic. | |
e8599db1 | 758 | */ |
7a61b85d MD |
759 | #define HAMMER_MAX_UNDOS 1024 |
760 | #define HAMMER_MAX_FLUSHERS 4 | |
e8599db1 | 761 | |
22a0040d | 762 | typedef struct hammer_undo { |
e8599db1 MD |
763 | RB_ENTRY(hammer_undo) rb_node; |
764 | TAILQ_ENTRY(hammer_undo) lru_entry; | |
765 | hammer_off_t offset; | |
766 | int bytes; | |
22a0040d | 767 | } *hammer_undo_t; |
e8599db1 | 768 | |
af209b0f | 769 | struct hammer_flusher_info; |
7a61b85d | 770 | TAILQ_HEAD(hammer_flusher_info_list, hammer_flusher_info); |
da2da375 MD |
771 | |
772 | struct hammer_flusher { | |
773 | int signal; /* flusher thread sequencer */ | |
e86903d8 | 774 | int done; /* last completed flush group */ |
37646115 | 775 | int next; /* next unallocated flg seqno */ |
da2da375 MD |
776 | int group_lock; /* lock sequencing of the next flush */ |
777 | int exiting; /* request master exit */ | |
da2da375 MD |
778 | thread_t td; /* master flusher thread */ |
779 | hammer_tid_t tid; /* last flushed transaction id */ | |
780 | int finalize_want; /* serialize finalization */ | |
781 | struct hammer_lock finalize_lock; /* serialize finalization */ | |
782 | struct hammer_transaction trans; /* shared transaction */ | |
7a61b85d MD |
783 | struct hammer_flusher_info_list run_list; |
784 | struct hammer_flusher_info_list ready_list; | |
da2da375 MD |
785 | }; |
786 | ||
9192654c MD |
787 | #define HAMMER_FLUSH_UNDOS_RELAXED 0 |
788 | #define HAMMER_FLUSH_UNDOS_FORCED 1 | |
789 | #define HAMMER_FLUSH_UNDOS_AUTO 2 | |
8750964d MD |
790 | /* |
791 | * Internal hammer mount data structure | |
792 | */ | |
22a0040d | 793 | typedef struct hammer_mount { |
8750964d | 794 | struct mount *mp; |
8750964d | 795 | struct hammer_ino_rb_tree rb_inos_root; |
73896937 | 796 | struct hammer_redo_rb_tree rb_redo_root; |
8750964d | 797 | struct hammer_vol_rb_tree rb_vols_root; |
40043e7f | 798 | struct hammer_nod_rb_tree rb_nods_root; |
e8599db1 | 799 | struct hammer_und_rb_tree rb_undo_root; |
0832c9bb MD |
800 | struct hammer_res_rb_tree rb_resv_root; |
801 | struct hammer_buf_rb_tree rb_bufs_root; | |
5fa5c92f | 802 | struct hammer_pfs_rb_tree rb_pfsm_root; |
a6b7735b | 803 | hammer_volume_t rootvol; |
47197d71 MD |
804 | struct hammer_base_elm root_btree_beg; |
805 | struct hammer_base_elm root_btree_end; | |
bac808fe MD |
806 | |
807 | struct malloc_type *m_misc; | |
808 | struct malloc_type *m_inodes; | |
809 | ||
cdb6e4e6 | 810 | int flags; /* HAMMER_MOUNT_xxx flags */ |
195c19a1 MD |
811 | int hflags; |
812 | int ronly; | |
813 | int nvolumes; | |
f43e824c | 814 | int master_id; /* default 0, no-mirror -1, otherwise 1-15 */ |
44a83111 | 815 | int version; /* hammer filesystem version to use */ |
e63644f0 | 816 | int rsv_inodes; /* reserved space due to dirty inodes */ |
a7e9bef1 | 817 | int64_t rsv_databytes; /* reserved space due to record data */ |
e63644f0 | 818 | int rsv_recs; /* reserved space due to dirty records */ |
a981af19 | 819 | int rsv_fromdelay; /* big-blocks reserved due to flush delay */ |
7a61b85d | 820 | int undo_rec_limit; /* based on size of undo area */ |
da2da375 | 821 | |
865c9609 MN |
822 | int volume_to_remove; /* volume that is currently being removed */ |
823 | ||
9f5097dc | 824 | int count_inodes; /* total number of inodes */ |
af209b0f | 825 | int count_iqueued; /* inodes queued to flusher */ |
e2a02b72 | 826 | int count_reclaims; /* inodes pending reclaim by flusher */ |
da2da375 MD |
827 | |
828 | struct hammer_flusher flusher; | |
829 | ||
855942b6 | 830 | u_int check_interrupt; |
3e583440 | 831 | u_int check_yield; |
90da8fc8 | 832 | hammer_uuid_t fsid; |
1afb73cf MD |
833 | struct hammer_mod_rb_tree volu_root; /* dirty undo buffers */ |
834 | struct hammer_mod_rb_tree undo_root; /* dirty undo buffers */ | |
835 | struct hammer_mod_rb_tree data_root; /* dirty data buffers */ | |
836 | struct hammer_mod_rb_tree meta_root; /* dirty meta bufs */ | |
837 | struct hammer_mod_rb_tree lose_root; /* loose buffers */ | |
3583bbb4 MD |
838 | long locked_dirty_space; /* meta/volu count */ |
839 | long io_running_space; /* io_token */ | |
0729c8c8 | 840 | int objid_cache_count; |
cdb6e4e6 MD |
841 | int error; /* critical I/O error */ |
842 | struct krate krate; /* rate limited kprintf */ | |
a8d31329 | 843 | struct krate kdiag; /* rate limited kprintf */ |
ddfdf542 | 844 | hammer_tid_t asof; /* snapshot mount */ |
4889cbd4 MD |
845 | hammer_tid_t next_tid; |
846 | hammer_tid_t flush_tid1; /* flusher tid sequencing */ | |
847 | hammer_tid_t flush_tid2; /* flusher tid sequencing */ | |
a981af19 | 848 | int64_t copy_stat_freebigblocks; /* number of free big-blocks */ |
46137e17 TK |
849 | uint32_t undo_seqno; /* UNDO/REDO FIFO seqno */ |
850 | uint32_t recover_stage2_seqno; /* REDO recovery seqno */ | |
c58123da | 851 | hammer_off_t recover_stage2_offset; /* REDO recovery offset */ |
e63644f0 | 852 | |
513ca7d7 | 853 | struct netexport export; |
9480ff55 | 854 | struct hammer_lock sync_lock; |
d99d6bf5 MD |
855 | struct hammer_lock undo_lock; |
856 | struct hammer_lock blkmap_lock; | |
83f2a3aa | 857 | struct hammer_lock snapshot_lock; |
52e547e3 | 858 | struct hammer_lock volume_lock; |
0729c8c8 | 859 | struct hammer_blockmap blockmap[HAMMER_MAX_ZONES]; |
e8599db1 MD |
860 | struct hammer_undo undos[HAMMER_MAX_UNDOS]; |
861 | int undo_alloc; | |
862 | TAILQ_HEAD(, hammer_undo) undo_lru_list; | |
cebe9493 | 863 | TAILQ_HEAD(, hammer_reserve) delay_list; |
7a61b85d | 864 | struct hammer_flush_group_list flush_group_list; |
37646115 | 865 | hammer_flush_group_t fill_flush_group; |
7b6ccb11 | 866 | hammer_flush_group_t next_flush_group; |
0729c8c8 | 867 | TAILQ_HEAD(, hammer_objid_cache) objid_cache_list; |
7bc5b8c2 | 868 | TAILQ_HEAD(, hammer_reclaim) reclaim_list; |
eddadaee | 869 | TAILQ_HEAD(, hammer_io) iorun_list; |
e98f1b96 | 870 | |
b0aab9b9 MD |
871 | struct lwkt_token fs_token; /* high level */ |
872 | struct lwkt_token io_token; /* low level (IO callback) */ | |
873 | ||
e98f1b96 | 874 | struct hammer_inostats inostats[HAMMER_INOSTATS_HSIZE]; |
78249d7f | 875 | uint64_t volume_map[4]; /* 256 bits bitfield */ |
22a0040d | 876 | } *hammer_mount_t; |
8750964d | 877 | |
cdb6e4e6 | 878 | #define HAMMER_MOUNT_CRITICAL_ERROR 0x0001 |
82010f9f | 879 | #define HAMMER_MOUNT_FLUSH_RECOVERY 0x0002 |
47f363f1 | 880 | #define HAMMER_MOUNT_REDO_SYNC 0x0004 |
c58123da MD |
881 | #define HAMMER_MOUNT_REDO_RECOVERY_REQ 0x0008 |
882 | #define HAMMER_MOUNT_REDO_RECOVERY_RUN 0x0010 | |
9f5097dc | 883 | |
75e4d787 TK |
884 | #define HAMMER_VOLUME_NUMBER_FOREACH(hmp, n) \ |
885 | for (n = 0; n < HAMMER_MAX_VOLUMES; n++) \ | |
886 | if (hammer_volume_number_test(hmp, n)) | |
78249d7f | 887 | |
f6468e9e MD |
888 | /* |
889 | * Minium buffer cache bufs required to rebalance the B-Tree. | |
890 | * This is because we must hold the children and the children's children | |
891 | * locked. Even this might not be enough if things are horribly out | |
892 | * of balance. | |
893 | */ | |
894 | #define HAMMER_REBALANCE_MIN_BUFS \ | |
895 | (HAMMER_BTREE_LEAF_ELMS * HAMMER_BTREE_LEAF_ELMS) | |
896 | ||
1caa2035 | 897 | #endif /* _KERNEL || _KERNEL_STRUCTURES */ |
8750964d | 898 | |
23e66b3b | 899 | #if defined(_KERNEL) |
a7e9bef1 MD |
900 | /* |
901 | * checkspace slop (8MB chunks), higher numbers are more conservative. | |
902 | */ | |
93291532 MD |
903 | #define HAMMER_CHKSPC_REBLOCK 25 |
904 | #define HAMMER_CHKSPC_MIRROR 20 | |
905 | #define HAMMER_CHKSPC_WRITE 20 | |
906 | #define HAMMER_CHKSPC_CREATE 20 | |
907 | #define HAMMER_CHKSPC_REMOVE 10 | |
7b6ccb11 | 908 | #define HAMMER_CHKSPC_EMERGENCY 0 |
a7e9bef1 | 909 | |
8750964d | 910 | extern struct vop_ops hammer_vnode_vops; |
7a04d74f MD |
911 | extern struct vop_ops hammer_spec_vops; |
912 | extern struct vop_ops hammer_fifo_vops; | |
427e5fc6 | 913 | |
2f85fa4d | 914 | extern int hammer_debug_io; |
d5ef456e | 915 | extern int hammer_debug_general; |
e8599db1 | 916 | extern int hammer_debug_inode; |
7d683b0f | 917 | extern int hammer_debug_locks; |
b3deaf57 | 918 | extern int hammer_debug_btree; |
d113fda1 | 919 | extern int hammer_debug_tid; |
b33e2cc0 | 920 | extern int hammer_debug_recover; |
fc73edd8 | 921 | extern int hammer_debug_critical; |
1b0ab2c3 | 922 | extern int hammer_cluster_enable; |
e2a02b72 | 923 | extern int hammer_tdmux_ticks; |
7a61b85d | 924 | extern int hammer_count_fsyncs; |
b3deaf57 | 925 | extern int hammer_count_inodes; |
af209b0f | 926 | extern int hammer_count_iqueued; |
e2a02b72 | 927 | extern int hammer_count_reclaims; |
b3deaf57 MD |
928 | extern int hammer_count_records; |
929 | extern int hammer_count_record_datas; | |
930 | extern int hammer_count_volumes; | |
b3deaf57 MD |
931 | extern int hammer_count_buffers; |
932 | extern int hammer_count_nodes; | |
cb51be26 MD |
933 | extern int64_t hammer_stats_btree_lookups; |
934 | extern int64_t hammer_stats_btree_searches; | |
935 | extern int64_t hammer_stats_btree_inserts; | |
936 | extern int64_t hammer_stats_btree_deletes; | |
937 | extern int64_t hammer_stats_btree_elements; | |
938 | extern int64_t hammer_stats_btree_splits; | |
939 | extern int64_t hammer_stats_btree_iterations; | |
39d8fd63 | 940 | extern int64_t hammer_stats_btree_root_iterations; |
cb51be26 | 941 | extern int64_t hammer_stats_record_iterations; |
ce0138a6 MD |
942 | extern int64_t hammer_stats_file_read; |
943 | extern int64_t hammer_stats_file_write; | |
ce0138a6 MD |
944 | extern int64_t hammer_stats_disk_read; |
945 | extern int64_t hammer_stats_disk_write; | |
946 | extern int64_t hammer_stats_inode_flushes; | |
947 | extern int64_t hammer_stats_commits; | |
89e744ce | 948 | extern int64_t hammer_stats_undo; |
6048b411 | 949 | extern int64_t hammer_stats_redo; |
3583bbb4 | 950 | extern long hammer_count_dirtybufspace; |
a99b9ea2 | 951 | extern int hammer_count_refedbufs; |
0832c9bb | 952 | extern int hammer_count_reservations; |
3583bbb4 MD |
953 | extern long hammer_count_io_running_read; |
954 | extern long hammer_count_io_running_write; | |
a99b9ea2 | 955 | extern int hammer_count_io_locked; |
3583bbb4 | 956 | extern long hammer_limit_dirtybufspace; |
47637bff | 957 | extern int hammer_limit_recs; |
de996e86 | 958 | extern int hammer_limit_inode_recs; |
e2a02b72 | 959 | extern int hammer_limit_reclaims; |
e2ef7a95 | 960 | extern int hammer_live_dedup_cache_size; |
9192654c | 961 | extern int hammer_limit_redo; |
cb51be26 | 962 | extern int hammer_verify_zone; |
1b0ab2c3 | 963 | extern int hammer_verify_data; |
b4f86ea3 | 964 | extern int hammer_double_buffer; |
283a4a38 | 965 | extern int hammer_btree_full_undo; |
3e583440 | 966 | extern int hammer_yield_check; |
6f3d87c0 | 967 | extern int hammer_fsync_mode; |
21fde338 | 968 | extern int hammer_autoflush; |
7d683b0f | 969 | extern int64_t hammer_contention_count; |
b3deaf57 | 970 | |
cdb6e4e6 MD |
971 | void hammer_critical_error(hammer_mount_t hmp, hammer_inode_t ip, |
972 | int error, const char *msg); | |
8750964d MD |
973 | int hammer_vop_inactive(struct vop_inactive_args *); |
974 | int hammer_vop_reclaim(struct vop_reclaim_args *); | |
e1067862 TK |
975 | int hammer_get_vnode(hammer_inode_t ip, struct vnode **vpp); |
976 | hammer_inode_t hammer_get_inode(hammer_transaction_t trans, | |
adf01747 | 977 | hammer_inode_t dip, int64_t obj_id, |
46137e17 | 978 | hammer_tid_t asof, uint32_t localization, |
ddfdf542 | 979 | int flags, int *errorp); |
e1067862 | 980 | hammer_inode_t hammer_get_dummy_inode(hammer_transaction_t trans, |
4c286c36 | 981 | hammer_inode_t dip, int64_t obj_id, |
46137e17 | 982 | hammer_tid_t asof, uint32_t localization, |
4c286c36 | 983 | int flags, int *errorp); |
e1067862 | 984 | hammer_inode_t hammer_find_inode(hammer_transaction_t trans, |
39d8fd63 | 985 | int64_t obj_id, hammer_tid_t asof, |
46137e17 | 986 | uint32_t localization); |
43c665ae MD |
987 | void hammer_scan_inode_snapshots(hammer_mount_t hmp, |
988 | hammer_inode_info_t iinfo, | |
989 | int (*callback)(hammer_inode_t ip, void *data), | |
990 | void *data); | |
e1067862 TK |
991 | void hammer_put_inode(hammer_inode_t ip); |
992 | void hammer_put_inode_ref(hammer_inode_t ip); | |
e98f1b96 | 993 | void hammer_inode_waitreclaims(hammer_transaction_t trans); |
e1067862 | 994 | void hammer_inode_dirty(hammer_inode_t ip); |
66325755 | 995 | |
deabdbfb | 996 | int hammer_unload_volume(hammer_volume_t volume, void *data); |
51c35492 MD |
997 | int hammer_adjust_volume_mode(hammer_volume_t volume, void *data __unused); |
998 | ||
07be83b8 | 999 | int hammer_unload_buffer(hammer_buffer_t buffer, void *data); |
7c19b529 | 1000 | int hammer_install_volume(hammer_mount_t hmp, const char *volname, |
deabdbfb | 1001 | struct vnode *devvp, void *data); |
1b0ab2c3 | 1002 | int hammer_mountcheck_volumes(hammer_mount_t hmp); |
c302e844 | 1003 | int hammer_get_installed_volumes(hammer_mount_t hmp); |
8cd0a023 | 1004 | |
e469566b | 1005 | int hammer_mem_add(hammer_record_t record); |
45a014dc | 1006 | int hammer_ip_lookup(hammer_cursor_t cursor); |
4e17f465 | 1007 | int hammer_ip_first(hammer_cursor_t cursor); |
a89aec1b | 1008 | int hammer_ip_next(hammer_cursor_t cursor); |
8cd0a023 | 1009 | int hammer_ip_resolve_data(hammer_cursor_t cursor); |
e63644f0 MD |
1010 | int hammer_ip_delete_record(hammer_cursor_t cursor, hammer_inode_t ip, |
1011 | hammer_tid_t tid); | |
83f2a3aa MD |
1012 | int hammer_create_at_cursor(hammer_cursor_t cursor, |
1013 | hammer_btree_leaf_elm_t leaf, void *udata, int mode); | |
602c6cb8 | 1014 | int hammer_delete_at_cursor(hammer_cursor_t cursor, int delete_flags, |
46137e17 | 1015 | hammer_tid_t delete_tid, uint32_t delete_ts, |
842e7a70 | 1016 | int track, int64_t *stat_bytes); |
b3deaf57 | 1017 | int hammer_ip_check_directory_empty(hammer_transaction_t trans, |
98f7132d | 1018 | hammer_inode_t ip); |
fbc6e32a | 1019 | int hammer_sync_hmp(hammer_mount_t hmp, int waitfor); |
f36a9737 MD |
1020 | int hammer_queue_inodes_flusher(hammer_mount_t hmp, int waitfor); |
1021 | ||
8cd0a023 | 1022 | hammer_record_t |
11ad5ade | 1023 | hammer_alloc_mem_record(hammer_inode_t ip, int data_len); |
d36ec43b | 1024 | void hammer_flush_record_done(hammer_record_t record, int error); |
af209b0f | 1025 | void hammer_wait_mem_record_ident(hammer_record_t record, const char *ident); |
b3deaf57 | 1026 | void hammer_rel_mem_record(hammer_record_t record); |
b84de5af | 1027 | |
6a37e7e4 | 1028 | int hammer_cursor_up(hammer_cursor_t cursor); |
f36a9737 | 1029 | int hammer_cursor_up_locked(hammer_cursor_t cursor); |
8cd0a023 | 1030 | int hammer_cursor_down(hammer_cursor_t cursor); |
6a37e7e4 | 1031 | int hammer_cursor_upgrade(hammer_cursor_t cursor); |
7bc5b8c2 | 1032 | int hammer_cursor_upgrade_node(hammer_cursor_t cursor); |
6a37e7e4 | 1033 | void hammer_cursor_downgrade(hammer_cursor_t cursor); |
bb29b5d8 MD |
1034 | int hammer_cursor_upgrade2(hammer_cursor_t c1, hammer_cursor_t c2); |
1035 | void hammer_cursor_downgrade2(hammer_cursor_t c1, hammer_cursor_t c2); | |
32c90105 MD |
1036 | int hammer_cursor_seek(hammer_cursor_t cursor, hammer_node_t node, |
1037 | int index); | |
af209b0f | 1038 | void hammer_lock_ex_ident(struct hammer_lock *lock, const char *ident); |
8cd0a023 MD |
1039 | int hammer_lock_ex_try(struct hammer_lock *lock); |
1040 | void hammer_lock_sh(struct hammer_lock *lock); | |
47637bff | 1041 | int hammer_lock_sh_try(struct hammer_lock *lock); |
bb29b5d8 MD |
1042 | int hammer_lock_upgrade(struct hammer_lock *lock, int shcount); |
1043 | void hammer_lock_downgrade(struct hammer_lock *lock, int shcount); | |
b3bad96f | 1044 | int hammer_lock_status(struct hammer_lock *lock); |
427e5fc6 | 1045 | void hammer_unlock(struct hammer_lock *lock); |
66325755 | 1046 | void hammer_ref(struct hammer_lock *lock); |
250aec18 MD |
1047 | int hammer_ref_interlock(struct hammer_lock *lock); |
1048 | int hammer_ref_interlock_true(struct hammer_lock *lock); | |
1049 | void hammer_ref_interlock_done(struct hammer_lock *lock); | |
1050 | void hammer_rel(struct hammer_lock *lock); | |
1051 | int hammer_rel_interlock(struct hammer_lock *lock, int locked); | |
1052 | void hammer_rel_interlock_done(struct hammer_lock *lock, int orig_locked); | |
1053 | int hammer_get_interlock(struct hammer_lock *lock); | |
1054 | int hammer_try_interlock_norefs(struct hammer_lock *lock); | |
1055 | void hammer_put_interlock(struct hammer_lock *lock, int error); | |
8cd0a023 | 1056 | |
2f85fa4d MD |
1057 | void hammer_sync_lock_ex(hammer_transaction_t trans); |
1058 | void hammer_sync_lock_sh(hammer_transaction_t trans); | |
47637bff | 1059 | int hammer_sync_lock_sh_try(hammer_transaction_t trans); |
2f85fa4d MD |
1060 | void hammer_sync_unlock(hammer_transaction_t trans); |
1061 | ||
90da8fc8 TK |
1062 | uint32_t hammer_to_unix_xid(hammer_uuid_t *uuid); |
1063 | void hammer_guid_to_uuid(hammer_uuid_t *uuid, uint32_t guid); | |
46137e17 TK |
1064 | void hammer_time_to_timespec(uint64_t xtime, struct timespec *ts); |
1065 | uint64_t hammer_timespec_to_time(struct timespec *ts); | |
bc6c1f13 | 1066 | int hammer_str_to_tid(const char *str, int *ispfsp, |
46137e17 | 1067 | hammer_tid_t *tidp, uint32_t *localizationp); |
5a64efa1 MD |
1068 | hammer_tid_t hammer_alloc_objid(hammer_mount_t hmp, hammer_inode_t dip, |
1069 | int64_t namekey); | |
0729c8c8 MD |
1070 | void hammer_clear_objid(hammer_inode_t dip); |
1071 | void hammer_destroy_objid_cache(hammer_mount_t hmp); | |
8cd0a023 | 1072 | |
e8599db1 | 1073 | int hammer_enter_undo_history(hammer_mount_t hmp, hammer_off_t offset, |
b3bad96f | 1074 | int bytes); |
e8599db1 | 1075 | void hammer_clear_undo_history(hammer_mount_t hmp); |
46137e17 TK |
1076 | enum vtype hammer_get_vnode_type(uint8_t obj_type); |
1077 | int hammer_get_dtype(uint8_t obj_type); | |
1078 | uint8_t hammer_get_obj_type(enum vtype vtype); | |
6e414d58 | 1079 | int64_t hammer_direntry_namekey(hammer_inode_t dip, const void *name, int len, |
46137e17 | 1080 | uint32_t *max_iterationsp); |
e63644f0 | 1081 | int hammer_nohistory(hammer_inode_t ip); |
427e5fc6 | 1082 | |
36f82b23 | 1083 | int hammer_init_cursor(hammer_transaction_t trans, hammer_cursor_t cursor, |
b3bad96f | 1084 | hammer_node_cache_t cache, hammer_inode_t ip); |
4e17f465 | 1085 | void hammer_normalize_cursor(hammer_cursor_t cursor); |
8cd0a023 | 1086 | void hammer_done_cursor(hammer_cursor_t cursor); |
b3bad96f | 1087 | int hammer_recover_cursor(hammer_cursor_t cursor); |
982be4bf MD |
1088 | void hammer_unlock_cursor(hammer_cursor_t cursor); |
1089 | int hammer_lock_cursor(hammer_cursor_t cursor); | |
3f43fb33 MD |
1090 | hammer_cursor_t hammer_push_cursor(hammer_cursor_t ocursor); |
1091 | void hammer_pop_cursor(hammer_cursor_t ocursor, hammer_cursor_t ncursor); | |
b3bad96f MD |
1092 | |
1093 | void hammer_cursor_replaced_node(hammer_node_t onode, hammer_node_t nnode); | |
1094 | void hammer_cursor_removed_node(hammer_node_t onode, hammer_node_t parent, | |
1095 | int index); | |
1096 | void hammer_cursor_split_node(hammer_node_t onode, hammer_node_t nnode, | |
1097 | int index); | |
bbb01e14 MD |
1098 | void hammer_cursor_moved_element(hammer_node_t oparent, int pindex, |
1099 | hammer_node_t onode, int oindex, | |
1100 | hammer_node_t nnode, int nindex); | |
1775b6a0 MD |
1101 | void hammer_cursor_parent_changed(hammer_node_t node, hammer_node_t oparent, |
1102 | hammer_node_t nparent, int nindex); | |
b3bad96f MD |
1103 | void hammer_cursor_inserted_element(hammer_node_t node, int index); |
1104 | void hammer_cursor_deleted_element(hammer_node_t node, int index); | |
b9107f58 | 1105 | void hammer_cursor_invalidate_cache(hammer_cursor_t cursor); |
8cd0a023 MD |
1106 | |
1107 | int hammer_btree_lookup(hammer_cursor_t cursor); | |
d26d0ae9 | 1108 | int hammer_btree_first(hammer_cursor_t cursor); |
32c90105 | 1109 | int hammer_btree_last(hammer_cursor_t cursor); |
8cd0a023 MD |
1110 | int hammer_btree_extract(hammer_cursor_t cursor, int flags); |
1111 | int hammer_btree_iterate(hammer_cursor_t cursor); | |
32c90105 | 1112 | int hammer_btree_iterate_reverse(hammer_cursor_t cursor); |
11ad5ade | 1113 | int hammer_btree_insert(hammer_cursor_t cursor, |
602c6cb8 | 1114 | hammer_btree_leaf_elm_t elm, int *doprop); |
03b6feea | 1115 | int hammer_btree_delete(hammer_cursor_t cursor, int *ndelete); |
4c038e17 | 1116 | void hammer_btree_do_propagation(hammer_cursor_t cursor, |
602c6cb8 | 1117 | hammer_btree_leaf_elm_t leaf); |
8cd0a023 | 1118 | int hammer_btree_cmp(hammer_base_elm_t key1, hammer_base_elm_t key2); |
d26d0ae9 | 1119 | int hammer_btree_chkts(hammer_tid_t ts, hammer_base_elm_t key); |
32c90105 MD |
1120 | int hammer_btree_correct_rhb(hammer_cursor_t cursor, hammer_tid_t tid); |
1121 | int hammer_btree_correct_lhb(hammer_cursor_t cursor, hammer_tid_t tid); | |
1122 | ||
90f96c37 TK |
1123 | int btree_set_parent_of_child(hammer_transaction_t trans, |
1124 | hammer_node_t node, | |
2f85fa4d | 1125 | hammer_btree_elm_t elm); |
1775b6a0 | 1126 | void hammer_node_lock_init(hammer_node_lock_t parent, hammer_node_t node); |
24cf83d2 MD |
1127 | void hammer_btree_lcache_init(hammer_mount_t hmp, hammer_node_lock_t lcache, |
1128 | int depth); | |
1129 | void hammer_btree_lcache_free(hammer_mount_t hmp, hammer_node_lock_t lcache); | |
1775b6a0 | 1130 | int hammer_btree_lock_children(hammer_cursor_t cursor, int depth, |
24cf83d2 MD |
1131 | hammer_node_lock_t parent, |
1132 | hammer_node_lock_t lcache); | |
1775b6a0 MD |
1133 | void hammer_btree_lock_copy(hammer_cursor_t cursor, |
1134 | hammer_node_lock_t parent); | |
7ddc70d1 | 1135 | int hammer_btree_sync_copy(hammer_cursor_t cursor, |
1775b6a0 | 1136 | hammer_node_lock_t parent); |
24cf83d2 MD |
1137 | void hammer_btree_unlock_children(hammer_mount_t hmp, |
1138 | hammer_node_lock_t parent, | |
1139 | hammer_node_lock_t lcache); | |
bcac4bbb | 1140 | int hammer_btree_search_node(hammer_base_elm_t elm, hammer_node_ondisk_t node); |
82010f9f MD |
1141 | hammer_node_t hammer_btree_get_parent(hammer_transaction_t trans, |
1142 | hammer_node_t node, int *parent_indexp, | |
c82af904 | 1143 | int *errorp, int try_exclusive); |
b33e2cc0 | 1144 | |
c0ade690 | 1145 | void hammer_print_btree_node(hammer_node_ondisk_t ondisk); |
a17eeab0 | 1146 | void hammer_print_btree_elm(hammer_btree_elm_t elm); |
8750964d | 1147 | |
4af0f405 | 1148 | void *hammer_bread(hammer_mount_t hmp, hammer_off_t off, |
562d34c2 | 1149 | int *errorp, hammer_buffer_t *bufferp); |
4af0f405 | 1150 | void *hammer_bnew(hammer_mount_t hmp, hammer_off_t off, |
562d34c2 | 1151 | int *errorp, hammer_buffer_t *bufferp); |
4af0f405 | 1152 | void *hammer_bread_ext(hammer_mount_t hmp, hammer_off_t off, int bytes, |
562d34c2 | 1153 | int *errorp, hammer_buffer_t *bufferp); |
4af0f405 | 1154 | void *hammer_bnew_ext(hammer_mount_t hmp, hammer_off_t off, int bytes, |
562d34c2 | 1155 | int *errorp, hammer_buffer_t *bufferp); |
8cd0a023 MD |
1156 | |
1157 | hammer_volume_t hammer_get_root_volume(hammer_mount_t hmp, int *errorp); | |
427e5fc6 | 1158 | |
8cd0a023 | 1159 | hammer_volume_t hammer_get_volume(hammer_mount_t hmp, |
427e5fc6 | 1160 | int32_t vol_no, int *errorp); |
4a2796f3 MD |
1161 | hammer_buffer_t hammer_get_buffer(hammer_mount_t hmp, hammer_off_t buf_offset, |
1162 | int bytes, int isnew, int *errorp); | |
1b0ab2c3 MD |
1163 | void hammer_sync_buffers(hammer_mount_t hmp, |
1164 | hammer_off_t base_offset, int bytes); | |
362ec2dc | 1165 | int hammer_del_buffers(hammer_mount_t hmp, |
1b0ab2c3 | 1166 | hammer_off_t base_offset, |
362ec2dc MD |
1167 | hammer_off_t zone2_offset, int bytes, |
1168 | int report_conflicts); | |
8cd0a023 | 1169 | |
fbc6e32a | 1170 | int hammer_ref_volume(hammer_volume_t volume); |
8cd0a023 MD |
1171 | int hammer_ref_buffer(hammer_buffer_t buffer); |
1172 | void hammer_flush_buffer_nodes(hammer_buffer_t buffer); | |
1173 | ||
250aec18 MD |
1174 | void hammer_rel_volume(hammer_volume_t volume, int locked); |
1175 | void hammer_rel_buffer(hammer_buffer_t buffer, int locked); | |
8cd0a023 | 1176 | |
513ca7d7 MD |
1177 | int hammer_vfs_export(struct mount *mp, int op, |
1178 | const struct export_args *export); | |
82010f9f MD |
1179 | hammer_node_t hammer_get_node(hammer_transaction_t trans, |
1180 | hammer_off_t node_offset, int isnew, int *errorp); | |
740d8317 | 1181 | void hammer_ref_node(hammer_node_t node); |
4c286c36 | 1182 | hammer_node_t hammer_ref_node_safe(hammer_transaction_t trans, |
bcac4bbb | 1183 | hammer_node_cache_t cache, int *errorp); |
8cd0a023 | 1184 | void hammer_rel_node(hammer_node_t node); |
36f82b23 MD |
1185 | void hammer_delete_node(hammer_transaction_t trans, |
1186 | hammer_node_t node); | |
bcac4bbb MD |
1187 | void hammer_cache_node(hammer_node_cache_t cache, |
1188 | hammer_node_t node); | |
1189 | void hammer_uncache_node(hammer_node_cache_t cache); | |
250aec18 | 1190 | void hammer_flush_node(hammer_node_t node, int locked); |
8cd0a023 | 1191 | |
df2ccbac MD |
1192 | hammer_node_t hammer_alloc_btree(hammer_transaction_t trans, |
1193 | hammer_off_t hint, int *errorp); | |
36f82b23 | 1194 | void *hammer_alloc_data(hammer_transaction_t trans, int32_t data_len, |
46137e17 | 1195 | uint16_t rec_type, hammer_off_t *data_offsetp, |
562d34c2 | 1196 | hammer_buffer_t *data_bufferp, |
df2ccbac | 1197 | hammer_off_t hint, int *errorp); |
bf686dbe | 1198 | |
02428fb6 | 1199 | int hammer_generate_undo(hammer_transaction_t trans, |
2426f861 | 1200 | hammer_off_t zone_offset, void *base, int len); |
6048b411 | 1201 | int hammer_generate_redo(hammer_transaction_t trans, hammer_inode_t ip, |
46137e17 | 1202 | hammer_off_t file_offset, uint32_t flags, |
47f363f1 MD |
1203 | void *base, int len); |
1204 | void hammer_generate_redo_sync(hammer_transaction_t trans); | |
73896937 MD |
1205 | void hammer_redo_fifo_start_flush(hammer_inode_t ip); |
1206 | void hammer_redo_fifo_end_flush(hammer_inode_t ip); | |
1207 | ||
4c09d9c4 | 1208 | void hammer_format_undo(hammer_mount_t hmp, void *base, uint32_t seqno); |
02428fb6 | 1209 | int hammer_upgrade_undo_4(hammer_transaction_t trans); |
427e5fc6 | 1210 | |
36f82b23 MD |
1211 | hammer_off_t hammer_freemap_alloc(hammer_transaction_t trans, |
1212 | hammer_off_t owner, int *errorp); | |
1213 | void hammer_freemap_free(hammer_transaction_t trans, hammer_off_t phys_offset, | |
c3be93f2 | 1214 | hammer_off_t owner, int *errorp); |
0f65be10 | 1215 | int _hammer_checkspace(hammer_mount_t hmp, int slop, int64_t *resp); |
36f82b23 | 1216 | hammer_off_t hammer_blockmap_alloc(hammer_transaction_t trans, int zone, |
df2ccbac | 1217 | int bytes, hammer_off_t hint, int *errorp); |
0832c9bb MD |
1218 | hammer_reserve_t hammer_blockmap_reserve(hammer_mount_t hmp, int zone, |
1219 | int bytes, hammer_off_t *zone_offp, int *errorp); | |
1220 | void hammer_blockmap_reserve_complete(hammer_mount_t hmp, | |
1221 | hammer_reserve_t resv); | |
cb51be26 | 1222 | void hammer_reserve_clrdelay(hammer_mount_t hmp, hammer_reserve_t resv); |
36f82b23 | 1223 | void hammer_blockmap_free(hammer_transaction_t trans, |
ae9ef1bf | 1224 | hammer_off_t zone_offset, int bytes); |
bb29b5d8 | 1225 | int hammer_blockmap_dedup(hammer_transaction_t trans, |
ae9ef1bf | 1226 | hammer_off_t zone_offset, int bytes); |
cdb6e4e6 | 1227 | int hammer_blockmap_finalize(hammer_transaction_t trans, |
5e435c92 | 1228 | hammer_reserve_t resv, |
ae9ef1bf MD |
1229 | hammer_off_t zone_offset, int bytes); |
1230 | int hammer_blockmap_getfree(hammer_mount_t hmp, hammer_off_t zone_offset, | |
bf686dbe | 1231 | int *curp, int *errorp); |
f4fe61c2 MD |
1232 | hammer_off_t hammer_blockmap_lookup_verify(hammer_mount_t hmp, |
1233 | hammer_off_t zone_offset, int *errorp); | |
1234 | ||
ae9ef1bf | 1235 | hammer_off_t hammer_undo_lookup(hammer_mount_t hmp, hammer_off_t zone_offset, |
bf686dbe | 1236 | int *errorp); |
06ad81ff MD |
1237 | int64_t hammer_undo_used(hammer_transaction_t trans); |
1238 | int64_t hammer_undo_space(hammer_transaction_t trans); | |
1f07f686 | 1239 | int64_t hammer_undo_max(hammer_mount_t hmp); |
710733a6 | 1240 | int hammer_undo_reclaim(hammer_io_t io); |
1f07f686 | 1241 | |
87d20609 | 1242 | void hammer_start_transaction(hammer_transaction_t trans, |
ba2be8e9 | 1243 | hammer_mount_t hmp); |
87d20609 | 1244 | void hammer_simple_transaction(hammer_transaction_t trans, |
ba2be8e9 | 1245 | hammer_mount_t hmp); |
87d20609 | 1246 | void hammer_start_transaction_fls(hammer_transaction_t trans, |
ba2be8e9 | 1247 | hammer_mount_t hmp); |
87d20609 | 1248 | void hammer_done_transaction(hammer_transaction_t trans); |
83f2a3aa | 1249 | hammer_tid_t hammer_alloc_tid(hammer_mount_t hmp, int count); |
66325755 | 1250 | |
e98f1b96 | 1251 | void hammer_modify_inode(hammer_transaction_t trans, hammer_inode_t ip, int flags); |
f90dde4c | 1252 | void hammer_flush_inode(hammer_inode_t ip, int flags); |
b84de5af MD |
1253 | void hammer_wait_inode(hammer_inode_t ip); |
1254 | ||
87d20609 | 1255 | int hammer_create_inode(hammer_transaction_t trans, struct vattr *vap, |
e1067862 | 1256 | struct ucred *cred, hammer_inode_t dip, |
5a64efa1 | 1257 | const char *name, int namelen, |
ea434b6f | 1258 | hammer_pseudofs_inmem_t pfsm, |
e1067862 | 1259 | hammer_inode_t *ipp); |
a89aec1b | 1260 | void hammer_rel_inode(hammer_inode_t ip, int flush); |
51c35492 | 1261 | int hammer_reload_inode(hammer_inode_t ip, void *arg __unused); |
af209b0f | 1262 | int hammer_ino_rb_compare(hammer_inode_t ip1, hammer_inode_t ip2); |
73896937 | 1263 | int hammer_redo_rb_compare(hammer_inode_t ip1, hammer_inode_t ip2); |
cdb6e4e6 | 1264 | int hammer_destroy_inode_callback(hammer_inode_t ip, void *data __unused); |
51c35492 | 1265 | |
02325004 | 1266 | int hammer_sync_inode(hammer_transaction_t trans, hammer_inode_t ip); |
edcd6db2 | 1267 | void hammer_sync_inode_done(hammer_inode_t ip, int error); |
7b6ccb11 | 1268 | void hammer_test_inode(hammer_inode_t dip); |
e8599db1 | 1269 | void hammer_inode_unloadable_check(hammer_inode_t ip, int getvp); |
70125e78 | 1270 | int hammer_update_atime_quick(hammer_inode_t ip); |
8cd0a023 | 1271 | |
87d20609 | 1272 | int hammer_ip_add_direntry(hammer_transaction_t trans, |
5a930e66 | 1273 | hammer_inode_t dip, const char *name, int bytes, |
8cd0a023 | 1274 | hammer_inode_t nip); |
87d20609 | 1275 | int hammer_ip_del_direntry(hammer_transaction_t trans, |
8cd0a023 MD |
1276 | hammer_cursor_t cursor, hammer_inode_t dip, |
1277 | hammer_inode_t ip); | |
6362a262 | 1278 | void hammer_ip_replace_bulk(hammer_mount_t hmp, hammer_record_t record); |
47637bff | 1279 | hammer_record_t hammer_ip_add_bulk(hammer_inode_t ip, off_t file_offset, |
0832c9bb | 1280 | void *data, int bytes, int *errorp); |
e1067862 | 1281 | int hammer_ip_frontend_trunc(hammer_inode_t ip, off_t file_size); |
87d20609 | 1282 | int hammer_ip_add_record(hammer_transaction_t trans, |
7a04d74f | 1283 | hammer_record_t record); |
4e17f465 | 1284 | int hammer_ip_delete_range(hammer_cursor_t cursor, hammer_inode_t ip, |
47637bff | 1285 | int64_t ran_beg, int64_t ran_end, int truncating); |
a9d52b76 | 1286 | int hammer_ip_delete_clean(hammer_cursor_t cursor, hammer_inode_t ip, |
4e17f465 MD |
1287 | int *countp); |
1288 | int hammer_ip_sync_data(hammer_cursor_t cursor, hammer_inode_t ip, | |
1289 | int64_t offset, void *data, int bytes); | |
4e17f465 | 1290 | int hammer_ip_sync_record_cursor(hammer_cursor_t cursor, hammer_record_t rec); |
ea434b6f | 1291 | hammer_pseudofs_inmem_t hammer_load_pseudofs(hammer_transaction_t trans, |
46137e17 | 1292 | uint32_t localization, int *errorp); |
ea434b6f | 1293 | int hammer_mkroot_pseudofs(hammer_transaction_t trans, struct ucred *cred, |
3378c207 | 1294 | hammer_pseudofs_inmem_t pfsm, hammer_inode_t dip); |
ea434b6f MD |
1295 | int hammer_save_pseudofs(hammer_transaction_t trans, |
1296 | hammer_pseudofs_inmem_t pfsm); | |
46137e17 | 1297 | int hammer_unload_pseudofs(hammer_transaction_t trans, uint32_t localization); |
5fa5c92f | 1298 | void hammer_rel_pseudofs(hammer_mount_t hmp, hammer_pseudofs_inmem_t pfsm); |
7dc57964 MD |
1299 | int hammer_ioctl(hammer_inode_t ip, u_long com, caddr_t data, int fflag, |
1300 | struct ucred *cred); | |
1301 | ||
748efb59 | 1302 | void hammer_io_init(hammer_io_t io, hammer_volume_t volume, |
7251986a | 1303 | hammer_io_type_t type); |
4fcdc8b4 | 1304 | hammer_io_type_t hammer_zone_to_iotype(int zone); |
6b482339 TK |
1305 | int hammer_io_read(struct vnode *devvp, hammer_io_t io, int limit); |
1306 | void hammer_io_advance(hammer_io_t io); | |
1307 | int hammer_io_new(struct vnode *devvp, hammer_io_t io); | |
362ec2dc | 1308 | int hammer_io_inval(hammer_volume_t volume, hammer_off_t zone2_offset); |
6b482339 TK |
1309 | struct buf *hammer_io_release(hammer_io_t io, int flush); |
1310 | void hammer_io_flush(hammer_io_t io, int reclaim); | |
1311 | void hammer_io_wait(hammer_io_t io); | |
1312 | void hammer_io_waitdep(hammer_io_t io); | |
eddadaee | 1313 | void hammer_io_wait_all(hammer_mount_t hmp, const char *ident, int doflush); |
1b0ab2c3 MD |
1314 | int hammer_io_direct_read(hammer_mount_t hmp, struct bio *bio, |
1315 | hammer_btree_leaf_elm_t leaf); | |
9a98f3cc MD |
1316 | int hammer_io_indirect_read(hammer_mount_t hmp, struct bio *bio, |
1317 | hammer_btree_leaf_elm_t leaf); | |
6362a262 MD |
1318 | int hammer_io_direct_write(hammer_mount_t hmp, struct bio *bio, |
1319 | hammer_record_t record); | |
1b0ab2c3 | 1320 | void hammer_io_direct_wait(hammer_record_t record); |
43c665ae | 1321 | void hammer_io_direct_uncache(hammer_mount_t hmp, hammer_btree_leaf_elm_t leaf); |
9f5097dc MD |
1322 | void hammer_io_write_interlock(hammer_io_t io); |
1323 | void hammer_io_done_interlock(hammer_io_t io); | |
6b482339 TK |
1324 | void hammer_io_clear_modify(hammer_io_t io, int inval); |
1325 | void hammer_io_clear_modlist(hammer_io_t io); | |
748efb59 | 1326 | void hammer_io_flush_sync(hammer_mount_t hmp); |
6b482339 TK |
1327 | void hammer_io_clear_error(hammer_io_t io); |
1328 | void hammer_io_clear_error_noassert(hammer_io_t io); | |
b8a41159 | 1329 | void hammer_io_notmeta(hammer_buffer_t buffer); |
ba298df1 | 1330 | void hammer_io_limit_backlog(hammer_mount_t hmp); |
748efb59 | 1331 | |
36f82b23 MD |
1332 | void hammer_modify_volume(hammer_transaction_t trans, hammer_volume_t volume, |
1333 | void *base, int len); | |
1334 | void hammer_modify_buffer(hammer_transaction_t trans, hammer_buffer_t buffer, | |
1335 | void *base, int len); | |
10a5d1ba MD |
1336 | void hammer_modify_volume_done(hammer_volume_t volume); |
1337 | void hammer_modify_buffer_done(hammer_buffer_t buffer); | |
0b075555 | 1338 | |
36f82b23 MD |
1339 | int hammer_ioc_reblock(hammer_transaction_t trans, hammer_inode_t ip, |
1340 | struct hammer_ioc_reblock *reblock); | |
1775b6a0 MD |
1341 | int hammer_ioc_rebalance(hammer_transaction_t trans, hammer_inode_t ip, |
1342 | struct hammer_ioc_rebalance *rebal); | |
11ad5ade MD |
1343 | int hammer_ioc_prune(hammer_transaction_t trans, hammer_inode_t ip, |
1344 | struct hammer_ioc_prune *prune); | |
c82af904 MD |
1345 | int hammer_ioc_mirror_read(hammer_transaction_t trans, hammer_inode_t ip, |
1346 | struct hammer_ioc_mirror_rw *mirror); | |
1347 | int hammer_ioc_mirror_write(hammer_transaction_t trans, hammer_inode_t ip, | |
1348 | struct hammer_ioc_mirror_rw *mirror); | |
5fa5c92f | 1349 | int hammer_ioc_set_pseudofs(hammer_transaction_t trans, hammer_inode_t ip, |
ea434b6f | 1350 | struct ucred *cred, struct hammer_ioc_pseudofs_rw *pfs); |
5fa5c92f MD |
1351 | int hammer_ioc_get_pseudofs(hammer_transaction_t trans, hammer_inode_t ip, |
1352 | struct hammer_ioc_pseudofs_rw *pfs); | |
842e7a70 MD |
1353 | int hammer_ioc_destroy_pseudofs(hammer_transaction_t trans, hammer_inode_t ip, |
1354 | struct hammer_ioc_pseudofs_rw *pfs); | |
1355 | int hammer_ioc_downgrade_pseudofs(hammer_transaction_t trans, hammer_inode_t ip, | |
1356 | struct hammer_ioc_pseudofs_rw *pfs); | |
1357 | int hammer_ioc_upgrade_pseudofs(hammer_transaction_t trans, hammer_inode_t ip, | |
1358 | struct hammer_ioc_pseudofs_rw *pfs); | |
4889cbd4 MD |
1359 | int hammer_ioc_wait_pseudofs(hammer_transaction_t trans, hammer_inode_t ip, |
1360 | struct hammer_ioc_pseudofs_rw *pfs); | |
7dc46daa | 1361 | int hammer_ioc_scan_pseudofs(hammer_transaction_t trans, hammer_inode_t ip, |
e0f42079 | 1362 | struct hammer_ioc_pseudofs_rw *pfs); |
d121f61c | 1363 | int hammer_ioc_volume_add(hammer_transaction_t trans, hammer_inode_t ip, |
865c9609 MN |
1364 | struct hammer_ioc_volume *ioc); |
1365 | int hammer_ioc_volume_del(hammer_transaction_t trans, hammer_inode_t ip, | |
1366 | struct hammer_ioc_volume *ioc); | |
e914c91d SK |
1367 | int hammer_ioc_volume_list(hammer_transaction_t trans, hammer_inode_t ip, |
1368 | struct hammer_ioc_volume_list *ioc); | |
bb29b5d8 MD |
1369 | int hammer_ioc_dedup(hammer_transaction_t trans, hammer_inode_t ip, |
1370 | struct hammer_ioc_dedup *dedup); | |
bf686dbe | 1371 | |
855942b6 | 1372 | int hammer_signal_check(hammer_mount_t hmp); |
bf686dbe | 1373 | |
059819e3 MD |
1374 | void hammer_flusher_create(hammer_mount_t hmp); |
1375 | void hammer_flusher_destroy(hammer_mount_t hmp); | |
1376 | void hammer_flusher_sync(hammer_mount_t hmp); | |
7a61b85d | 1377 | int hammer_flusher_async(hammer_mount_t hmp, hammer_flush_group_t flg); |
15e75dab | 1378 | int hammer_flusher_async_one(hammer_mount_t hmp); |
e2a02b72 | 1379 | int hammer_flusher_running(hammer_mount_t hmp); |
93291532 | 1380 | void hammer_flusher_wait(hammer_mount_t hmp, int seq); |
82010f9f | 1381 | void hammer_flusher_wait_next(hammer_mount_t hmp); |
06ad81ff | 1382 | int hammer_flusher_meta_limit(hammer_mount_t hmp); |
93291532 | 1383 | int hammer_flusher_meta_halflimit(hammer_mount_t hmp); |
06ad81ff | 1384 | int hammer_flusher_undo_exhausted(hammer_transaction_t trans, int quarter); |
525aad3a | 1385 | void hammer_flusher_clean_loose_ios(hammer_mount_t hmp); |
6c1f89f4 | 1386 | void hammer_flusher_finalize(hammer_transaction_t trans, int final); |
1b0ab2c3 | 1387 | int hammer_flusher_haswork(hammer_mount_t hmp); |
8bae937e | 1388 | int hammer_flush_dirty(hammer_mount_t hmp, int max_count); |
6048b411 | 1389 | void hammer_flusher_flush_undos(hammer_mount_t hmp, int already_flushed); |
059819e3 | 1390 | |
02428fb6 MD |
1391 | int hammer_recover_stage1(hammer_mount_t hmp, hammer_volume_t rootvol); |
1392 | int hammer_recover_stage2(hammer_mount_t hmp, hammer_volume_t rootvol); | |
51c35492 | 1393 | void hammer_recover_flush_buffers(hammer_mount_t hmp, |
06ad81ff | 1394 | hammer_volume_t root_volume, int final); |
f90dde4c | 1395 | |
91ffdfc5 | 1396 | dev_t hammer_fsid_to_udev(hammer_uuid_t *uuid); |
a56cb012 | 1397 | |
19619882 | 1398 | |
4a2796f3 | 1399 | int hammer_blocksize(int64_t file_offset); |
6362a262 | 1400 | int hammer_blockoff(int64_t file_offset); |
4a2796f3 MD |
1401 | int64_t hammer_blockdemarc(int64_t file_offset1, int64_t file_offset2); |
1402 | ||
0f65be10 MD |
1403 | /* |
1404 | * Shortcut for _hammer_checkspace(), used all over the code. | |
1405 | */ | |
1406 | static __inline int | |
1407 | hammer_checkspace(hammer_mount_t hmp, int slop) | |
1408 | { | |
1409 | return(_hammer_checkspace(hmp, slop, NULL)); | |
1410 | } | |
1411 | ||
af209b0f MD |
1412 | static __inline void |
1413 | hammer_wait_mem_record(hammer_record_t record) | |
1414 | { | |
1415 | hammer_wait_mem_record_ident(record, "hmmwai"); | |
1416 | } | |
1417 | ||
/*
 * Acquire an exclusive lock using the default wait identifier.
 */
static __inline void
hammer_lock_ex(struct hammer_lock *lock)
{
	hammer_lock_ex_ident(lock, "hmrlck");
}
1423 | ||
f1c0ae53 TK |
1424 | static __inline void |
1425 | hammer_modify_volume_noundo(hammer_transaction_t trans, hammer_volume_t volume) | |
1426 | { | |
1427 | hammer_modify_volume(trans, volume, NULL, 0); | |
1428 | } | |
1429 | ||
1430 | static __inline void | |
1431 | hammer_modify_buffer_noundo(hammer_transaction_t trans, hammer_buffer_t buffer) | |
1432 | { | |
1433 | hammer_modify_buffer(trans, buffer, NULL, 0); | |
1434 | } | |
1435 | ||
bcac4bbb MD |
1436 | /* |
1437 | * Indicate that a B-Tree node is being modified. | |
1438 | */ | |
8cd0a023 | 1439 | static __inline void |
36f82b23 | 1440 | hammer_modify_node_noundo(hammer_transaction_t trans, hammer_node_t node) |
bf686dbe | 1441 | { |
4c286c36 | 1442 | KKASSERT((node->flags & HAMMER_NODE_CRCBAD) == 0); |
36f82b23 | 1443 | hammer_modify_buffer(trans, node->buffer, NULL, 0); |
bf686dbe MD |
1444 | } |
1445 | ||
1446 | static __inline void | |
56bbb861 | 1447 | hammer_modify_node_all(hammer_transaction_t trans, hammer_node_t node) |
8cd0a023 | 1448 | { |
4c286c36 | 1449 | KKASSERT((node->flags & HAMMER_NODE_CRCBAD) == 0); |
36f82b23 MD |
1450 | hammer_modify_buffer(trans, node->buffer, |
1451 | node->ondisk, sizeof(*node->ondisk)); | |
427e5fc6 MD |
1452 | } |
1453 | ||
bf686dbe | 1454 | static __inline void |
36f82b23 MD |
1455 | hammer_modify_node(hammer_transaction_t trans, hammer_node_t node, |
1456 | void *base, int len) | |
bf686dbe MD |
1457 | { |
1458 | KKASSERT((char *)base >= (char *)node->ondisk && | |
1459 | (char *)base + len <= | |
1460 | (char *)node->ondisk + sizeof(*node->ondisk)); | |
4c286c36 | 1461 | KKASSERT((node->flags & HAMMER_NODE_CRCBAD) == 0); |
283a4a38 MD |
1462 | |
1463 | if (hammer_btree_full_undo) { | |
1464 | hammer_modify_node_all(trans, node); | |
1465 | } else { | |
1466 | hammer_modify_buffer(trans, node->buffer, base, len); | |
f8baaab1 TK |
1467 | hammer_modify_buffer(trans, node->buffer, &node->ondisk->crc, |
1468 | sizeof(hammer_crc_t)); | |
283a4a38 MD |
1469 | --node->buffer->io.modify_refs; /* only want one ref */ |
1470 | } | |
bf686dbe MD |
1471 | } |
1472 | ||
bcac4bbb MD |
/*
 * Indicate that the specified modifications have been completed.
 *
 * Do not try to generate the crc here, it's very expensive to do and a
 * sequence of insertions or deletions can result in many calls to this
 * function on the same node.
 */
static __inline void
hammer_modify_node_done(hammer_node_t node)
{
	node->flags |= HAMMER_NODE_CRCGOOD;
	if ((node->flags & HAMMER_NODE_NEEDSCRC) == 0) {
		/*
		 * First completion since NEEDSCRC was last cleared: flag
		 * the node and its backing buffer (gencrc presumably
		 * tells the io layer to regenerate the crc later --
		 * NOTE(review): confirm), and hold a node ref so the
		 * node stays alive until that happens.
		 */
		node->flags |= HAMMER_NODE_NEEDSCRC;
		node->buffer->io.gencrc = 1;
		hammer_ref_node(node);
	}
	hammer_modify_buffer_done(node->buffer);
}
f4fe61c2 | 1491 | |
c11c5877 TK |
1492 | static __inline int |
1493 | hammer_btree_extract_leaf(hammer_cursor_t cursor) | |
1494 | { | |
cf977f11 | 1495 | return(hammer_btree_extract(cursor, 0)); |
c11c5877 TK |
1496 | } |
1497 | ||
0a6fabdb TK |
1498 | static __inline int |
1499 | hammer_btree_extract_data(hammer_cursor_t cursor) | |
1500 | { | |
cf977f11 | 1501 | return(hammer_btree_extract(cursor, HAMMER_CURSOR_GET_DATA)); |
0a6fabdb TK |
1502 | } |
1503 | ||
f4fe61c2 MD |
1504 | /* |
1505 | * Lookup a blockmap offset. | |
1506 | */ | |
1507 | static __inline hammer_off_t | |
1508 | hammer_blockmap_lookup(hammer_mount_t hmp, hammer_off_t zone_offset, | |
1509 | int *errorp) | |
1510 | { | |
1511 | #if defined INVARIANTS | |
f6d29b27 | 1512 | KKASSERT(hammer_is_zone_record(zone_offset)); |
f4fe61c2 MD |
1513 | #endif |
1514 | ||
1515 | /* | |
1516 | * We can actually skip blockmap verify by default, | |
1517 | * as normal blockmaps are now direct-mapped onto the freemap | |
1518 | * and so represent zone-2 addresses. | |
1519 | */ | |
1520 | if (hammer_verify_zone == 0) { | |
1521 | *errorp = 0; | |
1522 | return hammer_xlate_to_zone2(zone_offset); | |
1523 | } | |
1524 | ||
1525 | return hammer_blockmap_lookup_verify(hmp, zone_offset, errorp); | |
1526 | } | |
10a5d1ba | 1527 | |
e8599db1 MD |
/*
 * Begin modifying a single named field of a volume's on-disk
 * structure; the modification size is derived from the field itself.
 */
#define hammer_modify_volume_field(trans, vol, field) \
	hammer_modify_volume(trans, vol, &(vol)->ondisk->field, \
			     sizeof((vol)->ondisk->field))

/*
 * Begin modifying a single named field of a B-Tree node's on-disk
 * structure; the modification size is derived from the field itself.
 */
#define hammer_modify_node_field(trans, node, field) \
	hammer_modify_node(trans, node, &(node)->ondisk->field, \
			   sizeof((node)->ondisk->field))
1535 | ||
beec5dc4 MD |
1536 | /* |
1537 | * The HAMMER_INODE_CAP_DIR_LOCAL_INO capability is set on newly | |
1538 | * created directories for HAMMER version 2 or greater and causes | |
1539 | * directory entries to be placed the inode localization zone in | |
1540 | * the B-Tree instead of the misc zone. | |
1541 | * | |
1542 | * This greatly improves localization between directory entries and | |
1543 | * inodes | |
1544 | */ | |
46137e17 | 1545 | static __inline uint32_t |
beec5dc4 MD |
1546 | hammer_dir_localization(hammer_inode_t dip) |
1547 | { | |
ed959c01 | 1548 | return(HAMMER_DIR_INODE_LOCALIZATION(&dip->ino_data)); |
beec5dc4 | 1549 | } |
d053aa8a | 1550 | |
38f04e14 TK |
1551 | static __inline |
1552 | hammer_io_t | |
1553 | hammer_buf_peek_io(struct buf *bp) | |
1554 | { | |
3dd1f60c | 1555 | return((hammer_io_t)bp->b_priv); |
38f04e14 TK |
1556 | } |
1557 | ||
1558 | static __inline | |
1559 | void | |
1560 | hammer_buf_attach_io(struct buf *bp, hammer_io_t io) | |
1561 | { | |
1562 | /* struct buf and struct hammer_io are 1:1 */ | |
1563 | KKASSERT(hammer_buf_peek_io(bp) == NULL); | |
3dd1f60c | 1564 | bp->b_priv = io; |
38f04e14 TK |
1565 | } |
1566 | ||
75e4d787 TK |
/*
 * Word index into the volume bitmap for a volume number
 * (64 volumes tracked per uint64_t word).
 */
static __inline int
__hammer_vol_index(int vol_no)
{
	return vol_no >> 6;
}
1572 | ||
/*
 * Bit mask within a volume bitmap word for a volume number
 * (bit position is the volume number modulo 64).
 */
static __inline uint64_t
__hammer_vol_low(int vol_no)
{
	return (uint64_t)1 << (vol_no & 63);
}
1578 | ||
1579 | static __inline void | |
1580 | hammer_volume_number_add(hammer_mount_t hmp, hammer_volume_t vol) | |
1581 | { | |
1582 | int i = __hammer_vol_index(vol->vol_no); | |
1583 | hmp->volume_map[i] |= __hammer_vol_low(vol->vol_no); | |
1584 | } | |
1585 | ||
1586 | static __inline void | |
1587 | hammer_volume_number_del(hammer_mount_t hmp, hammer_volume_t vol) | |
1588 | { | |
1589 | int i = __hammer_vol_index(vol->vol_no); | |
1590 | hmp->volume_map[i] &= ~__hammer_vol_low(vol->vol_no); | |
1591 | } | |
1592 | ||
1593 | static __inline int | |
1594 | hammer_volume_number_test(hammer_mount_t hmp, int n) | |
1595 | { | |
1596 | int i = __hammer_vol_index(n); | |
1597 | return((hmp->volume_map[i] & __hammer_vol_low(n)) != 0); | |
1598 | } | |
1599 | ||
d053aa8a TK |
/*
 * kprintf() wrappers that prefix output with subsystem context:
 * hkprintf  - plain "HAMMER:" prefix
 * hvkprintf - prefix with the volume label
 * hmkprintf - prefix with the mount point's device name
 * hdkprintf - prefix with the calling function's name
 */
#define hkprintf(format, args...) \
	kprintf("HAMMER: "format,## args)
#define hvkprintf(vol, format, args...) \
	kprintf("HAMMER(%s) "format, vol->ondisk->vol_label,## args)
#define hmkprintf(hmp, format, args...) \
	kprintf("HAMMER(%s) "format, hmp->mp->mnt_stat.f_mntfromname,## args)
#define hdkprintf(format, args...) \
	kprintf("%s: "format, __func__,## args)

/*
 * Rate-limited variants of the above, built on krateprintf().
 */
#define hkrateprintf(rate , format, args...) \
	krateprintf(rate, "HAMMER: "format,## args)
#define hvkrateprintf(rate, vol, format, args...) \
	krateprintf(rate, "HAMMER(%s) "format, vol->ondisk->vol_label,## args)
#define hmkrateprintf(rate, hmp, format, args...) \
	krateprintf(rate, "HAMMER(%s) "format, hmp->mp->mnt_stat.f_mntfromname,## args)
#define hdkrateprintf(rate, format, args...) \
	krateprintf(rate, "%s: "format, __func__,## args)

/* panic() prefixed with the calling function's name. */
#define hpanic(format, args...) \
	panic("%s: "format, __func__,## args)
1caa2035 | 1620 | #endif /* _KERNEL */ |
964cb30d TK |
1621 | |
1622 | #endif /* !VFS_HAMMER_HAMMER_H_ */ |