| 1 | /* |
| 2 | * Copyright (c) 2007-2008 The DragonFly Project. All rights reserved. |
| 3 | * |
| 4 | * This code is derived from software contributed to The DragonFly Project |
| 5 | * by Matthew Dillon <dillon@backplane.com> |
| 6 | * |
| 7 | * Redistribution and use in source and binary forms, with or without |
| 8 | * modification, are permitted provided that the following conditions |
| 9 | * are met: |
| 10 | * |
| 11 | * 1. Redistributions of source code must retain the above copyright |
| 12 | * notice, this list of conditions and the following disclaimer. |
| 13 | * 2. Redistributions in binary form must reproduce the above copyright |
| 14 | * notice, this list of conditions and the following disclaimer in |
| 15 | * the documentation and/or other materials provided with the |
| 16 | * distribution. |
| 17 | * 3. Neither the name of The DragonFly Project nor the names of its |
| 18 | * contributors may be used to endorse or promote products derived |
| 19 | * from this software without specific, prior written permission. |
| 20 | * |
| 21 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
| 22 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
| 23 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS |
| 24 | * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE |
| 25 | * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, |
| 26 | * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, |
| 27 | * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; |
| 28 | * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED |
| 29 | * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, |
| 30 | * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT |
| 31 | * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
| 32 | * SUCH DAMAGE. |
| 33 | * |
| 34 | * $DragonFly: src/sys/vfs/hammer/hammer.h,v 1.130 2008/11/13 02:18:43 dillon Exp $ |
| 35 | */ |
| 36 | /* |
| 37 | * This header file contains structures used internally by the HAMMERFS |
| 38 | * implementation. See hammer_disk.h for on-disk structures. |
| 39 | */ |
| 40 | |
| 41 | #include <sys/param.h> |
| 42 | #include <sys/types.h> |
| 43 | #include <sys/kernel.h> |
| 44 | #include <sys/conf.h> |
| 45 | #include <sys/systm.h> |
| 46 | #include <sys/tree.h> |
| 47 | #include <sys/malloc.h> |
| 48 | #include <sys/mount.h> |
| 49 | #include <sys/mountctl.h> |
| 50 | #include <sys/vnode.h> |
| 51 | #include <sys/proc.h> |
| 52 | #include <sys/priv.h> |
| 53 | #include <sys/stat.h> |
| 54 | #include <sys/globaldata.h> |
| 55 | #include <sys/lockf.h> |
| 56 | #include <sys/buf.h> |
| 57 | #include <sys/queue.h> |
| 58 | #include <sys/ktr.h> |
| 59 | #include <sys/globaldata.h> |
| 60 | #include <sys/limits.h> |
| 61 | #include <vm/vm_extern.h> |
| 62 | |
| 63 | #include <sys/buf2.h> |
| 64 | #include <sys/signal2.h> |
| 65 | #include <sys/mplock2.h> |
| 66 | #include "hammer_disk.h" |
| 67 | #include "hammer_mount.h" |
| 68 | #include "hammer_ioctl.h" |
| 69 | |
| 70 | #if defined(_KERNEL) || defined(_KERNEL_STRUCTURES) |
| 71 | |
| 72 | MALLOC_DECLARE(M_HAMMER); |
| 73 | |
| 74 | /* |
| 75 | * Kernel trace |
| 76 | */ |
| 77 | #if !defined(KTR_HAMMER) |
| 78 | #define KTR_HAMMER KTR_ALL |
| 79 | #endif |
| 80 | KTR_INFO_MASTER_EXTERN(hammer); |
| 81 | |
| 82 | /* |
| 83 | * Misc structures |
| 84 | */ |
| 85 | struct hammer_mount; |
| 86 | |
| 87 | /* |
| 88 | * Key structure used for custom RB tree inode lookups. This prototypes |
| 89 | * the function hammer_ino_rb_tree_RB_LOOKUP_INFO(root, info). |
| 90 | */ |
| 91 | typedef struct hammer_inode_info { |
| 92 | int64_t obj_id; /* (key) object identifier */ |
| 93 | hammer_tid_t obj_asof; /* (key) snapshot transid or 0 */ |
| 94 | u_int32_t obj_localization; /* (key) pseudo-fs */ |
| 95 | union { |
| 96 | struct hammer_btree_leaf_elm *leaf; |
| 97 | } u; |
| 98 | } *hammer_inode_info_t; |
| 99 | |
| 100 | typedef enum hammer_transaction_type { |
| 101 | HAMMER_TRANS_RO, |
| 102 | HAMMER_TRANS_STD, |
| 103 | HAMMER_TRANS_FLS |
| 104 | } hammer_transaction_type_t; |
| 105 | |
| 106 | /* |
| 107 | * HAMMER Transaction tracking |
| 108 | */ |
| 109 | struct hammer_transaction { |
| 110 | hammer_transaction_type_t type; |
| 111 | struct hammer_mount *hmp; |
| 112 | hammer_tid_t tid; |
| 113 | u_int64_t time; |
| 114 | u_int32_t time32; |
| 115 | int sync_lock_refs; |
| 116 | int flags; |
| 117 | struct hammer_volume *rootvol; |
| 118 | }; |
| 119 | |
| 120 | typedef struct hammer_transaction *hammer_transaction_t; |
| 121 | |
| 122 | #define HAMMER_TRANSF_NEWINODE 0x0001 |
| 123 | #define HAMMER_TRANSF_DIDIO 0x0002 |
| 124 | #define HAMMER_TRANSF_CRCDOM 0x0004 /* EDOM on CRC error, less critical */ |
| 125 | |
| 126 | /* |
| 127 | * HAMMER locks |
| 128 | */ |
| 129 | struct hammer_lock { |
| 130 | volatile u_int refs; /* active references */ |
| 131 | volatile u_int lockval; /* lock count and control bits */ |
| 132 | struct thread *lowner; /* owner if exclusively held */ |
| 133 | struct thread *rowner; /* owner if exclusively held */ |
| 134 | }; |
| 135 | |
| 136 | #define HAMMER_REFS_LOCKED 0x40000000 /* transition check */ |
| 137 | #define HAMMER_REFS_WANTED 0x20000000 /* transition check */ |
| 138 | #define HAMMER_REFS_CHECK 0x10000000 /* transition check */ |
| 139 | |
| 140 | #define HAMMER_REFS_FLAGS (HAMMER_REFS_LOCKED | \ |
| 141 | HAMMER_REFS_WANTED | \ |
| 142 | HAMMER_REFS_CHECK) |
| 143 | |
| 144 | #define HAMMER_LOCKF_EXCLUSIVE 0x40000000 |
| 145 | #define HAMMER_LOCKF_WANTED 0x20000000 |
| 146 | |
| 147 | static __inline int |
| 148 | hammer_notlocked(struct hammer_lock *lock) |
| 149 | { |
| 150 | return(lock->lockval == 0); |
| 151 | } |
| 152 | |
| 153 | static __inline int |
| 154 | hammer_islocked(struct hammer_lock *lock) |
| 155 | { |
| 156 | return(lock->lockval != 0); |
| 157 | } |
| 158 | |
| 159 | /* |
| 160 | * Returns the number of refs on the object. |
| 161 | */ |
| 162 | static __inline int |
| 163 | hammer_isactive(struct hammer_lock *lock) |
| 164 | { |
| 165 | return(lock->refs & ~HAMMER_REFS_FLAGS); |
| 166 | } |
| 167 | |
| 168 | static __inline int |
| 169 | hammer_oneref(struct hammer_lock *lock) |
| 170 | { |
| 171 | return((lock->refs & ~HAMMER_REFS_FLAGS) == 1); |
| 172 | } |
| 173 | |
| 174 | static __inline int |
| 175 | hammer_norefs(struct hammer_lock *lock) |
| 176 | { |
| 177 | return((lock->refs & ~HAMMER_REFS_FLAGS) == 0); |
| 178 | } |
| 179 | |
| 180 | static __inline int |
| 181 | hammer_norefsorlock(struct hammer_lock *lock) |
| 182 | { |
| 183 | return(lock->refs == 0); |
| 184 | } |
| 185 | |
| 186 | static __inline int |
| 187 | hammer_refsorlock(struct hammer_lock *lock) |
| 188 | { |
| 189 | return(lock->refs != 0); |
| 190 | } |
| 191 | |
| 192 | /* |
| 193 | * Return if we specifically own the lock exclusively. |
| 194 | */ |
| 195 | static __inline int |
| 196 | hammer_lock_excl_owned(struct hammer_lock *lock, thread_t td) |
| 197 | { |
| 198 | if ((lock->lockval & HAMMER_LOCKF_EXCLUSIVE) && |
| 199 | lock->lowner == td) { |
| 200 | return(1); |
| 201 | } |
| 202 | return(0); |
| 203 | } |
| 204 | |
| 205 | /* |
| 206 | * Flush state, used by various structures |
| 207 | */ |
| 208 | typedef enum hammer_inode_state { |
| 209 | HAMMER_FST_IDLE, |
| 210 | HAMMER_FST_SETUP, |
| 211 | HAMMER_FST_FLUSH |
| 212 | } hammer_inode_state_t; |
| 213 | |
| 214 | TAILQ_HEAD(hammer_record_list, hammer_record); |
| 215 | |
| 216 | /* |
| 217 | * Pseudo-filesystem extended data tracking |
| 218 | */ |
| 219 | struct hammer_pfs_rb_tree; |
| 220 | struct hammer_pseudofs_inmem; |
| 221 | RB_HEAD(hammer_pfs_rb_tree, hammer_pseudofs_inmem); |
| 222 | RB_PROTOTYPE2(hammer_pfs_rb_tree, hammer_pseudofs_inmem, rb_node, |
| 223 | hammer_pfs_rb_compare, u_int32_t); |
| 224 | |
| 225 | struct hammer_pseudofs_inmem { |
| 226 | RB_ENTRY(hammer_pseudofs_inmem) rb_node; |
| 227 | struct hammer_lock lock; |
| 228 | u_int32_t localization; |
| 229 | hammer_tid_t create_tid; |
| 230 | int flags; |
| 231 | udev_t fsid_udev; |
| 232 | struct hammer_pseudofs_data pfsd; |
| 233 | }; |
| 234 | |
| 235 | typedef struct hammer_pseudofs_inmem *hammer_pseudofs_inmem_t; |
| 236 | |
| 237 | #define HAMMER_PFSM_DELETED 0x0001 |
| 238 | |
| 239 | /* |
| 240 | * Cache object ids. A fixed number of objid cache structures are |
| 241 | * created to reserve object id's for newly created files in multiples |
| 242 | * of 100,000, localized to a particular directory, and recycled as |
| 243 | * needed. This allows parallel create operations in different |
| 244 | * directories to retain fairly localized object ids which in turn |
| 245 | * improves reblocking performance and layout. |
| 246 | */ |
| 247 | #define OBJID_CACHE_SIZE 1024 |
| 248 | #define OBJID_CACHE_BULK_BITS 10 /* 10 bits (1024) */ |
| 249 | #define OBJID_CACHE_BULK (32 * 32) /* two level (1024) */ |
| 250 | #define OBJID_CACHE_BULK_MASK (OBJID_CACHE_BULK - 1) |
| 251 | #define OBJID_CACHE_BULK_MASK64 ((u_int64_t)(OBJID_CACHE_BULK - 1)) |
| 252 | |
| 253 | typedef struct hammer_objid_cache { |
| 254 | TAILQ_ENTRY(hammer_objid_cache) entry; |
| 255 | struct hammer_inode *dip; |
| 256 | hammer_tid_t base_tid; |
| 257 | int count; |
| 258 | u_int32_t bm0; |
| 259 | u_int32_t bm1[32]; |
| 260 | } *hammer_objid_cache_t; |
| 261 | |
| 262 | /* |
| 263 | * Associate an inode with a B-Tree node to cache search start positions |
| 264 | */ |
| 265 | typedef struct hammer_node_cache { |
| 266 | TAILQ_ENTRY(hammer_node_cache) entry; |
| 267 | struct hammer_node *node; |
| 268 | struct hammer_inode *ip; |
| 269 | } *hammer_node_cache_t; |
| 270 | |
| 271 | TAILQ_HEAD(hammer_node_cache_list, hammer_node_cache); |
| 272 | |
| 273 | /* |
| 274 | * Live dedup cache |
| 275 | */ |
| 276 | struct hammer_dedup_crc_rb_tree; |
| 277 | RB_HEAD(hammer_dedup_crc_rb_tree, hammer_dedup_cache); |
| 278 | RB_PROTOTYPE2(hammer_dedup_crc_rb_tree, hammer_dedup_cache, crc_entry, |
| 279 | hammer_dedup_crc_rb_compare, hammer_crc_t); |
| 280 | |
| 281 | struct hammer_dedup_off_rb_tree; |
| 282 | RB_HEAD(hammer_dedup_off_rb_tree, hammer_dedup_cache); |
| 283 | RB_PROTOTYPE2(hammer_dedup_off_rb_tree, hammer_dedup_cache, off_entry, |
| 284 | hammer_dedup_off_rb_compare, hammer_off_t); |
| 285 | |
| 286 | #define DEDUP_CACHE_SIZE 4096 /* XXX make it a dynamic tunable */ |
| 287 | |
| 288 | typedef struct hammer_dedup_cache { |
| 289 | RB_ENTRY(hammer_dedup_cache) crc_entry; |
| 290 | RB_ENTRY(hammer_dedup_cache) off_entry; |
| 291 | TAILQ_ENTRY(hammer_dedup_cache) lru_entry; |
| 292 | struct hammer_mount *hmp; |
| 293 | int64_t obj_id; |
| 294 | u_int32_t localization; |
| 295 | off_t file_offset; |
| 296 | int bytes; |
| 297 | hammer_off_t data_offset; |
| 298 | hammer_crc_t crc; |
| 299 | } *hammer_dedup_cache_t; |
| 300 | |
| 301 | /* |
| 302 | * Structure used to organize flush groups. Flush groups must be |
| 303 | * organized into chunks in order to avoid blowing out the UNDO FIFO. |
| 304 | * Without this a 'sync' could end up flushing 50,000 inodes in a single |
| 305 | * transaction. |
| 306 | */ |
| 307 | struct hammer_fls_rb_tree; |
| 308 | RB_HEAD(hammer_fls_rb_tree, hammer_inode); |
| 309 | RB_PROTOTYPE(hammer_fls_rb_tree, hammer_inode, rb_flsnode, |
| 310 | hammer_ino_rb_compare); |
| 311 | |
| 312 | struct hammer_flush_group { |
| 313 | TAILQ_ENTRY(hammer_flush_group) flush_entry; |
| 314 | struct hammer_fls_rb_tree flush_tree; |
| 315 | int unused01; |
| 316 | int total_count; /* record load */ |
| 317 | int running; /* group is running */ |
| 318 | int closed; |
| 319 | int refs; |
| 320 | }; |
| 321 | |
| 322 | typedef struct hammer_flush_group *hammer_flush_group_t; |
| 323 | |
| 324 | TAILQ_HEAD(hammer_flush_group_list, hammer_flush_group); |
| 325 | |
| 326 | /* |
| 327 | * Structure used to represent an inode in-memory. |
| 328 | * |
| 329 | * The record and data associated with an inode may be out of sync with |
| 330 | * the disk (xDIRTY flags), or not even on the disk at all (ONDISK flag |
| 331 | * clear). |
| 332 | * |
| 333 | * An inode may also hold a cache of unsynchronized records, used for |
| 334 | * database and directories only. Unsynchronized regular file data is |
| 335 | * stored in the buffer cache. |
| 336 | * |
| 337 | * NOTE: A file which is created and destroyed within the initial |
| 338 | * synchronization period can wind up not doing any disk I/O at all. |
| 339 | * |
| 340 | * Finally, an inode may cache numerous disk-referencing B-Tree cursors. |
| 341 | */ |
| 342 | struct hammer_ino_rb_tree; |
| 343 | struct hammer_inode; |
| 344 | RB_HEAD(hammer_ino_rb_tree, hammer_inode); |
| 345 | RB_PROTOTYPEX(hammer_ino_rb_tree, INFO, hammer_inode, rb_node, |
| 346 | hammer_ino_rb_compare, hammer_inode_info_t); |
| 347 | |
| 348 | struct hammer_redo_rb_tree; |
| 349 | RB_HEAD(hammer_redo_rb_tree, hammer_inode); |
| 350 | RB_PROTOTYPE2(hammer_redo_rb_tree, hammer_inode, rb_redonode, |
| 351 | hammer_redo_rb_compare, hammer_off_t); |
| 352 | |
| 353 | struct hammer_rec_rb_tree; |
| 354 | struct hammer_record; |
| 355 | RB_HEAD(hammer_rec_rb_tree, hammer_record); |
| 356 | RB_PROTOTYPEX(hammer_rec_rb_tree, INFO, hammer_record, rb_node, |
| 357 | hammer_rec_rb_compare, hammer_btree_leaf_elm_t); |
| 358 | |
| 359 | TAILQ_HEAD(hammer_node_list, hammer_node); |
| 360 | |
| 361 | struct hammer_inode { |
| 362 | RB_ENTRY(hammer_inode) rb_node; |
| 363 | hammer_inode_state_t flush_state; |
| 364 | hammer_flush_group_t flush_group; |
| 365 | RB_ENTRY(hammer_inode) rb_flsnode; /* when on flush list */ |
| 366 | RB_ENTRY(hammer_inode) rb_redonode; /* when INODE_RDIRTY is set */ |
| 367 | struct hammer_record_list target_list; /* target of dependant recs */ |
| 368 | int64_t obj_id; /* (key) object identifier */ |
| 369 | hammer_tid_t obj_asof; /* (key) snapshot or 0 */ |
| 370 | u_int32_t obj_localization; /* (key) pseudo-fs */ |
| 371 | struct hammer_mount *hmp; |
| 372 | hammer_objid_cache_t objid_cache; |
| 373 | int flags; |
| 374 | int error; /* flush error */ |
| 375 | int cursor_ip_refs; /* sanity */ |
| 376 | int rsv_recs; |
| 377 | struct vnode *vp; |
| 378 | hammer_pseudofs_inmem_t pfsm; |
| 379 | struct lockf advlock; |
| 380 | struct hammer_lock lock; /* sync copy interlock */ |
| 381 | off_t trunc_off; |
| 382 | struct hammer_btree_leaf_elm ino_leaf; /* in-memory cache */ |
| 383 | struct hammer_inode_data ino_data; /* in-memory cache */ |
| 384 | struct hammer_rec_rb_tree rec_tree; /* in-memory cache */ |
| 385 | int rec_generation; |
| 386 | struct hammer_node_cache cache[4]; /* search initiate cache */ |
| 387 | |
| 388 | /* |
| 389 | * When a demark is created to synchronize an inode to |
| 390 | * disk, certain fields are copied so the front-end VOPs |
| 391 | * can continue to run in parallel with the synchronization |
| 392 | * occuring in the background. |
| 393 | */ |
| 394 | int sync_flags; /* to-sync flags cache */ |
| 395 | off_t sync_trunc_off; /* to-sync truncation */ |
| 396 | off_t save_trunc_off; /* write optimization */ |
| 397 | struct hammer_btree_leaf_elm sync_ino_leaf; /* to-sync cache */ |
| 398 | struct hammer_inode_data sync_ino_data; /* to-sync cache */ |
| 399 | size_t redo_count; |
| 400 | |
| 401 | /* |
| 402 | * Track the earliest offset in the UNDO/REDO FIFO containing |
| 403 | * REDO records. This is staged to the backend during flush |
| 404 | * sequences. While the inode is staged redo_fifo_next is used |
| 405 | * to track the earliest offset for rotation into redo_fifo_start |
| 406 | * on completion of the flush. |
| 407 | */ |
| 408 | hammer_off_t redo_fifo_start; |
| 409 | hammer_off_t redo_fifo_next; |
| 410 | }; |
| 411 | |
| 412 | typedef struct hammer_inode *hammer_inode_t; |
| 413 | |
| 414 | #define VTOI(vp) ((struct hammer_inode *)(vp)->v_data) |
| 415 | |
| 416 | /* |
| 417 | * NOTE: DDIRTY does not include atime or mtime and does not include |
| 418 | * write-append size changes. SDIRTY handles write-append size |
| 419 | * changes. |
| 420 | * |
| 421 | * REDO indicates that REDO logging is active, creating a definitive |
| 422 | * stream of REDO records in the UNDO/REDO log for writes and |
| 423 | * truncations, including boundary records when/if REDO is turned off. |
| 424 | * REDO is typically enabled by fsync() and turned off if excessive |
| 425 | * writes without an fsync() occurs. |
| 426 | * |
| 427 | * RDIRTY indicates that REDO records were laid down in the UNDO/REDO |
| 428 | * FIFO (even if REDO is turned off some might still be active) and |
| 429 | * still being tracked for this inode. See hammer_redo.c |
| 430 | */ |
| 431 | /* (not including atime/mtime) */ |
| 432 | #define HAMMER_INODE_DDIRTY 0x0001 /* in-memory ino_data is dirty */ |
| 433 | #define HAMMER_INODE_RSV_INODES 0x0002 /* hmp->rsv_inodes bumped */ |
| 434 | #define HAMMER_INODE_CONN_DOWN 0x0004 /* include in downward recursion */ |
| 435 | #define HAMMER_INODE_XDIRTY 0x0008 /* in-memory records */ |
| 436 | #define HAMMER_INODE_ONDISK 0x0010 /* inode is on-disk (else not yet) */ |
| 437 | #define HAMMER_INODE_FLUSH 0x0020 /* flush on last ref */ |
| 438 | #define HAMMER_INODE_DELETED 0x0080 /* inode delete (backend) */ |
| 439 | #define HAMMER_INODE_DELONDISK 0x0100 /* delete synchronized to disk */ |
| 440 | #define HAMMER_INODE_RO 0x0200 /* read-only (because of as-of) */ |
| 441 | #define HAMMER_INODE_UNUSED0400 0x0400 |
| 442 | #define HAMMER_INODE_DONDISK 0x0800 /* data records may be on disk */ |
| 443 | #define HAMMER_INODE_BUFS 0x1000 /* dirty high level bps present */ |
| 444 | #define HAMMER_INODE_REFLUSH 0x2000 /* flush on dependancy / reflush */ |
| 445 | #define HAMMER_INODE_RECLAIM 0x4000 /* trying to reclaim */ |
| 446 | #define HAMMER_INODE_FLUSHW 0x8000 /* Someone waiting for flush */ |
| 447 | |
| 448 | #define HAMMER_INODE_TRUNCATED 0x00010000 |
| 449 | #define HAMMER_INODE_DELETING 0x00020000 /* inode delete request (frontend)*/ |
| 450 | #define HAMMER_INODE_RESIGNAL 0x00040000 /* re-signal on re-flush */ |
| 451 | #define HAMMER_INODE_ATIME 0x00100000 /* in-memory atime modified */ |
| 452 | #define HAMMER_INODE_MTIME 0x00200000 /* in-memory mtime modified */ |
| 453 | #define HAMMER_INODE_WOULDBLOCK 0x00400000 /* re-issue to new flush group */ |
| 454 | #define HAMMER_INODE_DUMMY 0x00800000 /* dummy inode covering bad file */ |
| 455 | #define HAMMER_INODE_SDIRTY 0x01000000 /* in-memory ino_data.size is dirty*/ |
| 456 | #define HAMMER_INODE_REDO 0x02000000 /* REDO logging active */ |
| 457 | #define HAMMER_INODE_RDIRTY 0x04000000 /* REDO records active in fifo */ |
| 458 | |
| 459 | #define HAMMER_INODE_MODMASK (HAMMER_INODE_DDIRTY|HAMMER_INODE_SDIRTY| \ |
| 460 | HAMMER_INODE_XDIRTY|HAMMER_INODE_BUFS| \ |
| 461 | HAMMER_INODE_ATIME|HAMMER_INODE_MTIME| \ |
| 462 | HAMMER_INODE_TRUNCATED|HAMMER_INODE_DELETING) |
| 463 | |
| 464 | #define HAMMER_INODE_MODMASK_NOXDIRTY \ |
| 465 | (HAMMER_INODE_MODMASK & ~HAMMER_INODE_XDIRTY) |
| 466 | |
| 467 | #define HAMMER_INODE_MODMASK_NOREDO \ |
| 468 | (HAMMER_INODE_DDIRTY| \ |
| 469 | HAMMER_INODE_XDIRTY| \ |
| 470 | HAMMER_INODE_TRUNCATED|HAMMER_INODE_DELETING) |
| 471 | |
| 472 | #define HAMMER_FLUSH_SIGNAL 0x0001 |
| 473 | #define HAMMER_FLUSH_RECURSION 0x0002 |
| 474 | |
| 475 | /* |
| 476 | * Used by the inode reclaim code to pipeline reclaims and avoid |
| 477 | * blowing out kernel memory or letting the flusher get too far |
| 478 | * behind. The reclaim wakes up when count reaches 0 or the |
| 479 | * timer expires. |
| 480 | */ |
| 481 | struct hammer_reclaim { |
| 482 | TAILQ_ENTRY(hammer_reclaim) entry; |
| 483 | int count; |
| 484 | }; |
| 485 | |
| 486 | #define HAMMER_RECLAIM_WAIT 4000 /* default vfs.hammer.limit_reclaim */ |
| 487 | |
| 488 | /* |
| 489 | * Track who is creating the greatest burden on the |
| 490 | * inode cache. |
| 491 | */ |
| 492 | struct hammer_inostats { |
| 493 | pid_t pid; /* track user process */ |
| 494 | int ltick; /* last tick */ |
| 495 | int count; /* count (degenerates) */ |
| 496 | }; |
| 497 | |
| 498 | #define HAMMER_INOSTATS_HSIZE 32 |
| 499 | #define HAMMER_INOSTATS_HMASK (HAMMER_INOSTATS_HSIZE - 1) |
| 500 | |
| 501 | /* |
| 502 | * Structure used to represent an unsynchronized record in-memory. These |
| 503 | * records typically represent directory entries. Only non-historical |
| 504 | * records are kept in-memory. |
| 505 | * |
| 506 | * Records are organized as a per-inode RB-Tree. If the inode is not |
| 507 | * on disk then neither are any records and the in-memory record tree |
| 508 | * represents the entire contents of the inode. If the inode is on disk |
| 509 | * then the on-disk B-Tree is scanned in parallel with the in-memory |
| 510 | * RB-Tree to synthesize the current state of the file. |
| 511 | * |
| 512 | * Records are also used to enforce the ordering of directory create/delete |
| 513 | * operations. A new inode will not be flushed to disk unless its related |
| 514 | * directory entry is also being flushed at the same time. A directory entry |
| 515 | * will not be removed unless its related inode is also being removed at the |
| 516 | * same time. |
| 517 | */ |
| 518 | typedef enum hammer_record_type { |
| 519 | HAMMER_MEM_RECORD_GENERAL, /* misc record */ |
| 520 | HAMMER_MEM_RECORD_INODE, /* inode record */ |
| 521 | HAMMER_MEM_RECORD_ADD, /* positive memory cache record */ |
| 522 | HAMMER_MEM_RECORD_DEL, /* negative delete-on-disk record */ |
| 523 | HAMMER_MEM_RECORD_DATA /* bulk-data record w/on-disk ref */ |
| 524 | } hammer_record_type_t; |
| 525 | |
| 526 | struct hammer_record { |
| 527 | RB_ENTRY(hammer_record) rb_node; |
| 528 | TAILQ_ENTRY(hammer_record) target_entry; |
| 529 | hammer_inode_state_t flush_state; |
| 530 | hammer_flush_group_t flush_group; |
| 531 | hammer_record_type_t type; |
| 532 | struct hammer_lock lock; |
| 533 | struct hammer_reserve *resv; |
| 534 | struct hammer_inode *ip; |
| 535 | struct hammer_inode *target_ip; |
| 536 | struct hammer_btree_leaf_elm leaf; |
| 537 | union hammer_data_ondisk *data; |
| 538 | int flags; |
| 539 | int gflags; |
| 540 | hammer_off_t zone2_offset; /* direct-write only */ |
| 541 | }; |
| 542 | |
| 543 | typedef struct hammer_record *hammer_record_t; |
| 544 | |
| 545 | /* |
| 546 | * Record flags. Note that FE can only be set by the frontend if the |
| 547 | * record has not been interlocked by the backend w/ BE. |
| 548 | */ |
| 549 | #define HAMMER_RECF_ALLOCDATA 0x0001 |
| 550 | #define HAMMER_RECF_ONRBTREE 0x0002 |
| 551 | #define HAMMER_RECF_DELETED_FE 0x0004 /* deleted (frontend) */ |
| 552 | #define HAMMER_RECF_DELETED_BE 0x0008 /* deleted (backend) */ |
| 553 | #define HAMMER_RECF_COMMITTED 0x0010 /* committed to the B-Tree */ |
| 554 | #define HAMMER_RECF_INTERLOCK_BE 0x0020 /* backend interlock */ |
| 555 | #define HAMMER_RECF_WANTED 0x0040 /* wanted by the frontend */ |
| 556 | #define HAMMER_RECF_DEDUPED 0x0080 /* will be live-dedup'ed */ |
| 557 | #define HAMMER_RECF_CONVERT_DELETE 0x0100 /* special case */ |
| 558 | #define HAMMER_RECF_REDO 0x1000 /* REDO was laid down */ |
| 559 | |
| 560 | /* |
| 561 | * These flags must be separate to deal with SMP races |
| 562 | */ |
| 563 | #define HAMMER_RECG_DIRECT_IO 0x0001 /* related direct I/O running*/ |
| 564 | #define HAMMER_RECG_DIRECT_WAIT 0x0002 /* related direct I/O running*/ |
| 565 | #define HAMMER_RECG_DIRECT_INVAL 0x0004 /* buffer alias invalidation */ |
| 566 | /* |
| 567 | * hammer_create_at_cursor() and hammer_delete_at_cursor() flags. |
| 568 | */ |
| 569 | #define HAMMER_CREATE_MODE_UMIRROR 0x0001 |
| 570 | #define HAMMER_CREATE_MODE_SYS 0x0002 |
| 571 | |
| 572 | #define HAMMER_DELETE_ADJUST 0x0001 |
| 573 | #define HAMMER_DELETE_DESTROY 0x0002 |
| 574 | |
| 575 | /* |
| 576 | * In-memory structures representing on-disk structures. |
| 577 | */ |
| 578 | struct hammer_volume; |
| 579 | struct hammer_buffer; |
| 580 | struct hammer_node; |
| 581 | struct hammer_undo; |
| 582 | struct hammer_reserve; |
| 583 | |
| 584 | RB_HEAD(hammer_vol_rb_tree, hammer_volume); |
| 585 | RB_HEAD(hammer_buf_rb_tree, hammer_buffer); |
| 586 | RB_HEAD(hammer_nod_rb_tree, hammer_node); |
| 587 | RB_HEAD(hammer_und_rb_tree, hammer_undo); |
| 588 | RB_HEAD(hammer_res_rb_tree, hammer_reserve); |
| 589 | |
| 590 | RB_PROTOTYPE2(hammer_vol_rb_tree, hammer_volume, rb_node, |
| 591 | hammer_vol_rb_compare, int32_t); |
| 592 | RB_PROTOTYPE2(hammer_buf_rb_tree, hammer_buffer, rb_node, |
| 593 | hammer_buf_rb_compare, hammer_off_t); |
| 594 | RB_PROTOTYPE2(hammer_nod_rb_tree, hammer_node, rb_node, |
| 595 | hammer_nod_rb_compare, hammer_off_t); |
| 596 | RB_PROTOTYPE2(hammer_und_rb_tree, hammer_undo, rb_node, |
| 597 | hammer_und_rb_compare, hammer_off_t); |
| 598 | RB_PROTOTYPE2(hammer_res_rb_tree, hammer_reserve, rb_node, |
| 599 | hammer_res_rb_compare, hammer_off_t); |
| 600 | |
| 601 | /* |
| 602 | * IO management - embedded at the head of various in-memory structures |
| 603 | * |
| 604 | * VOLUME - hammer_volume containing meta-data |
| 605 | * META_BUFFER - hammer_buffer containing meta-data |
| 606 | * DATA_BUFFER - hammer_buffer containing pure-data |
| 607 | * |
| 608 | * Dirty volume headers and dirty meta-data buffers are locked until the |
| 609 | * flusher can sequence them out. Dirty pure-data buffers can be written. |
| 610 | * Clean buffers can be passively released. |
| 611 | */ |
| 612 | typedef enum hammer_io_type { |
| 613 | HAMMER_STRUCTURE_VOLUME, |
| 614 | HAMMER_STRUCTURE_META_BUFFER, |
| 615 | HAMMER_STRUCTURE_UNDO_BUFFER, |
| 616 | HAMMER_STRUCTURE_DATA_BUFFER, |
| 617 | HAMMER_STRUCTURE_DUMMY |
| 618 | } hammer_io_type_t; |
| 619 | |
| 620 | union hammer_io_structure; |
| 621 | struct hammer_io; |
| 622 | |
| 623 | struct worklist { |
| 624 | LIST_ENTRY(worklist) node; |
| 625 | }; |
| 626 | |
| 627 | TAILQ_HEAD(hammer_io_list, hammer_io); |
| 628 | typedef struct hammer_io_list *hammer_io_list_t; |
| 629 | |
| 630 | struct hammer_io { |
| 631 | struct worklist worklist; |
| 632 | struct hammer_lock lock; |
| 633 | enum hammer_io_type type; |
| 634 | struct hammer_mount *hmp; |
| 635 | struct hammer_volume *volume; |
| 636 | TAILQ_ENTRY(hammer_io) mod_entry; /* list entry if modified */ |
| 637 | TAILQ_ENTRY(hammer_io) iorun_entry; /* iorun_list */ |
| 638 | hammer_io_list_t mod_list; |
| 639 | struct buf *bp; |
| 640 | int64_t offset; /* zone-2 offset */ |
| 641 | int bytes; /* buffer cache buffer size */ |
| 642 | int modify_refs; |
| 643 | |
| 644 | /* |
| 645 | * These can be modified at any time by the backend while holding |
| 646 | * io_token, due to bio_done and hammer_io_complete() callbacks. |
| 647 | */ |
| 648 | u_int running : 1; /* bp write IO in progress */ |
| 649 | u_int waiting : 1; /* someone is waiting on us */ |
| 650 | u_int ioerror : 1; /* abort on io-error */ |
| 651 | u_int unusedA : 29; |
| 652 | |
| 653 | /* |
| 654 | * These can only be modified by the frontend while holding |
| 655 | * fs_token, or by the backend while holding the io interlocked |
| 656 | * with no references (which will block the frontend when it |
| 657 | * tries to reference it). |
| 658 | * |
| 659 | * WARNING! SMP RACES will create havoc if the callbacks ever tried |
| 660 | * to modify any of these outside the above restrictions. |
| 661 | */ |
| 662 | u_int modified : 1; /* bp's data was modified */ |
| 663 | u_int released : 1; /* bp released (w/ B_LOCKED set) */ |
| 664 | u_int validated : 1; /* ondisk has been validated */ |
| 665 | u_int waitdep : 1; /* flush waits for dependancies */ |
| 666 | u_int recovered : 1; /* has recovery ref */ |
| 667 | u_int waitmod : 1; /* waiting for modify_refs */ |
| 668 | u_int reclaim : 1; /* reclaim requested */ |
| 669 | u_int gencrc : 1; /* crc needs to be generated */ |
| 670 | u_int unusedB : 24; |
| 671 | }; |
| 672 | |
| 673 | typedef struct hammer_io *hammer_io_t; |
| 674 | |
| 675 | #define HAMMER_CLUSTER_SIZE (64 * 1024) |
| 676 | #if HAMMER_CLUSTER_SIZE > MAXBSIZE |
| 677 | #undef HAMMER_CLUSTER_SIZE |
| 678 | #define HAMMER_CLUSTER_SIZE MAXBSIZE |
| 679 | #endif |
| 680 | #define HAMMER_CLUSTER_BUFS (HAMMER_CLUSTER_SIZE / HAMMER_BUFSIZE) |
| 681 | |
| 682 | /* |
| 683 | * In-memory volume representing on-disk buffer |
| 684 | */ |
| 685 | struct hammer_volume { |
| 686 | struct hammer_io io; |
| 687 | RB_ENTRY(hammer_volume) rb_node; |
| 688 | struct hammer_volume_ondisk *ondisk; |
| 689 | int32_t vol_no; |
| 690 | int64_t nblocks; /* note: special calculation for statfs */ |
| 691 | int64_t buffer_base; /* base offset of buffer 0 */ |
| 692 | hammer_off_t maxbuf_off; /* Maximum buffer offset (zone-2) */ |
| 693 | hammer_off_t maxraw_off; /* Maximum raw offset for device */ |
| 694 | char *vol_name; |
| 695 | struct vnode *devvp; |
| 696 | int vol_flags; |
| 697 | }; |
| 698 | |
| 699 | typedef struct hammer_volume *hammer_volume_t; |
| 700 | |
| 701 | /* |
| 702 | * In-memory buffer (other then volume, super-cluster, or cluster), |
| 703 | * representing an on-disk buffer. |
| 704 | */ |
| 705 | struct hammer_buffer { |
| 706 | struct hammer_io io; |
| 707 | RB_ENTRY(hammer_buffer) rb_node; |
| 708 | void *ondisk; |
| 709 | hammer_off_t zoneX_offset; |
| 710 | hammer_off_t zone2_offset; |
| 711 | struct hammer_reserve *resv; |
| 712 | struct hammer_node_list clist; |
| 713 | }; |
| 714 | |
| 715 | typedef struct hammer_buffer *hammer_buffer_t; |
| 716 | |
| 717 | /* |
| 718 | * In-memory B-Tree node, representing an on-disk B-Tree node. |
| 719 | * |
| 720 | * This is a hang-on structure which is backed by a hammer_buffer, |
| 721 | * indexed by a hammer_cluster, and used for fine-grained locking of |
| 722 | * B-Tree nodes in order to properly control lock ordering. A hammer_buffer |
| 723 | * can contain multiple nodes representing wildly disassociated portions |
| 724 | * of the B-Tree so locking cannot be done on a buffer-by-buffer basis. |
| 725 | * |
| 726 | * This structure uses a cluster-relative index to reduce the number |
| 727 | * of layers required to access it, and also because all on-disk B-Tree |
| 728 | * references are cluster-relative offsets. |
| 729 | */ |
| 730 | struct hammer_node { |
| 731 | struct hammer_lock lock; /* node-by-node lock */ |
| 732 | TAILQ_ENTRY(hammer_node) entry; /* per-buffer linkage */ |
| 733 | RB_ENTRY(hammer_node) rb_node; /* per-cluster linkage */ |
| 734 | hammer_off_t node_offset; /* full offset spec */ |
| 735 | struct hammer_mount *hmp; |
| 736 | struct hammer_buffer *buffer; /* backing buffer */ |
| 737 | hammer_node_ondisk_t ondisk; /* ptr to on-disk structure */ |
| 738 | TAILQ_HEAD(, hammer_cursor) cursor_list; /* deadlock recovery */ |
| 739 | struct hammer_node_cache_list cache_list; /* passive caches */ |
| 740 | int flags; |
| 741 | }; |
| 742 | |
| 743 | #define HAMMER_NODE_DELETED 0x0001 |
| 744 | #define HAMMER_NODE_FLUSH 0x0002 |
| 745 | #define HAMMER_NODE_CRCGOOD 0x0004 |
| 746 | #define HAMMER_NODE_NEEDSCRC 0x0008 |
| 747 | #define HAMMER_NODE_NEEDSMIRROR 0x0010 |
| 748 | #define HAMMER_NODE_CRCBAD 0x0020 |
| 749 | #define HAMMER_NODE_NONLINEAR 0x0040 /* linear heuristic */ |
| 750 | |
| 751 | #define HAMMER_NODE_CRCANY (HAMMER_NODE_CRCGOOD | HAMMER_NODE_CRCBAD) |
| 752 | |
| 753 | typedef struct hammer_node *hammer_node_t; |
| 754 | |
| 755 | /* |
| 756 | * List of locked nodes. This structure is used to lock potentially large |
| 757 | * numbers of nodes as an aid for complex B-Tree operations. |
| 758 | */ |
| 759 | struct hammer_node_lock; |
| 760 | TAILQ_HEAD(hammer_node_lock_list, hammer_node_lock); |
| 761 | |
| 762 | struct hammer_node_lock { |
| 763 | TAILQ_ENTRY(hammer_node_lock) entry; |
| 764 | struct hammer_node_lock_list list; |
| 765 | struct hammer_node_lock *parent; |
| 766 | hammer_node_t node; |
| 767 | hammer_node_ondisk_t copy; /* copy of on-disk data */ |
| 768 | int index; /* index of this node in parent */ |
| 769 | int count; /* count children */ |
| 770 | int flags; |
| 771 | }; |
| 772 | |
| 773 | typedef struct hammer_node_lock *hammer_node_lock_t; |
| 774 | |
| 775 | #define HAMMER_NODE_LOCK_UPDATED 0x0001 |
| 776 | #define HAMMER_NODE_LOCK_LCACHE 0x0002 |
| 777 | |
| 778 | /* |
| 779 | * Common I/O management structure - embedded in in-memory structures |
| 780 | * which are backed by filesystem buffers. |
| 781 | */ |
| 782 | union hammer_io_structure { |
| 783 | struct hammer_io io; |
| 784 | struct hammer_volume volume; |
| 785 | struct hammer_buffer buffer; |
| 786 | }; |
| 787 | |
| 788 | typedef union hammer_io_structure *hammer_io_structure_t; |
| 789 | |
| 790 | /* |
| 791 | * The reserve structure prevents the blockmap from allocating |
| 792 | * out of a reserved bigblock. Such reservations are used by |
| 793 | * the direct-write mechanism. |
| 794 | * |
| 795 | * The structure is also used to hold off on reallocations of |
| 796 | * big blocks from the freemap until flush dependancies have |
| 797 | * been dealt with. |
| 798 | */ |
| 799 | struct hammer_reserve { |
| 800 | RB_ENTRY(hammer_reserve) rb_node; |
| 801 | TAILQ_ENTRY(hammer_reserve) delay_entry; |
| 802 | int flush_group; |
| 803 | int flags; |
| 804 | int refs; |
| 805 | int zone; |
| 806 | int append_off; |
| 807 | int32_t bytes_free; |
| 808 | hammer_off_t zone_offset; |
| 809 | }; |
| 810 | |
| 811 | typedef struct hammer_reserve *hammer_reserve_t; |
| 812 | |
| 813 | #define HAMMER_RESF_ONDELAY 0x0001 |
| 814 | #define HAMMER_RESF_LAYER2FREE 0x0002 |
| 815 | |
| 816 | #include "hammer_cursor.h" |
| 817 | |
| 818 | /* |
| 819 | * The undo structure tracks recent undos to avoid laying down duplicate |
| 820 | * undos within a flush group, saving us a significant amount of overhead. |
| 821 | * |
| 822 | * This is strictly a heuristic. |
| 823 | */ |
| 824 | #define HAMMER_MAX_UNDOS 1024 |
| 825 | #define HAMMER_MAX_FLUSHERS 4 |
| 826 | |
| 827 | struct hammer_undo { |
| 828 | RB_ENTRY(hammer_undo) rb_node; |
| 829 | TAILQ_ENTRY(hammer_undo) lru_entry; |
| 830 | hammer_off_t offset; |
| 831 | int bytes; |
| 832 | }; |
| 833 | |
| 834 | typedef struct hammer_undo *hammer_undo_t; |
| 835 | |
| 836 | struct hammer_flusher_info; |
| 837 | TAILQ_HEAD(hammer_flusher_info_list, hammer_flusher_info); |
| 838 | |
| 839 | struct hammer_flusher { |
| 840 | int signal; /* flusher thread sequencer */ |
| 841 | int act; /* currently active flush group */ |
| 842 | int done; /* set to act when complete */ |
| 843 | int next; /* next flush group */ |
| 844 | int group_lock; /* lock sequencing of the next flush */ |
| 845 | int exiting; /* request master exit */ |
| 846 | thread_t td; /* master flusher thread */ |
| 847 | hammer_tid_t tid; /* last flushed transaction id */ |
| 848 | int finalize_want; /* serialize finalization */ |
| 849 | struct hammer_lock finalize_lock; /* serialize finalization */ |
| 850 | struct hammer_transaction trans; /* shared transaction */ |
| 851 | struct hammer_flusher_info_list run_list; |
| 852 | struct hammer_flusher_info_list ready_list; |
| 853 | }; |
| 854 | |
| 855 | #define HAMMER_FLUSH_UNDOS_RELAXED 0 |
| 856 | #define HAMMER_FLUSH_UNDOS_FORCED 1 |
| 857 | #define HAMMER_FLUSH_UNDOS_AUTO 2 |
| 858 | /* |
| 859 | * Internal hammer mount data structure |
| 860 | */ |
| 861 | struct hammer_mount { |
| 862 | struct mount *mp; |
| 863 | /*struct vnode *rootvp;*/ |
| 864 | struct hammer_ino_rb_tree rb_inos_root; |
| 865 | struct hammer_redo_rb_tree rb_redo_root; |
| 866 | struct hammer_vol_rb_tree rb_vols_root; |
| 867 | struct hammer_nod_rb_tree rb_nods_root; |
| 868 | struct hammer_und_rb_tree rb_undo_root; |
| 869 | struct hammer_res_rb_tree rb_resv_root; |
| 870 | struct hammer_buf_rb_tree rb_bufs_root; |
| 871 | struct hammer_pfs_rb_tree rb_pfsm_root; |
| 872 | |
| 873 | struct hammer_dedup_crc_rb_tree rb_dedup_crc_root; |
| 874 | struct hammer_dedup_off_rb_tree rb_dedup_off_root; |
| 875 | |
| 876 | struct hammer_volume *rootvol; |
| 877 | struct hammer_base_elm root_btree_beg; |
| 878 | struct hammer_base_elm root_btree_end; |
| 879 | |
| 880 | struct malloc_type *m_misc; |
| 881 | struct malloc_type *m_inodes; |
| 882 | |
| 883 | int flags; /* HAMMER_MOUNT_xxx flags */ |
| 884 | int hflags; |
| 885 | int ronly; |
| 886 | int nvolumes; |
| 887 | int volume_iterator; |
| 888 | int master_id; /* -1 or 0-15 - clustering and mirroring */ |
| 889 | int version; /* hammer filesystem version to use */ |
| 890 | int rsv_inodes; /* reserved space due to dirty inodes */ |
| 891 | int64_t rsv_databytes; /* reserved space due to record data */ |
| 892 | int rsv_recs; /* reserved space due to dirty records */ |
| 893 | int rsv_fromdelay; /* bigblocks reserved due to flush delay */ |
| 894 | int undo_rec_limit; /* based on size of undo area */ |
| 895 | int last_newrecords; |
| 896 | int count_newrecords; |
| 897 | |
| 898 | int volume_to_remove; /* volume that is currently being removed */ |
| 899 | |
| 900 | int inode_reclaims; /* inodes pending reclaim by flusher */ |
| 901 | int count_inodes; /* total number of inodes */ |
| 902 | int count_iqueued; /* inodes queued to flusher */ |
| 903 | |
| 904 | struct hammer_flusher flusher; |
| 905 | |
| 906 | u_int check_interrupt; |
| 907 | u_int check_yield; |
| 908 | uuid_t fsid; |
| 909 | struct hammer_io_list volu_list; /* dirty undo buffers */ |
| 910 | struct hammer_io_list undo_list; /* dirty undo buffers */ |
| 911 | struct hammer_io_list data_list; /* dirty data buffers */ |
| 912 | struct hammer_io_list alt_data_list; /* dirty data buffers */ |
| 913 | struct hammer_io_list meta_list; /* dirty meta bufs */ |
| 914 | struct hammer_io_list lose_list; /* loose buffers */ |
| 915 | int locked_dirty_space; /* meta/volu count */ |
| 916 | int io_running_space; /* io_token */ |
| 917 | int io_running_wakeup; /* io_token */ |
| 918 | int objid_cache_count; |
| 919 | int dedup_cache_count; |
| 920 | int error; /* critical I/O error */ |
| 921 | struct krate krate; /* rate limited kprintf */ |
| 922 | hammer_tid_t asof; /* snapshot mount */ |
| 923 | hammer_tid_t next_tid; |
| 924 | hammer_tid_t flush_tid1; /* flusher tid sequencing */ |
| 925 | hammer_tid_t flush_tid2; /* flusher tid sequencing */ |
| 926 | int64_t copy_stat_freebigblocks; /* number of free bigblocks */ |
| 927 | u_int32_t undo_seqno; /* UNDO/REDO FIFO seqno */ |
| 928 | u_int32_t recover_stage2_seqno; /* REDO recovery seqno */ |
| 929 | hammer_off_t recover_stage2_offset; /* REDO recovery offset */ |
| 930 | |
| 931 | struct netexport export; |
| 932 | struct hammer_lock sync_lock; |
| 933 | struct hammer_lock free_lock; |
| 934 | struct hammer_lock undo_lock; |
| 935 | struct hammer_lock blkmap_lock; |
| 936 | struct hammer_lock snapshot_lock; |
| 937 | struct hammer_lock volume_lock; |
| 938 | struct hammer_blockmap blockmap[HAMMER_MAX_ZONES]; |
| 939 | struct hammer_undo undos[HAMMER_MAX_UNDOS]; |
| 940 | int undo_alloc; |
| 941 | TAILQ_HEAD(, hammer_undo) undo_lru_list; |
| 942 | TAILQ_HEAD(, hammer_reserve) delay_list; |
| 943 | struct hammer_flush_group_list flush_group_list; |
| 944 | hammer_flush_group_t next_flush_group; |
| 945 | TAILQ_HEAD(, hammer_objid_cache) objid_cache_list; |
| 946 | TAILQ_HEAD(, hammer_dedup_cache) dedup_lru_list; |
| 947 | hammer_dedup_cache_t dedup_free_cache; |
| 948 | TAILQ_HEAD(, hammer_reclaim) reclaim_list; |
| 949 | TAILQ_HEAD(, hammer_io) iorun_list; |
| 950 | |
| 951 | struct lwkt_token fs_token; /* high level */ |
| 952 | struct lwkt_token io_token; /* low level (IO callback) */ |
| 953 | |
| 954 | struct hammer_inostats inostats[HAMMER_INOSTATS_HSIZE]; |
| 955 | }; |
| 956 | |
| 957 | typedef struct hammer_mount *hammer_mount_t; |
| 958 | |
| 959 | #define HAMMER_MOUNT_CRITICAL_ERROR 0x0001 |
| 960 | #define HAMMER_MOUNT_FLUSH_RECOVERY 0x0002 |
| 961 | #define HAMMER_MOUNT_REDO_SYNC 0x0004 |
| 962 | #define HAMMER_MOUNT_REDO_RECOVERY_REQ 0x0008 |
| 963 | #define HAMMER_MOUNT_REDO_RECOVERY_RUN 0x0010 |
| 964 | |
| 965 | struct hammer_sync_info { |
| 966 | int error; |
| 967 | int waitfor; |
| 968 | }; |
| 969 | |
| 970 | /* |
| 971 | * Minium buffer cache bufs required to rebalance the B-Tree. |
| 972 | * This is because we must hold the children and the children's children |
| 973 | * locked. Even this might not be enough if things are horribly out |
| 974 | * of balance. |
| 975 | */ |
| 976 | #define HAMMER_REBALANCE_MIN_BUFS \ |
| 977 | (HAMMER_BTREE_LEAF_ELMS * HAMMER_BTREE_LEAF_ELMS) |
| 978 | |
| 979 | |
| 980 | #endif |
| 981 | |
| 982 | /* |
| 983 | * checkspace slop (8MB chunks), higher numbers are more conservative. |
| 984 | */ |
| 985 | #define HAMMER_CHKSPC_REBLOCK 25 |
| 986 | #define HAMMER_CHKSPC_MIRROR 20 |
| 987 | #define HAMMER_CHKSPC_WRITE 20 |
| 988 | #define HAMMER_CHKSPC_CREATE 20 |
| 989 | #define HAMMER_CHKSPC_REMOVE 10 |
| 990 | #define HAMMER_CHKSPC_EMERGENCY 0 |
| 991 | |
| 992 | #if defined(_KERNEL) |
| 993 | |
| 994 | extern struct vop_ops hammer_vnode_vops; |
| 995 | extern struct vop_ops hammer_spec_vops; |
| 996 | extern struct vop_ops hammer_fifo_vops; |
| 997 | extern struct bio_ops hammer_bioops; |
| 998 | |
| 999 | extern int hammer_debug_io; |
| 1000 | extern int hammer_debug_general; |
| 1001 | extern int hammer_debug_debug; |
| 1002 | extern int hammer_debug_inode; |
| 1003 | extern int hammer_debug_locks; |
| 1004 | extern int hammer_debug_btree; |
| 1005 | extern int hammer_debug_tid; |
| 1006 | extern int hammer_debug_recover; |
| 1007 | extern int hammer_debug_recover_faults; |
| 1008 | extern int hammer_debug_critical; |
| 1009 | extern int hammer_cluster_enable; |
| 1010 | extern int hammer_live_dedup; |
| 1011 | extern int hammer_count_fsyncs; |
| 1012 | extern int hammer_count_inodes; |
| 1013 | extern int hammer_count_iqueued; |
| 1014 | extern int hammer_count_reclaiming; |
| 1015 | extern int hammer_count_records; |
| 1016 | extern int hammer_count_record_datas; |
| 1017 | extern int hammer_count_volumes; |
| 1018 | extern int hammer_count_buffers; |
| 1019 | extern int hammer_count_nodes; |
| 1020 | extern int64_t hammer_count_extra_space_used; |
| 1021 | extern int64_t hammer_stats_btree_lookups; |
| 1022 | extern int64_t hammer_stats_btree_searches; |
| 1023 | extern int64_t hammer_stats_btree_inserts; |
| 1024 | extern int64_t hammer_stats_btree_deletes; |
| 1025 | extern int64_t hammer_stats_btree_elements; |
| 1026 | extern int64_t hammer_stats_btree_splits; |
| 1027 | extern int64_t hammer_stats_btree_iterations; |
| 1028 | extern int64_t hammer_stats_btree_root_iterations; |
| 1029 | extern int64_t hammer_stats_record_iterations; |
| 1030 | extern int64_t hammer_stats_file_read; |
| 1031 | extern int64_t hammer_stats_file_write; |
| 1032 | extern int64_t hammer_stats_file_iopsr; |
| 1033 | extern int64_t hammer_stats_file_iopsw; |
| 1034 | extern int64_t hammer_stats_disk_read; |
| 1035 | extern int64_t hammer_stats_disk_write; |
| 1036 | extern int64_t hammer_stats_inode_flushes; |
| 1037 | extern int64_t hammer_stats_commits; |
| 1038 | extern int64_t hammer_stats_undo; |
| 1039 | extern int64_t hammer_stats_redo; |
| 1040 | extern int hammer_count_dirtybufspace; |
| 1041 | extern int hammer_count_refedbufs; |
| 1042 | extern int hammer_count_reservations; |
| 1043 | extern int hammer_count_io_running_read; |
| 1044 | extern int hammer_count_io_running_write; |
| 1045 | extern int hammer_count_io_locked; |
| 1046 | extern int hammer_limit_dirtybufspace; |
| 1047 | extern int hammer_limit_running_io; |
| 1048 | extern int hammer_limit_recs; |
| 1049 | extern int hammer_limit_inode_recs; |
| 1050 | extern int hammer_limit_reclaim; |
| 1051 | extern int hammer_limit_redo; |
| 1052 | extern int hammer_bio_count; |
| 1053 | extern int hammer_verify_zone; |
| 1054 | extern int hammer_verify_data; |
| 1055 | extern int hammer_write_mode; |
| 1056 | extern int hammer_yield_check; |
| 1057 | extern int hammer_fsync_mode; |
| 1058 | extern int hammer_autoflush; |
| 1059 | extern int64_t hammer_contention_count; |
| 1060 | |
| 1061 | extern int64_t hammer_live_dedup_vnode_bcmps; |
| 1062 | extern int64_t hammer_live_dedup_device_bcmps; |
| 1063 | extern int64_t hammer_live_dedup_findblk_failures; |
| 1064 | extern int64_t hammer_live_dedup_bmap_saves; |
| 1065 | |
| 1066 | void hammer_critical_error(hammer_mount_t hmp, hammer_inode_t ip, |
| 1067 | int error, const char *msg); |
| 1068 | int hammer_vop_inactive(struct vop_inactive_args *); |
| 1069 | int hammer_vop_reclaim(struct vop_reclaim_args *); |
| 1070 | int hammer_get_vnode(struct hammer_inode *ip, struct vnode **vpp); |
| 1071 | struct hammer_inode *hammer_get_inode(hammer_transaction_t trans, |
| 1072 | hammer_inode_t dip, int64_t obj_id, |
| 1073 | hammer_tid_t asof, u_int32_t localization, |
| 1074 | int flags, int *errorp); |
| 1075 | struct hammer_inode *hammer_get_dummy_inode(hammer_transaction_t trans, |
| 1076 | hammer_inode_t dip, int64_t obj_id, |
| 1077 | hammer_tid_t asof, u_int32_t localization, |
| 1078 | int flags, int *errorp); |
| 1079 | struct hammer_inode *hammer_find_inode(hammer_transaction_t trans, |
| 1080 | int64_t obj_id, hammer_tid_t asof, |
| 1081 | u_int32_t localization); |
| 1082 | void hammer_scan_inode_snapshots(hammer_mount_t hmp, |
| 1083 | hammer_inode_info_t iinfo, |
| 1084 | int (*callback)(hammer_inode_t ip, void *data), |
| 1085 | void *data); |
| 1086 | void hammer_put_inode(struct hammer_inode *ip); |
| 1087 | void hammer_put_inode_ref(struct hammer_inode *ip); |
| 1088 | void hammer_inode_waitreclaims(hammer_transaction_t trans); |
| 1089 | |
| 1090 | int hammer_unload_volume(hammer_volume_t volume, void *data __unused); |
| 1091 | int hammer_adjust_volume_mode(hammer_volume_t volume, void *data __unused); |
| 1092 | |
| 1093 | int hammer_unload_buffer(hammer_buffer_t buffer, void *data); |
| 1094 | int hammer_install_volume(hammer_mount_t hmp, const char *volname, |
| 1095 | struct vnode *devvp); |
| 1096 | int hammer_mountcheck_volumes(hammer_mount_t hmp); |
| 1097 | |
| 1098 | int hammer_mem_add(hammer_record_t record); |
| 1099 | int hammer_ip_lookup(hammer_cursor_t cursor); |
| 1100 | int hammer_ip_first(hammer_cursor_t cursor); |
| 1101 | int hammer_ip_next(hammer_cursor_t cursor); |
| 1102 | int hammer_ip_resolve_data(hammer_cursor_t cursor); |
| 1103 | int hammer_ip_delete_record(hammer_cursor_t cursor, hammer_inode_t ip, |
| 1104 | hammer_tid_t tid); |
| 1105 | int hammer_create_at_cursor(hammer_cursor_t cursor, |
| 1106 | hammer_btree_leaf_elm_t leaf, void *udata, int mode); |
| 1107 | int hammer_delete_at_cursor(hammer_cursor_t cursor, int delete_flags, |
| 1108 | hammer_tid_t delete_tid, u_int32_t delete_ts, |
| 1109 | int track, int64_t *stat_bytes); |
| 1110 | int hammer_ip_check_directory_empty(hammer_transaction_t trans, |
| 1111 | hammer_inode_t ip); |
| 1112 | int hammer_sync_hmp(hammer_mount_t hmp, int waitfor); |
| 1113 | int hammer_queue_inodes_flusher(hammer_mount_t hmp, int waitfor); |
| 1114 | |
| 1115 | hammer_record_t |
| 1116 | hammer_alloc_mem_record(hammer_inode_t ip, int data_len); |
| 1117 | void hammer_flush_record_done(hammer_record_t record, int error); |
| 1118 | void hammer_wait_mem_record_ident(hammer_record_t record, const char *ident); |
| 1119 | void hammer_rel_mem_record(hammer_record_t record); |
| 1120 | |
| 1121 | int hammer_cursor_up(hammer_cursor_t cursor); |
| 1122 | int hammer_cursor_up_locked(hammer_cursor_t cursor); |
| 1123 | int hammer_cursor_down(hammer_cursor_t cursor); |
| 1124 | int hammer_cursor_upgrade(hammer_cursor_t cursor); |
| 1125 | int hammer_cursor_upgrade_node(hammer_cursor_t cursor); |
| 1126 | void hammer_cursor_downgrade(hammer_cursor_t cursor); |
| 1127 | int hammer_cursor_upgrade2(hammer_cursor_t c1, hammer_cursor_t c2); |
| 1128 | void hammer_cursor_downgrade2(hammer_cursor_t c1, hammer_cursor_t c2); |
| 1129 | int hammer_cursor_seek(hammer_cursor_t cursor, hammer_node_t node, |
| 1130 | int index); |
| 1131 | void hammer_lock_ex_ident(struct hammer_lock *lock, const char *ident); |
| 1132 | int hammer_lock_ex_try(struct hammer_lock *lock); |
| 1133 | void hammer_lock_sh(struct hammer_lock *lock); |
| 1134 | int hammer_lock_sh_try(struct hammer_lock *lock); |
| 1135 | int hammer_lock_upgrade(struct hammer_lock *lock, int shcount); |
| 1136 | void hammer_lock_downgrade(struct hammer_lock *lock, int shcount); |
| 1137 | int hammer_lock_status(struct hammer_lock *lock); |
| 1138 | void hammer_unlock(struct hammer_lock *lock); |
| 1139 | void hammer_ref(struct hammer_lock *lock); |
| 1140 | int hammer_ref_interlock(struct hammer_lock *lock); |
| 1141 | int hammer_ref_interlock_true(struct hammer_lock *lock); |
| 1142 | void hammer_ref_interlock_done(struct hammer_lock *lock); |
| 1143 | void hammer_rel(struct hammer_lock *lock); |
| 1144 | int hammer_rel_interlock(struct hammer_lock *lock, int locked); |
| 1145 | void hammer_rel_interlock_done(struct hammer_lock *lock, int orig_locked); |
| 1146 | int hammer_get_interlock(struct hammer_lock *lock); |
| 1147 | int hammer_try_interlock_norefs(struct hammer_lock *lock); |
| 1148 | void hammer_put_interlock(struct hammer_lock *lock, int error); |
| 1149 | |
| 1150 | void hammer_sync_lock_ex(hammer_transaction_t trans); |
| 1151 | void hammer_sync_lock_sh(hammer_transaction_t trans); |
| 1152 | int hammer_sync_lock_sh_try(hammer_transaction_t trans); |
| 1153 | void hammer_sync_unlock(hammer_transaction_t trans); |
| 1154 | |
| 1155 | u_int32_t hammer_to_unix_xid(uuid_t *uuid); |
| 1156 | void hammer_guid_to_uuid(uuid_t *uuid, u_int32_t guid); |
| 1157 | void hammer_time_to_timespec(u_int64_t xtime, struct timespec *ts); |
| 1158 | u_int64_t hammer_timespec_to_time(struct timespec *ts); |
| 1159 | int hammer_str_to_tid(const char *str, int *ispfsp, |
| 1160 | hammer_tid_t *tidp, u_int32_t *localizationp); |
| 1161 | int hammer_is_atatext(const char *name, int len); |
| 1162 | hammer_tid_t hammer_alloc_objid(hammer_mount_t hmp, hammer_inode_t dip, |
| 1163 | int64_t namekey); |
| 1164 | void hammer_clear_objid(hammer_inode_t dip); |
| 1165 | void hammer_destroy_objid_cache(hammer_mount_t hmp); |
| 1166 | |
| 1167 | int hammer_dedup_crc_rb_compare(hammer_dedup_cache_t dc1, |
| 1168 | hammer_dedup_cache_t dc2); |
| 1169 | int hammer_dedup_off_rb_compare(hammer_dedup_cache_t dc1, |
| 1170 | hammer_dedup_cache_t dc2); |
| 1171 | hammer_dedup_cache_t hammer_dedup_cache_add(hammer_inode_t ip, |
| 1172 | hammer_btree_leaf_elm_t leaf); |
| 1173 | hammer_dedup_cache_t hammer_dedup_cache_lookup(hammer_mount_t hmp, |
| 1174 | hammer_crc_t crc); |
| 1175 | void hammer_dedup_cache_inval(hammer_mount_t hmp, hammer_off_t base_offset); |
| 1176 | void hammer_destroy_dedup_cache(hammer_mount_t hmp); |
| 1177 | void hammer_dump_dedup_cache(hammer_mount_t hmp); |
| 1178 | int hammer_dedup_validate(hammer_dedup_cache_t dcp, int zone, int bytes, |
| 1179 | void *data); |
| 1180 | |
| 1181 | int hammer_enter_undo_history(hammer_mount_t hmp, hammer_off_t offset, |
| 1182 | int bytes); |
| 1183 | void hammer_clear_undo_history(hammer_mount_t hmp); |
| 1184 | enum vtype hammer_get_vnode_type(u_int8_t obj_type); |
| 1185 | int hammer_get_dtype(u_int8_t obj_type); |
| 1186 | u_int8_t hammer_get_obj_type(enum vtype vtype); |
| 1187 | int64_t hammer_directory_namekey(hammer_inode_t dip, const void *name, int len, |
| 1188 | u_int32_t *max_iterationsp); |
| 1189 | int hammer_nohistory(hammer_inode_t ip); |
| 1190 | |
| 1191 | int hammer_init_cursor(hammer_transaction_t trans, hammer_cursor_t cursor, |
| 1192 | hammer_node_cache_t cache, hammer_inode_t ip); |
| 1193 | void hammer_normalize_cursor(hammer_cursor_t cursor); |
| 1194 | void hammer_done_cursor(hammer_cursor_t cursor); |
| 1195 | int hammer_recover_cursor(hammer_cursor_t cursor); |
| 1196 | void hammer_unlock_cursor(hammer_cursor_t cursor); |
| 1197 | int hammer_lock_cursor(hammer_cursor_t cursor); |
| 1198 | hammer_cursor_t hammer_push_cursor(hammer_cursor_t ocursor); |
| 1199 | void hammer_pop_cursor(hammer_cursor_t ocursor, hammer_cursor_t ncursor); |
| 1200 | |
| 1201 | void hammer_cursor_replaced_node(hammer_node_t onode, hammer_node_t nnode); |
| 1202 | void hammer_cursor_removed_node(hammer_node_t onode, hammer_node_t parent, |
| 1203 | int index); |
| 1204 | void hammer_cursor_split_node(hammer_node_t onode, hammer_node_t nnode, |
| 1205 | int index); |
| 1206 | void hammer_cursor_moved_element(hammer_node_t oparent, int pindex, |
| 1207 | hammer_node_t onode, int oindex, |
| 1208 | hammer_node_t nnode, int nindex); |
| 1209 | void hammer_cursor_parent_changed(hammer_node_t node, hammer_node_t oparent, |
| 1210 | hammer_node_t nparent, int nindex); |
| 1211 | void hammer_cursor_inserted_element(hammer_node_t node, int index); |
| 1212 | void hammer_cursor_deleted_element(hammer_node_t node, int index); |
| 1213 | void hammer_cursor_invalidate_cache(hammer_cursor_t cursor); |
| 1214 | |
| 1215 | int hammer_btree_lookup(hammer_cursor_t cursor); |
| 1216 | int hammer_btree_first(hammer_cursor_t cursor); |
| 1217 | int hammer_btree_last(hammer_cursor_t cursor); |
| 1218 | int hammer_btree_extract(hammer_cursor_t cursor, int flags); |
| 1219 | int hammer_btree_iterate(hammer_cursor_t cursor); |
| 1220 | int hammer_btree_iterate_reverse(hammer_cursor_t cursor); |
| 1221 | int hammer_btree_insert(hammer_cursor_t cursor, |
| 1222 | hammer_btree_leaf_elm_t elm, int *doprop); |
| 1223 | int hammer_btree_delete(hammer_cursor_t cursor); |
| 1224 | void hammer_btree_do_propagation(hammer_cursor_t cursor, |
| 1225 | hammer_pseudofs_inmem_t pfsm, |
| 1226 | hammer_btree_leaf_elm_t leaf); |
| 1227 | int hammer_btree_cmp(hammer_base_elm_t key1, hammer_base_elm_t key2); |
| 1228 | int hammer_btree_chkts(hammer_tid_t ts, hammer_base_elm_t key); |
| 1229 | int hammer_btree_correct_rhb(hammer_cursor_t cursor, hammer_tid_t tid); |
| 1230 | int hammer_btree_correct_lhb(hammer_cursor_t cursor, hammer_tid_t tid); |
| 1231 | |
| 1232 | int btree_set_parent(hammer_transaction_t trans, hammer_node_t node, |
| 1233 | hammer_btree_elm_t elm); |
| 1234 | void hammer_node_lock_init(hammer_node_lock_t parent, hammer_node_t node); |
| 1235 | void hammer_btree_lcache_init(hammer_mount_t hmp, hammer_node_lock_t lcache, |
| 1236 | int depth); |
| 1237 | void hammer_btree_lcache_free(hammer_mount_t hmp, hammer_node_lock_t lcache); |
| 1238 | int hammer_btree_lock_children(hammer_cursor_t cursor, int depth, |
| 1239 | hammer_node_lock_t parent, |
| 1240 | hammer_node_lock_t lcache); |
| 1241 | void hammer_btree_lock_copy(hammer_cursor_t cursor, |
| 1242 | hammer_node_lock_t parent); |
| 1243 | int hammer_btree_sync_copy(hammer_cursor_t cursor, |
| 1244 | hammer_node_lock_t parent); |
| 1245 | void hammer_btree_unlock_children(hammer_mount_t hmp, |
| 1246 | hammer_node_lock_t parent, |
| 1247 | hammer_node_lock_t lcache); |
| 1248 | int hammer_btree_search_node(hammer_base_elm_t elm, hammer_node_ondisk_t node); |
| 1249 | hammer_node_t hammer_btree_get_parent(hammer_transaction_t trans, |
| 1250 | hammer_node_t node, int *parent_indexp, |
| 1251 | int *errorp, int try_exclusive); |
| 1252 | |
| 1253 | void hammer_print_btree_node(hammer_node_ondisk_t ondisk); |
| 1254 | void hammer_print_btree_elm(hammer_btree_elm_t elm, u_int8_t type, int i); |
| 1255 | |
| 1256 | void *hammer_bread(struct hammer_mount *hmp, hammer_off_t off, |
| 1257 | int *errorp, struct hammer_buffer **bufferp); |
| 1258 | void *hammer_bnew(struct hammer_mount *hmp, hammer_off_t off, |
| 1259 | int *errorp, struct hammer_buffer **bufferp); |
| 1260 | void *hammer_bread_ext(struct hammer_mount *hmp, hammer_off_t off, int bytes, |
| 1261 | int *errorp, struct hammer_buffer **bufferp); |
| 1262 | void *hammer_bnew_ext(struct hammer_mount *hmp, hammer_off_t off, int bytes, |
| 1263 | int *errorp, struct hammer_buffer **bufferp); |
| 1264 | |
| 1265 | hammer_volume_t hammer_get_root_volume(hammer_mount_t hmp, int *errorp); |
| 1266 | |
| 1267 | hammer_volume_t hammer_get_volume(hammer_mount_t hmp, |
| 1268 | int32_t vol_no, int *errorp); |
| 1269 | hammer_buffer_t hammer_get_buffer(hammer_mount_t hmp, hammer_off_t buf_offset, |
| 1270 | int bytes, int isnew, int *errorp); |
| 1271 | void hammer_sync_buffers(hammer_mount_t hmp, |
| 1272 | hammer_off_t base_offset, int bytes); |
| 1273 | int hammer_del_buffers(hammer_mount_t hmp, |
| 1274 | hammer_off_t base_offset, |
| 1275 | hammer_off_t zone2_offset, int bytes, |
| 1276 | int report_conflicts); |
| 1277 | |
| 1278 | int hammer_ref_volume(hammer_volume_t volume); |
| 1279 | int hammer_ref_buffer(hammer_buffer_t buffer); |
| 1280 | void hammer_flush_buffer_nodes(hammer_buffer_t buffer); |
| 1281 | |
| 1282 | void hammer_rel_volume(hammer_volume_t volume, int locked); |
| 1283 | void hammer_rel_buffer(hammer_buffer_t buffer, int locked); |
| 1284 | |
| 1285 | int hammer_vfs_export(struct mount *mp, int op, |
| 1286 | const struct export_args *export); |
| 1287 | hammer_node_t hammer_get_node(hammer_transaction_t trans, |
| 1288 | hammer_off_t node_offset, int isnew, int *errorp); |
| 1289 | void hammer_ref_node(hammer_node_t node); |
| 1290 | hammer_node_t hammer_ref_node_safe(hammer_transaction_t trans, |
| 1291 | hammer_node_cache_t cache, int *errorp); |
| 1292 | void hammer_rel_node(hammer_node_t node); |
| 1293 | void hammer_delete_node(hammer_transaction_t trans, |
| 1294 | hammer_node_t node); |
| 1295 | void hammer_cache_node(hammer_node_cache_t cache, |
| 1296 | hammer_node_t node); |
| 1297 | void hammer_uncache_node(hammer_node_cache_t cache); |
| 1298 | void hammer_flush_node(hammer_node_t node, int locked); |
| 1299 | |
| 1300 | void hammer_dup_buffer(struct hammer_buffer **bufferp, |
| 1301 | struct hammer_buffer *buffer); |
| 1302 | hammer_node_t hammer_alloc_btree(hammer_transaction_t trans, |
| 1303 | hammer_off_t hint, int *errorp); |
| 1304 | void *hammer_alloc_data(hammer_transaction_t trans, int32_t data_len, |
| 1305 | u_int16_t rec_type, hammer_off_t *data_offsetp, |
| 1306 | struct hammer_buffer **data_bufferp, |
| 1307 | hammer_off_t hint, int *errorp); |
| 1308 | |
| 1309 | int hammer_generate_undo(hammer_transaction_t trans, |
| 1310 | hammer_off_t zone1_offset, void *base, int len); |
| 1311 | int hammer_generate_redo(hammer_transaction_t trans, hammer_inode_t ip, |
| 1312 | hammer_off_t file_offset, u_int32_t flags, |
| 1313 | void *base, int len); |
| 1314 | void hammer_generate_redo_sync(hammer_transaction_t trans); |
| 1315 | void hammer_redo_fifo_start_flush(hammer_inode_t ip); |
| 1316 | void hammer_redo_fifo_end_flush(hammer_inode_t ip); |
| 1317 | |
| 1318 | void hammer_format_undo(void *base, u_int32_t seqno); |
| 1319 | int hammer_upgrade_undo_4(hammer_transaction_t trans); |
| 1320 | |
| 1321 | void hammer_put_volume(struct hammer_volume *volume, int flush); |
| 1322 | void hammer_put_buffer(struct hammer_buffer *buffer, int flush); |
| 1323 | |
| 1324 | hammer_off_t hammer_freemap_alloc(hammer_transaction_t trans, |
| 1325 | hammer_off_t owner, int *errorp); |
| 1326 | void hammer_freemap_free(hammer_transaction_t trans, hammer_off_t phys_offset, |
| 1327 | hammer_off_t owner, int *errorp); |
| 1328 | int _hammer_checkspace(hammer_mount_t hmp, int slop, int64_t *resp); |
| 1329 | hammer_off_t hammer_blockmap_alloc(hammer_transaction_t trans, int zone, |
| 1330 | int bytes, hammer_off_t hint, int *errorp); |
| 1331 | hammer_reserve_t hammer_blockmap_reserve(hammer_mount_t hmp, int zone, |
| 1332 | int bytes, hammer_off_t *zone_offp, int *errorp); |
| 1333 | hammer_reserve_t hammer_blockmap_reserve_dedup(hammer_mount_t hmp, int zone, |
| 1334 | int bytes, hammer_off_t zone_offset, int *errorp); |
| 1335 | void hammer_blockmap_reserve_complete(hammer_mount_t hmp, |
| 1336 | hammer_reserve_t resv); |
| 1337 | void hammer_reserve_clrdelay(hammer_mount_t hmp, hammer_reserve_t resv); |
| 1338 | void hammer_blockmap_free(hammer_transaction_t trans, |
| 1339 | hammer_off_t bmap_off, int bytes); |
| 1340 | int hammer_blockmap_dedup(hammer_transaction_t trans, |
| 1341 | hammer_off_t bmap_off, int bytes); |
| 1342 | int hammer_blockmap_finalize(hammer_transaction_t trans, |
| 1343 | hammer_reserve_t resv, |
| 1344 | hammer_off_t bmap_off, int bytes); |
| 1345 | int hammer_blockmap_getfree(hammer_mount_t hmp, hammer_off_t bmap_off, |
| 1346 | int *curp, int *errorp); |
| 1347 | hammer_off_t hammer_blockmap_lookup(hammer_mount_t hmp, hammer_off_t bmap_off, |
| 1348 | int *errorp); |
| 1349 | hammer_off_t hammer_undo_lookup(hammer_mount_t hmp, hammer_off_t bmap_off, |
| 1350 | int *errorp); |
| 1351 | int64_t hammer_undo_used(hammer_transaction_t trans); |
| 1352 | int64_t hammer_undo_space(hammer_transaction_t trans); |
| 1353 | int64_t hammer_undo_max(hammer_mount_t hmp); |
| 1354 | int hammer_undo_reclaim(hammer_io_t io); |
| 1355 | |
| 1356 | void hammer_start_transaction(struct hammer_transaction *trans, |
| 1357 | struct hammer_mount *hmp); |
| 1358 | void hammer_simple_transaction(struct hammer_transaction *trans, |
| 1359 | struct hammer_mount *hmp); |
| 1360 | void hammer_start_transaction_fls(struct hammer_transaction *trans, |
| 1361 | struct hammer_mount *hmp); |
| 1362 | void hammer_done_transaction(struct hammer_transaction *trans); |
| 1363 | hammer_tid_t hammer_alloc_tid(hammer_mount_t hmp, int count); |
| 1364 | |
| 1365 | void hammer_modify_inode(hammer_transaction_t trans, hammer_inode_t ip, int flags); |
| 1366 | void hammer_flush_inode(hammer_inode_t ip, int flags); |
| 1367 | void hammer_flush_inode_done(hammer_inode_t ip, int error); |
| 1368 | void hammer_wait_inode(hammer_inode_t ip); |
| 1369 | |
| 1370 | int hammer_create_inode(struct hammer_transaction *trans, struct vattr *vap, |
| 1371 | struct ucred *cred, struct hammer_inode *dip, |
| 1372 | const char *name, int namelen, |
| 1373 | hammer_pseudofs_inmem_t pfsm, |
| 1374 | struct hammer_inode **ipp); |
| 1375 | void hammer_rel_inode(hammer_inode_t ip, int flush); |
| 1376 | int hammer_reload_inode(hammer_inode_t ip, void *arg __unused); |
| 1377 | int hammer_ino_rb_compare(hammer_inode_t ip1, hammer_inode_t ip2); |
| 1378 | int hammer_redo_rb_compare(hammer_inode_t ip1, hammer_inode_t ip2); |
| 1379 | int hammer_destroy_inode_callback(hammer_inode_t ip, void *data __unused); |
| 1380 | |
| 1381 | int hammer_sync_inode(hammer_transaction_t trans, hammer_inode_t ip); |
| 1382 | void hammer_test_inode(hammer_inode_t dip); |
| 1383 | void hammer_inode_unloadable_check(hammer_inode_t ip, int getvp); |
| 1384 | |
| 1385 | int hammer_ip_add_directory(struct hammer_transaction *trans, |
| 1386 | hammer_inode_t dip, const char *name, int bytes, |
| 1387 | hammer_inode_t nip); |
| 1388 | int hammer_ip_del_directory(struct hammer_transaction *trans, |
| 1389 | hammer_cursor_t cursor, hammer_inode_t dip, |
| 1390 | hammer_inode_t ip); |
| 1391 | void hammer_ip_replace_bulk(hammer_mount_t hmp, hammer_record_t record); |
| 1392 | hammer_record_t hammer_ip_add_bulk(hammer_inode_t ip, off_t file_offset, |
| 1393 | void *data, int bytes, int *errorp); |
| 1394 | int hammer_ip_frontend_trunc(struct hammer_inode *ip, off_t file_size); |
| 1395 | int hammer_ip_add_record(struct hammer_transaction *trans, |
| 1396 | hammer_record_t record); |
| 1397 | int hammer_ip_delete_range(hammer_cursor_t cursor, hammer_inode_t ip, |
| 1398 | int64_t ran_beg, int64_t ran_end, int truncating); |
| 1399 | int hammer_ip_delete_clean(hammer_cursor_t cursor, hammer_inode_t ip, |
| 1400 | int *countp); |
| 1401 | int hammer_ip_sync_data(hammer_cursor_t cursor, hammer_inode_t ip, |
| 1402 | int64_t offset, void *data, int bytes); |
| 1403 | int hammer_ip_sync_record(hammer_transaction_t trans, hammer_record_t rec); |
| 1404 | int hammer_ip_sync_record_cursor(hammer_cursor_t cursor, hammer_record_t rec); |
| 1405 | hammer_pseudofs_inmem_t hammer_load_pseudofs(hammer_transaction_t trans, |
| 1406 | u_int32_t localization, int *errorp); |
| 1407 | int hammer_mkroot_pseudofs(hammer_transaction_t trans, struct ucred *cred, |
| 1408 | hammer_pseudofs_inmem_t pfsm); |
| 1409 | int hammer_save_pseudofs(hammer_transaction_t trans, |
| 1410 | hammer_pseudofs_inmem_t pfsm); |
| 1411 | int hammer_unload_pseudofs(hammer_transaction_t trans, u_int32_t localization); |
| 1412 | void hammer_rel_pseudofs(hammer_mount_t hmp, hammer_pseudofs_inmem_t pfsm); |
| 1413 | int hammer_ioctl(hammer_inode_t ip, u_long com, caddr_t data, int fflag, |
| 1414 | struct ucred *cred); |
| 1415 | |
| 1416 | void hammer_io_init(hammer_io_t io, hammer_volume_t volume, |
| 1417 | enum hammer_io_type type); |
| 1418 | int hammer_io_read(struct vnode *devvp, struct hammer_io *io, int limit); |
| 1419 | void hammer_io_advance(struct hammer_io *io); |
| 1420 | int hammer_io_new(struct vnode *devvp, struct hammer_io *io); |
| 1421 | int hammer_io_inval(hammer_volume_t volume, hammer_off_t zone2_offset); |
| 1422 | struct buf *hammer_io_release(struct hammer_io *io, int flush); |
| 1423 | void hammer_io_flush(struct hammer_io *io, int reclaim); |
| 1424 | void hammer_io_wait(struct hammer_io *io); |
| 1425 | void hammer_io_waitdep(struct hammer_io *io); |
| 1426 | void hammer_io_wait_all(hammer_mount_t hmp, const char *ident, int doflush); |
| 1427 | int hammer_io_direct_read(hammer_mount_t hmp, struct bio *bio, |
| 1428 | hammer_btree_leaf_elm_t leaf); |
| 1429 | int hammer_io_direct_write(hammer_mount_t hmp, struct bio *bio, |
| 1430 | hammer_record_t record); |
| 1431 | void hammer_io_direct_wait(hammer_record_t record); |
| 1432 | void hammer_io_direct_uncache(hammer_mount_t hmp, hammer_btree_leaf_elm_t leaf); |
| 1433 | void hammer_io_write_interlock(hammer_io_t io); |
| 1434 | void hammer_io_done_interlock(hammer_io_t io); |
| 1435 | void hammer_io_clear_modify(struct hammer_io *io, int inval); |
| 1436 | void hammer_io_clear_modlist(struct hammer_io *io); |
| 1437 | void hammer_io_flush_sync(hammer_mount_t hmp); |
| 1438 | void hammer_io_clear_error(struct hammer_io *io); |
| 1439 | void hammer_io_clear_error_noassert(struct hammer_io *io); |
| 1440 | void hammer_io_notmeta(hammer_buffer_t buffer); |
| 1441 | void hammer_io_limit_backlog(hammer_mount_t hmp); |
| 1442 | |
| 1443 | void hammer_modify_volume(hammer_transaction_t trans, hammer_volume_t volume, |
| 1444 | void *base, int len); |
| 1445 | void hammer_modify_buffer(hammer_transaction_t trans, hammer_buffer_t buffer, |
| 1446 | void *base, int len); |
| 1447 | void hammer_modify_volume_done(hammer_volume_t volume); |
| 1448 | void hammer_modify_buffer_done(hammer_buffer_t buffer); |
| 1449 | |
| 1450 | int hammer_ioc_reblock(hammer_transaction_t trans, hammer_inode_t ip, |
| 1451 | struct hammer_ioc_reblock *reblock); |
| 1452 | int hammer_ioc_rebalance(hammer_transaction_t trans, hammer_inode_t ip, |
| 1453 | struct hammer_ioc_rebalance *rebal); |
| 1454 | int hammer_ioc_prune(hammer_transaction_t trans, hammer_inode_t ip, |
| 1455 | struct hammer_ioc_prune *prune); |
| 1456 | int hammer_ioc_mirror_read(hammer_transaction_t trans, hammer_inode_t ip, |
| 1457 | struct hammer_ioc_mirror_rw *mirror); |
| 1458 | int hammer_ioc_mirror_write(hammer_transaction_t trans, hammer_inode_t ip, |
| 1459 | struct hammer_ioc_mirror_rw *mirror); |
| 1460 | int hammer_ioc_set_pseudofs(hammer_transaction_t trans, hammer_inode_t ip, |
| 1461 | struct ucred *cred, struct hammer_ioc_pseudofs_rw *pfs); |
| 1462 | int hammer_ioc_get_pseudofs(hammer_transaction_t trans, hammer_inode_t ip, |
| 1463 | struct hammer_ioc_pseudofs_rw *pfs); |
| 1464 | int hammer_ioc_destroy_pseudofs(hammer_transaction_t trans, hammer_inode_t ip, |
| 1465 | struct hammer_ioc_pseudofs_rw *pfs); |
| 1466 | int hammer_ioc_downgrade_pseudofs(hammer_transaction_t trans, hammer_inode_t ip, |
| 1467 | struct hammer_ioc_pseudofs_rw *pfs); |
| 1468 | int hammer_ioc_upgrade_pseudofs(hammer_transaction_t trans, hammer_inode_t ip, |
| 1469 | struct hammer_ioc_pseudofs_rw *pfs); |
| 1470 | int hammer_ioc_wait_pseudofs(hammer_transaction_t trans, hammer_inode_t ip, |
| 1471 | struct hammer_ioc_pseudofs_rw *pfs); |
| 1472 | int hammer_ioc_volume_add(hammer_transaction_t trans, hammer_inode_t ip, |
| 1473 | struct hammer_ioc_volume *ioc); |
| 1474 | int hammer_ioc_volume_del(hammer_transaction_t trans, hammer_inode_t ip, |
| 1475 | struct hammer_ioc_volume *ioc); |
| 1476 | int hammer_ioc_volume_list(hammer_transaction_t trans, hammer_inode_t ip, |
| 1477 | struct hammer_ioc_volume_list *ioc); |
| 1478 | int hammer_ioc_dedup(hammer_transaction_t trans, hammer_inode_t ip, |
| 1479 | struct hammer_ioc_dedup *dedup); |
| 1480 | |
| 1481 | int hammer_signal_check(hammer_mount_t hmp); |
| 1482 | |
| 1483 | void hammer_flusher_create(hammer_mount_t hmp); |
| 1484 | void hammer_flusher_destroy(hammer_mount_t hmp); |
| 1485 | void hammer_flusher_sync(hammer_mount_t hmp); |
| 1486 | int hammer_flusher_async(hammer_mount_t hmp, hammer_flush_group_t flg); |
| 1487 | int hammer_flusher_async_one(hammer_mount_t hmp); |
| 1488 | void hammer_flusher_wait(hammer_mount_t hmp, int seq); |
| 1489 | void hammer_flusher_wait_next(hammer_mount_t hmp); |
| 1490 | int hammer_flusher_meta_limit(hammer_mount_t hmp); |
| 1491 | int hammer_flusher_meta_halflimit(hammer_mount_t hmp); |
| 1492 | int hammer_flusher_undo_exhausted(hammer_transaction_t trans, int quarter); |
| 1493 | void hammer_flusher_clean_loose_ios(hammer_mount_t hmp); |
| 1494 | void hammer_flusher_finalize(hammer_transaction_t trans, int final); |
| 1495 | int hammer_flusher_haswork(hammer_mount_t hmp); |
| 1496 | void hammer_flusher_flush_undos(hammer_mount_t hmp, int already_flushed); |
| 1497 | |
| 1498 | int hammer_recover_stage1(hammer_mount_t hmp, hammer_volume_t rootvol); |
| 1499 | int hammer_recover_stage2(hammer_mount_t hmp, hammer_volume_t rootvol); |
| 1500 | void hammer_recover_flush_buffers(hammer_mount_t hmp, |
| 1501 | hammer_volume_t root_volume, int final); |
| 1502 | |
| 1503 | void hammer_crc_set_blockmap(hammer_blockmap_t blockmap); |
| 1504 | void hammer_crc_set_volume(hammer_volume_ondisk_t ondisk); |
| 1505 | void hammer_crc_set_leaf(void *data, hammer_btree_leaf_elm_t leaf); |
| 1506 | |
| 1507 | int hammer_crc_test_blockmap(hammer_blockmap_t blockmap); |
| 1508 | int hammer_crc_test_volume(hammer_volume_ondisk_t ondisk); |
| 1509 | int hammer_crc_test_btree(hammer_node_ondisk_t ondisk); |
| 1510 | int hammer_crc_test_leaf(void *data, hammer_btree_leaf_elm_t leaf); |
| 1511 | void hkprintf(const char *ctl, ...) __printflike(1, 2); |
| 1512 | udev_t hammer_fsid_to_udev(uuid_t *uuid); |
| 1513 | |
| 1514 | |
| 1515 | int hammer_blocksize(int64_t file_offset); |
| 1516 | int hammer_blockoff(int64_t file_offset); |
| 1517 | int64_t hammer_blockdemarc(int64_t file_offset1, int64_t file_offset2); |
| 1518 | |
| 1519 | /* |
| 1520 | * Shortcut for _hammer_checkspace(), used all over the code. |
| 1521 | */ |
| 1522 | static __inline int |
| 1523 | hammer_checkspace(hammer_mount_t hmp, int slop) |
| 1524 | { |
| 1525 | return(_hammer_checkspace(hmp, slop, NULL)); |
| 1526 | } |
| 1527 | |
| 1528 | #endif |
| 1529 | |
| 1530 | static __inline void |
| 1531 | hammer_wait_mem_record(hammer_record_t record) |
| 1532 | { |
| 1533 | hammer_wait_mem_record_ident(record, "hmmwai"); |
| 1534 | } |
| 1535 | |
| 1536 | static __inline void |
| 1537 | hammer_lock_ex(struct hammer_lock *lock) |
| 1538 | { |
| 1539 | hammer_lock_ex_ident(lock, "hmrlck"); |
| 1540 | } |
| 1541 | |
| 1542 | /* |
| 1543 | * Indicate that a B-Tree node is being modified. |
| 1544 | */ |
| 1545 | static __inline void |
| 1546 | hammer_modify_node_noundo(hammer_transaction_t trans, hammer_node_t node) |
| 1547 | { |
| 1548 | KKASSERT((node->flags & HAMMER_NODE_CRCBAD) == 0); |
| 1549 | hammer_modify_buffer(trans, node->buffer, NULL, 0); |
| 1550 | } |
| 1551 | |
| 1552 | static __inline void |
| 1553 | hammer_modify_node_all(hammer_transaction_t trans, struct hammer_node *node) |
| 1554 | { |
| 1555 | KKASSERT((node->flags & HAMMER_NODE_CRCBAD) == 0); |
| 1556 | hammer_modify_buffer(trans, node->buffer, |
| 1557 | node->ondisk, sizeof(*node->ondisk)); |
| 1558 | } |
| 1559 | |
| 1560 | static __inline void |
| 1561 | hammer_modify_node(hammer_transaction_t trans, hammer_node_t node, |
| 1562 | void *base, int len) |
| 1563 | { |
| 1564 | hammer_crc_t *crcptr; |
| 1565 | |
| 1566 | KKASSERT((char *)base >= (char *)node->ondisk && |
| 1567 | (char *)base + len <= |
| 1568 | (char *)node->ondisk + sizeof(*node->ondisk)); |
| 1569 | KKASSERT((node->flags & HAMMER_NODE_CRCBAD) == 0); |
| 1570 | hammer_modify_buffer(trans, node->buffer, base, len); |
| 1571 | crcptr = &node->ondisk->crc; |
| 1572 | hammer_modify_buffer(trans, node->buffer, crcptr, sizeof(hammer_crc_t)); |
| 1573 | --node->buffer->io.modify_refs; /* only want one ref */ |
| 1574 | } |
| 1575 | |
| 1576 | /* |
| 1577 | * Indicate that the specified modifications have been completed. |
| 1578 | * |
| 1579 | * Do not try to generate the crc here, it's very expensive to do and a |
| 1580 | * sequence of insertions or deletions can result in many calls to this |
| 1581 | * function on the same node. |
| 1582 | */ |
| 1583 | static __inline void |
| 1584 | hammer_modify_node_done(hammer_node_t node) |
| 1585 | { |
| 1586 | node->flags |= HAMMER_NODE_CRCGOOD; |
| 1587 | if ((node->flags & HAMMER_NODE_NEEDSCRC) == 0) { |
| 1588 | node->flags |= HAMMER_NODE_NEEDSCRC; |
| 1589 | node->buffer->io.gencrc = 1; |
| 1590 | hammer_ref_node(node); |
| 1591 | } |
| 1592 | hammer_modify_buffer_done(node->buffer); |
| 1593 | } |
| 1594 | |
| 1595 | #define hammer_modify_volume_field(trans, vol, field) \ |
| 1596 | hammer_modify_volume(trans, vol, &(vol)->ondisk->field, \ |
| 1597 | sizeof((vol)->ondisk->field)) |
| 1598 | |
| 1599 | #define hammer_modify_node_field(trans, node, field) \ |
| 1600 | hammer_modify_node(trans, node, &(node)->ondisk->field, \ |
| 1601 | sizeof((node)->ondisk->field)) |
| 1602 | |
| 1603 | /* |
| 1604 | * The HAMMER_INODE_CAP_DIR_LOCAL_INO capability is set on newly |
| 1605 | * created directories for HAMMER version 2 or greater and causes |
| 1606 | * directory entries to be placed the inode localization zone in |
| 1607 | * the B-Tree instead of the misc zone. |
| 1608 | * |
| 1609 | * This greatly improves localization between directory entries and |
| 1610 | * inodes |
| 1611 | */ |
| 1612 | static __inline u_int32_t |
| 1613 | hammer_dir_localization(hammer_inode_t dip) |
| 1614 | { |
| 1615 | if (dip->ino_data.cap_flags & HAMMER_INODE_CAP_DIR_LOCAL_INO) |
| 1616 | return(HAMMER_LOCALIZE_INODE); |
| 1617 | else |
| 1618 | return(HAMMER_LOCALIZE_MISC); |
| 1619 | } |