| 1 | /* |
| 2 | * Copyright (c) 2007-2008 The DragonFly Project. All rights reserved. |
| 3 | * |
| 4 | * This code is derived from software contributed to The DragonFly Project |
| 5 | * by Matthew Dillon <dillon@backplane.com> |
| 6 | * |
| 7 | * Redistribution and use in source and binary forms, with or without |
| 8 | * modification, are permitted provided that the following conditions |
| 9 | * are met: |
| 10 | * |
| 11 | * 1. Redistributions of source code must retain the above copyright |
| 12 | * notice, this list of conditions and the following disclaimer. |
| 13 | * 2. Redistributions in binary form must reproduce the above copyright |
| 14 | * notice, this list of conditions and the following disclaimer in |
| 15 | * the documentation and/or other materials provided with the |
| 16 | * distribution. |
| 17 | * 3. Neither the name of The DragonFly Project nor the names of its |
| 18 | * contributors may be used to endorse or promote products derived |
| 19 | * from this software without specific, prior written permission. |
| 20 | * |
| 21 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
| 22 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
| 23 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS |
| 24 | * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE |
| 25 | * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, |
| 26 | * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, |
| 27 | * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; |
| 28 | * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED |
| 29 | * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, |
| 30 | * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT |
| 31 | * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
| 32 | * SUCH DAMAGE. |
| 33 | * |
| 34 | * $DragonFly: src/sys/vfs/hammer/hammer.h,v 1.87 2008/06/20 05:38:26 dillon Exp $ |
| 35 | */ |
| 36 | /* |
| 37 | * This header file contains structures used internally by the HAMMERFS |
| 38 | * implementation. See hammer_disk.h for on-disk structures. |
| 39 | */ |
| 40 | |
| 41 | #include <sys/param.h> |
| 42 | #include <sys/types.h> |
| 43 | #include <sys/kernel.h> |
| 44 | #include <sys/conf.h> |
| 45 | #include <sys/systm.h> |
| 46 | #include <sys/tree.h> |
| 47 | #include <sys/malloc.h> |
| 48 | #include <sys/mount.h> |
| 49 | #include <sys/mountctl.h> |
| 50 | #include <sys/vnode.h> |
| 51 | #include <sys/proc.h> |
| 52 | #include <sys/stat.h> |
| 53 | #include <sys/globaldata.h> |
| 54 | #include <sys/lockf.h> |
| 55 | #include <sys/buf.h> |
| 56 | #include <sys/queue.h> |
| 57 | #include <sys/ktr.h> |
| 59 | |
| 60 | #include <sys/buf2.h> |
| 61 | #include <sys/signal2.h> |
| 62 | #include "hammer_disk.h" |
| 63 | #include "hammer_mount.h" |
| 64 | #include "hammer_ioctl.h" |
| 65 | |
| 66 | #if defined(_KERNEL) || defined(_KERNEL_STRUCTURES) |
| 67 | |
| 68 | MALLOC_DECLARE(M_HAMMER); |
| 69 | |
| 70 | /* |
| 71 | * Kernel trace |
| 72 | */ |
| 73 | #if !defined(KTR_HAMMER) |
| 74 | #define KTR_HAMMER KTR_ALL |
| 75 | #endif |
| 76 | KTR_INFO_MASTER_EXTERN(hammer); |
| 77 | |
| 78 | /* |
| 79 | * Misc structures |
| 80 | */ |
| 81 | struct hammer_mount; |
| 82 | |
| 83 | /* |
| 84 | * Key structure used for custom RB tree inode lookups. This prototypes |
| 85 | * the function hammer_ino_rb_tree_RB_LOOKUP_INFO(root, info). |
| 86 | */ |
| 87 | typedef struct hammer_inode_info { |
| 88 | int64_t obj_id; /* (key) object identifier */ |
| 89 | hammer_tid_t obj_asof; /* (key) snapshot transid or 0 */ |
| 90 | } *hammer_inode_info_t; |
| 91 | |
| 92 | typedef enum hammer_transaction_type { |
| 93 | HAMMER_TRANS_RO, |
| 94 | HAMMER_TRANS_STD, |
| 95 | HAMMER_TRANS_FLS |
| 96 | } hammer_transaction_type_t; |
| 97 | |
| 98 | /* |
| 99 | * HAMMER Transaction tracking |
| 100 | */ |
| 101 | struct hammer_transaction { |
| 102 | hammer_transaction_type_t type; |
| 103 | struct hammer_mount *hmp; |
| 104 | hammer_tid_t tid; |
| 105 | hammer_tid_t time; |
| 106 | int sync_lock_refs; |
| 107 | struct hammer_volume *rootvol; |
| 108 | }; |
| 109 | |
| 110 | typedef struct hammer_transaction *hammer_transaction_t; |
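/*
 * Example (illustrative sketch only, not code from the HAMMER sources):
 * a front-end operation typically brackets its work with a transaction.
 * hammer_example_op() and do_work() are hypothetical names; the
 * hammer_start_transaction() and hammer_done_transaction() routines are
 * declared later in this header and select HAMMER_TRANS_STD.
 */
#if 0	/* example only, not compiled */
static int
hammer_example_op(struct hammer_mount *hmp)
{
	struct hammer_transaction trans;
	int error;

	hammer_start_transaction(&trans, hmp);	/* HAMMER_TRANS_STD */
	error = do_work(&trans);		/* hypothetical work */
	hammer_done_transaction(&trans);
	return (error);
}
#endif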
| 111 | |
| 112 | /* |
| 113 | * HAMMER locks |
| 114 | */ |
| 115 | struct hammer_lock { |
| 116 | int refs; /* active references delay writes */ |
| 117 | int lockcount; /* lock count for exclusive/shared access */ |
| 118 | int wanted; |
| 119 | int exwanted; /* number of threads waiting for ex lock */ |
| 120 | struct thread *locktd; |
| 121 | }; |
| 122 | |
| 123 | static __inline int |
| 124 | hammer_islocked(struct hammer_lock *lock) |
| 125 | { |
| 126 | return(lock->lockcount != 0); |
| 127 | } |
| 128 | |
| 129 | static __inline int |
| 130 | hammer_isactive(struct hammer_lock *lock) |
| 131 | { |
| 132 | return(lock->refs != 0); |
| 133 | } |
| 134 | |
| 135 | static __inline int |
| 136 | hammer_islastref(struct hammer_lock *lock) |
| 137 | { |
| 138 | return(lock->refs == 1); |
| 139 | } |
| 140 | |
| 141 | /* |
| 142 | * Return non-zero if the calling thread specifically owns the lock exclusively. |
| 143 | */ |
| 144 | static __inline int |
| 145 | hammer_lock_excl_owned(struct hammer_lock *lock, thread_t td) |
| 146 | { |
| 147 | if (lock->lockcount > 0 && lock->locktd == td) |
| 148 | return(1); |
| 149 | return(0); |
| 150 | } |
| 151 | |
| 152 | /* |
| 153 | * Flush state, used by various structures |
| 154 | */ |
| 155 | typedef enum hammer_inode_state { |
| 156 | HAMMER_FST_IDLE, |
| 157 | HAMMER_FST_SETUP, |
| 158 | HAMMER_FST_FLUSH |
| 159 | } hammer_inode_state_t; |
| 160 | |
| 161 | TAILQ_HEAD(hammer_record_list, hammer_record); |
| 162 | |
| 163 | /* |
| 164 | * Cache object ids. A fixed number of objid cache structures are |
| 165 | * created to reserve object id's for newly created files in multiples |
| 166 | * of 100,000, localized to a particular directory, and recycled as |
| 167 | * needed. This allows parallel create operations in different |
| 168 | * directories to retain fairly localized object ids which in turn |
| 169 | * improves reblocking performance and layout. |
| 170 | */ |
| 171 | #define OBJID_CACHE_SIZE 1024 |
| 172 | #define OBJID_CACHE_BULK 100000 |
| 173 | |
| 174 | typedef struct hammer_objid_cache { |
| 175 | TAILQ_ENTRY(hammer_objid_cache) entry; |
| 176 | struct hammer_inode *dip; |
| 177 | hammer_tid_t next_tid; |
| 178 | int count; |
| 179 | } *hammer_objid_cache_t; |
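/*
 * Example (conceptual sketch only, not HAMMER's actual
 * hammer_alloc_objid() implementation): once a directory owns an objid
 * cache entry holding a reserved block of OBJID_CACHE_BULK ids, handing
 * one out just consumes the next id and retires the entry when the
 * block is exhausted.  example_objid_from_cache() and
 * example_recycle_cache() are hypothetical names.
 */
#if 0	/* example only, not compiled */
static int64_t
example_objid_from_cache(hammer_objid_cache_t ocp)
{
	int64_t obj_id;

	obj_id = ocp->next_tid++;		/* consume next reserved id */
	if (--ocp->count == 0)
		example_recycle_cache(ocp);	/* hypothetical helper */
	return (obj_id);
}
#endif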
| 180 | |
| 181 | /* |
| 182 | * Associate an inode with a B-Tree node to cache search start positions |
| 183 | */ |
| 184 | typedef struct hammer_node_cache { |
| 185 | TAILQ_ENTRY(hammer_node_cache) entry; |
| 186 | struct hammer_node *node; |
| 187 | struct hammer_inode *ip; |
| 188 | } *hammer_node_cache_t; |
| 189 | |
| 190 | TAILQ_HEAD(hammer_node_cache_list, hammer_node_cache); |
| 191 | |
| 192 | /* |
| 193 | * Structure used to represent an inode in-memory. |
| 194 | * |
| 195 | * The record and data associated with an inode may be out of sync with |
| 196 | * the disk (xDIRTY flags), or not even on the disk at all (ONDISK flag |
| 197 | * clear). |
| 198 | * |
| 199 | * An inode may also hold a cache of unsynchronized records, used for |
| 200 | * databases and directories only. Unsynchronized regular file data is |
| 201 | * stored in the buffer cache. |
| 202 | * |
| 203 | * NOTE: A file which is created and destroyed within the initial |
| 204 | * synchronization period can wind up not doing any disk I/O at all. |
| 205 | * |
| 206 | * Finally, an inode may cache numerous disk-referencing B-Tree cursors. |
| 207 | */ |
| 208 | struct hammer_ino_rb_tree; |
| 209 | struct hammer_inode; |
| 210 | RB_HEAD(hammer_ino_rb_tree, hammer_inode); |
| 211 | RB_PROTOTYPEX(hammer_ino_rb_tree, INFO, hammer_inode, rb_node, |
| 212 | hammer_ino_rb_compare, hammer_inode_info_t); |
| 213 | |
| 214 | struct hammer_rec_rb_tree; |
| 215 | struct hammer_record; |
| 216 | RB_HEAD(hammer_rec_rb_tree, hammer_record); |
| 217 | RB_PROTOTYPEX(hammer_rec_rb_tree, INFO, hammer_record, rb_node, |
| 218 | hammer_rec_rb_compare, hammer_btree_leaf_elm_t); |
| 219 | |
| 220 | TAILQ_HEAD(hammer_node_list, hammer_node); |
| 221 | |
| 222 | struct hammer_inode { |
| 223 | RB_ENTRY(hammer_inode) rb_node; |
| 224 | hammer_inode_state_t flush_state; |
| 225 | int flush_group; |
| 226 | TAILQ_ENTRY(hammer_inode) flush_entry; |
| 227 | struct hammer_record_list target_list; /* target of dependent recs */ |
| 228 | u_int64_t obj_id; /* (key) object identifier */ |
| 229 | hammer_tid_t obj_asof; /* (key) snapshot or 0 */ |
| 230 | struct hammer_mount *hmp; |
| 231 | hammer_objid_cache_t objid_cache; |
| 232 | int flags; |
| 233 | int error; /* flush error */ |
| 234 | int cursor_ip_refs; /* sanity */ |
| 235 | int rsv_databufs; |
| 236 | int rsv_recs; |
| 237 | struct vnode *vp; |
| 238 | struct lockf advlock; |
| 239 | struct hammer_lock lock; /* sync copy interlock */ |
| 240 | off_t trunc_off; |
| 241 | struct hammer_btree_leaf_elm ino_leaf; /* in-memory cache */ |
| 242 | struct hammer_inode_data ino_data; /* in-memory cache */ |
| 243 | struct hammer_rec_rb_tree rec_tree; /* in-memory cache */ |
| 244 | struct hammer_node_cache cache[2]; /* search initiate cache */ |
| 245 | |
| 246 | /* |
| 247 | * When a demark is created to synchronize an inode to |
| 248 | * disk, certain fields are copied so the front-end VOPs |
| 249 | * can continue to run in parallel with the synchronization |
| 250 | * occurring in the background. |
| 251 | */ |
| 252 | int sync_flags; /* to-sync flags cache */ |
| 253 | off_t sync_trunc_off; /* to-sync truncation */ |
| 254 | struct hammer_btree_leaf_elm sync_ino_leaf; /* to-sync cache */ |
| 255 | struct hammer_inode_data sync_ino_data; /* to-sync cache */ |
| 256 | }; |
| 257 | |
| 258 | typedef struct hammer_inode *hammer_inode_t; |
| 259 | |
| 260 | #define VTOI(vp) ((struct hammer_inode *)(vp)->v_data) |
| 261 | |
| 262 | #define HAMMER_INODE_DDIRTY 0x0001 /* in-memory ino_data is dirty */ |
| 263 | #define HAMMER_INODE_RSV_INODES 0x0002 /* hmp->rsv_inodes bumped */ |
| 264 | #define HAMMER_INODE_ITIMES 0x0004 /* in-memory mtime/atime modified */ |
| 265 | #define HAMMER_INODE_XDIRTY 0x0008 /* in-memory records */ |
| 266 | #define HAMMER_INODE_ONDISK 0x0010 /* inode is on-disk (else not yet) */ |
| 267 | #define HAMMER_INODE_FLUSH 0x0020 /* flush on last ref */ |
| 268 | #define HAMMER_INODE_DELETED 0x0080 /* inode delete (backend) */ |
| 269 | #define HAMMER_INODE_DELONDISK 0x0100 /* delete synchronized to disk */ |
| 270 | #define HAMMER_INODE_RO 0x0200 /* read-only (because of as-of) */ |
| 271 | #define HAMMER_INODE_VHELD 0x0400 /* vnode held on sync */ |
| 272 | #define HAMMER_INODE_DONDISK 0x0800 /* data records may be on disk */ |
| 273 | #define HAMMER_INODE_BUFS 0x1000 /* dirty high level bps present */ |
| 274 | #define HAMMER_INODE_REFLUSH 0x2000 /* pipelined flush during flush */ |
| 275 | #define HAMMER_INODE_RECLAIM 0x4000 /* trying to reclaim */ |
| 276 | #define HAMMER_INODE_FLUSHW 0x8000 /* Someone waiting for flush */ |
| 277 | |
| 278 | #define HAMMER_INODE_TRUNCATED 0x00010000 |
| 279 | #define HAMMER_INODE_DELETING 0x00020000 /* inode delete request (frontend)*/ |
| 280 | #define HAMMER_INODE_RESIGNAL 0x00040000 /* re-signal on re-flush */ |
| 281 | |
| 282 | #define HAMMER_INODE_MODMASK (HAMMER_INODE_DDIRTY| \ |
| 283 | HAMMER_INODE_XDIRTY|HAMMER_INODE_BUFS| \ |
| 284 | HAMMER_INODE_ITIMES|HAMMER_INODE_TRUNCATED|\ |
| 285 | HAMMER_INODE_DELETING) |
| 286 | #define HAMMER_INODE_MODEASY (HAMMER_INODE_DDIRTY|HAMMER_INODE_ITIMES) |
| 287 | |
| 288 | #define HAMMER_INODE_MODMASK_NOXDIRTY \ |
| 289 | (HAMMER_INODE_MODMASK & ~HAMMER_INODE_XDIRTY) |
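/*
 * Example (illustrative sketch, not code lifted from the HAMMER
 * sources): MODMASK is how callers typically test whether an inode has
 * any modifications that still need to reach the media, e.g. in an
 * fsync-style path.  'ip' is assumed to be a referenced hammer_inode_t.
 */
#if 0	/* example only, not compiled */
	if (ip->flags & HAMMER_INODE_MODMASK)
		hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
#endif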
| 290 | |
| 291 | #define HAMMER_FLUSH_GROUP_SIZE 64 |
| 292 | |
| 293 | #define HAMMER_FLUSH_SIGNAL 0x0001 |
| 294 | #define HAMMER_FLUSH_RECURSION 0x0002 |
| 295 | |
| 296 | /* |
| 297 | * Used by the inode reclaim code to pipeline reclaims and avoid |
| 298 | * blowing out kernel memory or letting the flusher get too far |
| 299 | * behind. |
| 300 | */ |
| 301 | struct hammer_reclaim { |
| 302 | TAILQ_ENTRY(hammer_reclaim) entry; |
| 303 | int okydoky; |
| 304 | }; |
| 305 | |
| 306 | #define HAMMER_RECLAIM_FLUSH 2000 |
| 307 | #define HAMMER_RECLAIM_WAIT 4000 |
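/*
 * Example (conceptual sketch, not the actual hammer_inode_waitreclaims()
 * implementation): when too many inodes are pending reclaim the caller
 * queues a hammer_reclaim entry and sleeps until the flusher catches up
 * and marks it done.  'hmp' is assumed to be the hammer_mount, defined
 * later in this header.
 */
#if 0	/* example only, not compiled */
	struct hammer_reclaim reclaim;

	if (hmp->inode_reclaims > HAMMER_RECLAIM_WAIT) {
		reclaim.okydoky = 0;
		TAILQ_INSERT_TAIL(&hmp->reclaim_list, &reclaim, entry);
		while (reclaim.okydoky == 0)
			tsleep(&reclaim, 0, "hmrrcm", hz);
	}
#endif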
| 308 | |
| 309 | /* |
| 310 | * Structure used to represent an unsynchronized record in-memory. These |
| 311 | * records typically represent directory entries. Only non-historical |
| 312 | * records are kept in-memory. |
| 313 | * |
| 314 | * Records are organized as a per-inode RB-Tree. If the inode is not |
| 315 | * on disk then neither are any records and the in-memory record tree |
| 316 | * represents the entire contents of the inode. If the inode is on disk |
| 317 | * then the on-disk B-Tree is scanned in parallel with the in-memory |
| 318 | * RB-Tree to synthesize the current state of the file. |
| 319 | * |
| 320 | * Records are also used to enforce the ordering of directory create/delete |
| 321 | * operations. A new inode will not be flushed to disk unless its related |
| 322 | * directory entry is also being flushed at the same time. A directory entry |
| 323 | * will not be removed unless its related inode is also being removed at the |
| 324 | * same time. |
| 325 | */ |
| 326 | typedef enum hammer_record_type { |
| 327 | HAMMER_MEM_RECORD_GENERAL, /* misc record */ |
| 328 | HAMMER_MEM_RECORD_INODE, /* inode record */ |
| 329 | HAMMER_MEM_RECORD_ADD, /* positive memory cache record */ |
| 330 | HAMMER_MEM_RECORD_DEL, /* negative delete-on-disk record */ |
| 331 | HAMMER_MEM_RECORD_DATA /* bulk-data record w/on-disk ref */ |
| 332 | } hammer_record_type_t; |
| 333 | |
| 334 | struct hammer_record { |
| 335 | RB_ENTRY(hammer_record) rb_node; |
| 336 | TAILQ_ENTRY(hammer_record) target_entry; |
| 337 | hammer_inode_state_t flush_state; |
| 338 | int flush_group; |
| 339 | hammer_record_type_t type; |
| 340 | struct hammer_lock lock; |
| 341 | struct hammer_reserve *resv; |
| 342 | struct hammer_inode *ip; |
| 343 | struct hammer_inode *target_ip; |
| 344 | struct hammer_btree_leaf_elm leaf; |
| 345 | union hammer_data_ondisk *data; |
| 346 | int flags; |
| 347 | }; |
| 348 | |
| 349 | typedef struct hammer_record *hammer_record_t; |
| 350 | |
| 351 | /* |
| 352 | * Record flags. Note that FE can only be set by the frontend if the |
| 353 | * record has not been interlocked by the backend w/ BE. |
| 354 | */ |
| 355 | #define HAMMER_RECF_ALLOCDATA 0x0001 |
| 356 | #define HAMMER_RECF_ONRBTREE 0x0002 |
| 357 | #define HAMMER_RECF_DELETED_FE 0x0004 /* deleted (frontend) */ |
| 358 | #define HAMMER_RECF_DELETED_BE 0x0008 /* deleted (backend) */ |
| 359 | #define HAMMER_RECF_UNUSED0010 0x0010 |
| 360 | #define HAMMER_RECF_INTERLOCK_BE 0x0020 /* backend interlock */ |
| 361 | #define HAMMER_RECF_WANTED 0x0040 /* wanted by the frontend */ |
| 362 | #define HAMMER_RECF_CONVERT_DELETE 0x0100 /* special case */ |
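/*
 * Example (illustrative sketch, not code from hammer_object.c): the
 * frontend honors the backend interlock before marking a record deleted
 * from its point of view.  hammer_wait_mem_record() is the inline
 * defined near the end of this header.
 */
#if 0	/* example only, not compiled */
	if (record->flags & HAMMER_RECF_INTERLOCK_BE)
		hammer_wait_mem_record(record);	/* let the backend finish */
	record->flags |= HAMMER_RECF_DELETED_FE;
#endif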
| 363 | |
| 364 | /* |
| 365 | * In-memory structures representing on-disk structures. |
| 366 | */ |
| 367 | struct hammer_volume; |
| 368 | struct hammer_buffer; |
| 369 | struct hammer_node; |
| 370 | struct hammer_undo; |
| 371 | struct hammer_reserve; |
| 372 | |
| 373 | RB_HEAD(hammer_vol_rb_tree, hammer_volume); |
| 374 | RB_HEAD(hammer_buf_rb_tree, hammer_buffer); |
| 375 | RB_HEAD(hammer_nod_rb_tree, hammer_node); |
| 376 | RB_HEAD(hammer_und_rb_tree, hammer_undo); |
| 377 | RB_HEAD(hammer_res_rb_tree, hammer_reserve); |
| 378 | |
| 379 | RB_PROTOTYPE2(hammer_vol_rb_tree, hammer_volume, rb_node, |
| 380 | hammer_vol_rb_compare, int32_t); |
| 381 | RB_PROTOTYPE2(hammer_buf_rb_tree, hammer_buffer, rb_node, |
| 382 | hammer_buf_rb_compare, hammer_off_t); |
| 383 | RB_PROTOTYPE2(hammer_nod_rb_tree, hammer_node, rb_node, |
| 384 | hammer_nod_rb_compare, hammer_off_t); |
| 385 | RB_PROTOTYPE2(hammer_und_rb_tree, hammer_undo, rb_node, |
| 386 | hammer_und_rb_compare, hammer_off_t); |
| 387 | RB_PROTOTYPE2(hammer_res_rb_tree, hammer_reserve, rb_node, |
| 388 | hammer_res_rb_compare, hammer_off_t); |
| 389 | |
| 390 | /* |
| 391 | * IO management - embedded at the head of various in-memory structures |
| 392 | * |
| 393 | * VOLUME - hammer_volume containing meta-data |
| 394 | * META_BUFFER - hammer_buffer containing meta-data |
| 395 | * DATA_BUFFER - hammer_buffer containing pure-data |
| 396 | * |
| 397 | * Dirty volume headers and dirty meta-data buffers are locked until the |
| 398 | * flusher can sequence them out. Dirty pure-data buffers can be written. |
| 399 | * Clean buffers can be passively released. |
| 400 | */ |
| 401 | typedef enum hammer_io_type { |
| 402 | HAMMER_STRUCTURE_VOLUME, |
| 403 | HAMMER_STRUCTURE_META_BUFFER, |
| 404 | HAMMER_STRUCTURE_UNDO_BUFFER, |
| 405 | HAMMER_STRUCTURE_DATA_BUFFER |
| 406 | } hammer_io_type_t; |
| 407 | |
| 408 | union hammer_io_structure; |
| 409 | struct hammer_io; |
| 410 | |
| 411 | struct worklist { |
| 412 | LIST_ENTRY(worklist) node; |
| 413 | }; |
| 414 | |
| 415 | TAILQ_HEAD(hammer_io_list, hammer_io); |
| 416 | typedef struct hammer_io_list *hammer_io_list_t; |
| 417 | |
| 418 | struct hammer_io { |
| 419 | struct worklist worklist; |
| 420 | struct hammer_lock lock; |
| 421 | enum hammer_io_type type; |
| 422 | struct hammer_mount *hmp; |
| 423 | TAILQ_ENTRY(hammer_io) mod_entry; /* list entry if modified */ |
| 424 | hammer_io_list_t mod_list; |
| 425 | struct buf *bp; |
| 426 | int64_t offset; /* zone-2 offset */ |
| 427 | int bytes; /* buffer cache buffer size */ |
| 428 | int loading; /* loading/unloading interlock */ |
| 429 | int modify_refs; |
| 430 | |
| 431 | u_int modified : 1; /* bp's data was modified */ |
| 432 | u_int released : 1; /* bp released (w/ B_LOCKED set) */ |
| 433 | u_int running : 1; /* bp write IO in progress */ |
| 434 | u_int waiting : 1; /* someone is waiting on us */ |
| 435 | u_int validated : 1; /* ondisk has been validated */ |
| 436 | u_int waitdep : 1; /* flush waits for dependencies */ |
| 437 | u_int recovered : 1; /* has recovery ref */ |
| 438 | u_int waitmod : 1; /* waiting for modify_refs */ |
| 439 | u_int reclaim : 1; /* reclaim requested */ |
| 440 | u_int gencrc : 1; /* crc needs to be generated */ |
| 441 | }; |
| 442 | |
| 443 | typedef struct hammer_io *hammer_io_t; |
| 444 | |
| 445 | #define HAMMER_CLUSTER_SIZE (64 * 1024) |
| 446 | #if HAMMER_CLUSTER_SIZE > MAXBSIZE |
| 447 | #undef HAMMER_CLUSTER_SIZE |
| 448 | #define HAMMER_CLUSTER_SIZE MAXBSIZE |
| 449 | #endif |
| 450 | #define HAMMER_CLUSTER_BUFS (HAMMER_CLUSTER_SIZE / HAMMER_BUFSIZE) |
| 451 | |
| 452 | /* |
| 453 | * In-memory volume representing the on-disk volume header |
| 454 | */ |
| 455 | struct hammer_volume { |
| 456 | struct hammer_io io; |
| 457 | RB_ENTRY(hammer_volume) rb_node; |
| 458 | struct hammer_volume_ondisk *ondisk; |
| 459 | int32_t vol_no; |
| 460 | int64_t nblocks; /* note: special calculation for statfs */ |
| 461 | int64_t buffer_base; /* base offset of buffer 0 */ |
| 462 | hammer_off_t maxbuf_off; /* Maximum buffer offset (zone-2) */ |
| 463 | hammer_off_t maxraw_off; /* Maximum raw offset for device */ |
| 464 | char *vol_name; |
| 465 | struct vnode *devvp; |
| 466 | int vol_flags; |
| 467 | }; |
| 468 | |
| 469 | typedef struct hammer_volume *hammer_volume_t; |
| 470 | |
| 471 | /* |
| 472 | * In-memory buffer (other than volume, super-cluster, or cluster), |
| 473 | * representing an on-disk buffer. |
| 474 | */ |
| 475 | struct hammer_buffer { |
| 476 | struct hammer_io io; |
| 477 | RB_ENTRY(hammer_buffer) rb_node; |
| 478 | void *ondisk; |
| 479 | struct hammer_volume *volume; |
| 480 | hammer_off_t zoneX_offset; |
| 481 | hammer_off_t zone2_offset; |
| 482 | struct hammer_reserve *resv; |
| 483 | struct hammer_node_list clist; |
| 484 | }; |
| 485 | |
| 486 | typedef struct hammer_buffer *hammer_buffer_t; |
| 487 | |
| 488 | /* |
| 489 | * In-memory B-Tree node, representing an on-disk B-Tree node. |
| 490 | * |
| 491 | * This is a hang-on structure which is backed by a hammer_buffer, |
| 492 | * indexed by node offset, and used for fine-grained locking of |
| 493 | * B-Tree nodes in order to properly control lock ordering. A hammer_buffer |
| 494 | * can contain multiple nodes representing wildly disassociated portions |
| 495 | * of the B-Tree so locking cannot be done on a buffer-by-buffer basis. |
| 496 | * |
| 497 | * Nodes are identified by their full (zone-encoded) node_offset, |
| 498 | * which is also the form used by all on-disk B-Tree references, so |
| 499 | * no additional translation layer is needed to access them. |
| 500 | */ |
| 501 | struct hammer_node { |
| 502 | struct hammer_lock lock; /* node-by-node lock */ |
| 503 | TAILQ_ENTRY(hammer_node) entry; /* per-buffer linkage */ |
| 504 | RB_ENTRY(hammer_node) rb_node; /* per-cluster linkage */ |
| 505 | hammer_off_t node_offset; /* full offset spec */ |
| 506 | struct hammer_mount *hmp; |
| 507 | struct hammer_buffer *buffer; /* backing buffer */ |
| 508 | hammer_node_ondisk_t ondisk; /* ptr to on-disk structure */ |
| 509 | struct hammer_node_cache_list cache_list; /* passive caches */ |
| 510 | int flags; |
| 511 | int loading; /* load interlock */ |
| 512 | }; |
| 513 | |
| 514 | #define HAMMER_NODE_DELETED 0x0001 |
| 515 | #define HAMMER_NODE_FLUSH 0x0002 |
| 516 | #define HAMMER_NODE_CRCGOOD 0x0004 |
| 517 | #define HAMMER_NODE_NEEDSCRC 0x0008 |
| 518 | |
| 519 | typedef struct hammer_node *hammer_node_t; |
| 520 | |
| 521 | /* |
| 522 | * List of locked nodes. |
| 523 | */ |
| 524 | struct hammer_node_locklist { |
| 525 | struct hammer_node_locklist *next; |
| 526 | hammer_node_t node; |
| 527 | }; |
| 528 | |
| 529 | typedef struct hammer_node_locklist *hammer_node_locklist_t; |
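/*
 * Example (conceptual sketch of how a locklist built by
 * hammer_btree_lock_children() might be torn down; the real code lives
 * in hammer_btree.c): walk the singly-linked list, unlocking and
 * releasing each node and freeing the entries.  'locklistp' is assumed
 * to point at the list head.
 */
#if 0	/* example only, not compiled */
	hammer_node_locklist_t item;

	while ((item = *locklistp) != NULL) {
		*locklistp = item->next;
		hammer_unlock(&item->node->lock);
		hammer_rel_node(item->node);
		kfree(item, M_HAMMER);
	}
#endif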
| 530 | |
| 531 | |
| 532 | /* |
| 533 | * Common I/O management structure - embedded in in-memory structures |
| 534 | * which are backed by filesystem buffers. |
| 535 | */ |
| 536 | union hammer_io_structure { |
| 537 | struct hammer_io io; |
| 538 | struct hammer_volume volume; |
| 539 | struct hammer_buffer buffer; |
| 540 | }; |
| 541 | |
| 542 | typedef union hammer_io_structure *hammer_io_structure_t; |
| 543 | |
| 544 | /* |
| 545 | * The reserve structure prevents the blockmap from allocating |
| 546 | * out of a reserved bigblock. Such reservations are used by |
| 547 | * the direct-write mechanism. |
| 548 | * |
| 549 | * The structure is also used to hold off on reallocations of |
| 550 | * big blocks from the freemap until flush dependencies have |
| 551 | * been dealt with. |
| 552 | */ |
| 553 | struct hammer_reserve { |
| 554 | RB_ENTRY(hammer_reserve) rb_node; |
| 555 | TAILQ_ENTRY(hammer_reserve) delay_entry; |
| 556 | int flush_group; |
| 557 | int flags; |
| 558 | int refs; |
| 559 | int zone; |
| 560 | hammer_off_t zone_offset; |
| 561 | }; |
| 562 | |
| 563 | typedef struct hammer_reserve *hammer_reserve_t; |
| 564 | |
| 565 | #define HAMMER_RESF_ONDELAY 0x0001 |
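/*
 * Example (conceptual sketch of how the direct-write path might use a
 * reservation; the real logic is split across hammer_blockmap.c and
 * hammer_io.c): reserve space in a big-block, write the data directly
 * to the device, then complete the reservation once the write and its
 * flush dependencies are resolved.  'hmp', 'bytes' and the local
 * variables are assumed; the zone index constant comes from
 * hammer_disk.h.
 */
#if 0	/* example only, not compiled */
	hammer_reserve_t resv;
	hammer_off_t zone_off;
	int error;

	resv = hammer_blockmap_reserve(hmp, HAMMER_ZONE_LARGE_DATA_INDEX,
				       bytes, &zone_off, &error);
	if (resv) {
		/* ... direct write of the data to zone_off ... */
		hammer_blockmap_reserve_complete(hmp, resv);
	}
#endif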
| 566 | |
| 567 | #include "hammer_cursor.h" |
| 568 | |
| 569 | /* |
| 570 | * The undo structure tracks recent undos to avoid laying down duplicate |
| 571 | * undos within a flush group, saving us a significant amount of overhead. |
| 572 | * |
| 573 | * This is strictly a heuristic. |
| 574 | */ |
| 575 | #define HAMMER_MAX_UNDOS 1024 |
| 576 | #define HAMMER_MAX_FLUSHERS 4 |
| 577 | |
| 578 | struct hammer_undo { |
| 579 | RB_ENTRY(hammer_undo) rb_node; |
| 580 | TAILQ_ENTRY(hammer_undo) lru_entry; |
| 581 | hammer_off_t offset; |
| 582 | int bytes; |
| 583 | }; |
| 584 | |
| 585 | typedef struct hammer_undo *hammer_undo_t; |
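/*
 * Example (conceptual sketch, not the actual hammer_generate_undo()
 * logic): before laying down an undo record the caller consults the
 * history, assuming hammer_enter_undo_history() returns EALREADY when
 * the range is already covered within the current flush group.  'hmp',
 * 'zone_off' and 'len' are assumed.
 */
#if 0	/* example only, not compiled */
	if (hammer_enter_undo_history(hmp, zone_off, len) == EALREADY) {
		/* duplicate undo within this flush group, skip it */
	} else {
		/* generate a fresh undo record for the range */
	}
#endif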
| 586 | |
| 587 | struct hammer_flusher_info; |
| 588 | |
| 589 | struct hammer_flusher { |
| 590 | int signal; /* flusher thread sequencer */ |
| 591 | int act; /* currently active flush group */ |
| 592 | int done; /* set to act when complete */ |
| 593 | int next; /* next flush group */ |
| 594 | int group_lock; /* lock sequencing of the next flush */ |
| 595 | int exiting; /* request master exit */ |
| 596 | int count; /* number of slave flushers */ |
| 597 | int running; /* number of slave flushers running */ |
| 598 | thread_t td; /* master flusher thread */ |
| 599 | hammer_tid_t tid; /* last flushed transaction id */ |
| 600 | int finalize_want; /* serialize finalization */ |
| 601 | struct hammer_lock finalize_lock; /* serialize finalization */ |
| 602 | struct hammer_transaction trans; /* shared transaction */ |
| 603 | struct hammer_flusher_info *info[HAMMER_MAX_FLUSHERS]; |
| 604 | }; |
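/*
 * Example (conceptual sketch of the flush-group handshake, not the
 * actual hammer_flusher.c code): a synchronous flush opens a new group
 * by bumping ->next, wakes the master flusher thread, and waits for
 * ->done to catch up to the group it asked for.  'hmp' and the exact
 * wakeup/sleep channels are assumptions.
 */
#if 0	/* example only, not compiled */
	int seq;

	seq = hmp->flusher.next++;		/* the group we wait on */
	wakeup(&hmp->flusher.signal);		/* kick the master thread */
	while ((int)(seq - hmp->flusher.done) > 0)
		tsleep(&hmp->flusher.done, 0, "hmrfls", 0);
#endif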
| 605 | |
| 606 | /* |
| 607 | * Internal hammer mount data structure |
| 608 | */ |
| 609 | struct hammer_mount { |
| 610 | struct mount *mp; |
| 611 | /*struct vnode *rootvp;*/ |
| 612 | struct hammer_ino_rb_tree rb_inos_root; |
| 613 | struct hammer_vol_rb_tree rb_vols_root; |
| 614 | struct hammer_nod_rb_tree rb_nods_root; |
| 615 | struct hammer_und_rb_tree rb_undo_root; |
| 616 | struct hammer_res_rb_tree rb_resv_root; |
| 617 | struct hammer_buf_rb_tree rb_bufs_root; |
| 618 | struct hammer_volume *rootvol; |
| 619 | struct hammer_base_elm root_btree_beg; |
| 620 | struct hammer_base_elm root_btree_end; |
| 621 | int flags; |
| 622 | int hflags; |
| 623 | int ronly; |
| 624 | int nvolumes; |
| 625 | int volume_iterator; |
| 626 | int rsv_inodes; /* reserved space due to dirty inodes */ |
| 627 | int rsv_databufs; /* reserved space due to dirty buffers */ |
| 628 | int rsv_databytes; /* reserved space due to record data */ |
| 629 | int rsv_recs; /* reserved space due to dirty records */ |
| 630 | int last_newrecords; |
| 631 | int count_newrecords; |
| 632 | |
| 633 | int inode_reclaims; /* inodes pending reclaim by flusher */ |
| 634 | int count_inodes; /* total number of inodes */ |
| 635 | int count_iqueued; /* inodes queued to flusher */ |
| 636 | |
| 637 | struct hammer_flusher flusher; |
| 638 | |
| 639 | u_int check_interrupt; |
| 640 | uuid_t fsid; |
| 641 | udev_t fsid_udev; |
| 642 | struct hammer_io_list volu_list; /* dirty volume buffers */ |
| 643 | struct hammer_io_list undo_list; /* dirty undo buffers */ |
| 644 | struct hammer_io_list data_list; /* dirty data buffers */ |
| 645 | struct hammer_io_list alt_data_list; /* dirty data buffers (alt list) */ |
| 646 | struct hammer_io_list meta_list; /* dirty meta bufs */ |
| 647 | struct hammer_io_list lose_list; /* loose buffers */ |
| 648 | int locked_dirty_count; /* meta/volu count */ |
| 649 | int io_running_count; |
| 650 | int objid_cache_count; |
| 651 | hammer_tid_t asof; |
| 652 | hammer_tid_t next_tid; |
| 653 | int64_t copy_stat_freebigblocks; /* number of free bigblocks */ |
| 654 | |
| 655 | u_int32_t namekey_iterator; |
| 656 | struct netexport export; |
| 657 | struct hammer_lock sync_lock; |
| 658 | struct hammer_lock free_lock; |
| 659 | struct hammer_lock undo_lock; |
| 660 | struct hammer_lock blkmap_lock; |
| 661 | struct hammer_blockmap blockmap[HAMMER_MAX_ZONES]; |
| 662 | struct hammer_undo undos[HAMMER_MAX_UNDOS]; |
| 663 | int undo_alloc; |
| 664 | TAILQ_HEAD(, hammer_undo) undo_lru_list; |
| 665 | TAILQ_HEAD(, hammer_inode) flush_list; |
| 666 | TAILQ_HEAD(, hammer_reserve) delay_list; |
| 667 | TAILQ_HEAD(, hammer_objid_cache) objid_cache_list; |
| 668 | TAILQ_HEAD(, hammer_reclaim) reclaim_list; |
| 669 | }; |
| 670 | |
| 671 | typedef struct hammer_mount *hammer_mount_t; |
| 672 | |
| 673 | #define HAMMER_MOUNT_UNUSED0001 0x0001 |
| 674 | |
| 675 | struct hammer_sync_info { |
| 676 | int error; |
| 677 | int waitfor; |
| 678 | }; |
| 679 | |
| 680 | #endif |
| 681 | |
| 682 | #if defined(_KERNEL) |
| 683 | |
| 684 | extern struct vop_ops hammer_vnode_vops; |
| 685 | extern struct vop_ops hammer_spec_vops; |
| 686 | extern struct vop_ops hammer_fifo_vops; |
| 687 | extern struct bio_ops hammer_bioops; |
| 688 | |
| 689 | extern int hammer_debug_io; |
| 690 | extern int hammer_debug_general; |
| 691 | extern int hammer_debug_debug; |
| 692 | extern int hammer_debug_inode; |
| 693 | extern int hammer_debug_locks; |
| 694 | extern int hammer_debug_btree; |
| 695 | extern int hammer_debug_tid; |
| 696 | extern int hammer_debug_recover; |
| 697 | extern int hammer_debug_recover_faults; |
| 698 | extern int hammer_debug_cluster_enable; |
| 699 | extern int hammer_count_inodes; |
| 700 | extern int hammer_count_iqueued; |
| 701 | extern int hammer_count_reclaiming; |
| 702 | extern int hammer_count_records; |
| 703 | extern int hammer_count_record_datas; |
| 704 | extern int hammer_count_volumes; |
| 705 | extern int hammer_count_buffers; |
| 706 | extern int hammer_count_nodes; |
| 707 | extern int64_t hammer_stats_btree_lookups; |
| 708 | extern int64_t hammer_stats_btree_searches; |
| 709 | extern int64_t hammer_stats_btree_inserts; |
| 710 | extern int64_t hammer_stats_btree_deletes; |
| 711 | extern int64_t hammer_stats_btree_elements; |
| 712 | extern int64_t hammer_stats_btree_splits; |
| 713 | extern int64_t hammer_stats_btree_iterations; |
| 714 | extern int64_t hammer_stats_record_iterations; |
| 715 | extern int hammer_count_dirtybufs; |
| 716 | extern int hammer_count_refedbufs; |
| 717 | extern int hammer_count_reservations; |
| 718 | extern int hammer_count_io_running_read; |
| 719 | extern int hammer_count_io_running_write; |
| 720 | extern int hammer_count_io_locked; |
| 721 | extern int hammer_limit_dirtybufs; |
| 722 | extern int hammer_limit_iqueued; |
| 723 | extern int hammer_limit_recs; |
| 724 | extern int hammer_bio_count; |
| 725 | extern int hammer_verify_zone; |
| 726 | extern int hammer_write_mode; |
| 727 | extern int64_t hammer_contention_count; |
| 728 | |
| 729 | int hammer_vop_inactive(struct vop_inactive_args *); |
| 730 | int hammer_vop_reclaim(struct vop_reclaim_args *); |
| 731 | int hammer_get_vnode(struct hammer_inode *ip, struct vnode **vpp); |
| 732 | struct hammer_inode *hammer_get_inode(hammer_transaction_t trans, |
| 733 | hammer_inode_t dip, u_int64_t obj_id, |
| 734 | hammer_tid_t asof, int flags, int *errorp); |
| 735 | void hammer_put_inode(struct hammer_inode *ip); |
| 736 | void hammer_put_inode_ref(struct hammer_inode *ip); |
| 737 | void hammer_inode_waitreclaims(hammer_mount_t hmp); |
| 738 | |
| 739 | int hammer_unload_volume(hammer_volume_t volume, void *data __unused); |
| 740 | int hammer_adjust_volume_mode(hammer_volume_t volume, void *data __unused); |
| 741 | |
| 742 | int hammer_unload_buffer(hammer_buffer_t buffer, void *data __unused); |
| 743 | int hammer_install_volume(hammer_mount_t hmp, const char *volname); |
| 744 | |
| 745 | int hammer_ip_lookup(hammer_cursor_t cursor); |
| 746 | int hammer_ip_first(hammer_cursor_t cursor); |
| 747 | int hammer_ip_next(hammer_cursor_t cursor); |
| 748 | int hammer_ip_resolve_data(hammer_cursor_t cursor); |
| 749 | int hammer_ip_delete_record(hammer_cursor_t cursor, hammer_inode_t ip, |
| 750 | hammer_tid_t tid); |
| 751 | int hammer_delete_at_cursor(hammer_cursor_t cursor, int64_t *stat_bytes); |
| 752 | int hammer_ip_check_directory_empty(hammer_transaction_t trans, |
| 753 | hammer_inode_t ip); |
| 754 | int hammer_sync_hmp(hammer_mount_t hmp, int waitfor); |
| 755 | int hammer_queue_inodes_flusher(hammer_mount_t hmp, int waitfor); |
| 756 | |
| 757 | |
| 758 | hammer_record_t |
| 759 | hammer_alloc_mem_record(hammer_inode_t ip, int data_len); |
| 760 | void hammer_flush_record_done(hammer_record_t record, int error); |
| 761 | void hammer_wait_mem_record_ident(hammer_record_t record, const char *ident); |
| 762 | void hammer_rel_mem_record(hammer_record_t record); |
| 763 | |
| 764 | int hammer_cursor_up(hammer_cursor_t cursor); |
| 765 | int hammer_cursor_up_locked(hammer_cursor_t cursor); |
| 766 | int hammer_cursor_down(hammer_cursor_t cursor); |
| 767 | int hammer_cursor_upgrade(hammer_cursor_t cursor); |
| 768 | int hammer_cursor_upgrade_node(hammer_cursor_t cursor); |
| 769 | void hammer_cursor_downgrade(hammer_cursor_t cursor); |
| 770 | int hammer_cursor_seek(hammer_cursor_t cursor, hammer_node_t node, |
| 771 | int index); |
| 772 | void hammer_lock_ex_ident(struct hammer_lock *lock, const char *ident); |
| 773 | int hammer_lock_ex_try(struct hammer_lock *lock); |
| 774 | void hammer_lock_sh(struct hammer_lock *lock); |
| 775 | void hammer_lock_sh_lowpri(struct hammer_lock *lock); |
| 776 | int hammer_lock_sh_try(struct hammer_lock *lock); |
| 777 | int hammer_lock_upgrade(struct hammer_lock *lock); |
| 778 | void hammer_lock_downgrade(struct hammer_lock *lock); |
| 779 | void hammer_unlock(struct hammer_lock *lock); |
| 780 | void hammer_ref(struct hammer_lock *lock); |
| 781 | void hammer_unref(struct hammer_lock *lock); |
| 782 | |
| 783 | void hammer_sync_lock_ex(hammer_transaction_t trans); |
| 784 | void hammer_sync_lock_sh(hammer_transaction_t trans); |
| 785 | int hammer_sync_lock_sh_try(hammer_transaction_t trans); |
| 786 | void hammer_sync_unlock(hammer_transaction_t trans); |
| 787 | |
| 788 | u_int32_t hammer_to_unix_xid(uuid_t *uuid); |
| 789 | void hammer_guid_to_uuid(uuid_t *uuid, u_int32_t guid); |
| 790 | void hammer_to_timespec(hammer_tid_t tid, struct timespec *ts); |
| 791 | hammer_tid_t hammer_timespec_to_transid(struct timespec *ts); |
| 792 | hammer_tid_t hammer_now_tid(void); |
| 793 | hammer_tid_t hammer_str_to_tid(const char *str); |
| 794 | hammer_tid_t hammer_alloc_objid(hammer_transaction_t trans, hammer_inode_t dip); |
| 795 | void hammer_clear_objid(hammer_inode_t dip); |
| 796 | void hammer_destroy_objid_cache(hammer_mount_t hmp); |
| 797 | |
| 798 | int hammer_enter_undo_history(hammer_mount_t hmp, hammer_off_t offset, |
| 799 | int bytes); |
| 800 | void hammer_clear_undo_history(hammer_mount_t hmp); |
| 801 | enum vtype hammer_get_vnode_type(u_int8_t obj_type); |
| 802 | int hammer_get_dtype(u_int8_t obj_type); |
| 803 | u_int8_t hammer_get_obj_type(enum vtype vtype); |
| 804 | int64_t hammer_directory_namekey(void *name, int len); |
| 805 | int hammer_nohistory(hammer_inode_t ip); |
| 806 | |
| 807 | int hammer_init_cursor(hammer_transaction_t trans, hammer_cursor_t cursor, |
| 808 | hammer_node_cache_t cache, hammer_inode_t ip); |
| 809 | int hammer_reinit_cursor(hammer_cursor_t cursor); |
| 810 | void hammer_normalize_cursor(hammer_cursor_t cursor); |
| 811 | void hammer_done_cursor(hammer_cursor_t cursor); |
| 812 | void hammer_mem_done(hammer_cursor_t cursor); |
| 813 | |
| 814 | int hammer_btree_lookup(hammer_cursor_t cursor); |
| 815 | int hammer_btree_first(hammer_cursor_t cursor); |
| 816 | int hammer_btree_last(hammer_cursor_t cursor); |
| 817 | int hammer_btree_extract(hammer_cursor_t cursor, int flags); |
| 818 | int hammer_btree_iterate(hammer_cursor_t cursor); |
| 819 | int hammer_btree_iterate_reverse(hammer_cursor_t cursor); |
| 820 | int hammer_btree_insert(hammer_cursor_t cursor, |
| 821 | hammer_btree_leaf_elm_t elm); |
| 822 | int hammer_btree_delete(hammer_cursor_t cursor); |
| 823 | int hammer_btree_cmp(hammer_base_elm_t key1, hammer_base_elm_t key2); |
| 824 | int hammer_btree_chkts(hammer_tid_t ts, hammer_base_elm_t key); |
| 825 | int hammer_btree_correct_rhb(hammer_cursor_t cursor, hammer_tid_t tid); |
| 826 | int hammer_btree_correct_lhb(hammer_cursor_t cursor, hammer_tid_t tid); |
| 827 | |
| 828 | int btree_set_parent(hammer_transaction_t trans, hammer_node_t node, |
| 829 | hammer_btree_elm_t elm); |
| 830 | int hammer_btree_lock_children(hammer_cursor_t cursor, |
| 831 | struct hammer_node_locklist **locklistp); |
| 832 | void hammer_btree_unlock_children(struct hammer_node_locklist **locklistp); |
| 833 | int hammer_btree_search_node(hammer_base_elm_t elm, hammer_node_ondisk_t node); |
| 834 | |
| 835 | void hammer_print_btree_node(hammer_node_ondisk_t ondisk); |
| 836 | void hammer_print_btree_elm(hammer_btree_elm_t elm, u_int8_t type, int i); |
| 837 | |
| 838 | void *hammer_bread(struct hammer_mount *hmp, hammer_off_t off, |
| 839 | int *errorp, struct hammer_buffer **bufferp); |
| 840 | void *hammer_bnew(struct hammer_mount *hmp, hammer_off_t off, |
| 841 | int *errorp, struct hammer_buffer **bufferp); |
| 842 | void *hammer_bread_ext(struct hammer_mount *hmp, hammer_off_t off, int bytes, |
| 843 | int *errorp, struct hammer_buffer **bufferp); |
| 844 | void *hammer_bnew_ext(struct hammer_mount *hmp, hammer_off_t off, int bytes, |
| 845 | int *errorp, struct hammer_buffer **bufferp); |
| 846 | |
| 847 | hammer_volume_t hammer_get_root_volume(hammer_mount_t hmp, int *errorp); |
| 848 | |
| 849 | hammer_volume_t hammer_get_volume(hammer_mount_t hmp, |
| 850 | int32_t vol_no, int *errorp); |
| 851 | hammer_buffer_t hammer_get_buffer(hammer_mount_t hmp, hammer_off_t buf_offset, |
| 852 | int bytes, int isnew, int *errorp); |
| 853 | void hammer_del_buffers(hammer_mount_t hmp, hammer_off_t base_offset, |
| 854 | hammer_off_t zone2_offset, int bytes); |
| 855 | |
| 856 | int hammer_ref_volume(hammer_volume_t volume); |
| 857 | int hammer_ref_buffer(hammer_buffer_t buffer); |
| 858 | void hammer_flush_buffer_nodes(hammer_buffer_t buffer); |
| 859 | |
| 860 | void hammer_rel_volume(hammer_volume_t volume, int flush); |
| 861 | void hammer_rel_buffer(hammer_buffer_t buffer, int flush); |
| 862 | |
| 863 | int hammer_vfs_export(struct mount *mp, int op, |
| 864 | const struct export_args *export); |
| 865 | hammer_node_t hammer_get_node(hammer_mount_t hmp, hammer_off_t node_offset, |
| 866 | int isnew, int *errorp); |
| 867 | void hammer_ref_node(hammer_node_t node); |
| 868 | hammer_node_t hammer_ref_node_safe(struct hammer_mount *hmp, |
| 869 | hammer_node_cache_t cache, int *errorp); |
| 870 | void hammer_rel_node(hammer_node_t node); |
| 871 | void hammer_delete_node(hammer_transaction_t trans, |
| 872 | hammer_node_t node); |
| 873 | void hammer_cache_node(hammer_node_cache_t cache, |
| 874 | hammer_node_t node); |
| 875 | void hammer_uncache_node(hammer_node_cache_t cache); |
| 876 | void hammer_flush_node(hammer_node_t node); |
| 877 | |
| 878 | void hammer_dup_buffer(struct hammer_buffer **bufferp, |
| 879 | struct hammer_buffer *buffer); |
| 880 | hammer_node_t hammer_alloc_btree(hammer_transaction_t trans, int *errorp); |
| 881 | void *hammer_alloc_data(hammer_transaction_t trans, int32_t data_len, |
| 882 | u_int16_t rec_type, hammer_off_t *data_offsetp, |
| 883 | struct hammer_buffer **data_bufferp, int *errorp); |
| 884 | |
| 885 | int hammer_generate_undo(hammer_transaction_t trans, hammer_io_t io, |
| 886 | hammer_off_t zone1_offset, void *base, int len); |
| 887 | |
| 888 | void hammer_put_volume(struct hammer_volume *volume, int flush); |
| 889 | void hammer_put_buffer(struct hammer_buffer *buffer, int flush); |
| 890 | |
| 891 | hammer_off_t hammer_freemap_alloc(hammer_transaction_t trans, |
| 892 | hammer_off_t owner, int *errorp); |
| 893 | void hammer_freemap_free(hammer_transaction_t trans, hammer_off_t phys_offset, |
| 894 | hammer_off_t owner, int *errorp); |
| 895 | int hammer_checkspace(hammer_mount_t hmp); |
| 896 | hammer_off_t hammer_blockmap_alloc(hammer_transaction_t trans, int zone, |
| 897 | int bytes, int *errorp); |
| 898 | hammer_reserve_t hammer_blockmap_reserve(hammer_mount_t hmp, int zone, |
| 899 | int bytes, hammer_off_t *zone_offp, int *errorp); |
| 900 | void hammer_blockmap_reserve_complete(hammer_mount_t hmp, |
| 901 | hammer_reserve_t resv); |
| 902 | void hammer_reserve_setdelay(hammer_mount_t hmp, hammer_reserve_t resv, |
| 903 | hammer_off_t zone2_offset); |
| 904 | void hammer_reserve_clrdelay(hammer_mount_t hmp, hammer_reserve_t resv); |
| 905 | void hammer_blockmap_free(hammer_transaction_t trans, |
| 906 | hammer_off_t bmap_off, int bytes); |
| 907 | void hammer_blockmap_finalize(hammer_transaction_t trans, |
| 908 | hammer_off_t bmap_off, int bytes); |
| 909 | int hammer_blockmap_getfree(hammer_mount_t hmp, hammer_off_t bmap_off, |
| 910 | int *curp, int *errorp); |
| 911 | hammer_off_t hammer_blockmap_lookup(hammer_mount_t hmp, hammer_off_t bmap_off, |
| 912 | int *errorp); |
| 913 | hammer_off_t hammer_undo_lookup(hammer_mount_t hmp, hammer_off_t bmap_off, |
| 914 | int *errorp); |
| 915 | int64_t hammer_undo_used(hammer_mount_t hmp); |
| 916 | int64_t hammer_undo_space(hammer_mount_t hmp); |
| 917 | int64_t hammer_undo_max(hammer_mount_t hmp); |
| 918 | |
| 919 | void hammer_start_transaction(struct hammer_transaction *trans, |
| 920 | struct hammer_mount *hmp); |
| 921 | void hammer_simple_transaction(struct hammer_transaction *trans, |
| 922 | struct hammer_mount *hmp); |
| 923 | void hammer_start_transaction_fls(struct hammer_transaction *trans, |
| 924 | struct hammer_mount *hmp); |
| 925 | void hammer_done_transaction(struct hammer_transaction *trans); |
| 926 | |
| 927 | void hammer_modify_inode(hammer_inode_t ip, int flags); |
| 928 | void hammer_flush_inode(hammer_inode_t ip, int flags); |
| 929 | void hammer_flush_inode_done(hammer_inode_t ip); |
| 930 | void hammer_wait_inode(hammer_inode_t ip); |
| 931 | |
| 932 | int hammer_create_inode(struct hammer_transaction *trans, struct vattr *vap, |
| 933 | struct ucred *cred, struct hammer_inode *dip, |
| 934 | struct hammer_inode **ipp); |
| 935 | void hammer_rel_inode(hammer_inode_t ip, int flush); |
| 936 | int hammer_reload_inode(hammer_inode_t ip, void *arg __unused); |
| 937 | int hammer_ino_rb_compare(hammer_inode_t ip1, hammer_inode_t ip2); |
| 938 | |
| 939 | int hammer_sync_inode(hammer_inode_t ip); |
| 940 | void hammer_test_inode(hammer_inode_t ip); |
| 941 | void hammer_inode_unloadable_check(hammer_inode_t ip, int getvp); |
| 942 | |
| 943 | int hammer_ip_add_directory(struct hammer_transaction *trans, |
| 944 | hammer_inode_t dip, struct namecache *ncp, |
| 945 | hammer_inode_t nip); |
| 946 | int hammer_ip_del_directory(struct hammer_transaction *trans, |
| 947 | hammer_cursor_t cursor, hammer_inode_t dip, |
| 948 | hammer_inode_t ip); |
| 949 | hammer_record_t hammer_ip_add_bulk(hammer_inode_t ip, off_t file_offset, |
| 950 | void *data, int bytes, int *errorp); |
| 951 | int hammer_ip_frontend_trunc(struct hammer_inode *ip, off_t file_size); |
| 952 | int hammer_ip_add_record(struct hammer_transaction *trans, |
| 953 | hammer_record_t record); |
| 954 | int hammer_ip_delete_range(hammer_cursor_t cursor, hammer_inode_t ip, |
| 955 | int64_t ran_beg, int64_t ran_end, int truncating); |
| 956 | int hammer_ip_delete_range_all(hammer_cursor_t cursor, hammer_inode_t ip, |
| 957 | int *countp); |
| 958 | int hammer_ip_sync_data(hammer_cursor_t cursor, hammer_inode_t ip, |
| 959 | int64_t offset, void *data, int bytes); |
| 960 | int hammer_ip_sync_record(hammer_transaction_t trans, hammer_record_t rec); |
| 961 | int hammer_ip_sync_record_cursor(hammer_cursor_t cursor, hammer_record_t rec); |
| 962 | |
| 963 | int hammer_ioctl(hammer_inode_t ip, u_long com, caddr_t data, int fflag, |
| 964 | struct ucred *cred); |
| 965 | |
| 966 | void hammer_io_init(hammer_io_t io, hammer_mount_t hmp, |
| 967 | enum hammer_io_type type); |
| 968 | int hammer_io_read(struct vnode *devvp, struct hammer_io *io, |
| 969 | hammer_off_t limit); |
| 970 | int hammer_io_new(struct vnode *devvp, struct hammer_io *io); |
| 971 | void hammer_io_inval(hammer_volume_t volume, hammer_off_t zone2_offset); |
| 972 | void hammer_io_release(struct hammer_io *io, int flush); |
| 973 | void hammer_io_flush(struct hammer_io *io); |
| 974 | void hammer_io_waitdep(struct hammer_io *io); |
| 975 | void hammer_io_wait_all(hammer_mount_t hmp, const char *ident); |
| 976 | int hammer_io_direct_read(hammer_mount_t hmp, struct bio *bio); |
| 977 | int hammer_io_direct_write(hammer_mount_t hmp, hammer_btree_leaf_elm_t leaf, |
| 978 | struct bio *bio); |
| 979 | void hammer_io_write_interlock(hammer_io_t io); |
| 980 | void hammer_io_done_interlock(hammer_io_t io); |
| 981 | void hammer_io_clear_modify(struct hammer_io *io, int inval); |
| 982 | void hammer_io_clear_modlist(struct hammer_io *io); |
| 983 | void hammer_modify_volume(hammer_transaction_t trans, hammer_volume_t volume, |
| 984 | void *base, int len); |
| 985 | void hammer_modify_buffer(hammer_transaction_t trans, hammer_buffer_t buffer, |
| 986 | void *base, int len); |
| 987 | void hammer_modify_volume_done(hammer_volume_t volume); |
| 988 | void hammer_modify_buffer_done(hammer_buffer_t buffer); |
| 989 | |
| 990 | int hammer_ioc_reblock(hammer_transaction_t trans, hammer_inode_t ip, |
| 991 | struct hammer_ioc_reblock *reblock); |
| 992 | int hammer_ioc_prune(hammer_transaction_t trans, hammer_inode_t ip, |
| 993 | struct hammer_ioc_prune *prune); |
| 994 | |
| 995 | int hammer_signal_check(hammer_mount_t hmp); |
| 996 | |
| 997 | void hammer_flusher_create(hammer_mount_t hmp); |
| 998 | void hammer_flusher_destroy(hammer_mount_t hmp); |
| 999 | void hammer_flusher_sync(hammer_mount_t hmp); |
| 1000 | void hammer_flusher_async(hammer_mount_t hmp); |
| 1001 | |
| 1002 | int hammer_recover(hammer_mount_t hmp, hammer_volume_t rootvol); |
| 1003 | void hammer_recover_flush_buffers(hammer_mount_t hmp, |
| 1004 | hammer_volume_t root_volume); |
| 1005 | |
| 1006 | void hammer_crc_set_blockmap(hammer_blockmap_t blockmap); |
| 1007 | void hammer_crc_set_volume(hammer_volume_ondisk_t ondisk); |
| 1008 | |
| 1009 | int hammer_crc_test_blockmap(hammer_blockmap_t blockmap); |
| 1010 | int hammer_crc_test_volume(hammer_volume_ondisk_t ondisk); |
| 1011 | int hammer_crc_test_btree(hammer_node_ondisk_t ondisk); |
| 1012 | void hkprintf(const char *ctl, ...); |
| 1013 | |
| 1014 | int hammer_blocksize(int64_t file_offset); |
| 1015 | int64_t hammer_blockdemarc(int64_t file_offset1, int64_t file_offset2); |
| 1016 | |
| 1017 | #endif |
| 1018 | |
| 1019 | static __inline void |
| 1020 | hammer_wait_mem_record(hammer_record_t record) |
| 1021 | { |
| 1022 | hammer_wait_mem_record_ident(record, "hmmwai"); |
| 1023 | } |
| 1024 | |
| 1025 | static __inline void |
| 1026 | hammer_lock_ex(struct hammer_lock *lock) |
| 1027 | { |
| 1028 | hammer_lock_ex_ident(lock, "hmrlck"); |
| 1029 | } |
| 1030 | |
| 1031 | /* |
| 1032 | * Indicate that a B-Tree node is being modified. |
| 1033 | */ |
| 1034 | static __inline void |
| 1035 | hammer_modify_node_noundo(hammer_transaction_t trans, hammer_node_t node) |
| 1036 | { |
| 1037 | hammer_modify_buffer(trans, node->buffer, NULL, 0); |
| 1038 | } |
| 1039 | |
| 1040 | static __inline void |
| 1041 | hammer_modify_node_all(hammer_transaction_t trans, struct hammer_node *node) |
| 1042 | { |
| 1043 | hammer_modify_buffer(trans, node->buffer, |
| 1044 | node->ondisk, sizeof(*node->ondisk)); |
| 1045 | } |
| 1046 | |
| 1047 | static __inline void |
| 1048 | hammer_modify_node(hammer_transaction_t trans, hammer_node_t node, |
| 1049 | void *base, int len) |
| 1050 | { |
| 1051 | hammer_crc_t *crcptr; |
| 1052 | |
| 1053 | KKASSERT((char *)base >= (char *)node->ondisk && |
| 1054 | (char *)base + len <= |
| 1055 | (char *)node->ondisk + sizeof(*node->ondisk)); |
| 1056 | hammer_modify_buffer(trans, node->buffer, base, len); |
| 1057 | crcptr = &node->ondisk->crc; |
| 1058 | hammer_modify_buffer(trans, node->buffer, crcptr, sizeof(hammer_crc_t)); |
| 1059 | --node->buffer->io.modify_refs; /* only want one ref */ |
| 1060 | } |
| 1061 | |
| 1062 | /* |
| 1063 | * Indicate that the specified modifications have been completed. |
| 1064 | * |
| 1065 | * Do not try to generate the crc here, it's very expensive to do and a |
| 1066 | * sequence of insertions or deletions can result in many calls to this |
| 1067 | * function on the same node. |
| 1068 | */ |
| 1069 | static __inline void |
| 1070 | hammer_modify_node_done(hammer_node_t node) |
| 1071 | { |
| 1072 | node->flags |= HAMMER_NODE_CRCGOOD; |
| 1073 | if ((node->flags & HAMMER_NODE_NEEDSCRC) == 0) { |
| 1074 | node->flags |= HAMMER_NODE_NEEDSCRC; |
| 1075 | node->buffer->io.gencrc = 1; |
| 1076 | hammer_ref_node(node); |
| 1077 | } |
| 1078 | hammer_modify_buffer_done(node->buffer); |
| 1079 | } |
| 1080 | |
| 1081 | #define hammer_modify_volume_field(trans, vol, field) \ |
| 1082 | hammer_modify_volume(trans, vol, &(vol)->ondisk->field, \ |
| 1083 | sizeof((vol)->ondisk->field)) |
| 1084 | |
| 1085 | #define hammer_modify_node_field(trans, node, field) \ |
| 1086 | hammer_modify_node(trans, node, &(node)->ondisk->field, \ |
| 1087 | sizeof((node)->ondisk->field)) |
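/*
 * Example (illustrative usage sketch): the modify/modify_done pairing
 * used when changing a single field of a B-Tree node.  The field is
 * covered by an undo record before it is changed, and the node is
 * flagged as needing a new CRC when the modification completes.
 * 'count' is the element-count field of the on-disk node (see
 * hammer_disk.h).
 */
#if 0	/* example only, not compiled */
	hammer_modify_node_field(trans, node, count);
	++node->ondisk->count;
	hammer_modify_node_done(node);
#endif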
| 1088 | |