| 1 | /* |
| 2 | * Copyright (c) 1982, 1986, 1989, 1993 |
| 3 | * The Regents of the University of California. All rights reserved. |
| 4 | * (c) UNIX System Laboratories, Inc. |
| 5 | * All or some portions of this file are derived from material licensed |
| 6 | * to the University of California by American Telephone and Telegraph |
| 7 | * Co. or Unix System Laboratories, Inc. and are reproduced herein with |
| 8 | * the permission of UNIX System Laboratories, Inc. |
| 9 | * |
| 10 | * Redistribution and use in source and binary forms, with or without |
| 11 | * modification, are permitted provided that the following conditions |
| 12 | * are met: |
| 13 | * 1. Redistributions of source code must retain the above copyright |
| 14 | * notice, this list of conditions and the following disclaimer. |
| 15 | * 2. Redistributions in binary form must reproduce the above copyright |
| 16 | * notice, this list of conditions and the following disclaimer in the |
| 17 | * documentation and/or other materials provided with the distribution. |
| 18 | * 3. All advertising materials mentioning features or use of this software |
| 19 | * must display the following acknowledgement: |
| 20 | * This product includes software developed by the University of |
| 21 | * California, Berkeley and its contributors. |
| 22 | * 4. Neither the name of the University nor the names of its contributors |
| 23 | * may be used to endorse or promote products derived from this software |
| 24 | * without specific prior written permission. |
| 25 | * |
| 26 | * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND |
| 27 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
| 28 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
| 29 | * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE |
| 30 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
| 31 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS |
| 32 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
| 33 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
| 34 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
| 35 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
| 36 | * SUCH DAMAGE. |
| 37 | * |
| 38 | * @(#)buf.h 8.9 (Berkeley) 3/30/95 |
| 39 | * $FreeBSD: src/sys/sys/buf.h,v 1.88.2.10 2003/01/25 19:02:23 dillon Exp $ |
| 40 | */ |
| 41 | |
| 42 | #ifndef _SYS_BUF_H_ |
| 43 | #define _SYS_BUF_H_ |
| 44 | |
| 45 | #include <sys/queue.h> |
| 46 | #include <sys/lock.h> |
| 47 | |
| 48 | struct buf; |
| 49 | struct mount; |
| 50 | struct vnode; |
| 51 | |
| 52 | /* |
| 53 | * To avoid including <ufs/ffs/softdep.h> |
| 54 | */ |
| 55 | LIST_HEAD(workhead, worklist); |
| 56 | /* |
| 57 | * These are currently used only by the soft dependency code, hence |
| 58 | * are stored once in a global variable. If other subsystems wanted |
| 59 | * to use these hooks, a pointer to a set of bio_ops could be added |
| 60 | * to each buffer. |
| 61 | */ |
extern struct bio_ops {
	void	(*io_start) __P((struct buf *));	/* buffer I/O is starting */
	void	(*io_complete) __P((struct buf *));	/* buffer I/O has completed */
	void	(*io_deallocate) __P((struct buf *));	/* buffer is being released */
	int	(*io_fsync) __P((struct vnode *));	/* flush dependencies for a vnode */
	int	(*io_sync) __P((struct mount *));	/* flush dependencies for a mount */
	void	(*io_movedeps) __P((struct buf *, struct buf *)); /* move deps between bufs */
	int	(*io_countdeps) __P((struct buf *, int)); /* count deps attached to a buf */
} bioops;
| 71 | |
/*
 * Saved per-interposer state for stacked b_iodone handlers.  A handler
 * that interposes itself on a buffer records the previous flags, iodone
 * function and chain pointer here so they can be restored when it fires.
 * NOTE(review): semantics inferred from field names and b_iodone_chain
 * usage in struct buf — confirm against the interposing callers.
 */
struct iodone_chain {
	long	ic_prev_flags;		/* saved b_flags */
	void	(*ic_prev_iodone) __P((struct buf *)); /* saved b_iodone */
	void	*ic_prev_iodone_chain;	/* saved b_iodone_chain (next frame) */
	struct {
		long	ia_long;	/* interposer scratch: integer arg */
		void	*ia_ptr;	/* interposer scratch: pointer arg */
	} ic_args[5];
};
| 81 | |
| 82 | /* |
| 83 | * The buffer header describes an I/O operation in the kernel. |
| 84 | * |
| 85 | * NOTES: |
| 86 | * b_bufsize, b_bcount. b_bufsize is the allocation size of the |
| 87 | * buffer, either DEV_BSIZE or PAGE_SIZE aligned. b_bcount is the |
| 88 | * originally requested buffer size and can serve as a bounds check |
| 89 | * against EOF. For most, but not all uses, b_bcount == b_bufsize. |
| 90 | * |
| 91 | * b_dirtyoff, b_dirtyend. Buffers support piecemeal, unaligned |
| 92 | * ranges of dirty data that need to be written to backing store. |
| 93 | * The range is typically clipped at b_bcount ( not b_bufsize ). |
| 94 | * |
| 95 | * b_resid. Number of bytes remaining in I/O. After an I/O operation |
| 96 | * completes, b_resid is usually 0 indicating 100% success. |
| 97 | */ |
struct buf {
	LIST_ENTRY(buf) b_hash;		/* Hash chain. */
	TAILQ_ENTRY(buf) b_vnbufs;	/* Buffer's associated vnode. */
	TAILQ_ENTRY(buf) b_freelist;	/* Free list position if not active. */
	TAILQ_ENTRY(buf) b_act;		/* Device driver queue when active. *new* */
	long	b_flags;		/* B_* flags. */
	unsigned short b_qindex;	/* buffer queue index (QUEUE_*) */
	unsigned char b_xflags;		/* extra flags (BX_*) */
	struct	lock b_lock;		/* Buffer lock (see BUF_LOCK et al) */
	int	b_error;		/* Errno value. */
	long	b_bufsize;		/* Allocated buffer size. */
	long	b_runningbufspace;	/* when I/O is running, pipelining */
	long	b_bcount;		/* Valid bytes in buffer. */
	long	b_resid;		/* Remaining I/O. */
	dev_t	b_dev;			/* Device associated with buffer. */
	caddr_t	b_data;			/* Memory, superblocks, indirect etc. */
	caddr_t	b_kvabase;		/* base kva for buffer */
	int	b_kvasize;		/* size of kva for buffer */
	daddr_t	b_lblkno;		/* Logical block number. */
	daddr_t	b_blkno;		/* Underlying physical block number. */
	off_t	b_offset;		/* Offset into file (NOOFFSET if unset) */
					/* Function to call upon completion. */
	void	(*b_iodone) __P((struct buf *));
					/* For nested b_iodone's. */
	struct	iodone_chain *b_iodone_chain;
	struct	vnode *b_vp;		/* Device vnode. */
	int	b_dirtyoff;		/* Offset in buffer of dirty region. */
	int	b_dirtyend;		/* Offset of end of dirty region. */
	struct	ucred *b_rcred;		/* Read credentials reference. */
	struct	ucred *b_wcred;		/* Write credentials reference. */
	daddr_t	b_pblkno;		/* physical block number */
	void	*b_saveaddr;		/* Original b_addr for physio. */
	void	*b_driver1;		/* for private use by the driver */
	void	*b_driver2;		/* for private use by the driver */
	void	*b_caller1;		/* for private use by the caller */
	void	*b_caller2;		/* for private use by the caller */
	union	pager_info {		/* pager-private state (see b_spc) */
		void	*pg_spc;
		int	pg_reqpage;
	} b_pager;
	union	cluster_info {		/* clustered-I/O linkage; NOTE(review):
					 * presumably parents use cluster_head,
					 * children cluster_entry — confirm */
		TAILQ_HEAD(cluster_list_head, buf) cluster_head;
		TAILQ_ENTRY(buf) cluster_entry;
	} b_cluster;
	struct	vm_page *b_pages[btoc(MAXPHYS)]; /* VM pages backing b_data */
	int	b_npages;		/* number of valid entries in b_pages */
	struct	workhead b_dep;		/* List of filesystem dependencies. */
	struct chain_info {		/* buffer chaining */
		struct buf *parent;	/* parent buffer of this chain */
		int count;		/* outstanding chained children */
	} b_chain;
};
| 150 | |
| 151 | #define b_spc b_pager.pg_spc |
| 152 | |
| 153 | /* |
| 154 | * These flags are kept in b_flags. |
| 155 | * |
| 156 | * Notes: |
| 157 | * |
| 158 | * B_ASYNC VOP calls on bp's are usually async whether or not |
| 159 | * B_ASYNC is set, but some subsystems, such as NFS, like |
| 160 | * to know what is best for the caller so they can |
| 161 | * optimize the I/O. |
| 162 | * |
| 163 | * B_PAGING Indicates that bp is being used by the paging system or |
| 164 | * some paging system and that the bp is not linked into |
| 165 | * the b_vp's clean/dirty linked lists or ref counts. |
| 166 | * Buffer vp reassignments are illegal in this case. |
| 167 | * |
| 168 | * B_CACHE This may only be set if the buffer is entirely valid. |
| 169 | * The situation where B_DELWRI is set and B_CACHE is |
| 170 | * clear MUST be committed to disk by getblk() so |
| 171 | * B_DELWRI can also be cleared. See the comments for |
| 172 | * getblk() in kern/vfs_bio.c. If B_CACHE is clear, |
| 173 | * the caller is expected to clear B_ERROR|B_INVAL, |
| 174 | * set B_READ, and initiate an I/O. |
| 175 | * |
| 176 | * The 'entire buffer' is defined to be the range from |
| 177 | * 0 through b_bcount. |
| 178 | * |
| 179 | * B_MALLOC Request that the buffer be allocated from the malloc |
| 180 | * pool, DEV_BSIZE aligned instead of PAGE_SIZE aligned. |
| 181 | * |
| 182 | * B_CLUSTEROK This flag is typically set for B_DELWRI buffers |
| 183 | * by filesystems that allow clustering when the buffer |
| 184 | * is fully dirty and indicates that it may be clustered |
| 185 | * with other adjacent dirty buffers. Note the clustering |
| 186 | * may not be used with the stage 1 data write under NFS |
| 187 | * but may be used for the commit rpc portion. |
| 188 | * |
| 189 | * B_VMIO Indicates that the buffer is tied into an VM object. |
| 190 | * The buffer's data is always PAGE_SIZE aligned even |
| 191 | * if b_bufsize and b_bcount are not. ( b_bufsize is |
| 192 | * always at least DEV_BSIZE aligned, though ). |
| 193 | * |
| 194 | * B_DIRECT Hint that we should attempt to completely free |
| 195 | * the pages underlying the buffer. B_DIRECT is |
| 196 | * sticky until the buffer is released and typically |
| 197 | * only has an effect when B_RELBUF is also set. |
| 198 | * |
| 199 | * B_NOWDRAIN This flag should be set when a device (like VN) |
| 200 | * does a turn-around VOP_WRITE from its strategy |
| 201 | * routine. This flag prevents bwrite() from blocking |
| 202 | * in wdrain, avoiding a deadlock situation. |
| 203 | */ |
| 204 | |
| 205 | #define B_AGE 0x00000001 /* Move to age queue when I/O done. */ |
| 206 | #define B_NEEDCOMMIT 0x00000002 /* Append-write in progress. */ |
| 207 | #define B_ASYNC 0x00000004 /* Start I/O, do not wait. */ |
| 208 | #define B_DIRECT 0x00000008 /* direct I/O flag (pls free vmio) */ |
| 209 | #define B_DEFERRED 0x00000010 /* Skipped over for cleaning */ |
| 210 | #define B_CACHE 0x00000020 /* Bread found us in the cache. */ |
| 211 | #define B_CALL 0x00000040 /* Call b_iodone from biodone. */ |
| 212 | #define B_DELWRI 0x00000080 /* Delay I/O until buffer reused. */ |
| 213 | #define B_FREEBUF 0x00000100 /* Instruct driver: free blocks */ |
| 214 | #define B_DONE 0x00000200 /* I/O completed. */ |
| 215 | #define B_EINTR 0x00000400 /* I/O was interrupted */ |
| 216 | #define B_ERROR 0x00000800 /* I/O error occurred. */ |
| 217 | #define B_SCANNED 0x00001000 /* VOP_FSYNC funcs mark written bufs */ |
| 218 | #define B_INVAL 0x00002000 /* Does not contain valid info. */ |
| 219 | #define B_LOCKED 0x00004000 /* Locked in core (not reusable). */ |
| 220 | #define B_NOCACHE 0x00008000 /* Do not cache block after use. */ |
| 221 | #define B_MALLOC 0x00010000 /* malloced b_data */ |
#define	B_CLUSTEROK	0x00020000	/* May be clustered w/adjacent dirty bufs (see notes above). */
| 223 | #define B_PHYS 0x00040000 /* I/O to user memory. */ |
| 224 | #define B_RAW 0x00080000 /* Set by physio for raw transfers. */ |
| 225 | #define B_READ 0x00100000 /* Read buffer. */ |
| 226 | #define B_DIRTY 0x00200000 /* Needs writing later. */ |
| 227 | #define B_RELBUF 0x00400000 /* Release VMIO buffer. */ |
| 228 | #define B_WANT 0x00800000 /* Used by vm_pager.c */ |
| 229 | #define B_WRITE 0x00000000 /* Write buffer (pseudo flag). */ |
| 230 | #define B_WRITEINPROG 0x01000000 /* Write in progress. */ |
| 231 | #define B_XXX 0x02000000 /* Debugging flag. */ |
| 232 | #define B_PAGING 0x04000000 /* volatile paging I/O -- bypass VMIO */ |
| 233 | #define B_ORDERED 0x08000000 /* Must guarantee I/O ordering */ |
| 234 | #define B_RAM 0x10000000 /* Read ahead mark (flag) */ |
| 235 | #define B_VMIO 0x20000000 /* VMIO flag */ |
| 236 | #define B_CLUSTER 0x40000000 /* pagein op, so swap() can count it */ |
| 237 | #define B_NOWDRAIN 0x80000000 /* Avoid wdrain deadlock */ |
| 238 | |
| 239 | #define PRINT_BUF_FLAGS "\20\40nowdrain\37cluster\36vmio\35ram\34ordered" \ |
| 240 | "\33paging\32xxx\31writeinprog\30want\27relbuf\26dirty" \ |
| 241 | "\25read\24raw\23phys\22clusterok\21malloc\20nocache" \ |
| 242 | "\17locked\16inval\15scanned\14error\13eintr\12done\11freebuf" \ |
| 243 | "\10delwri\7call\6cache\4direct\3async\2needcommit\1age" |
| 244 | |
| 245 | /* |
| 246 | * These flags are kept in b_xflags. |
| 247 | */ |
| 248 | #define BX_VNDIRTY 0x00000001 /* On vnode dirty list */ |
| 249 | #define BX_VNCLEAN 0x00000002 /* On vnode clean list */ |
| 250 | #define BX_BKGRDWRITE 0x00000004 /* Do writes in background */ |
| 251 | #define BX_BKGRDINPROG 0x00000008 /* Background write in progress */ |
| 252 | #define BX_BKGRDWAIT 0x00000010 /* Background write waiting */ |
| 253 | #define BX_AUTOCHAINDONE 0x00000020 /* pager I/O chain auto mode */ |
| 254 | |
| 255 | #define NOOFFSET (-1LL) /* No buffer offset calculated yet */ |
| 256 | |
| 257 | #ifdef _KERNEL |
| 258 | /* |
| 259 | * Buffer locking |
| 260 | */ |
/*
 * Declaration only: defining this variable here would create one
 * instance per translation unit that includes this header.  The single
 * definition lives in the buffer-cache implementation (vfs_bio.c).
 */
extern struct simplelock buftimelock;	/* Interlock on setting prio and timo */
| 262 | extern char *buf_wmesg; /* Default buffer lock message */ |
| 263 | #define BUF_WMESG "bufwait" |
| 264 | #include <sys/proc.h> /* XXX for curproc */ |
| 265 | /* |
| 266 | * Initialize a lock. |
| 267 | */ |
| 268 | #define BUF_LOCKINIT(bp) \ |
| 269 | lockinit(&(bp)->b_lock, PRIBIO + 4, buf_wmesg, 0, 0) |
| 270 | /* |
| 271 | * |
| 272 | * Get a lock sleeping non-interruptably until it becomes available. |
| 273 | */ |
static __inline int BUF_LOCK __P((struct buf *, int));
static __inline int
BUF_LOCK(struct buf *bp, int locktype)
{
	int s, ret;

	s = splbio();			/* block bio interrupts around the lock op */
	simple_lock(&buftimelock);	/* interlock protecting wmesg/prio stores */
	locktype |= LK_INTERLOCK;	/* lockmgr() drops buftimelock for us */
	bp->b_lock.lk_wmesg = buf_wmesg;
	bp->b_lock.lk_prio = PRIBIO + 4;
	/* bp->b_lock.lk_timo = 0; not necessary */
	ret = lockmgr(&(bp)->b_lock, locktype, &buftimelock, curproc);
	splx(s);
	return ret;			/* 0 on success, else lockmgr() error */
}
| 290 | /* |
| 291 | * Get a lock sleeping with specified interruptably and timeout. |
| 292 | */ |
static __inline int BUF_TIMELOCK __P((struct buf *, int, char *, int, int));
static __inline int
BUF_TIMELOCK(struct buf *bp, int locktype, char *wmesg, int catch, int timo)
{
	int s, ret;

	s = splbio();			/* block bio interrupts around the lock op */
	simple_lock(&buftimelock);	/* interlock; lockmgr() releases it below */
	locktype |= LK_INTERLOCK | LK_TIMELOCK;
	bp->b_lock.lk_wmesg = wmesg;	/* caller-supplied sleep message */
	bp->b_lock.lk_prio = (PRIBIO + 4) | catch; /* catch: presumably PCATCH or 0 */
	bp->b_lock.lk_timo = timo;	/* sleep timeout, presumably in ticks */
	ret = lockmgr(&(bp)->b_lock, (locktype), &buftimelock, curproc);
	splx(s);
	return ret;			/* 0 on success, else lockmgr() error */
}
| 309 | /* |
| 310 | * Release a lock. Only the acquiring process may free the lock unless |
| 311 | * it has been handed off to biodone. |
| 312 | */ |
| 313 | static __inline void BUF_UNLOCK __P((struct buf *)); |
| 314 | static __inline void |
| 315 | BUF_UNLOCK(struct buf *bp) |
| 316 | { |
| 317 | int s; |
| 318 | |
| 319 | s = splbio(); |
| 320 | lockmgr(&(bp)->b_lock, LK_RELEASE, NULL, curproc); |
| 321 | splx(s); |
| 322 | } |
| 323 | |
| 324 | /* |
| 325 | * Free a buffer lock. |
| 326 | */ |
/*
 * Wrapped in do/while(0) so the macro is a single statement: the old
 * bare-if expansion silently captured a following `else` when used as
 * `if (x) BUF_LOCKFREE(bp); else ...`.  Usage (`BUF_LOCKFREE(bp);`)
 * is unchanged.
 */
#define BUF_LOCKFREE(bp)					\
do {								\
	if (BUF_REFCNT(bp) > 0)					\
		panic("free locked buf");			\
} while (0)
| 330 | /* |
| 331 | * When initiating asynchronous I/O, change ownership of the lock to the |
| 332 | * kernel. Once done, the lock may legally released by biodone. The |
| 333 | * original owning process can no longer acquire it recursively, but must |
| 334 | * wait until the I/O is completed and the lock has been freed by biodone. |
| 335 | */ |
| 336 | static __inline void BUF_KERNPROC __P((struct buf *)); |
| 337 | static __inline void |
| 338 | BUF_KERNPROC(struct buf *bp) |
| 339 | { |
| 340 | struct proc *p = curproc; |
| 341 | |
| 342 | if (p != NULL && bp->b_lock.lk_lockholder == p->p_pid) |
| 343 | p->p_locks--; |
| 344 | bp->b_lock.lk_lockholder = LK_KERNPROC; |
| 345 | } |
| 346 | /* |
| 347 | * Find out the number of references to a lock. |
| 348 | */ |
| 349 | static __inline int BUF_REFCNT __P((struct buf *)); |
| 350 | static __inline int |
| 351 | BUF_REFCNT(struct buf *bp) |
| 352 | { |
| 353 | int s, ret; |
| 354 | |
| 355 | s = splbio(); |
| 356 | ret = lockcount(&(bp)->b_lock); |
| 357 | splx(s); |
| 358 | return ret; |
| 359 | } |
| 360 | |
| 361 | #endif /* _KERNEL */ |
| 362 | |
/*
 * A driver request queue maintained by the bufq_* inlines below,
 * with bookkeeping for B_ORDERED (barrier) buffers.
 */
struct buf_queue_head {
	TAILQ_HEAD(buf_queue, buf) queue; /* the queued buffers (b_act links) */
	daddr_t	last_pblkno;		/* pblkno of last removed head buffer */
	struct	buf *insert_point;	/* most recent B_ORDERED buffer, if any */
	struct	buf *switch_point;	/* NOTE(review): sweep restart point —
					 * maintained by bufq_remove(); confirm
					 * semantics against the disksort code */
};
| 369 | |
| 370 | /* |
| 371 | * This structure describes a clustered I/O. It is stored in the b_saveaddr |
| 372 | * field of the buffer on which I/O is done. At I/O completion, cluster |
| 373 | * callback uses the structure to parcel I/O's to individual buffers, and |
| 374 | * then free's this structure. |
| 375 | */ |
struct cluster_save {
	long	bs_bcount;		/* Saved b_bcount. */
	long	bs_bufsize;		/* Saved b_bufsize. */
	void	*bs_saveaddr;		/* Saved b_addr. */
	int	bs_nchildren;		/* Number of associated buffers. */
	struct buf **bs_children;	/* List of associated buffers
					 * (bs_nchildren entries). */
};
| 383 | |
| 384 | #ifdef _KERNEL |
| 385 | static __inline void bufq_init __P((struct buf_queue_head *head)); |
| 386 | |
| 387 | static __inline void bufq_insert_tail __P((struct buf_queue_head *head, |
| 388 | struct buf *bp)); |
| 389 | |
| 390 | static __inline void bufq_remove __P((struct buf_queue_head *head, |
| 391 | struct buf *bp)); |
| 392 | |
| 393 | static __inline struct buf *bufq_first __P((struct buf_queue_head *head)); |
| 394 | |
| 395 | static __inline void |
| 396 | bufq_init(struct buf_queue_head *head) |
| 397 | { |
| 398 | TAILQ_INIT(&head->queue); |
| 399 | head->last_pblkno = 0; |
| 400 | head->insert_point = NULL; |
| 401 | head->switch_point = NULL; |
| 402 | } |
| 403 | |
| 404 | static __inline void |
| 405 | bufq_insert_tail(struct buf_queue_head *head, struct buf *bp) |
| 406 | { |
| 407 | if ((bp->b_flags & B_ORDERED) != 0) { |
| 408 | head->insert_point = bp; |
| 409 | head->switch_point = NULL; |
| 410 | } |
| 411 | TAILQ_INSERT_TAIL(&head->queue, bp, b_act); |
| 412 | } |
| 413 | |
/*
 * Remove bp from the queue, keeping the barrier/position bookkeeping
 * consistent.  Statement order matters: switch_point and insert_point
 * are adjusted while bp is still linked.
 */
static __inline void
bufq_remove(struct buf_queue_head *head, struct buf *bp)
{
	if (bp == head->switch_point)
		head->switch_point = TAILQ_NEXT(bp, b_act);
	if (bp == head->insert_point) {
		/* Barrier buffer leaving: back the insert point up one. */
		head->insert_point = TAILQ_PREV(bp, buf_queue, b_act);
		if (head->insert_point == NULL)
			head->last_pblkno = 0;
	} else if (bp == TAILQ_FIRST(&head->queue))
		head->last_pblkno = bp->b_pblkno; /* remember head position */
	TAILQ_REMOVE(&head->queue, bp, b_act);
	if (TAILQ_FIRST(&head->queue) == head->switch_point)
		head->switch_point = NULL;
}
| 429 | |
| 430 | static __inline struct buf * |
| 431 | bufq_first(struct buf_queue_head *head) |
| 432 | { |
| 433 | return (TAILQ_FIRST(&head->queue)); |
| 434 | } |
| 435 | |
| 436 | #endif /* _KERNEL */ |
| 437 | |
| 438 | /* |
| 439 | * Definitions for the buffer free lists. |
| 440 | */ |
| 441 | #define BUFFER_QUEUES 6 /* number of free buffer queues */ |
| 442 | |
| 443 | #define QUEUE_NONE 0 /* on no queue */ |
| 444 | #define QUEUE_LOCKED 1 /* locked buffers */ |
| 445 | #define QUEUE_CLEAN 2 /* non-B_DELWRI buffers */ |
| 446 | #define QUEUE_DIRTY 3 /* B_DELWRI buffers */ |
| 447 | #define QUEUE_EMPTYKVA 4 /* empty buffer headers w/KVA assignment */ |
| 448 | #define QUEUE_EMPTY 5 /* empty buffer headers */ |
| 449 | |
| 450 | /* |
| 451 | * Zero out the buffer's data area. |
| 452 | */ |
/*
 * Zero out the buffer's data area and clear the residual count.
 * Wrapped in do/while(0) so `clrbuf(bp);` is a single statement:
 * the old brace-block form broke inside `if (x) clrbuf(bp); else ...`
 * (the trailing semicolon became an empty statement before `else`).
 */
#define clrbuf(bp) do {						\
	bzero((bp)->b_data, (u_int)(bp)->b_bcount);		\
	(bp)->b_resid = 0;					\
} while (0)
| 457 | |
| 458 | /* |
| 459 | * Flags to low-level bitmap allocation routines (balloc). |
| 460 | * |
| 461 | * Note: sequential_heuristic() in kern/vfs_vnops.c limits the count |
| 462 | * to 127. |
| 463 | */ |
| 464 | #define B_SEQMASK 0x7F000000 /* Sequential heuristic mask. */ |
| 465 | #define B_SEQSHIFT 24 /* Sequential heuristic shift. */ |
| 466 | #define B_SEQMAX 0x7F |
| 467 | #define B_CLRBUF 0x01 /* Cleared invalid areas of buffer. */ |
| 468 | #define B_SYNC 0x02 /* Do all allocations synchronously. */ |
| 469 | |
| 470 | #ifdef _KERNEL |
| 471 | extern int nbuf; /* The number of buffer headers */ |
| 472 | extern int maxswzone; /* Max KVA for swap structures */ |
| 473 | extern int maxbcache; /* Max KVA for buffer cache */ |
| 474 | extern int runningbufspace; |
| 475 | extern int buf_maxio; /* nominal maximum I/O for buffer */ |
| 476 | extern struct buf *buf; /* The buffer headers. */ |
| 477 | extern char *buffers; /* The buffer contents. */ |
| 478 | extern int bufpages; /* Number of memory pages in the buffer pool. */ |
| 479 | extern struct buf *swbuf; /* Swap I/O buffer headers. */ |
| 480 | extern int nswbuf; /* Number of swap I/O buffer headers. */ |
| 481 | extern TAILQ_HEAD(swqueue, buf) bswlist; |
| 482 | extern TAILQ_HEAD(bqueues, buf) bufqueues[BUFFER_QUEUES]; |
| 483 | |
| 484 | struct uio; |
| 485 | |
| 486 | caddr_t bufhashinit __P((caddr_t)); |
| 487 | void bufinit __P((void)); |
| 488 | void bwillwrite __P((void)); |
| 489 | int buf_dirty_count_severe __P((void)); |
| 490 | void bremfree __P((struct buf *)); |
| 491 | int bread __P((struct vnode *, daddr_t, int, |
| 492 | struct ucred *, struct buf **)); |
| 493 | int breadn __P((struct vnode *, daddr_t, int, daddr_t *, int *, int, |
| 494 | struct ucred *, struct buf **)); |
| 495 | int bwrite __P((struct buf *)); |
| 496 | void bdwrite __P((struct buf *)); |
| 497 | void bawrite __P((struct buf *)); |
| 498 | void bdirty __P((struct buf *)); |
| 499 | void bundirty __P((struct buf *)); |
| 500 | int bowrite __P((struct buf *)); |
| 501 | void brelse __P((struct buf *)); |
| 502 | void bqrelse __P((struct buf *)); |
| 503 | int vfs_bio_awrite __P((struct buf *)); |
| 504 | struct buf * getpbuf __P((int *)); |
| 505 | struct buf *incore __P((struct vnode *, daddr_t)); |
| 506 | struct buf *gbincore __P((struct vnode *, daddr_t)); |
| 507 | int inmem __P((struct vnode *, daddr_t)); |
| 508 | struct buf *getblk __P((struct vnode *, daddr_t, int, int, int)); |
| 509 | struct buf *geteblk __P((int)); |
| 510 | int biowait __P((struct buf *)); |
| 511 | void biodone __P((struct buf *)); |
| 512 | |
| 513 | void cluster_callback __P((struct buf *)); |
| 514 | int cluster_read __P((struct vnode *, u_quad_t, daddr_t, long, |
| 515 | struct ucred *, long, int, struct buf **)); |
| 516 | int cluster_wbuild __P((struct vnode *, long, daddr_t, int)); |
| 517 | void cluster_write __P((struct buf *, u_quad_t, int)); |
| 518 | int physio __P((dev_t dev, struct uio *uio, int ioflag)); |
| 519 | #define physread physio |
| 520 | #define physwrite physio |
| 521 | void vfs_bio_set_validclean __P((struct buf *, int base, int size)); |
| 522 | void vfs_bio_clrbuf __P((struct buf *)); |
| 523 | void vfs_busy_pages __P((struct buf *, int clear_modify)); |
| 524 | void vfs_unbusy_pages __P((struct buf *)); |
| 525 | void vwakeup __P((struct buf *)); |
| 526 | int vmapbuf __P((struct buf *)); |
| 527 | void vunmapbuf __P((struct buf *)); |
| 528 | void relpbuf __P((struct buf *, int *)); |
| 529 | void brelvp __P((struct buf *)); |
| 530 | void bgetvp __P((struct vnode *, struct buf *)); |
| 531 | void pbgetvp __P((struct vnode *, struct buf *)); |
| 532 | void pbrelvp __P((struct buf *)); |
| 533 | int allocbuf __P((struct buf *bp, int size)); |
| 534 | void reassignbuf __P((struct buf *, struct vnode *)); |
| 535 | void pbreassignbuf __P((struct buf *, struct vnode *)); |
| 536 | struct buf *trypbuf __P((int *)); |
| 537 | |
| 538 | #endif /* _KERNEL */ |
| 539 | |
| 540 | #endif /* !_SYS_BUF_H_ */ |