| 1 | /* |
| 2 | * Copyright (c) 1982, 1986, 1989, 1993 |
| 3 | * The Regents of the University of California. All rights reserved. |
| 4 | * (c) UNIX System Laboratories, Inc. |
| 5 | * All or some portions of this file are derived from material licensed |
| 6 | * to the University of California by American Telephone and Telegraph |
| 7 | * Co. or Unix System Laboratories, Inc. and are reproduced herein with |
| 8 | * the permission of UNIX System Laboratories, Inc. |
| 9 | * |
| 10 | * Redistribution and use in source and binary forms, with or without |
| 11 | * modification, are permitted provided that the following conditions |
| 12 | * are met: |
| 13 | * 1. Redistributions of source code must retain the above copyright |
| 14 | * notice, this list of conditions and the following disclaimer. |
| 15 | * 2. Redistributions in binary form must reproduce the above copyright |
| 16 | * notice, this list of conditions and the following disclaimer in the |
| 17 | * documentation and/or other materials provided with the distribution. |
| 18 | * 3. All advertising materials mentioning features or use of this software |
| 19 | * must display the following acknowledgement: |
| 20 | * This product includes software developed by the University of |
| 21 | * California, Berkeley and its contributors. |
| 22 | * 4. Neither the name of the University nor the names of its contributors |
| 23 | * may be used to endorse or promote products derived from this software |
| 24 | * without specific prior written permission. |
| 25 | * |
| 26 | * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND |
| 27 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
| 28 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
| 29 | * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE |
| 30 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
| 31 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS |
| 32 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
| 33 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
| 34 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
| 35 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
| 36 | * SUCH DAMAGE. |
| 37 | * |
| 38 | * @(#)buf.h 8.9 (Berkeley) 3/30/95 |
| 39 | * $FreeBSD: src/sys/sys/buf.h,v 1.88.2.10 2003/01/25 19:02:23 dillon Exp $ |
| 40 | * $DragonFly: src/sys/sys/buf.h,v 1.33 2006/04/30 20:23:25 dillon Exp $ |
| 41 | */ |
| 42 | |
| 43 | #ifndef _SYS_BUF_H_ |
| 44 | #define _SYS_BUF_H_ |
| 45 | |
| 46 | #ifndef _SYS_QUEUE_H_ |
| 47 | #include <sys/queue.h> |
| 48 | #endif |
| 49 | #ifndef _SYS_LOCK_H_ |
| 50 | #include <sys/lock.h> |
| 51 | #endif |
| 52 | #ifndef _SYS_DEVICE_H_ |
| 53 | #include <sys/device.h> |
| 54 | #endif |
| 55 | |
| 56 | #ifndef _SYS_XIO_H_ |
| 57 | #include <sys/xio.h> |
| 58 | #endif |
| 59 | #ifndef _SYS_TREE_H_ |
| 60 | #include <sys/tree.h> |
| 61 | #endif |
| 62 | #ifndef _SYS_BIO_H_ |
| 63 | #include <sys/bio.h> |
| 64 | #endif |
| 65 | #ifndef _SYS_SPINLOCK_H_ |
| 66 | #include <sys/spinlock.h> |
| 67 | #endif |
| 68 | |
struct buf;
struct bio;
struct mount;
struct vnode;
struct xio;

/* Number of BIO translation layers embedded directly in each struct buf */
#define NBUF_BIO 4

struct buf_rb_tree;
struct buf_rb_hash;
/*
 * Red-black tree prototypes (sys/tree.h) for the per-vnode buffer
 * indexes, keyed by off_t: b_rbnode links a buf into the vnode's
 * clean/dirty tree, b_rbhash into the vnode's hash tree.
 */
RB_PROTOTYPE2(buf_rb_tree, buf, b_rbnode, rb_buf_compare, off_t);
RB_PROTOTYPE2(buf_rb_hash, buf, b_rbhash, rb_buf_compare, off_t);
| 81 | |
| 82 | /* |
| 83 | * To avoid including <ufs/ffs/softdep.h> |
| 84 | */ |
| 85 | LIST_HEAD(workhead, worklist); |
| 86 | /* |
| 87 | * These are currently used only by the soft dependency code, hence |
| 88 | * are stored once in a global variable. If other subsystems wanted |
| 89 | * to use these hooks, a pointer to a set of bio_ops could be added |
| 90 | * to each buffer. |
| 91 | */ |
| 92 | extern struct bio_ops { |
| 93 | void (*io_start) (struct buf *); |
| 94 | void (*io_complete) (struct buf *); |
| 95 | void (*io_deallocate) (struct buf *); |
| 96 | int (*io_fsync) (struct vnode *); |
| 97 | int (*io_sync) (struct mount *); |
| 98 | void (*io_movedeps) (struct buf *, struct buf *); |
| 99 | int (*io_countdeps) (struct buf *, int); |
| 100 | } bioops; |
| 101 | |
/*
 * I/O commands carried in b_cmd.  BUF_CMD_DONE (0) is the quiescent
 * state; the remaining values select the operation the BIO/strategy
 * layers should perform on the buffer.
 */
typedef enum buf_cmd {
	BUF_CMD_DONE = 0,	/* no command pending / command completed */
	BUF_CMD_READ,		/* read from backing store into the buffer */
	BUF_CMD_WRITE,		/* write the buffer to backing store */
	BUF_CMD_FREEBLKS,	/* free underlying blocks, no data transfer
				 * -- NOTE(review): confirm against strategy code */
	BUF_CMD_FORMAT		/* low-level media format request
				 * -- NOTE(review): confirm against strategy code */
} buf_cmd_t;
| 109 | |
| 110 | /* |
| 111 | * The buffer header describes an I/O operation in the kernel. |
| 112 | * |
| 113 | * NOTES: |
| 114 | * b_bufsize, b_bcount. b_bufsize is the allocation size of the |
| 115 | * buffer, either DEV_BSIZE or PAGE_SIZE aligned. b_bcount is the |
| 116 | * originally requested buffer size and can serve as a bounds check |
| 117 | * against EOF. For most, but not all uses, b_bcount == b_bufsize. |
| 118 | * |
| 119 | * b_dirtyoff, b_dirtyend. Buffers support piecemeal, unaligned |
| 120 | * ranges of dirty data that need to be written to backing store. |
| 121 | * The range is typically clipped at b_bcount ( not b_bufsize ). |
| 122 | * |
| 123 | * b_resid. Number of bytes remaining in I/O. After an I/O operation |
| 124 | * completes, b_resid is usually 0 indicating 100% success. |
| 125 | * |
| 126 | * b_bio1 and b_bio2 represent the two primary I/O layers. Additional |
| 127 | * I/O layers are allocated out of the object cache and may also exist. |
| 128 | * |
| 129 | * b_bio1 is the logical layer and contains offset or block number |
| 130 | * data for the primary vnode, b_vp. I/O operations are almost |
| 131 | * universally initiated from the logical layer, so you will often |
| 132 | * see things like: vn_strategy(bp->b_vp, &bp->b_bio1). |
| 133 | * |
| 134 | * b_bio2 is the first physical layer (typically the slice-relative |
| 135 | * layer) and contains the translated offset or block number for |
| 136 | * the block device underlying a filesystem. Filesystems such as UFS |
| 137 | * will maintain cached translations and you may see them initiate |
| 138 | * a 'physical' I/O using vn_strategy(devvp, &bp->b_bio2). BUT, |
| 139 | * remember that the layering is relative to bp->b_vp, so the |
| 140 | * device-relative block numbers for buffer cache operations that occur |
| 141 | * directly on a block device will be in the first BIO layer. |
| 142 | * |
| 143 | * NOTE!!! Only the BIO subsystem accesses b_bio1 and b_bio2 directly. |
| 144 | * ALL STRATEGY LAYERS FOR BOTH VNODES AND DEVICES ONLY ACCESS THE BIO |
| 145 | * PASSED TO THEM, AND WILL PUSH ANOTHER BIO LAYER IF FORWARDING THE |
| 146 | * I/O DEEPER. In particular, a vn_strategy() or dev_dstrategy() |
| 147 | * call should not ever access buf->b_vp as this vnode may be totally |
 * unrelated to the vnode/device whose strategy routine was called.
| 149 | */ |
struct buf {
	RB_ENTRY(buf) b_rbnode;		/* RB node in vnode clean/dirty tree */
	RB_ENTRY(buf) b_rbhash;		/* RB node in vnode hash tree */
	TAILQ_ENTRY(buf) b_freelist;	/* Free list position if not active. */
	struct buf *b_cluster_next;	/* Next buffer (cluster code) */
	struct vnode *b_vp;		/* (vp, loffset) index */
	struct bio b_bio_array[NBUF_BIO]; /* BIO translation layers */
	u_int32_t b_flags;		/* B_* flags. */
	unsigned short b_qindex;	/* buffer queue index */
	unsigned short b_unused01;	/* unused / spare */
	struct lock b_lock;		/* Buffer lock */
	buf_cmd_t b_cmd;		/* I/O command */
	int b_bufsize;			/* Allocated buffer size. */
	int b_runningbufspace;		/* when I/O is running, pipelining */
	int b_bcount;			/* Valid bytes in buffer. */
	int b_resid;			/* Remaining I/O */
	int b_error;			/* Error return */
	caddr_t b_data;			/* Memory, superblocks, indirect etc. */
	caddr_t b_kvabase;		/* base kva for buffer */
	int b_kvasize;			/* size of kva for buffer */
	int b_dirtyoff;			/* Offset in buffer of dirty region. */
	int b_dirtyend;			/* Offset of end of dirty region. */
	struct xio b_xio;		/* data buffer page list management */
	struct workhead b_dep;		/* List of filesystem dependencies. */
};
| 175 | |
| 176 | /* |
| 177 | * XXX temporary |
| 178 | */ |
| 179 | #define b_bio1 b_bio_array[0] /* logical layer */ |
| 180 | #define b_bio2 b_bio_array[1] /* (typically) the disk layer */ |
| 181 | #define b_loffset b_bio1.bio_offset |
| 182 | |
| 183 | /* |
| 184 | * These flags are kept in b_flags. |
| 185 | * |
| 186 | * Notes: |
| 187 | * |
| 188 | * B_ASYNC VOP calls on bp's are usually async whether or not |
| 189 | * B_ASYNC is set, but some subsystems, such as NFS, like |
| 190 | * to know what is best for the caller so they can |
| 191 | * optimize the I/O. |
| 192 | * |
| 193 | * B_PAGING Indicates that bp is being used by the paging system or |
| 194 | * some paging system and that the bp is not linked into |
| 195 | * the b_vp's clean/dirty linked lists or ref counts. |
| 196 | * Buffer vp reassignments are illegal in this case. |
| 197 | * |
| 198 | * B_CACHE This may only be set if the buffer is entirely valid. |
| 199 | * The situation where B_DELWRI is set and B_CACHE is |
| 200 | * clear MUST be committed to disk by getblk() so |
| 201 | * B_DELWRI can also be cleared. See the comments for |
| 202 | * getblk() in kern/vfs_bio.c. If B_CACHE is clear, |
| 203 | * the caller is expected to clear B_ERROR|B_INVAL, |
| 204 | * set BUF_CMD_READ, and initiate an I/O. |
| 205 | * |
| 206 | * The 'entire buffer' is defined to be the range from |
| 207 | * 0 through b_bcount. |
| 208 | * |
| 209 | * B_MALLOC Request that the buffer be allocated from the malloc |
| 210 | * pool, DEV_BSIZE aligned instead of PAGE_SIZE aligned. |
| 211 | * |
| 212 | * B_CLUSTEROK This flag is typically set for B_DELWRI buffers |
| 213 | * by filesystems that allow clustering when the buffer |
| 214 | * is fully dirty and indicates that it may be clustered |
| 215 | * with other adjacent dirty buffers. Note the clustering |
| 216 | * may not be used with the stage 1 data write under NFS |
| 217 | * but may be used for the commit rpc portion. |
| 218 | * |
| 219 | * B_VMIO Indicates that the buffer is tied into an VM object. |
| 220 | * The buffer's data is always PAGE_SIZE aligned even |
| 221 | * if b_bufsize and b_bcount are not. ( b_bufsize is |
| 222 | * always at least DEV_BSIZE aligned, though ). |
| 223 | * |
| 224 | * B_DIRECT Hint that we should attempt to completely free |
| 225 | * the pages underlying the buffer. B_DIRECT is |
| 226 | * sticky until the buffer is released and typically |
| 227 | * only has an effect when B_RELBUF is also set. |
| 228 | * |
| 229 | * B_NOWDRAIN This flag should be set when a device (like VN) |
| 230 | * does a turn-around VOP_WRITE from its strategy |
| 231 | * routine. This flag prevents bwrite() from blocking |
| 232 | * in wdrain, avoiding a deadlock situation. |
| 233 | */ |
| 234 | |
#define B_AGE 0x00000001 /* Move to age queue when I/O done. */
#define B_NEEDCOMMIT 0x00000002 /* Append-write in progress. */
#define B_ASYNC 0x00000004 /* Start I/O, do not wait. */
#define B_DIRECT 0x00000008 /* direct I/O flag (pls free vmio) */
#define B_DEFERRED 0x00000010 /* Skipped over for cleaning */
#define B_CACHE 0x00000020 /* Bread found us in the cache. */
#define B_HASHED 0x00000040 /* Indexed via v_rbhash_tree */
#define B_DELWRI 0x00000080 /* Delay I/O until buffer reused. */
#define B_UNUSED0100 0x00000100 /* Unused */
#define B_UNUSED0200 0x00000200 /* Unused */
#define B_EINTR 0x00000400 /* I/O was interrupted */
#define B_ERROR 0x00000800 /* I/O error occurred. */
#define B_UNUSED1000 0x00001000 /* Unused */
#define B_INVAL 0x00002000 /* Does not contain valid info. */
#define B_LOCKED 0x00004000 /* Locked in core (not reusable). */
#define B_NOCACHE 0x00008000 /* Do not cache block after use. */
#define B_MALLOC 0x00010000 /* malloced b_data */
#define B_CLUSTEROK 0x00020000 /* May be clustered w/adjacent dirty bufs (see notes above). */
#define B_UNUSED40000 0x00040000 /* Unused */
#define B_RAW 0x00080000 /* Set by physio for raw transfers. */
#define B_UNUSED100000 0x00100000 /* Unused */
#define B_DIRTY 0x00200000 /* Needs writing later. */
#define B_RELBUF 0x00400000 /* Release VMIO buffer. */
#define B_WANT 0x00800000 /* Used by vm_pager.c */
#define B_VNCLEAN 0x01000000 /* On vnode clean list */
#define B_VNDIRTY 0x02000000 /* On vnode dirty list */
#define B_PAGING 0x04000000 /* volatile paging I/O -- bypass VMIO */
#define B_ORDERED 0x08000000 /* Must guarantee I/O ordering */
#define B_RAM 0x10000000 /* Read ahead mark (flag) */
#define B_VMIO 0x20000000 /* VMIO flag */
#define B_CLUSTER 0x40000000 /* pagein op, so swap() can count it */
#define B_NOWDRAIN 0x80000000 /* Avoid wdrain deadlock */
| 267 | |
/*
 * Flag-name table, presumably for the kernel's "%b" bitmask printer:
 * each "\<n>" (octal, 1-based bit position) is followed by the name of
 * that bit.  Must be kept in sync with the B_* definitions above.
 */
#define PRINT_BUF_FLAGS "\20" \
	"\40nowdrain\37cluster\36vmio\35ram\34ordered" \
	"\33paging\32vndirty\31vnclean\30want\27relbuf\26dirty" \
	"\25unused20\24raw\23unused18\22clusterok\21malloc\20nocache" \
	"\17locked\16inval\15unused12\14error\13eintr\12unused9\11unused8" \
	"\10delwri\7hashed\6cache\5deferred\4direct\3async\2needcommit\1age"

#define NOOFFSET (-1LL) /* No buffer offset calculated yet */
| 276 | |
#ifdef _KERNEL
/*
 * Buffer locking. See sys/buf2.h for inline functions.
 */
extern char *buf_wmesg;	/* Default buffer lock message */
#define BUF_WMESG "bufwait"	/* default wait-message string for buffer locks */

#endif /* _KERNEL */
| 285 | |
/*
 * Head of a queue of pending BIOs.
 *
 * NOTE(review): last_offset/insert_point/switch_point look like
 * elevator/disksort scheduling state (last dispatched offset plus
 * insertion hints) -- confirm against the bioq implementation.
 */
struct bio_queue_head {
	TAILQ_HEAD(bio_queue, bio) queue;	/* the queued BIOs */
	off_t last_offset;
	struct bio *insert_point;
	struct bio *switch_point;
};
| 292 | |
| 293 | /* |
| 294 | * This structure describes a clustered I/O. |
| 295 | */ |
| 296 | struct cluster_save { |
| 297 | int bs_nchildren; /* Number of associated buffers. */ |
| 298 | struct buf **bs_children; /* List of associated buffers. */ |
| 299 | }; |
| 300 | |
| 301 | /* |
| 302 | * Zero out the buffer's data area. |
| 303 | */ |
| 304 | #define clrbuf(bp) { \ |
| 305 | bzero((bp)->b_data, (u_int)(bp)->b_bcount); \ |
| 306 | (bp)->b_resid = 0; \ |
| 307 | } |
| 308 | |
| 309 | /* |
| 310 | * Flags to low-level bitmap allocation routines (balloc). |
| 311 | * |
| 312 | * Note: sequential_heuristic() in kern/vfs_vnops.c limits the count |
| 313 | * to 127. |
| 314 | */ |
| 315 | #define B_SEQMASK 0x7F000000 /* Sequential heuristic mask. */ |
| 316 | #define B_SEQSHIFT 24 /* Sequential heuristic shift. */ |
| 317 | #define B_SEQMAX 0x7F |
| 318 | #define B_CLRBUF 0x01 /* Cleared invalid areas of buffer. */ |
| 319 | #define B_SYNC 0x02 /* Do all allocations synchronously. */ |
| 320 | |
#ifdef _KERNEL
extern int nbuf; /* The number of buffer headers */
extern int maxswzone; /* Max KVA for swap structures */
extern int maxbcache; /* Max KVA for buffer cache */
extern int runningbufspace; /* aggregate in-flight write space (see b_runningbufspace) */
extern int buf_maxio; /* nominal maximum I/O for buffer */
extern struct buf *buf; /* The buffer headers. */
extern char *buffers; /* The buffer contents. */
extern int bufpages; /* Number of memory pages in the buffer pool. */
extern struct buf *swbuf; /* Swap I/O buffer headers. */
extern int nswbuf; /* Number of swap I/O buffer headers. */

struct uio;

/* Subsystem initialization and dirty-buffer write pacing */
void bufinit (void);
void bwillwrite (void);
int buf_dirty_count_severe (void);
/* BIO translation-layer setup/reset on an existing buf */
void initbufbio(struct buf *);
void reinitbufbio(struct buf *);
void clearbiocache(struct bio *);
/* Buffer acquisition, read and write entry points */
void bremfree (struct buf *);
int bread (struct vnode *, off_t, int, struct buf **);
int breadn (struct vnode *, off_t, int, off_t *, int *, int,
	struct buf **);
int bwrite (struct buf *);
void bdwrite (struct buf *);
void bawrite (struct buf *);
void bdirty (struct buf *);
void bundirty (struct buf *);
int bowrite (struct buf *);
void brelse (struct buf *);
void bqrelse (struct buf *);
int vfs_bio_awrite (struct buf *);
struct buf *getpbuf (int *);
int inmem (struct vnode *, off_t);
struct buf *findblk (struct vnode *, off_t);
struct buf *getblk (struct vnode *, off_t, int, int, int);
struct buf *geteblk (int);
/* Push/pop additional BIO translation layers (see NOTE above struct buf) */
struct bio *push_bio(struct bio *);
void pop_bio(struct bio *);
int biowait (struct buf *);
void biodone (struct bio *);

/* Clustered I/O */
void cluster_append(struct bio *, struct buf *);
int cluster_read (struct vnode *, off_t, off_t, int,
	int, int, struct buf **);
int cluster_wbuild (struct vnode *, int, off_t, int);
void cluster_write (struct buf *, off_t, int);
/* Raw (physical) I/O */
int physio (dev_t dev, struct uio *uio, int ioflag);
#define physread physio
#define physwrite physio
/* VM page / mapping management for buffers */
void vfs_bio_set_validclean (struct buf *, int base, int size);
void vfs_bio_clrbuf (struct buf *);
void vfs_busy_pages (struct vnode *, struct buf *);
void vfs_unbusy_pages (struct buf *);
int vmapbuf (struct buf *, caddr_t, int);
void vunmapbuf (struct buf *);
void relpbuf (struct buf *, int *);
/* Vnode association and sizing */
void brelvp (struct buf *);
void bgetvp (struct vnode *, struct buf *);
int allocbuf (struct buf *bp, int size);
int scan_all_buffers (int (*)(struct buf *, void *), void *);
void reassignbuf (struct buf *);
struct buf *trypbuf (int *);

#endif /* _KERNEL */
| 387 | |
| 388 | #endif /* !_SYS_BUF_H_ */ |