2 * Copyright (c) 1982, 1986, 1989, 1993
3 * The Regents of the University of California. All rights reserved.
4 * (c) UNIX System Laboratories, Inc.
5 * All or some portions of this file are derived from material licensed
6 * to the University of California by American Telephone and Telegraph
7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8 * the permission of UNIX System Laboratories, Inc.
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * @(#)buf.h 8.9 (Berkeley) 3/30/95
35 * $FreeBSD: src/sys/sys/buf.h,v 1.88.2.10 2003/01/25 19:02:23 dillon Exp $
47 #include <sys/mount.h>
50 #include <sys/vnode.h>
52 #ifndef _VM_VM_PAGE_H_
53 #include <vm/vm_page.h>
/*
 * BUF_LOCKINIT(bp): one-time initialization of the buffer's embedded
 * lockmgr lock, using the global buf_wmesg wait message.
 * NOTE(review): LK_NOCOLLSTATS presumably disables collision statistics
 * for this lock -- confirm against <sys/lock.h>.
 */
59 #define BUF_LOCKINIT(bp) \
60 lockinit(&(bp)->b_lock, buf_wmesg, 0, LK_NOCOLLSTATS)
/*
 * BUF_LOCK(bp, locktype): acquire the buffer's lock, sleeping
 * non-interruptibly until it becomes available.  Resets the wait
 * message to the default buf_wmesg, then returns the lockmgr() result.
 *
 * XXX lk_wmesg can race, but should not result in any operational issues.
 */
69 BUF_LOCK(struct buf *bp, int locktype)
71 bp->b_lock.lk_wmesg = buf_wmesg;
72 return (lockmgr(&(bp)->b_lock, locktype));
/*
 * BUF_TIMELOCK(bp, locktype, wmesg, timo): acquire the buffer's lock
 * with caller-specified interruptibility (via locktype), wait message,
 * and timeout.  Returns the lockmgr() result.
 *
 * XXX lk_timo can race against other entities calling BUF_TIMELOCK,
 * but will not interfere with entities calling BUF_LOCK since LK_TIMELOCK
 * will not be set in that case.
 *
 * XXX lk_wmesg can race, but should not result in any operational issues.
 */
84 BUF_TIMELOCK(struct buf *bp, int locktype, char *wmesg, int timo)
86 bp->b_lock.lk_wmesg = wmesg;
87 bp->b_lock.lk_timo = timo;
88 return (lockmgr(&(bp)->b_lock, locktype | LK_TIMELOCK));
/*
 * BUF_UNLOCK(bp): release the buffer's lock via lockmgr(LK_RELEASE).
 * Only the acquiring process may free the lock unless it has been
 * handed off to biodone.
 */
95 BUF_UNLOCK(struct buf *bp)
97 lockmgr(&(bp)->b_lock, LK_RELEASE);
/*
 * BUF_KERNPROC(bp): when initiating asynchronous I/O, change ownership
 * of the lock to the kernel.  Once done, the lock may legally be
 * released by biodone.  The original owning process can no longer
 * acquire it recursively, but must wait until the I/O is completed and
 * the lock has been freed by biodone.
 */
107 BUF_KERNPROC(struct buf *bp)
109 lockmgr_kernproc(&(bp)->b_lock);
/*
 * BUF_LOCKINUSE(bp): non-blocking test of whether the buffer's lock has
 * any references.  Should only be used for assertions in cases where
 * the buffer is expected to be owned or otherwise data stable.
 */
118 BUF_LOCKINUSE(struct buf *bp)
120 return (lockinuse(&(bp)->b_lock));
/*
 * BUF_LOCKFREE(bp): sanity check made prior to freeing a buffer lock;
 * panics if the lock is still referenced.
 *
 * Wrapped in do { } while (0) so the macro expands to a single
 * statement and cannot mis-bind a dangling else when used inside an
 * unbraced if/else (the original bare `if` expansion could).  Callers
 * invoking it as `BUF_LOCKFREE(bp);` are unaffected.
 */
#define BUF_LOCKFREE(bp)				\
	do {						\
		if (BUF_LOCKINUSE(bp))			\
			panic("free locked buf");	\
	} while (0)
/*
 * bioq_init(bioq): reset a bio queue head to the empty state -- empty
 * TAILQ, no read/write transition point, and unused fields cleared.
 */
131 bioq_init(struct bio_queue_head *bioq)
133 TAILQ_INIT(&bioq->queue);
134 bioq->off_unused = 0;
136 bioq->transition = NULL;
137 bioq->bio_unused = NULL;
/*
 * bioq_insert_tail(bioq, bio): append bio to the tail of the queue.
 * The transition (read insertion) point is invalidated; presumably it
 * is recomputed by the sorted-insertion path -- confirm against the
 * full bioq implementation.
 */
141 bioq_insert_tail(struct bio_queue_head *bioq, struct bio *bio)
143 bioq->transition = NULL;
144 TAILQ_INSERT_TAIL(&bioq->queue, bio, bio_act);
/*
 * bioq_remove(bioq, bio): unlink bio from the queue while maintaining
 * the read/write transition pointer.
 */
148 bioq_remove(struct bio_queue_head *bioq, struct bio *bio)
151 * Adjust the read insertion point when removing the bio it points
152 * at: advance it to the next entry via TAILQ_NEXT (NULL will
153 * indicate all the reads have cleared).  NOTE(review): the original
154 * comment said "move backwards one", which contradicts TAILQ_NEXT;
155 * the code advances forward -- confirm intent.
155 if (bio == bioq->transition)
156 bioq->transition = TAILQ_NEXT(bio, bio_act);
157 TAILQ_REMOVE(&bioq->queue, bio, bio_act);
/*
 * bioq_first(bioq): peek at the first bio on the queue without removing
 * it; NULL if the queue is empty.
 */
160 static __inline struct bio *
161 bioq_first(struct bio_queue_head *bioq)
163 return (TAILQ_FIRST(&bioq->queue));
/*
 * bioq_takefirst(bioq): dequeue and return the first bio on the queue.
 * NOTE(review): presumably returns NULL on an empty queue with the
 * bioq_remove() call guarded by a NULL check -- that guard is elided in
 * this excerpt; confirm against the full source.
 */
166 static __inline struct bio *
167 bioq_takefirst(struct bio_queue_head *bioq)
171 bp = TAILQ_FIRST(&bioq->queue);
173 bioq_remove(bioq, bp);
/*
 * buf_act_advance(bp): increase the buffer cache buffer's activity
 * count by ACT_ADVANCE, clamping the result at ACT_MAX.  This works
 * similarly to vm_page->act_count.
 */
182 buf_act_advance(struct buf *bp)
184 if (bp->b_act_count > ACT_MAX - ACT_ADVANCE)
185 bp->b_act_count = ACT_MAX;
187 bp->b_act_count += ACT_ADVANCE;
/*
 * buf_act_decline(bp): decrease the buffer's activity count by
 * ACT_DECLINE.  NOTE(review): presumably clamps at zero when the count
 * is already below ACT_DECLINE -- the clamp branch is elided in this
 * excerpt; confirm against the full source.
 */
191 buf_act_decline(struct buf *bp)
193 if (bp->b_act_count < ACT_DECLINE)
196 bp->b_act_count -= ACT_DECLINE;
/*
 * biodeps inlines - used by softupdates and HAMMER.
 *
 * All bioops are MPSAFE.
 *
 * buf_dep_init(bp): initialize the buffer's dependency list to empty.
 */
205 buf_dep_init(struct buf *bp)
208 LIST_INIT(&bp->b_dep);
/*
 * buf_deallocate(bp): invoke the io_deallocate bioop for the buffer.
 * Precondition: the buffer has some dependencies (asserted via
 * KKASSERT below).
 */
217 buf_deallocate(struct buf *bp)
219 struct bio_ops *ops = bp->b_ops;
221 KKASSERT(! LIST_EMPTY(&bp->b_dep));
223 ops->io_deallocate(bp);
/*
 * buf_countdeps(bp, n): count the buffer's dependencies via the
 * io_countdeps bioop.
 *
 * This callback is made from flushbufqueues() which uses BUF_LOCK().
 * Since it isn't going through a normal buffer acquisition mechanic
 * and calling the filesystem back, enforce the vnode's KVABIO support.
 */
232 buf_countdeps(struct buf *bp, int n)
234 struct bio_ops *ops = bp->b_ops;
238 if (bp->b_vp == NULL || (bp->b_vp->v_flag & VKVABIO) == 0)
240 r = ops->io_countdeps(bp, n);
/*
 * buf_start(bp): notify the dependency subsystem that I/O on the buffer
 * is starting.  NOTE(review): the io_start dispatch and return are
 * elided in this excerpt; confirm against the full source.
 */
251 buf_start(struct buf *bp)
253 struct bio_ops *ops = bp->b_ops;
/*
 * buf_complete(bp): invoke the io_complete bioop on I/O completion for
 * the buffer.
 */
263 buf_complete(struct buf *bp)
265 struct bio_ops *ops = bp->b_ops;
268 ops->io_complete(bp);
/*
 * buf_fsync(vp): invoke the mount point's io_fsync bioop for the vnode
 * and return its result.
 */
275 buf_fsync(struct vnode *vp)
277 struct bio_ops *ops = vp->v_mount->mnt_bioops;
281 r = ops->io_fsync(vp);
/*
 * buf_movedeps(bp1, bp2): move dependencies from bp1 to bp2 via the
 * io_movedeps bioop (dispatched through bp1's ops vector).
 */
291 buf_movedeps(struct buf *bp1, struct buf *bp2)
293 struct bio_ops *ops = bp1->b_ops;
296 ops->io_movedeps(bp1, bp2);
/*
 * buf_checkread(bp): ask the dependency subsystem, via the io_checkread
 * bioop, whether the buffer may be read.
 */
303 buf_checkread(struct buf *bp)
305 struct bio_ops *ops = bp->b_ops;
308 return(ops->io_checkread(bp));
/*
 * buf_checkwrite(bp): ask the dependency subsystem, via the
 * io_checkwrite bioop, whether the buffer may be written.
 *
 * This callback is made from flushbufqueues() which uses BUF_LOCK().
 * Since it isn't going through a normal buffer acquisition mechanic
 * and calling the filesystem back, enforce the vnode's KVABIO support.
 */
318 buf_checkwrite(struct buf *bp)
320 struct bio_ops *ops = bp->b_ops;
323 if (bp->b_vp == NULL || (bp->b_vp->v_flag & VKVABIO) == 0)
325 return(ops->io_checkwrite(bp));
/*
 * biodone_chain(bio): chained biodone.  The bio callback was made and
 * the callback function wishes to chain the biodone.  If a previous BIO
 * exists it is biodone()'d; if no BIO's are left we call bpdone() with
 * elseit=TRUE (asynchronous completion).  NOTE(review): the branch
 * structure between the two calls is elided in this excerpt.
 */
338 biodone_chain(struct bio *bio)
341 biodone(bio->bio_prev);
343 bpdone(bio->bio_buf, 1);
/*
 * bread(vp, loffset, size, bpp): read a buffer; convenience wrapper
 * around breadnx() with B_NOTMETA and no read-ahead.
 */
347 bread(struct vnode *vp, off_t loffset, int size, struct buf **bpp)
350 return(breadnx(vp, loffset, size, B_NOTMETA,
351 NULL, NULL, 0, bpp));
/*
 * bread_kvabio(vp, loffset, size, bpp): as bread(), but also sets
 * B_KVABIO, indicating the caller can handle KVABIO buffers.
 */
355 bread_kvabio(struct vnode *vp, off_t loffset, int size, struct buf **bpp)
358 return(breadnx(vp, loffset, size, B_NOTMETA | B_KVABIO,
359 NULL, NULL, 0, bpp));
/*
 * breadn(): read a buffer with read-ahead hints (raoffset/rabsize/cnt
 * arrays); wrapper around breadnx() with B_NOTMETA.  NOTE(review): the
 * tail of the breadnx() argument list is elided in this excerpt.
 */
363 breadn(struct vnode *vp, off_t loffset, int size, off_t *raoffset,
364 int *rabsize, int cnt, struct buf **bpp)
367 return(breadnx(vp, loffset, size, B_NOTMETA, raoffset,
/*
 * cluster_read(): clustered read; convenience wrapper around
 * cluster_readx() with B_NOTMETA.
 */
372 cluster_read(struct vnode *vp, off_t filesize, off_t loffset,
373 int blksize, size_t minreq, size_t maxreq, struct buf **bpp)
376 return(cluster_readx(vp, filesize, loffset, blksize, B_NOTMETA,
377 minreq, maxreq, bpp));
/*
 * cluster_read_kvabio(): as cluster_read(), but also sets B_KVABIO,
 * indicating the caller can handle KVABIO buffers.
 */
381 cluster_read_kvabio(struct vnode *vp, off_t filesize, off_t loffset,
382 int blksize, size_t minreq, size_t maxreq, struct buf **bpp)
385 return(cluster_readx(vp, filesize, loffset, blksize,
386 B_NOTMETA | B_KVABIO,
387 minreq, maxreq, bpp));
392 #endif /* !_SYS_BUF2_H_ */