/*-
 * Copyright (c) 2000-2003 Tor Egge
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/ufs/ffs/ffs_rawread.c,v 1.3.2.2 2003/05/29 06:15:35 alc Exp $
 * $DragonFly: src/sys/vfs/ufs/ffs_rawread.c,v 1.22 2006/04/30 20:23:26 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/conf.h>
#include <sys/filio.h>
#include <sys/ttycom.h>
#include <sys/buf.h>
#include "quota.h"
#include "inode.h"
#include "fs.h"

#include <machine/limits.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
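
/*
 * Raw reads transfer file data directly between the disk and the
 * caller's buffer, bypassing the buffer cache.  User pages are mapped
 * into pbufs and the bios are dispatched straight to the device vnode,
 * one block-map run at a time, with optional readahead.
 */
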
static int ffs_rawread_readahead(struct vnode *vp, caddr_t udata, off_t offset,
				 size_t len, struct thread *td, struct buf *bp,
				 int *baseticks);
static int ffs_rawread_main(struct vnode *vp,
			    struct uio *uio);

static int ffs_rawread_sync(struct vnode *vp, struct thread *td);

int ffs_rawread(struct vnode *vp, struct uio *uio, int *workdone);

void ffs_rawread_setup(void);

static void ffs_rawreadwakeup(struct bio *bio);


SYSCTL_DECL(_vfs_ffs);

static int ffsrawbufcnt = 4;
SYSCTL_INT(_vfs_ffs, OID_AUTO, ffsrawbufcnt, CTLFLAG_RD, &ffsrawbufcnt, 0,
	   "Buffers available for raw reads");

static int allowrawread = 1;
SYSCTL_INT(_vfs_ffs, OID_AUTO, allowrawread, CTLFLAG_RW, &allowrawread, 0,
	   "Flag to enable raw reads");

static int rawreadahead = 1;
SYSCTL_INT(_vfs_ffs, OID_AUTO, rawreadahead, CTLFLAG_RW, &rawreadahead, 0,
	   "Flag to enable readahead for long raw reads");


void
ffs_rawread_setup(void)
{
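	/*
	 * Size the raw-read pbuf pool from nswbuf, reserving some buffers
	 * for the rest of the system: 1/16 of nswbuf on larger systems,
	 * 8 buffers on smaller ones.
	 */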
	ffsrawbufcnt = (nswbuf > 100) ? (nswbuf - (nswbuf >> 4)) : nswbuf - 8;
}
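

/*
 * Before bypassing the buffer cache, bring the file to a clean, stable
 * state: msync() dirty mmap() regions, wait for pending writes, and
 * flush dirty buffers, upgrading to an exclusive vnode lock as needed.
 */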
static int
ffs_rawread_sync(struct vnode *vp, struct thread *td)
{
	int error;
	int upgraded;

	/* Check for dirty mmap, pending writes and dirty buffers */
	if (vp->v_track_write.bk_active > 0 ||
	    !RB_EMPTY(&vp->v_rbdirty_tree) ||
	    (vp->v_flag & VOBJDIRTY) != 0) {
		if (VOP_ISLOCKED(vp, td) != LK_EXCLUSIVE) {
			upgraded = 1;
			/* Upgrade to exclusive lock, this might block */
			VOP_LOCK(vp, LK_UPGRADE | LK_NOPAUSE, td);
		} else {
			upgraded = 0;
		}

		/* Attempt to msync mmap() regions to clean dirty mmap */
		if ((vp->v_flag & VOBJDIRTY) != 0) {
			struct vm_object *obj;

			if ((obj = vp->v_object) != NULL)
				vm_object_page_clean(obj, 0, 0, OBJPC_SYNC);
		}

		/* Wait for pending writes to complete */
		crit_enter();
		while (vp->v_track_write.bk_active) {
			vp->v_track_write.bk_waitflag = 1;
			error = tsleep(&vp->v_track_write, 0, "rawrdfls", 0);
			if (error != 0) {
				crit_exit();
				if (upgraded != 0)
					VOP_LOCK(vp, LK_DOWNGRADE, td);
				return (error);
			}
		}
		crit_exit();

		/* Flush dirty buffers */
		if (!RB_EMPTY(&vp->v_rbdirty_tree)) {
			if ((error = VOP_FSYNC(vp, MNT_WAIT, td)) != 0) {
				if (upgraded != 0)
					VOP_LOCK(vp, LK_DOWNGRADE, td);
				return (error);
			}
			if (vp->v_track_write.bk_active > 0 ||
			    !RB_EMPTY(&vp->v_rbdirty_tree))
				panic("ffs_rawread_sync: dirty bufs");
		}
		if (upgraded != 0)
			VOP_LOCK(vp, LK_DOWNGRADE, td);
	}
	return 0;
}
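

/*
 * Issue a single raw read of at most one contiguous block-map run.
 * The user buffer is mapped directly into the pbuf's KVA and the bio
 * is dispatched to the underlying device vnode; holes are zero-filled
 * instead of being read.
 */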
static int
ffs_rawread_readahead(struct vnode *vp, caddr_t udata, off_t loffset,
		      size_t len, struct thread *td, struct buf *bp,
		      int *baseticks)
{
	int error;
	int iolen;
	int blockoff;
	int bsize;
	struct vnode *dp;
	off_t bforwards;

	bsize = vp->v_mount->mnt_stat.f_iosize;

	/*
	 * Make sure it fits into the pbuf
	 */
	iolen = (int)(intptr_t)udata & PAGE_MASK;
	if (len + iolen > bp->b_kvasize) {
		len = bp->b_kvasize;
		if (iolen != 0)
			len -= PAGE_SIZE;
	}
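
	/*
	 * Prepare the pbuf: clear stale error state, record the logical
	 * offset, and mark the device offset unresolved until VOP_BMAP()
	 * translates it.  ffs_rawreadwakeup() flags completion.
	 */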
	bp->b_flags &= ~B_ERROR;
	bp->b_loffset = loffset;
	bp->b_bio2.bio_offset = NOOFFSET;
	bp->b_bio2.bio_done = ffs_rawreadwakeup;

	blockoff = (loffset % bsize) / DEV_BSIZE;

	error = VOP_BMAP(vp, bp->b_loffset, &dp, &bp->b_bio2.bio_offset,
			 &bforwards, NULL);
	if (error != 0)
		return error;

	if (bp->b_bio2.bio_offset == NOOFFSET) {
		/*
		 * Fill holes with NULs to preserve semantics
		 */
		if (len + blockoff * DEV_BSIZE > bsize)
			len = bsize - blockoff * DEV_BSIZE;

		if (vmapbuf(bp, udata, len) < 0)
			return EFAULT;

		if (ticks - *baseticks >= hogticks) {
			*baseticks = ticks;
			uio_yield();
		}
		bzero(bp->b_data, bp->b_bcount);

		/* Mark operation completed (similar to bufdone()) */
		bp->b_resid = 0;
		bp->b_cmd = BUF_CMD_DONE;
		return 0;
	}

	if (len + blockoff * DEV_BSIZE > bforwards)
		len = bforwards - blockoff * DEV_BSIZE;
	bp->b_bio2.bio_offset += blockoff * DEV_BSIZE;

	if (vmapbuf(bp, udata, len) < 0)
		return EFAULT;

	/*
	 * Access the block device layer using the device vnode (dp) and
	 * the translated block number (bio2) instead of the logical block
	 * number (bio1).
	 *
	 * Even though we are bypassing the vnode layer, we still
	 * want the vnode state to indicate that an I/O on its behalf
	 * is in progress.
	 */
	bp->b_cmd = BUF_CMD_READ;
	bio_start_transaction(&bp->b_bio1, &vp->v_track_read);
	vn_strategy(dp, &bp->b_bio2);
	return 0;
}
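

/*
 * Main raw-read loop: keep one I/O in flight and, when readahead is
 * enabled, a second one queued behind it, swapping the two pbufs as
 * each completes until the request is satisfied or an error occurs.
 */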
static int
ffs_rawread_main(struct vnode *vp, struct uio *uio)
{
	int error, nerror;
	struct buf *bp, *nbp, *tbp;
	int iolen;
	caddr_t udata;
	int resid;
	off_t offset;
	struct thread *td;
	int baseticks = ticks;

	td = uio->uio_td ? uio->uio_td : curthread;
	udata = uio->uio_iov->iov_base;
	resid = uio->uio_resid;
	offset = uio->uio_offset;

	error = 0;
	nerror = 0;

	bp = NULL;
	nbp = NULL;

	while (resid > 0) {
		if (bp == NULL) { /* Setup first read */
			/* XXX: Leave some bufs for swap */
			bp = getpbuf(&ffsrawbufcnt);
			error = ffs_rawread_readahead(vp, udata, offset, resid,
						      td, bp, &baseticks);
			if (error != 0)
				break;

			if (resid > bp->b_bufsize) { /* Setup first readahead */
				/* XXX: Leave bufs for swap */
				if (rawreadahead != 0)
					nbp = trypbuf(&ffsrawbufcnt);
				else
					nbp = NULL;
				if (nbp != NULL) {
					nerror = ffs_rawread_readahead(
							vp,
							udata + bp->b_bufsize,
							offset + bp->b_bufsize,
							resid - bp->b_bufsize,
							td, nbp, &baseticks);
					if (nerror != 0) {
						relpbuf(nbp, &ffsrawbufcnt);
						nbp = NULL;
					}
				}
			}
		}
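
		/* Wait for the current I/O; ffs_rawreadwakeup() ends the sleep */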
		crit_enter();
		while (bp->b_cmd != BUF_CMD_DONE)
			tsleep((caddr_t)&bp->b_bio2, 0, "rawrd", 0);
		crit_exit();

		vunmapbuf(bp);

		iolen = bp->b_bcount - bp->b_resid;
		if (iolen == 0 && (bp->b_flags & B_ERROR) == 0) {
			nerror = 0;	/* Ignore possible beyond EOF error */
			break;		/* EOF */
		}

		if ((bp->b_flags & B_ERROR) != 0) {
			error = bp->b_error;
			break;
		}
		clearbiocache(&bp->b_bio2);
		resid -= iolen;
		udata += iolen;
		offset += iolen;
		if (iolen < bp->b_bufsize) {
			/* Incomplete read.  Try to read remaining part */
			error = ffs_rawread_readahead(
				    vp, udata, offset,
				    bp->b_bufsize - iolen, td, bp, &baseticks);
			if (error != 0)
				break;
		} else if (nbp != NULL) { /* Complete read with readahead */
			tbp = bp;
			bp = nbp;
			nbp = tbp;
			clearbiocache(&nbp->b_bio2);
			if (resid <= bp->b_bufsize) { /* No more readaheads */
				relpbuf(nbp, &ffsrawbufcnt);
				nbp = NULL;
			} else { /* Setup next readahead */
				nerror = ffs_rawread_readahead(
						vp, udata + bp->b_bufsize,
						offset + bp->b_bufsize,
						resid - bp->b_bufsize,
						td, nbp, &baseticks);
				if (nerror != 0) {
					relpbuf(nbp, &ffsrawbufcnt);
					nbp = NULL;
				}
			}
		} else if (nerror != 0) { /* Deferred readahead error */
			break;
		} else if (resid > 0) { /* More to read, no readahead */
			error = ffs_rawread_readahead(vp, udata, offset,
						      resid, td, bp,
						      &baseticks);
			if (error != 0)
				break;
		}
	}

	if (bp != NULL)
		relpbuf(bp, &ffsrawbufcnt);
	if (nbp != NULL) { /* Run down readahead buffer */
		crit_enter();
		while (nbp->b_cmd != BUF_CMD_DONE)
			tsleep(&nbp->b_bio2, 0, "rawrd", 0);
		crit_exit();
		vunmapbuf(nbp);
		relpbuf(nbp, &ffsrawbufcnt);
	}

	if (error == 0)
		error = nerror;
	uio->uio_iov->iov_base = udata;
	uio->uio_resid = resid;
	uio->uio_offset = offset;
	return error;
}
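

/*
 * Entry point from the FFS read path.  The raw path is taken only for
 * a single sector-aligned user-space iovec when allowrawread is set;
 * *workdone tells the caller whether the transfer (or its full-block
 * prefix) was handled here.
 */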
int
ffs_rawread(struct vnode *vp,
	    struct uio *uio,
	    int *workdone)
{
	if (allowrawread != 0 &&
	    uio->uio_iovcnt == 1 &&
	    uio->uio_segflg == UIO_USERSPACE &&
	    uio->uio_resid == uio->uio_iov->iov_len &&
	    (((uio->uio_td != NULL) ? uio->uio_td : curthread)->td_flags &
	     TDF_DEADLKTREAT) == 0) {
		int secsize;		/* Media sector size */
		off_t filebytes;	/* Bytes left of file */
		int blockbytes;		/* Bytes left of file in full blocks */
		int partialbytes;	/* Bytes in last partial block */
		int skipbytes;		/* Bytes not to read in ffs_rawread */
		struct inode *ip;
		int error;

		/* Only handle sector aligned reads */
		ip = VTOI(vp);
		secsize = ip->i_devvp->v_rdev->si_bsize_phys;
		if ((uio->uio_offset & (secsize - 1)) == 0 &&
		    (uio->uio_resid & (secsize - 1)) == 0) {
			/* Sync dirty pages and buffers if needed */
			error = ffs_rawread_sync(vp,
						 (uio->uio_td != NULL) ?
						 uio->uio_td : curthread);
			if (error != 0)
				return error;

			/* Check for end of file */
			if (ip->i_size > uio->uio_offset) {
				filebytes = ip->i_size - uio->uio_offset;

				/* No special EOF handling needed? */
				if (uio->uio_resid <= filebytes) {
					*workdone = 1;
					return ffs_rawread_main(vp, uio);
				}

				partialbytes = ((unsigned int) ip->i_size) %
					ip->i_fs->fs_bsize;
				blockbytes = (int) filebytes - partialbytes;
				if (blockbytes > 0) {
					skipbytes = uio->uio_resid -
						blockbytes;
					uio->uio_resid = blockbytes;
					error = ffs_rawread_main(vp, uio);
					uio->uio_resid += skipbytes;
					if (error != 0)
						return error;
					/* Read remaining part using buffer */
				}
			}
		}
	}
	*workdone = 0;
	return 0;
}
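

/*
 * bio_done callback, called when the raw I/O completes: mark the pbuf
 * done and wake the thread sleeping on the bio in ffs_rawread_main().
 */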
static void
ffs_rawreadwakeup(struct bio *bio)
{
	bio->bio_buf->b_cmd = BUF_CMD_DONE;
	wakeup(bio);
}