/*
 * Copyright (c) 1989, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)spec_vnops.c	8.14 (Berkeley) 5/21/95
 * $FreeBSD: src/sys/miscfs/specfs/spec_vnops.c,v 1.131.2.4 2001/02/26 04:23:20 jlemon Exp $
 * $DragonFly: src/sys/vfs/specfs/spec_vnops.c,v 1.14 2004/05/03 18:46:34 cpressey Exp $
 */

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/conf.h>
#include <sys/buf.h>
#include <sys/device.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/stat.h>
#include <sys/fcntl.h>
#include <sys/vmmeter.h>
#include <sys/tty.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>

#include <sys/buf2.h>

static int	spec_advlock (struct vop_advlock_args *);
static int	spec_bmap (struct vop_bmap_args *);
static int	spec_close (struct vop_close_args *);
static int	spec_freeblks (struct vop_freeblks_args *);
static int	spec_fsync (struct vop_fsync_args *);
static int	spec_getpages (struct vop_getpages_args *);
static int	spec_inactive (struct vop_inactive_args *);
static int	spec_ioctl (struct vop_ioctl_args *);
static int	spec_open (struct vop_open_args *);
static int	spec_poll (struct vop_poll_args *);
static int	spec_kqfilter (struct vop_kqfilter_args *);
static int	spec_print (struct vop_print_args *);
static int	spec_read (struct vop_read_args *);
static int	spec_strategy (struct vop_strategy_args *);
static int	spec_write (struct vop_write_args *);

vop_t **spec_vnodeop_p;
static struct vnodeopv_entry_desc spec_vnodeop_entries[] = {
	{ &vop_default_desc,		(vop_t *) vop_defaultop },
	{ &vop_access_desc,		(vop_t *) vop_ebadf },
	{ &vop_advlock_desc,		(vop_t *) spec_advlock },
	{ &vop_bmap_desc,		(vop_t *) spec_bmap },
	{ &vop_close_desc,		(vop_t *) spec_close },
	{ &vop_create_desc,		(vop_t *) vop_panic },
	{ &vop_freeblks_desc,		(vop_t *) spec_freeblks },
	{ &vop_fsync_desc,		(vop_t *) spec_fsync },
	{ &vop_getpages_desc,		(vop_t *) spec_getpages },
	{ &vop_inactive_desc,		(vop_t *) spec_inactive },
	{ &vop_ioctl_desc,		(vop_t *) spec_ioctl },
	{ &vop_lease_desc,		(vop_t *) vop_null },
	{ &vop_link_desc,		(vop_t *) vop_panic },
	{ &vop_mkdir_desc,		(vop_t *) vop_panic },
	{ &vop_mknod_desc,		(vop_t *) vop_panic },
	{ &vop_open_desc,		(vop_t *) spec_open },
	{ &vop_pathconf_desc,		(vop_t *) vop_stdpathconf },
	{ &vop_poll_desc,		(vop_t *) spec_poll },
	{ &vop_kqfilter_desc,		(vop_t *) spec_kqfilter },
	{ &vop_print_desc,		(vop_t *) spec_print },
	{ &vop_read_desc,		(vop_t *) spec_read },
	{ &vop_readdir_desc,		(vop_t *) vop_panic },
	{ &vop_readlink_desc,		(vop_t *) vop_panic },
	{ &vop_reallocblks_desc,	(vop_t *) vop_panic },
	{ &vop_reclaim_desc,		(vop_t *) vop_null },
	{ &vop_remove_desc,		(vop_t *) vop_panic },
	{ &vop_rename_desc,		(vop_t *) vop_panic },
	{ &vop_rmdir_desc,		(vop_t *) vop_panic },
	{ &vop_setattr_desc,		(vop_t *) vop_ebadf },
	{ &vop_strategy_desc,		(vop_t *) spec_strategy },
	{ &vop_symlink_desc,		(vop_t *) vop_panic },
	{ &vop_write_desc,		(vop_t *) spec_write },
	{ NULL, NULL }
};
static struct vnodeopv_desc spec_vnodeop_opv_desc =
	{ &spec_vnodeop_p, spec_vnodeop_entries };

VNODEOP_SET(spec_vnodeop_opv_desc);
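
/*
 * VNODEOP_SET() registers the descriptor above with the VFS at
 * initialization time; spec_vnoperate() below then dispatches vnode
 * operations through the resulting spec_vnodeop_p vector, indexed by
 * each operation's vdesc_offset.
 */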

/*
 * spec_vnoperate(struct vnodeop_desc *a_desc, ...)
 */
int
spec_vnoperate(struct vop_generic_args *ap)
{
	return (VOCALL(spec_vnodeop_p, ap->a_desc->vdesc_offset, ap));
}

static void spec_getpages_iodone (struct buf *bp);

/*
 * Open a special file.
 *
 * spec_open(struct vnode *a_vp, int a_mode, struct ucred *a_cred,
 *	     struct thread *a_td)
 */
/* ARGSUSED */
static int
spec_open(struct vop_open_args *ap)
{
	struct vnode *vp = ap->a_vp;
	dev_t dev = vp->v_rdev;
	int error;
	const char *cp;

	/*
	 * Don't allow open if fs is mounted -nodev.
	 */
	if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_NODEV))
		return (ENXIO);

	if (dev_dport(dev) == NULL)
		return (ENXIO);

	/* Make this field valid before any I/O in ->d_open */
	if (!dev->si_iosize_max)
		dev->si_iosize_max = DFLTPHYS;

	/*
	 * XXX: Disks get special billing here, but it is mostly wrong.
	 * XXX: Disk partitions can overlap and the real checks should
	 * XXX: take this into account, and consequently they need to
	 * XXX: live in the disk slicing code.  Some checks do.
	 */
	if (vn_isdisk(vp, NULL) && ap->a_cred != FSCRED &&
	    (ap->a_mode & FWRITE)) {
		/*
		 * Never allow opens for write if the device is mounted R/W.
		 */
		if (vp->v_specmountpoint != NULL &&
		    !(vp->v_specmountpoint->mnt_flag & MNT_RDONLY))
			return (EBUSY);

		/*
		 * When running in secure mode, do not allow opens
		 * for writing if the device is mounted.
		 */
		if (securelevel >= 1 && vp->v_specmountpoint != NULL)
			return (EPERM);

		/*
		 * When running in very secure mode, do not allow
		 * opens for writing of any devices.
		 */
		if (securelevel >= 2)
			return (EPERM);
	}

	/* XXX: Special casing of ttys for deadfs.  Probably redundant. */
	if (dev_dflags(dev) & D_TTY)
		vp->v_flag |= VISTTY;

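	/*
	 * The vnode lock is dropped across the driver's d_open entry
	 * point, which may block for a long time (a tty waiting for
	 * carrier, for example), and is reacquired afterward.
	 */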
	VOP_UNLOCK(vp, NULL, 0, ap->a_td);
	error = dev_dopen(dev, ap->a_mode, S_IFCHR, ap->a_td);
	vn_lock(vp, NULL, LK_EXCLUSIVE | LK_RETRY, ap->a_td);

	if (error)
		return (error);

	if (dev_dflags(dev) & D_TTY) {
		if (dev->si_tty) {
			struct tty *tp;
			tp = dev->si_tty;
			if (!tp->t_stop) {
				printf("Warning: %s: no t_stop, using nottystop\n",
				    devtoname(dev));
				tp->t_stop = nottystop;
			}
		}
	}

	if (vn_isdisk(vp, NULL)) {
		if (!dev->si_bsize_phys)
			dev->si_bsize_phys = DEV_BSIZE;
	}
	if ((dev_dflags(dev) & D_DISK) == 0) {
		cp = devtoname(dev);
		if (*cp == '#') {
			printf("WARNING: driver %s should register devices with make_dev() (dev_t = \"%s\")\n",
			    dev_dname(dev), cp);
		}
	}
	return (error);
}

/*
 * Vnode op for read.
 *
 * spec_read(struct vnode *a_vp, struct uio *a_uio, int a_ioflag,
 *	     struct ucred *a_cred)
 */
/* ARGSUSED */
static int
spec_read(struct vop_read_args *ap)
{
	struct vnode *vp;
	struct thread *td;
	struct uio *uio;
	dev_t dev;
	int error;

	vp = ap->a_vp;
	dev = vp->v_rdev;
	uio = ap->a_uio;
	td = uio->uio_td;

	if (uio->uio_resid == 0)
		return (0);

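	/*
	 * As in spec_open(), drop the vnode lock while in the driver,
	 * since a character device read may block indefinitely.
	 */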
	VOP_UNLOCK(vp, NULL, 0, td);
	error = dev_dread(dev, uio, ap->a_ioflag);
	vn_lock(vp, NULL, LK_EXCLUSIVE | LK_RETRY, td);
	return (error);
}

/*
 * Vnode op for write.
 *
 * spec_write(struct vnode *a_vp, struct uio *a_uio, int a_ioflag,
 *	      struct ucred *a_cred)
 */
/* ARGSUSED */
static int
spec_write(struct vop_write_args *ap)
{
	struct vnode *vp;
	struct thread *td;
	struct uio *uio;
	dev_t dev;
	int error;

	vp = ap->a_vp;
	dev = vp->v_rdev;
	uio = ap->a_uio;
	td = uio->uio_td;

	VOP_UNLOCK(vp, NULL, 0, td);
	error = dev_dwrite(dev, uio, ap->a_ioflag);
	vn_lock(vp, NULL, LK_EXCLUSIVE | LK_RETRY, td);
	return (error);
}

/*
 * Device ioctl operation.
 *
 * spec_ioctl(struct vnode *a_vp, int a_command, caddr_t a_data,
 *	      int a_fflag, struct ucred *a_cred, struct thread *a_td)
 */
/* ARGSUSED */
static int
spec_ioctl(struct vop_ioctl_args *ap)
{
	dev_t dev;

	dev = ap->a_vp->v_rdev;
	return (dev_dioctl(dev, ap->a_command, ap->a_data,
	    ap->a_fflag, ap->a_td));
}

/*
 * spec_poll(struct vnode *a_vp, int a_events, struct ucred *a_cred,
 *	     struct thread *a_td)
 */
/* ARGSUSED */
static int
spec_poll(struct vop_poll_args *ap)
{
	dev_t dev;

	dev = ap->a_vp->v_rdev;
	return (dev_dpoll(dev, ap->a_events, ap->a_td));
}

/*
 * spec_kqfilter(struct vnode *a_vp, struct knote *a_kn)
 */
/* ARGSUSED */
static int
spec_kqfilter(struct vop_kqfilter_args *ap)
{
	dev_t dev;

	dev = ap->a_vp->v_rdev;
	return (dev_dkqfilter(dev, ap->a_kn));
}

/*
 * Synch buffers associated with a block device.
 *
 * spec_fsync(struct vnode *a_vp, struct ucred *a_cred,
 *	      int a_waitfor, struct thread *a_td)
 */
/* ARGSUSED */
static int
spec_fsync(struct vop_fsync_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct buf *bp;
	struct buf *nbp;
	int s;
	int maxretry = 10000;	/* large, arbitrarily chosen */

	if (!vn_isdisk(vp, NULL))
		return (0);

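	/*
	 * The flush below works in two phases: first clear B_SCANNED on
	 * every buffer on the dirty list, then restart the scan from the
	 * head of the list after each write, since the write routines can
	 * requeue buffers underneath us.  B_SCANNED guarantees each buffer
	 * is started at most once per pass.
	 */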
loop1:
	/*
	 * MARK/SCAN initialization to avoid infinite loops.
	 */
	s = splbio();
	for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp;
	     bp = TAILQ_NEXT(bp, b_vnbufs)) {
		bp->b_flags &= ~B_SCANNED;
	}
	splx(s);

	/*
	 * Flush all dirty buffers associated with a block device.
	 */
loop2:
	s = splbio();
	for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
		nbp = TAILQ_NEXT(bp, b_vnbufs);
		if ((bp->b_flags & B_SCANNED) != 0)
			continue;
		bp->b_flags |= B_SCANNED;
		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT))
			continue;
		if ((bp->b_flags & B_DELWRI) == 0)
			panic("spec_fsync: not dirty");
		if ((vp->v_flag & VOBJBUF) && (bp->b_flags & B_CLUSTEROK)) {
			BUF_UNLOCK(bp);
			vfs_bio_awrite(bp);
			splx(s);
		} else {
			bremfree(bp);
			splx(s);
			bawrite(bp);
		}
		goto loop2;
	}

	/*
	 * If synchronous, the caller expects us to completely resolve all
	 * dirty buffers in the system.  Wait for in-progress I/O to
	 * complete (which could include background bitmap writes), then
	 * retry if dirty blocks still exist.
	 */
	if (ap->a_waitfor == MNT_WAIT) {
		while (vp->v_numoutput) {
			vp->v_flag |= VBWAIT;
			(void) tsleep((caddr_t)&vp->v_numoutput, 0, "spfsyn", 0);
		}
		if (!TAILQ_EMPTY(&vp->v_dirtyblkhd)) {
			if (--maxretry != 0) {
				splx(s);
				goto loop1;
			}
			vprint("spec_fsync: giving up on dirty", vp);
		}
	}
	splx(s);
	return (0);
}

/*
 * spec_inactive(struct vnode *a_vp, struct thread *a_td)
 */
static int
spec_inactive(struct vop_inactive_args *ap)
{
	VOP_UNLOCK(ap->a_vp, NULL, 0, ap->a_td);
	return (0);
}

/*
 * Just call the device strategy routine.
 *
 * spec_strategy(struct vnode *a_vp, struct buf *a_bp)
 */
static int
spec_strategy(struct vop_strategy_args *ap)
{
	struct buf *bp;
	struct vnode *vp;
	struct mount *mp;

	bp = ap->a_bp;
	if (((bp->b_flags & B_READ) == 0) &&
	    (LIST_FIRST(&bp->b_dep)) != NULL && bioops.io_start)
		(*bioops.io_start)(bp);

	/*
	 * Collect statistics on synchronous and asynchronous read
	 * and write counts for disks that have associated filesystems.
	 */
	vp = ap->a_vp;
	if (vn_isdisk(vp, NULL) && (mp = vp->v_specmountpoint) != NULL) {
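		/*
		 * A buffer whose lock holder is LK_KERNTHREAD is owned by
		 * the kernel rather than by the issuing process (e.g. a
		 * background write), so bill it as asynchronous I/O.
		 */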
		if ((bp->b_flags & B_READ) == 0) {
			if (bp->b_lock.lk_lockholder == LK_KERNTHREAD)
				mp->mnt_stat.f_asyncwrites++;
			else
				mp->mnt_stat.f_syncwrites++;
		} else {
			if (bp->b_lock.lk_lockholder == LK_KERNTHREAD)
				mp->mnt_stat.f_asyncreads++;
			else
				mp->mnt_stat.f_syncreads++;
		}
	}
#if 0
	KASSERT(devsw(bp->b_dev) != NULL,
	    ("No devsw on dev %s responsible for buffer %p\n",
	    devtoname(bp->b_dev), bp));
	KASSERT(devsw(bp->b_dev)->d_strategy != NULL,
	    ("No strategy on dev %s responsible for buffer %p\n",
	    devtoname(bp->b_dev), bp));
#endif
	BUF_STRATEGY(bp, 0);
	return (0);
}

/*
 * spec_freeblks(struct vnode *a_vp, daddr_t a_addr, daddr_t a_length)
 */
static int
spec_freeblks(struct vop_freeblks_args *ap)
{
	struct buf *bp;

	/*
	 * XXX: This assumes that strategy does the deed right away.
	 * XXX: This may not be the right thing to do.
	 */
	if ((dev_dflags(ap->a_vp->v_rdev) & D_CANFREE) == 0)
		return (0);
	bp = geteblk(ap->a_length);
	bp->b_flags |= B_FREEBUF;
	bp->b_dev = ap->a_vp->v_rdev;
	bp->b_blkno = ap->a_addr;
	bp->b_offset = dbtob(ap->a_addr);
	bp->b_bcount = ap->a_length;
	BUF_STRATEGY(bp, 0);
	return (0);
}

/*
 * Implement the degenerate case where the block requested is the block
 * returned, and assume that the entire device is contiguous with regard
 * to the contiguous block range (runp and runb).
 *
 * spec_bmap(struct vnode *a_vp, daddr_t a_bn, struct vnode **a_vpp,
 *	     daddr_t *a_bnp, int *a_runp, int *a_runb)
 */
static int
spec_bmap(struct vop_bmap_args *ap)
{
	struct vnode *vp = ap->a_vp;
	int runp = 0;
	int runb = 0;

	if (ap->a_vpp != NULL)
		*ap->a_vpp = vp;
	if (ap->a_bnp != NULL)
		*ap->a_bnp = ap->a_bn;
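	/*
	 * Report the longest run the caller may cluster: the number of
	 * filesystem-block-sized chunks that fit in one MAXBSIZE buffer.
	 */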
	if (vp->v_mount != NULL)
		runp = runb = MAXBSIZE / vp->v_mount->mnt_stat.f_iosize;
	if (ap->a_runp != NULL)
		*ap->a_runp = runp;
	if (ap->a_runb != NULL)
		*ap->a_runb = runb;
	return (0);
}

/*
 * Device close routine.
 *
 * spec_close(struct vnode *a_vp, int a_fflag, struct ucred *a_cred,
 *	      struct thread *a_td)
 */
/* ARGSUSED */
static int
spec_close(struct vop_close_args *ap)
{
	struct proc *p = ap->a_td->td_proc;
	struct vnode *vp = ap->a_vp;
	dev_t dev = vp->v_rdev;

	/*
	 * Hack: a tty device that is a controlling terminal
	 * has a reference from the session structure.
	 * We cannot easily tell that a character device is
	 * a controlling terminal, unless it is the closing
	 * process' controlling terminal.  In that case,
	 * if the reference count is 2 (this last descriptor
	 * plus the session), release the reference from the session.
	 */
	if (vcount(vp) == 2 && p && (vp->v_flag & VXLOCK) == 0 &&
	    vp == p->p_session->s_ttyvp) {
		vrele(vp);
		p->p_session->s_ttyvp = NULL;
	}
	/*
	 * We do not want to really close the device if it
	 * is still in use unless we are trying to close it
	 * forcibly.  Since every use (buffer, vnode, swap, cmap)
	 * holds a reference to the vnode, and because we mark
	 * any other vnodes that alias this device, when the
	 * sum of the reference counts on all the aliased
	 * vnodes descends to one, we are on the last close.
	 */
	if (vp->v_flag & VXLOCK) {
		/* Forced close */
	} else if (dev_dflags(dev) & D_TRACKCLOSE) {
		/* Keep device updated on status */
	} else if (vcount(vp) > 1) {
		return (0);
	}
	return (dev_dclose(dev, ap->a_fflag, S_IFCHR, ap->a_td));
}

/*
 * Print out the contents of a special device vnode.
 *
 * spec_print(struct vnode *a_vp)
 */
static int
spec_print(struct vop_print_args *ap)
{
	printf("tag VT_NON, dev %s\n", devtoname(ap->a_vp->v_rdev));
	return (0);
}

/*
 * Special device advisory byte-level locks.
 *
 * spec_advlock(struct vnode *a_vp, caddr_t a_id, int a_op,
 *	        struct flock *a_fl, int a_flags)
 */
/* ARGSUSED */
static int
spec_advlock(struct vop_advlock_args *ap)
{
	return (ap->a_flags & F_FLOCK ? EOPNOTSUPP : EINVAL);
}

static void
spec_getpages_iodone(struct buf *bp)
{
	bp->b_flags |= B_DONE;
	wakeup(bp);
}

static int
spec_getpages(struct vop_getpages_args *ap)
{
	vm_offset_t kva;
	int error;
	int i, pcount, size, s;
	daddr_t blkno;
	struct buf *bp;
	vm_page_t m;
	vm_ooffset_t offset;
	int toff, nextoff, nread;
	struct vnode *vp = ap->a_vp;
	int blksiz;
	int gotreqpage;

	error = 0;
	pcount = round_page(ap->a_count) / PAGE_SIZE;

	/*
	 * Calculate the offset of the transfer and do a sanity check.
	 * FreeBSD currently only supports an 8 TB range due to b_blkno
	 * being in DEV_BSIZE (usually 512) byte chunks on the call to
	 * VOP_STRATEGY.  XXX
	 */
	offset = IDX_TO_OFF(ap->a_m[0]->pindex) + ap->a_offset;

#define	DADDR_T_BIT	(sizeof(daddr_t) * 8)
#define	OFFSET_MAX	((1LL << (DADDR_T_BIT + DEV_BSHIFT)) - 1)

	if (offset < 0 || offset > OFFSET_MAX) {
		/* XXX still no %q in kernel. */
		printf("spec_getpages: preposterous offset 0x%x%08x\n",
		    (u_int)((u_quad_t)offset >> 32),
		    (u_int)(offset & 0xffffffff));
		return (VM_PAGER_ERROR);
	}

	blkno = btodb(offset);

	/*
	 * Round up physical size for real devices.  We cannot round using
	 * v_mount's block size data because v_mount has nothing to do with
	 * the device, i.e. it's usually '/dev'.  We need the physical block
	 * size for the device itself.
	 *
	 * We can't use v_specmountpoint because it only exists when the
	 * block device is mounted.  However, we can use v_rdev.
	 */
	if (vn_isdisk(vp, NULL))
		blksiz = vp->v_rdev->si_bsize_phys;
	else
		blksiz = DEV_BSIZE;

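	/*
	 * Round the transfer size up to a multiple of the physical block
	 * size.  The mask arithmetic assumes blksiz is a power of 2,
	 * which holds for DEV_BSIZE and, in practice, for device physical
	 * block sizes.
	 */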
	size = (ap->a_count + blksiz - 1) & ~(blksiz - 1);

	bp = getpbuf(NULL);
	kva = (vm_offset_t)bp->b_data;

	/*
	 * Map the pages to be read into the kva.
	 */
	pmap_qenter(kva, ap->a_m, pcount);

	/* Build a minimal buffer header. */
	bp->b_flags = B_READ | B_CALL;
	bp->b_iodone = spec_getpages_iodone;

	/* B_PHYS is not set, but it is nice to fill this in. */
	bp->b_blkno = blkno;
	bp->b_lblkno = blkno;
	pbgetvp(ap->a_vp, bp);
	bp->b_bcount = size;
	bp->b_bufsize = size;
	bp->b_resid = 0;
	bp->b_runningbufspace = bp->b_bufsize;
	runningbufspace += bp->b_runningbufspace;

	mycpu->gd_cnt.v_vnodein++;
	mycpu->gd_cnt.v_vnodepgsin += pcount;

	/* Do the input. */
	VOP_STRATEGY(bp->b_vp, bp);

	s = splbio();

	/* We definitely need to be at splbio here. */
	while ((bp->b_flags & B_DONE) == 0) {
		tsleep(bp, 0, "spread", 0);
	}

	splx(s);

	if ((bp->b_flags & B_ERROR) != 0) {
		if (bp->b_error)
			error = bp->b_error;
		else
			error = EIO;
	}

	nread = size - bp->b_resid;

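	/*
	 * If the device returned less data than the pages cover, zero
	 * the remainder of the mapping so no stale memory leaks into
	 * the pages handed back to the VM system.
	 */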
	if (nread < ap->a_count) {
		bzero((caddr_t)kva + nread,
		    ap->a_count - nread);
	}
	pmap_qremove(kva, pcount);

	gotreqpage = 0;
	for (i = 0, toff = 0; i < pcount; i++, toff = nextoff) {
		nextoff = toff + PAGE_SIZE;
		m = ap->a_m[i];

		m->flags &= ~PG_ZERO;

		if (nextoff <= nread) {
			m->valid = VM_PAGE_BITS_ALL;
			vm_page_undirty(m);
		} else if (toff < nread) {
			/*
			 * Since this is a VM request, we have to supply the
			 * unaligned offset to allow vm_page_set_validclean()
			 * to zero sub-DEV_BSIZE'd portions of the page.
			 */
			vm_page_set_validclean(m, 0, nread - toff);
		} else {
			m->valid = 0;
			vm_page_undirty(m);
		}

		if (i != ap->a_reqpage) {
			/*
			 * Just in case someone was asking for this page, we
			 * now tell them that it is ok to use.
			 */
			if (!error || (m->valid == VM_PAGE_BITS_ALL)) {
				if (m->valid) {
					if (m->flags & PG_WANTED) {
						vm_page_activate(m);
					} else {
						vm_page_deactivate(m);
					}
					vm_page_wakeup(m);
				} else {
					vm_page_free(m);
				}
			} else {
				vm_page_free(m);
			}
		} else if (m->valid) {
			gotreqpage = 1;
			/*
			 * Since this is a VM request, we need to make the
			 * entire page presentable by zeroing invalid sections.
			 */
			if (m->valid != VM_PAGE_BITS_ALL)
				vm_page_zero_invalid(m, FALSE);
		}
	}
	if (!gotreqpage) {
		m = ap->a_m[ap->a_reqpage];
		printf(
		    "spec_getpages:(%s) I/O read failure: (error=%d) bp %p vp %p\n",
		    devtoname(bp->b_dev), error, bp, bp->b_vp);
		printf(
		    "               size: %d, resid: %ld, a_count: %d, valid: 0x%x\n",
		    size, bp->b_resid, ap->a_count, m->valid);
		printf(
		    "               nread: %d, reqpage: %d, pindex: %lu, pcount: %d\n",
		    nread, ap->a_reqpage, (u_long)m->pindex, pcount);
		/*
		 * Free the buffer header back to the swap buffer pool.
		 */
		relpbuf(bp, NULL);
		return (VM_PAGER_ERROR);
	}
	/*
	 * Free the buffer header back to the swap buffer pool.
	 */
	relpbuf(bp, NULL);
	return (VM_PAGER_OK);
}