Adjust some comments to match reality.
[dragonfly.git] / sys / vfs / specfs / spec_vnops.c
/*
 * Copyright (c) 1989, 1993, 1995
 *      The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)spec_vnops.c        8.14 (Berkeley) 5/21/95
 * $FreeBSD: src/sys/miscfs/specfs/spec_vnops.c,v 1.131.2.4 2001/02/26 04:23:20 jlemon Exp $
 * $DragonFly: src/sys/vfs/specfs/spec_vnops.c,v 1.54 2007/08/08 00:12:52 swildner Exp $
 */

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/conf.h>
#include <sys/buf.h>
#include <sys/device.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/stat.h>
#include <sys/fcntl.h>
#include <sys/vmmeter.h>
#include <sys/bus.h>
#include <sys/tty.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>

#include <machine/limits.h>

#include <sys/buf2.h>

#include <sys/thread2.h>

/*
 * Specfs chained debugging (bitmask)
 *
 * 0 - disable debugging
 * 1 - report chained I/Os
 * 2 - force 4K chained I/Os
 */
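/*
 * The values OR together: e.g. a setting of 3 both reports the chained
 * I/Os and forces the 4K chunking (see the SPEC_CHAIN_DEBUG & 1 and
 * SPEC_CHAIN_DEBUG & 2 tests below).
 */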
#define SPEC_CHAIN_DEBUG 0

static int spec_advlock (struct vop_advlock_args *);
static int spec_bmap (struct vop_bmap_args *);
static int spec_close (struct vop_close_args *);
static int spec_freeblks (struct vop_freeblks_args *);
static int spec_fsync (struct vop_fsync_args *);
static int spec_getpages (struct vop_getpages_args *);
static int spec_inactive (struct vop_inactive_args *);
static int spec_ioctl (struct vop_ioctl_args *);
static int spec_open (struct vop_open_args *);
static int spec_poll (struct vop_poll_args *);
static int spec_kqfilter (struct vop_kqfilter_args *);
static int spec_print (struct vop_print_args *);
static int spec_read (struct vop_read_args *);
static int spec_strategy (struct vop_strategy_args *);
static int spec_write (struct vop_write_args *);
static void spec_strategy_done(struct bio *nbio);

struct vop_ops spec_vnode_vops = {
        .vop_default = vop_defaultop,
        .vop_access = (void *)vop_ebadf,
        .vop_advlock = spec_advlock,
        .vop_bmap = spec_bmap,
        .vop_close = spec_close,
        .vop_old_create = (void *)vop_panic,
        .vop_freeblks = spec_freeblks,
        .vop_fsync = spec_fsync,
        .vop_getpages = spec_getpages,
        .vop_inactive = spec_inactive,
        .vop_ioctl = spec_ioctl,
        .vop_old_link = (void *)vop_panic,
        .vop_old_mkdir = (void *)vop_panic,
        .vop_old_mknod = (void *)vop_panic,
        .vop_open = spec_open,
        .vop_pathconf = vop_stdpathconf,
        .vop_poll = spec_poll,
        .vop_kqfilter = spec_kqfilter,
        .vop_print = spec_print,
        .vop_read = spec_read,
        .vop_readdir = (void *)vop_panic,
        .vop_readlink = (void *)vop_panic,
        .vop_reallocblks = (void *)vop_panic,
        .vop_reclaim = (void *)vop_null,
        .vop_old_remove = (void *)vop_panic,
        .vop_old_rename = (void *)vop_panic,
        .vop_old_rmdir = (void *)vop_panic,
        .vop_setattr = (void *)vop_ebadf,
        .vop_strategy = spec_strategy,
        .vop_old_symlink = (void *)vop_panic,
        .vop_write = spec_write
};

struct vop_ops *spec_vnode_vops_p = &spec_vnode_vops;

VNODEOP_SET(spec_vnode_vops);

extern int dev_ref_debug;

/*
 * spec_vnoperate()
 */
int
spec_vnoperate(struct vop_generic_args *ap)
{
        return (VOCALL(&spec_vnode_vops, ap));
}

static void spec_getpages_iodone (struct bio *bio);

/*
 * Open a special file.
 *
 * spec_open(struct vnode *a_vp, int a_mode, struct ucred *a_cred,
 *           struct file *a_fp)
 */
/* ARGSUSED */
static int
spec_open(struct vop_open_args *ap)
{
        struct vnode *vp = ap->a_vp;
        cdev_t dev;
        int error;
        const char *cp;

        /*
         * Don't allow open if fs is mounted -nodev.
         */
        if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_NODEV))
                return (ENXIO);
        if (vp->v_type == VBLK)
                return (ENXIO);

        /*
         * Resolve the device.  If the vnode is already open v_rdev may
         * already be resolved.  However, if the device changes out from
         * under us we report it (and, for now, we allow it).  Since
         * v_release_rdev() zeroes v_opencount, we have to save and restore
         * it when replacing the rdev reference.
         */
        if (vp->v_rdev != NULL) {
                dev = get_dev(vp->v_umajor, vp->v_uminor);
                if (dev != vp->v_rdev) {
                        int oc = vp->v_opencount;
                        kprintf(
                            "Warning: spec_open: dev %s was lost",
                            vp->v_rdev->si_name);
                        v_release_rdev(vp);
                        error = v_associate_rdev(vp,
                                        get_dev(vp->v_umajor, vp->v_uminor));
                        if (error) {
                                kprintf(", reacquisition failed\n");
                        } else {
                                vp->v_opencount = oc;
                                kprintf(", reacquisition successful\n");
                        }
                } else {
                        error = 0;
                }
        } else {
                error = v_associate_rdev(vp, get_dev(vp->v_umajor, vp->v_uminor));
        }
        if (error)
                return(error);

        /*
         * Prevent degenerate open/close sequences from nulling out rdev.
         */
        dev = vp->v_rdev;
        KKASSERT(dev != NULL);

        /*
         * Make this field valid before any I/O in ->d_open.  XXX the
         * device itself should probably be required to initialize
         * this field in d_open.
         */
        if (!dev->si_iosize_max)
                dev->si_iosize_max = DFLTPHYS;

        /*
         * XXX: Disks get special billing here, but it is mostly wrong.
         * XXX: Disk partitions can overlap and the real checks should
         * XXX: take this into account, and consequently they need to
         * XXX: live in the diskslicing code.  Some checks do.
         */
        if (vn_isdisk(vp, NULL) && ap->a_cred != FSCRED &&
            (ap->a_mode & FWRITE)) {
                /*
                 * Never allow opens for write if the device is mounted R/W
                 */
                if (vp->v_rdev && vp->v_rdev->si_mountpoint &&
                    !(vp->v_rdev->si_mountpoint->mnt_flag & MNT_RDONLY)) {
                        error = EBUSY;
                        goto done;
                }

                /*
                 * When running in secure mode, do not allow opens
                 * for writing if the device is mounted
                 */
                if (securelevel >= 1 && vfs_mountedon(vp)) {
                        error = EPERM;
                        goto done;
                }

                /*
                 * When running in very secure mode, do not allow
                 * opens for writing of any devices.
                 */
                if (securelevel >= 2) {
                        error = EPERM;
                        goto done;
                }
        }

        /* XXX: Special casing of ttys for deadfs.  Probably redundant */
        if (dev_dflags(dev) & D_TTY)
                vp->v_flag |= VISTTY;

        /*
         * dev_dopen() is always called for each open.  dev_dclose() is
         * only called for the last close unless D_TRACKCLOSE is set.
         */
        vn_unlock(vp);
        error = dev_dopen(dev, ap->a_mode, S_IFCHR, ap->a_cred);
        vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);

        if (error)
                goto done;

        if (dev_dflags(dev) & D_TTY) {
                if (dev->si_tty) {
                        struct tty *tp;
                        tp = dev->si_tty;
                        if (!tp->t_stop) {
                                kprintf("Warning:%s: no t_stop, using nottystop\n", devtoname(dev));
                                tp->t_stop = nottystop;
                        }
                }
        }

        /*
         * If this is a 'disk' or disk-like device, associate a VM object
         * with it.
         */
        if (vn_isdisk(vp, NULL)) {
                if (!dev->si_bsize_phys)
                        dev->si_bsize_phys = DEV_BSIZE;
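                /*
                 * Size the object to the largest possible offset; the
                 * device's actual size is not known at open time.
                 */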
                vinitvmio(vp, IDX_TO_OFF(INT_MAX));
        }
        if ((dev_dflags(dev) & D_DISK) == 0) {
                cp = devtoname(dev);
                if (*cp == '#') {
                        kprintf("WARNING: driver %s should register devices with make_dev() (cdev_t = \"%s\")\n",
                            dev_dname(dev), cp);
                }
        }

        /*
         * If we were handed a file pointer we may be able to install a
         * shortcut which issues device read and write operations directly
         * from the fileops rather than having to go through spec_read()
         * and spec_write().
         */
        if (ap->a_fp)
                vn_setspecops(ap->a_fp);

        if (dev_ref_debug)
                kprintf("spec_open: %s %d\n", dev->si_name, vp->v_opencount);
done:
        if (error) {
                if (vp->v_opencount == 0)
                        v_release_rdev(vp);
        } else {
                vop_stdopen(ap);
        }
        return (error);
}

/*
 * Vnode op for read
 *
 * spec_read(struct vnode *a_vp, struct uio *a_uio, int a_ioflag,
 *           struct ucred *a_cred)
 */
/* ARGSUSED */
static int
spec_read(struct vop_read_args *ap)
{
        struct vnode *vp;
        struct thread *td;
        struct uio *uio;
        cdev_t dev;
        int error;

        vp = ap->a_vp;
        dev = vp->v_rdev;
        uio = ap->a_uio;
        td = uio->uio_td;

        if (dev == NULL)                /* device was revoked */
                return (EBADF);
        if (uio->uio_resid == 0)
                return (0);

        vn_unlock(vp);
        error = dev_dread(dev, uio, ap->a_ioflag);
        vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
        return (error);
}

/*
 * Vnode op for write
 *
 * spec_write(struct vnode *a_vp, struct uio *a_uio, int a_ioflag,
 *            struct ucred *a_cred)
 */
/* ARGSUSED */
static int
spec_write(struct vop_write_args *ap)
{
        struct vnode *vp;
        struct thread *td;
        struct uio *uio;
        cdev_t dev;
        int error;

        vp = ap->a_vp;
        dev = vp->v_rdev;
        uio = ap->a_uio;
        td = uio->uio_td;

        if (dev == NULL)                /* device was revoked */
                return (EBADF);

        vn_unlock(vp);
        error = dev_dwrite(dev, uio, ap->a_ioflag);
        vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
        return (error);
}

/*
 * Device ioctl operation.
 *
 * spec_ioctl(struct vnode *a_vp, int a_command, caddr_t a_data,
 *            int a_fflag, struct ucred *a_cred)
 */
/* ARGSUSED */
static int
spec_ioctl(struct vop_ioctl_args *ap)
{
        cdev_t dev;

        if ((dev = ap->a_vp->v_rdev) == NULL)
                return (EBADF);         /* device was revoked */

        return (dev_dioctl(dev, ap->a_command, ap->a_data,
                    ap->a_fflag, ap->a_cred));
}

/*
 * spec_poll(struct vnode *a_vp, int a_events, struct ucred *a_cred)
 */
/* ARGSUSED */
static int
spec_poll(struct vop_poll_args *ap)
{
        cdev_t dev;

        if ((dev = ap->a_vp->v_rdev) == NULL)
                return (EBADF);         /* device was revoked */
        return (dev_dpoll(dev, ap->a_events));
}

/*
 * spec_kqfilter(struct vnode *a_vp, struct knote *a_kn)
 */
/* ARGSUSED */
static int
spec_kqfilter(struct vop_kqfilter_args *ap)
{
        cdev_t dev;

        if ((dev = ap->a_vp->v_rdev) == NULL)
                return (EBADF);         /* device was revoked */
        return (dev_dkqfilter(dev, ap->a_kn));
}

/*
 * Synch buffers associated with a block device
 *
 * spec_fsync(struct vnode *a_vp, int a_waitfor)
 */
/* ARGSUSED */
static int
spec_fsync(struct vop_fsync_args *ap)
{
        struct vnode *vp = ap->a_vp;
        int error;

        if (!vn_isdisk(vp, NULL))
                return (0);

        /*
         * Flush all dirty buffers associated with a block device.
         */
        error = vfsync(vp, ap->a_waitfor, 10000, NULL, NULL);
        return (error);
}

/*
 * spec_inactive(struct vnode *a_vp)
 */
static int
spec_inactive(struct vop_inactive_args *ap)
{
        return (0);
}

/*
 * Convert a vnode strategy call into a device strategy call.  Vnode
 * strategy calls are not limited by the device's DMA limits, so we may
 * have to break an oversized request up into a chain of smaller transfers.
 *
 * spec_strategy(struct vnode *a_vp, struct bio *a_bio)
 */
static int
spec_strategy(struct vop_strategy_args *ap)
{
        struct bio *bio = ap->a_bio;
        struct buf *bp = bio->bio_buf;
        struct buf *nbp;
        struct vnode *vp;
        struct mount *mp;
        int chunksize;
        int maxiosize;

        if (bp->b_cmd != BUF_CMD_READ &&
            (LIST_FIRST(&bp->b_dep)) != NULL && bioops.io_start) {
                (*bioops.io_start)(bp);
        }

        /*
         * Collect statistics on synchronous and asynchronous read
         * and write counts for disks that have associated filesystems.
         */
        vp = ap->a_vp;
        KKASSERT(vp->v_rdev != NULL);   /* XXX */
        if (vn_isdisk(vp, NULL) && (mp = vp->v_rdev->si_mountpoint) != NULL) {
                if (bp->b_cmd == BUF_CMD_READ) {
                        if (bp->b_flags & B_ASYNC)
                                mp->mnt_stat.f_asyncreads++;
                        else
                                mp->mnt_stat.f_syncreads++;
                } else {
                        if (bp->b_flags & B_ASYNC)
                                mp->mnt_stat.f_asyncwrites++;
                        else
                                mp->mnt_stat.f_syncwrites++;
                }
        }

        /*
         * Device iosize limitations only apply to read and write.  Shortcut
         * the I/O if it fits.
         */
        if ((maxiosize = vp->v_rdev->si_iosize_max) == 0) {
                kprintf("%s: si_iosize_max not set!\n", dev_dname(vp->v_rdev));
                maxiosize = MAXPHYS;
        }
#if SPEC_CHAIN_DEBUG & 2
        maxiosize = 4096;
#endif
        if (bp->b_bcount <= maxiosize ||
            (bp->b_cmd != BUF_CMD_READ && bp->b_cmd != BUF_CMD_WRITE)) {
                dev_dstrategy_chain(vp->v_rdev, bio);
                return (0);
        }

        /*
         * Clone the buffer and set up an I/O chain to chunk up the I/O.
         */
        nbp = kmalloc(sizeof(*bp), M_DEVBUF, M_INTWAIT|M_ZERO);
        initbufbio(nbp);
        LIST_INIT(&nbp->b_dep);
        BUF_LOCKINIT(nbp);
        BUF_LOCK(nbp, LK_EXCLUSIVE);
        BUF_KERNPROC(nbp);
        nbp->b_vp = vp;
        nbp->b_flags = B_PAGING | (bp->b_flags & B_BNOCLIP);
        nbp->b_data = bp->b_data;
        nbp->b_bio1.bio_done = spec_strategy_done;
        nbp->b_bio1.bio_offset = bio->bio_offset;
        nbp->b_bio1.bio_caller_info1.ptr = bio;

        /*
         * Start the first transfer
         */
        if (vn_isdisk(vp, NULL))
                chunksize = vp->v_rdev->si_bsize_phys;
        else
                chunksize = DEV_BSIZE;
        chunksize = maxiosize / chunksize * chunksize;
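        /*
         * This rounds maxiosize down to a multiple of the physical block
         * size: e.g. a maxiosize of 131072 with 512 byte blocks leaves a
         * 131072 byte chunk, while the 4K debug clamp above yields 4096
         * byte chunks (illustrative values only).
         */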
#if SPEC_CHAIN_DEBUG & 1
        kprintf("spec_strategy chained I/O chunksize=%d\n", chunksize);
#endif
        nbp->b_cmd = bp->b_cmd;
        nbp->b_bcount = chunksize;
        nbp->b_bufsize = chunksize;     /* used to detect a short I/O */
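        /* stash the chunk size where spec_strategy_done() can recover it */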
        nbp->b_bio1.bio_caller_info2.index = chunksize;

#if SPEC_CHAIN_DEBUG & 1
        kprintf("spec_strategy: chain %p offset %d/%d bcount %d\n",
                bp, 0, bp->b_bcount, nbp->b_bcount);
#endif

        dev_dstrategy(vp->v_rdev, &nbp->b_bio1);
        return (0);
}

/*
 * Chunked up transfer completion routine - chain transfers until done
 */
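/*
 * For illustration: a 10240 byte request chunked at 4096 bytes walks the
 * chain with boffset 0, 4096, 8192 and per-chunk b_bcount 4096, 4096, 2048
 * (hypothetical sizes; the real chunk size derives from si_bsize_phys and
 * si_iosize_max in spec_strategy() above).
 */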
static
void
spec_strategy_done(struct bio *nbio)
{
        struct buf *nbp = nbio->bio_buf;
        struct bio *bio = nbio->bio_caller_info1.ptr;   /* original bio */
        struct buf *bp = bio->bio_buf;                  /* original bp */
        int chunksize = nbio->bio_caller_info2.index;   /* chunking */
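        /*
         * b_data of the chained buffer always points into the original
         * buffer's data, so the pointer difference recovers how far the
         * chain has progressed.
         */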
        int boffset = nbp->b_data - bp->b_data;

        if (nbp->b_flags & B_ERROR) {
                /*
                 * An error terminates the chain, propagate the error back
                 * to the original bp
                 */
                bp->b_flags |= B_ERROR;
                bp->b_error = nbp->b_error;
                bp->b_resid = bp->b_bcount - boffset -
                              (nbp->b_bcount - nbp->b_resid);
#if SPEC_CHAIN_DEBUG & 1
                kprintf("spec_strategy: chain %p error %d bcount %d/%d\n",
                        bp, bp->b_error, bp->b_bcount,
                        bp->b_bcount - bp->b_resid);
#endif
                kfree(nbp, M_DEVBUF);
                biodone(bio);
        } else if (nbp->b_resid) {
                /*
                 * A short read or write terminates the chain
                 */
                bp->b_error = nbp->b_error;
                bp->b_resid = bp->b_bcount - boffset -
                              (nbp->b_bcount - nbp->b_resid);
#if SPEC_CHAIN_DEBUG & 1
                kprintf("spec_strategy: chain %p short read(1) bcount %d/%d\n",
                        bp, bp->b_bcount - bp->b_resid, bp->b_bcount);
#endif
                kfree(nbp, M_DEVBUF);
                biodone(bio);
        } else if (nbp->b_bcount != nbp->b_bufsize) {
                /*
                 * A short read or write can also occur by truncating b_bcount
                 */
#if SPEC_CHAIN_DEBUG & 1
                kprintf("spec_strategy: chain %p short read(2) bcount %d/%d\n",
                        bp, nbp->b_bcount + boffset, bp->b_bcount);
#endif
                bp->b_error = 0;
                bp->b_bcount = nbp->b_bcount + boffset;
                bp->b_resid = nbp->b_resid;
                kfree(nbp, M_DEVBUF);
                biodone(bio);
        } else if (nbp->b_bcount + boffset == bp->b_bcount) {
                /*
                 * No more data terminates the chain
                 */
#if SPEC_CHAIN_DEBUG & 1
                kprintf("spec_strategy: chain %p finished bcount %d\n",
                        bp, bp->b_bcount);
#endif
                bp->b_error = 0;
                bp->b_resid = 0;
                kfree(nbp, M_DEVBUF);
                biodone(bio);
        } else {
                /*
                 * Continue the chain
                 */
                boffset += nbp->b_bcount;
                nbp->b_data = bp->b_data + boffset;
                nbp->b_bcount = bp->b_bcount - boffset;
                if (nbp->b_bcount > chunksize)
                        nbp->b_bcount = chunksize;
                nbp->b_bio1.bio_done = spec_strategy_done;
                nbp->b_bio1.bio_offset = bio->bio_offset + boffset;

#if SPEC_CHAIN_DEBUG & 1
                kprintf("spec_strategy: chain %p offset %d/%d bcount %d\n",
                        bp, boffset, bp->b_bcount, nbp->b_bcount);
#endif

                dev_dstrategy(nbp->b_vp->v_rdev, &nbp->b_bio1);
        }
}

/*
 * spec_freeblks(struct vnode *a_vp, daddr_t a_addr, daddr_t a_length)
 */
static int
spec_freeblks(struct vop_freeblks_args *ap)
{
        struct buf *bp;

        /*
         * XXX: This assumes that strategy does the deed right away.
         * XXX: this may not be TRTTD.
         */
        KKASSERT(ap->a_vp->v_rdev != NULL);
        if ((dev_dflags(ap->a_vp->v_rdev) & D_CANFREE) == 0)
                return (0);
        bp = geteblk(ap->a_length);
        bp->b_cmd = BUF_CMD_FREEBLKS;
        bp->b_bio1.bio_offset = ap->a_offset;
        bp->b_bcount = ap->a_length;
        dev_dstrategy(ap->a_vp->v_rdev, &bp->b_bio1);
        return (0);
}

/*
 * Implement the degenerate case where the block requested is the block
 * returned, and assume that the entire device is contiguous with regard
 * to the contiguous block range reported back (runp and runb).
 *
 * spec_bmap(struct vnode *a_vp, off_t a_loffset, struct vnode **a_vpp,
 *           off_t *a_doffsetp, int *a_runp, int *a_runb)
 */
static int
spec_bmap(struct vop_bmap_args *ap)
{
        struct vnode *vp = ap->a_vp;

        if (ap->a_vpp != NULL)
                *ap->a_vpp = vp;
        if (ap->a_doffsetp != NULL)
                *ap->a_doffsetp = ap->a_loffset;
        if (ap->a_runp != NULL)
                *ap->a_runp = MAXBSIZE;
        if (ap->a_runb != NULL) {
                if (ap->a_loffset < MAXBSIZE)
                        *ap->a_runb = (int)ap->a_loffset;
                else
                        *ap->a_runb = MAXBSIZE;
        }
        return (0);
}

/*
 * Device close routine
 *
 * spec_close(struct vnode *a_vp, int a_fflag)
 *
 * NOTE: the vnode may or may not be locked on call.
 */
/* ARGSUSED */
static int
spec_close(struct vop_close_args *ap)
{
        struct proc *p = curproc;
        struct vnode *vp = ap->a_vp;
        cdev_t dev = vp->v_rdev;
        int error;
        int needrelock;

        /*
         * Hack: a tty device that is a controlling terminal
         * has a reference from the session structure.
         * We cannot easily tell that a character device is
         * a controlling terminal, unless it is the closing
         * process' controlling terminal.  In that case,
         * if the reference count is 2 (this last descriptor
         * plus the session), release the reference from the session.
         *
         * It is possible for v_opencount to be 0 or 1 in this case, 0
         * because the tty might have been revoked.
         */
        if (dev)
                reference_dev(dev);
        if (vcount(vp) == 2 && vp->v_opencount <= 1 &&
            p && vp == p->p_session->s_ttyvp) {
                p->p_session->s_ttyvp = NULL;
                vrele(vp);
        }

        /*
         * Vnodes can be opened and closed multiple times.  Do not really
         * close the device unless (1) it is being closed forcibly,
         * (2) the device wants to track closes, or (3) this is the last
         * vnode doing its last close on the device.
         *
         * XXX the VXLOCK (force close) case can leave vnodes referencing
         * a closed device.
         */
        if (dev && ((vp->v_flag & VRECLAIMED) ||
            (dev_dflags(dev) & D_TRACKCLOSE) ||
            (vcount(vp) <= 1 && vp->v_opencount == 1))) {
                needrelock = 0;
                if (vn_islocked(vp)) {
                        needrelock = 1;
                        vn_unlock(vp);
                }
                error = dev_dclose(dev, ap->a_fflag, S_IFCHR);
                if (needrelock)
                        vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
        } else {
                error = 0;
        }

        /*
         * Track the actual opens and closes on the vnode.  The last close
         * disassociates the rdev.  If the rdev is already disassociated
         * the vnode might have been revoked and no further opencount
         * tracking occurs.
         */
        if (dev) {
                /*KKASSERT(vp->v_opencount > 0);*/
                if (dev_ref_debug) {
                        kprintf("spec_close: %s %d\n",
                                dev->si_name, vp->v_opencount - 1);
                }
                if (vp->v_opencount == 1)
                        v_release_rdev(vp);
                release_dev(dev);
        }
        vop_stdclose(ap);
        return(error);
}

/*
 * Print out the contents of a special device vnode.
 *
 * spec_print(struct vnode *a_vp)
 */
static int
spec_print(struct vop_print_args *ap)
{
        kprintf("tag VT_NON, dev %s\n", devtoname(ap->a_vp->v_rdev));
        return (0);
}

/*
 * Special device advisory byte-level locks.
 *
 * spec_advlock(struct vnode *a_vp, caddr_t a_id, int a_op,
 *              struct flock *a_fl, int a_flags)
 */
/* ARGSUSED */
static int
spec_advlock(struct vop_advlock_args *ap)
{
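        /*
         * POSIX (fcntl) style locks are rejected with EINVAL, flock()
         * style locks with EOPNOTSUPP.
         */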
        return ((ap->a_flags & F_POSIX) ? EINVAL : EOPNOTSUPP);
}

static void
spec_getpages_iodone(struct bio *bio)
{
        bio->bio_buf->b_cmd = BUF_CMD_DONE;
        wakeup(bio->bio_buf);
}

static int
spec_getpages(struct vop_getpages_args *ap)
{
        vm_offset_t kva;
        int error;
        int i, pcount, size;
        struct buf *bp;
        vm_page_t m;
        vm_ooffset_t offset;
        int toff, nextoff, nread;
        struct vnode *vp = ap->a_vp;
        int blksiz;
        int gotreqpage;

        error = 0;
        pcount = round_page(ap->a_count) / PAGE_SIZE;

        /*
         * Calculate the offset of the transfer and do a sanity check.
         */
        offset = IDX_TO_OFF(ap->a_m[0]->pindex) + ap->a_offset;

        /*
         * Round up physical size for real devices.  We cannot round using
         * v_mount's block size data because v_mount has nothing to do with
         * the device.  i.e. it's usually '/dev'.  We need the physical block
         * size for the device itself.
         *
         * We can't use v_rdev->si_mountpoint because it only exists when the
         * block device is mounted.  However, we can use v_rdev.
         */

        if (vn_isdisk(vp, NULL))
                blksiz = vp->v_rdev->si_bsize_phys;
        else
                blksiz = DEV_BSIZE;

        size = (ap->a_count + blksiz - 1) & ~(blksiz - 1);
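        /*
         * e.g. an a_count of 3000 with 512 byte blocks rounds up to 3072;
         * blksiz is assumed to be a power of 2 here.
         */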

        bp = getpbuf(NULL);
        kva = (vm_offset_t)bp->b_data;

        /*
         * Map the pages to be read into the kva.
         */
        pmap_qenter(kva, ap->a_m, pcount);

        /* Build a minimal buffer header. */
        bp->b_cmd = BUF_CMD_READ;
        bp->b_bcount = size;
        bp->b_resid = 0;
        bp->b_runningbufspace = size;
        runningbufspace += bp->b_runningbufspace;

        bp->b_bio1.bio_offset = offset;
        bp->b_bio1.bio_done = spec_getpages_iodone;

        mycpu->gd_cnt.v_vnodein++;
        mycpu->gd_cnt.v_vnodepgsin += pcount;

        /* Do the input. */
        vn_strategy(ap->a_vp, &bp->b_bio1);

        crit_enter();

        /* We definitely need to be in a critical section here. */
        while (bp->b_cmd != BUF_CMD_DONE)
                tsleep(bp, 0, "spread", 0);

        crit_exit();

        if (bp->b_flags & B_ERROR) {
                if (bp->b_error)
                        error = bp->b_error;
                else
                        error = EIO;
        }

        /*
         * If EOF is encountered we must zero-extend the result in order
         * to ensure that the page does not contain garbage.  When no
         * error occurs, an early EOF is indicated if b_bcount got truncated.
         * b_resid is relative to b_bcount and should be 0, but some devices
         * might indicate an EOF with b_resid instead of truncating b_bcount.
         */
        nread = bp->b_bcount - bp->b_resid;
        if (nread < ap->a_count)
                bzero((caddr_t)kva + nread, ap->a_count - nread);
        pmap_qremove(kva, pcount);

        gotreqpage = 0;
        for (i = 0, toff = 0; i < pcount; i++, toff = nextoff) {
                nextoff = toff + PAGE_SIZE;
                m = ap->a_m[i];

                m->flags &= ~PG_ZERO;

                if (nextoff <= nread) {
                        m->valid = VM_PAGE_BITS_ALL;
                        vm_page_undirty(m);
                } else if (toff < nread) {
                        /*
                         * Since this is a VM request, we have to supply the
                         * unaligned offset to allow vm_page_set_validclean()
                         * to zero sub-DEV_BSIZE'd portions of the page.
                         */
                        vm_page_set_validclean(m, 0, nread - toff);
                } else {
                        m->valid = 0;
                        vm_page_undirty(m);
                }

                if (i != ap->a_reqpage) {
                        /*
                         * Just in case someone was asking for this page we
                         * now tell them that it is ok to use.
                         */
                        if (!error || (m->valid == VM_PAGE_BITS_ALL)) {
                                if (m->valid) {
                                        if (m->flags & PG_WANTED) {
                                                vm_page_activate(m);
                                        } else {
                                                vm_page_deactivate(m);
                                        }
                                        vm_page_wakeup(m);
                                } else {
                                        vm_page_free(m);
                                }
                        } else {
                                vm_page_free(m);
                        }
                } else if (m->valid) {
                        gotreqpage = 1;
                        /*
                         * Since this is a VM request, we need to make the
                         * entire page presentable by zeroing invalid sections.
                         */
                        if (m->valid != VM_PAGE_BITS_ALL)
                                vm_page_zero_invalid(m, FALSE);
                }
        }
        if (!gotreqpage) {
                m = ap->a_m[ap->a_reqpage];
                kprintf(
            "spec_getpages:(%s) I/O read failure: (error=%d) bp %p vp %p\n",
                        devtoname(vp->v_rdev), error, bp, bp->b_vp);
                kprintf(
            "               size: %d, resid: %d, a_count: %d, valid: 0x%x\n",
                        size, bp->b_resid, ap->a_count, m->valid);
                kprintf(
            "               nread: %d, reqpage: %d, pindex: %lu, pcount: %d\n",
                        nread, ap->a_reqpage, (u_long)m->pindex, pcount);
                /*
                 * Free the buffer header back to the swap buffer pool.
                 */
                relpbuf(bp, NULL);
                return VM_PAGER_ERROR;
        }
        /*
         * Free the buffer header back to the swap buffer pool.
         */
        relpbuf(bp, NULL);
        return VM_PAGER_OK;
}