/*
 * Copyright (c) 2009 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Alex Hornung <ahornung@gmail.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/fcntl.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/dirent.h>
#include <sys/malloc.h>
#include <vm/vm_pager.h>
#include <vm/vm_zone.h>
#include <vm/vm_object.h>
#include <sys/filio.h>
#include <sys/ttycom.h>
#include <sys/sysref2.h>
#include <vfs/devfs/devfs.h>
#include <sys/pioctl.h>
#include <machine/limits.h>

MALLOC_DECLARE(M_DEVFS);
#define DEVFS_BADOP	(void *)devfs_badop
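
/*
 * devfs_badop() takes a struct vop_generic_args *, so the cast above
 * lets the single catch-all handler be plugged into any vop_* slot
 * regardless of that slot's specific argument structure.
 */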

static int devfs_badop(struct vop_generic_args *);
static int devfs_access(struct vop_access_args *);
static int devfs_inactive(struct vop_inactive_args *);
static int devfs_reclaim(struct vop_reclaim_args *);
static int devfs_readdir(struct vop_readdir_args *);
static int devfs_getattr(struct vop_getattr_args *);
static int devfs_setattr(struct vop_setattr_args *);
static int devfs_readlink(struct vop_readlink_args *);
static int devfs_print(struct vop_print_args *);

static int devfs_nresolve(struct vop_nresolve_args *);
static int devfs_nlookupdotdot(struct vop_nlookupdotdot_args *);
static int devfs_nsymlink(struct vop_nsymlink_args *);
static int devfs_nremove(struct vop_nremove_args *);

static int devfs_spec_open(struct vop_open_args *);
static int devfs_spec_close(struct vop_close_args *);
static int devfs_spec_fsync(struct vop_fsync_args *);

static int devfs_spec_read(struct vop_read_args *);
static int devfs_spec_write(struct vop_write_args *);
static int devfs_spec_ioctl(struct vop_ioctl_args *);
static int devfs_spec_poll(struct vop_poll_args *);
static int devfs_spec_kqfilter(struct vop_kqfilter_args *);
static int devfs_spec_strategy(struct vop_strategy_args *);
static void devfs_spec_strategy_done(struct bio *);
static int devfs_spec_freeblks(struct vop_freeblks_args *);
static int devfs_spec_bmap(struct vop_bmap_args *);
static int devfs_spec_advlock(struct vop_advlock_args *);
static void devfs_spec_getpages_iodone(struct bio *);
static int devfs_spec_getpages(struct vop_getpages_args *);

static int devfs_specf_close(struct file *);
static int devfs_specf_read(struct file *, struct uio *, struct ucred *, int);
static int devfs_specf_write(struct file *, struct uio *, struct ucred *, int);
static int devfs_specf_stat(struct file *, struct stat *, struct ucred *);
static int devfs_specf_kqfilter(struct file *, struct knote *);
static int devfs_specf_poll(struct file *, int, struct ucred *);
static int devfs_specf_ioctl(struct file *, u_long, caddr_t, struct ucred *);

static __inline int sequential_heuristic(struct uio *, struct file *);

extern struct lock devfs_lock;

/*
 * devfs vnode operations for regular files
 */
struct vop_ops devfs_vnode_norm_vops = {
	.vop_default =		vop_defaultop,
	.vop_access =		devfs_access,
	.vop_advlock =		DEVFS_BADOP,
	.vop_bmap =		DEVFS_BADOP,
	.vop_close =		vop_stdclose,
	.vop_getattr =		devfs_getattr,
	.vop_inactive =		devfs_inactive,
	.vop_ncreate =		DEVFS_BADOP,
	.vop_nresolve =		devfs_nresolve,
	.vop_nlookupdotdot =	devfs_nlookupdotdot,
	.vop_nlink =		DEVFS_BADOP,
	.vop_nmkdir =		DEVFS_BADOP,
	.vop_nmknod =		DEVFS_BADOP,
	.vop_nremove =		devfs_nremove,
	.vop_nrename =		DEVFS_BADOP,
	.vop_nrmdir =		DEVFS_BADOP,
	.vop_nsymlink =		devfs_nsymlink,
	.vop_open =		vop_stdopen,
	.vop_pathconf =		vop_stdpathconf,
	.vop_print =		devfs_print,
	.vop_read =		DEVFS_BADOP,
	.vop_readdir =		devfs_readdir,
	.vop_readlink =		devfs_readlink,
	.vop_reclaim =		devfs_reclaim,
	.vop_setattr =		devfs_setattr,
	.vop_write =		DEVFS_BADOP,
	.vop_ioctl =		DEVFS_BADOP
};

/*
 * devfs vnode operations for character devices
 */
struct vop_ops devfs_vnode_dev_vops = {
	.vop_default =		vop_defaultop,
	.vop_access =		devfs_access,
	.vop_advlock =		devfs_spec_advlock,
	.vop_bmap =		devfs_spec_bmap,
	.vop_close =		devfs_spec_close,
	.vop_freeblks =		devfs_spec_freeblks,
	.vop_fsync =		devfs_spec_fsync,
	.vop_getattr =		devfs_getattr,
	.vop_getpages =		devfs_spec_getpages,
	.vop_inactive =		devfs_inactive,
	.vop_open =		devfs_spec_open,
	.vop_pathconf =		vop_stdpathconf,
	.vop_print =		devfs_print,
	.vop_poll =		devfs_spec_poll,
	.vop_kqfilter =		devfs_spec_kqfilter,
	.vop_read =		devfs_spec_read,
	.vop_readdir =		DEVFS_BADOP,
	.vop_readlink =		DEVFS_BADOP,
	.vop_reclaim =		devfs_reclaim,
	.vop_setattr =		devfs_setattr,
	.vop_strategy =		devfs_spec_strategy,
	.vop_write =		devfs_spec_write,
	.vop_ioctl =		devfs_spec_ioctl
};

struct vop_ops *devfs_vnode_dev_vops_p = &devfs_vnode_dev_vops;

struct fileops devfs_dev_fileops = {
	.fo_read =	devfs_specf_read,
	.fo_write =	devfs_specf_write,
	.fo_ioctl =	devfs_specf_ioctl,
	.fo_poll =	devfs_specf_poll,
	.fo_kqfilter =	devfs_specf_kqfilter,
	.fo_stat =	devfs_specf_stat,
	.fo_close =	devfs_specf_close,
	.fo_shutdown =	nofo_shutdown
};
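
/*
 * Once devfs_spec_open() has pointed fp->f_ops at the table above,
 * descriptor-level I/O dispatches through it directly instead of the
 * vnode operations vector; the generic kernel path is roughly
 * (sketch, not code from this file):
 *
 *	error = fp->f_ops->fo_read(fp, uio, cred, flags);
 *
 * which lands in devfs_specf_read() below.
 */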

/*
 * generic entry point for unsupported operations
 */
static int
devfs_badop(struct vop_generic_args *ap)
{
	devfs_debug(DEVFS_DEBUG_DEBUG,
		    "devfs: specified vnode operation is not implemented (yet)\n");
	return (EIO);
}

static int
devfs_access(struct vop_access_args *ap)
{
	struct devfs_node *node = DEVFS_NODE(ap->a_vp);
	int error;

	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_access() called!\n");

	error = vop_helper_access(ap, node->uid, node->gid,
				  node->mode, node->flags);

	/* devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_access ruled over %s: %d\n", "UNKNOWN", error); */

	return error;
	/* XXX: consider possible special cases? terminal, ...? */
}

static int
devfs_inactive(struct vop_inactive_args *ap)
{
	struct devfs_node *node = DEVFS_NODE(ap->a_vp);

	/*
	 * Recycle the vnode if the node is gone or no longer linked into
	 * the topology.
	 */
	if (node == NULL || (node->flags & DEVFS_NODE_LINKED) == 0)
		vrecycle(ap->a_vp);

	return 0;
}

static int
devfs_reclaim(struct vop_reclaim_args *ap)
{
	struct devfs_node *node;
	struct vnode *vp;

	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_reclaim() called!\n");

	/*
	 * Check whether the devfs lock is held already; if not, acquire it.
	 */
	if (lockstatus(&devfs_lock, curthread) != LK_EXCLUSIVE) {
		lockmgr(&devfs_lock, LK_EXCLUSIVE);
	}

	/*
	 * Get rid of the devfs_node if it is no longer linked into the
	 * topology.
	 */
	vp = ap->a_vp;
	if ((node = DEVFS_NODE(vp)) != NULL) {
		if ((node->flags & DEVFS_NODE_LINKED) == 0) {
			devfs_freep(node);
			/* NOTE: v_data is NULLed out by freep */
		}
		/* vp->v_data = NULL; handled below */
	}

	lockmgr(&devfs_lock, LK_RELEASE);

	/*
	 * v_rdev was not set with v_associate_rdev (??), so just NULL it
	 * out.  Make sure v_data is NULL as well.
	 */
	vp->v_data = NULL;
	vp->v_rdev = NULL;

	return 0;
}

static int
devfs_readdir(struct vop_readdir_args *ap)
{
	struct devfs_node *node;
	int error2 = 0, r, error = 0;
	int cookie_index;
	int ncookies;
	off_t *cookies = NULL;
	off_t saveoff;

	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_readdir() called!\n");

	if (ap->a_uio->uio_offset < 0 || ap->a_uio->uio_offset > INT_MAX)
		return (EINVAL);
	if ((error = vn_lock(ap->a_vp, LK_EXCLUSIVE | LK_RETRY)) != 0)
		return (error);
	if (DEVFS_NODE(ap->a_vp) == NULL)
		return (ENOENT);

	lockmgr(&devfs_lock, LK_EXCLUSIVE);

	saveoff = ap->a_uio->uio_offset;

	if (ap->a_ncookies) {
		/*
		 * 16 approximates the smallest encoded dirent, so this
		 * deliberately overestimates how many entries can fit
		 * into the caller's buffer.
		 */
		ncookies = ap->a_uio->uio_resid / 16 + 1;
		if (ncookies > 256)
			ncookies = 256;
		cookies = kmalloc(256 * sizeof(off_t), M_TEMP, M_WAITOK);
		cookie_index = 0;
	} else {
		ncookies = -1;
		cookie_index = 0;
	}

	nanotime(&DEVFS_NODE(ap->a_vp)->atime);

	if (saveoff == 0) {
		r = vop_write_dirent(&error, ap->a_uio, DEVFS_NODE(ap->a_vp)->d_dir.d_ino, DT_DIR, 1, ".");
		if (r)
			goto done;
		saveoff++;
		cookies[cookie_index] = saveoff;
		cookie_index++;
		if (cookie_index == ncookies)
			goto done;
	}

	if (saveoff == 1) {
		if (DEVFS_NODE(ap->a_vp)->parent) {
			r = vop_write_dirent(&error, ap->a_uio,
					     DEVFS_NODE(ap->a_vp)->d_dir.d_ino,
					     DT_DIR, 2, "..");
		} else {
			r = vop_write_dirent(&error, ap->a_uio,
					     DEVFS_NODE(ap->a_vp)->d_dir.d_ino, DT_DIR, 2, "..");
		}
		if (r)
			goto done;
		saveoff++;
		cookies[cookie_index] = saveoff;
		cookie_index++;
		if (cookie_index == ncookies)
			goto done;
	}

	TAILQ_FOREACH(node, DEVFS_DENODE_HEAD(DEVFS_NODE(ap->a_vp)), link) {
		if ((node->flags & DEVFS_HIDDEN) || (node->flags & DEVFS_INVISIBLE))
			continue;
		if (node->cookie < saveoff)
			continue;

		saveoff = node->cookie;

		error2 = vop_write_dirent(&error, ap->a_uio,
					  node->d_dir.d_ino, node->d_dir.d_type,
					  node->d_dir.d_namlen, node->d_dir.d_name);
		if (error2)
			break;

		saveoff++;
		cookies[cookie_index] = node->cookie;
		cookie_index++;
		if (cookie_index == ncookies)
			break;
	}

done:
	lockmgr(&devfs_lock, LK_RELEASE);
	vn_unlock(ap->a_vp);

	ap->a_uio->uio_offset = saveoff;
	if (error && cookie_index == 0) {
		kfree(cookies, M_TEMP);
		*ap->a_ncookies = 0;
		*ap->a_cookies = NULL;
	} else {
		*ap->a_ncookies = cookie_index;
		*ap->a_cookies = cookies;
	}
	return (error);
}
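
/*
 * The cookie array filled in above is what lets a consumer (including
 * NFS) restart a partially-read directory: each cookie records the
 * offset at which to resume directly after the corresponding entry,
 * which is why node->cookie is replayed through uio_offset on the
 * next call.
 */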

static int
devfs_nresolve(struct vop_nresolve_args *ap)
{
	struct devfs_node *node, *found = NULL;
	struct namecache *ncp;
	struct vnode *vp = NULL;
	int error = 0;
	int len;
	int hidden = 0;

	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_nresolve() called!\n");

	ncp = ap->a_nch->ncp;
	len = ncp->nc_nlen;

	if (DEVFS_NODE(ap->a_dvp) == NULL)
		return (ENOENT);

	lockmgr(&devfs_lock, LK_EXCLUSIVE);

	if ((DEVFS_NODE(ap->a_dvp)->node_type != Proot) &&
	    (DEVFS_NODE(ap->a_dvp)->node_type != Pdir)) {
		devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_nresolve: ap->a_dvp is not a dir!!!\n");
		cache_setvp(ap->a_nch, NULL);
		goto out;
	}

search:
	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_nresolve -search- \n");
	TAILQ_FOREACH(node, DEVFS_DENODE_HEAD(DEVFS_NODE(ap->a_dvp)), link) {
		if (len == node->d_dir.d_namlen) {
			if (!memcmp(ncp->nc_name, node->d_dir.d_name, len)) {
				devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_nresolve: found: %s\n", ncp->nc_name);
				found = node;
				break;
			}
		}
	}

	if (found) {
		if ((found->node_type == Plink) && (found->link_target))
			found = found->link_target;
		if (!(found->flags & DEVFS_HIDDEN))
			devfs_allocv(/*ap->a_dvp->v_mount, */ &vp, found);
		else
			hidden = 1;
		devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_nresolve -2- \n");
	}

	/* devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_nresolve -3- %c%c%c\n", ncp->nc_name[0], ncp->nc_name[1], ncp->nc_name[2]); */
	if (vp == NULL) {
		devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_nresolve vp==NULL \n");
		/* XXX: len is int, devfs_clone expects size_t*, not int* */
		if ((!hidden) && (!devfs_clone(ncp->nc_name, &len, NULL, 0, ap->a_cred)))
			goto search;
		devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_nresolve -4- \n");
		error = ENOENT;
		cache_setvp(ap->a_nch, NULL);
		devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_nresolve -5- \n");
		goto out;
	}
	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_nresolve -6- \n");

	vn_unlock(vp);
	cache_setvp(ap->a_nch, vp);
	vrele(vp);

	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_nresolve -9- \n");
out:
	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_nresolve -end:10- failed? %s \n", (error)?"FAILED!":"OK!");
	lockmgr(&devfs_lock, LK_RELEASE);
	return error;
}
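
/*
 * The devfs_clone() call above is what makes on-demand devices work:
 * when a name lookup misses in the directory, registered clone
 * handlers get a chance to create a matching device node on the fly
 * before the lookup is declared a failure.
 */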

static int
devfs_nlookupdotdot(struct vop_nlookupdotdot_args *ap)
{
	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_nlookupdotdot() called!\n");
	*ap->a_vpp = NULL;

	lockmgr(&devfs_lock, LK_EXCLUSIVE);
	if (DEVFS_NODE(ap->a_dvp)->parent != NULL) {
		devfs_allocv(/*ap->a_dvp->v_mount, */ap->a_vpp, DEVFS_NODE(ap->a_dvp)->parent);
		vn_unlock(*ap->a_vpp);
	}
	lockmgr(&devfs_lock, LK_RELEASE);

	return ((*ap->a_vpp == NULL) ? ENOENT : 0);
}

static int
devfs_getattr(struct vop_getattr_args *ap)
{
	struct vattr *vap = ap->a_vap;
	struct devfs_node *node = DEVFS_NODE(ap->a_vp);
	int error = 0;

	if (node == NULL)
		return (EINVAL);

	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_getattr() called for %s!\n", DEVFS_NODE(ap->a_vp)->d_dir.d_name);
	lockmgr(&devfs_lock, LK_EXCLUSIVE);

	/* start by zeroing out the attributes */
	VATTR_NULL(vap);

	/* next do all the common fields */
	vap->va_type = ap->a_vp->v_type;
	vap->va_mode = node->mode;
	vap->va_fileid = DEVFS_NODE(ap->a_vp)->d_dir.d_ino;
	vap->va_flags = 0;	/* XXX: what should this be? */
	vap->va_blocksize = DEV_BSIZE;
	vap->va_bytes = vap->va_size = sizeof(struct devfs_node);

	/* devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_getattr() check dev %s!\n", (DEVFS_NODE(ap->a_vp)->d_dev)?(DEVFS_NODE(ap->a_vp)->d_dev->si_name):"Not a device"); */

	vap->va_fsid = ap->a_vp->v_mount->mnt_stat.f_fsid.val[0];

	vap->va_atime = node->atime;
	vap->va_mtime = node->mtime;
	vap->va_ctime = node->ctime;

	vap->va_nlink = 1; /* number of references to file */

	vap->va_uid = node->uid;
	vap->va_gid = node->gid;

	if ((DEVFS_NODE(ap->a_vp)->node_type == Pdev) &&
	    (DEVFS_NODE(ap->a_vp)->d_dev)) {
		devfs_debug(DEVFS_DEBUG_DEBUG, "getattr: dev is: %p\n", DEVFS_NODE(ap->a_vp)->d_dev);
		reference_dev(DEVFS_NODE(ap->a_vp)->d_dev);
		vap->va_rminor = DEVFS_NODE(ap->a_vp)->d_dev->si_uminor;
		release_dev(DEVFS_NODE(ap->a_vp)->d_dev);
	}

	/* For a softlink the va_size is the length of the softlink */
	if (DEVFS_NODE(ap->a_vp)->symlink_name != NULL) {
		vap->va_size = DEVFS_NODE(ap->a_vp)->symlink_namelen;
	}
	nanotime(&node->atime);
	lockmgr(&devfs_lock, LK_RELEASE);
	return (error);	/* XXX: set error usefully */
}
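
/*
 * Note the quirk above: for anything but a softlink, va_size/va_bytes
 * report sizeof(struct devfs_node) rather than a meaningful object
 * size, while softlinks report the length of their target path.
 */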

static int
devfs_setattr(struct vop_setattr_args *ap)
{
	struct devfs_node *node;
	struct vattr *vap;
	int error = 0;

	node = DEVFS_NODE(ap->a_vp);
	if (node == NULL)
		return (EINVAL);
	vap = ap->a_vap;

	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_setattr() called!\n");
	lockmgr(&devfs_lock, LK_EXCLUSIVE);

	if (vap->va_uid != (uid_t)VNOVAL) {
		if ((ap->a_cred->cr_uid != node->uid) &&
		    (!groupmember(node->gid, ap->a_cred))) {
			error = priv_check(curthread, PRIV_VFS_CHOWN);
			if (error) {
				devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_setattr, erroring out -1-\n");
				goto out;
			}
		}
		node->uid = vap->va_uid;
	}

	if (vap->va_gid != (gid_t)VNOVAL) {
		if ((ap->a_cred->cr_uid != node->uid) &&
		    (!groupmember(node->gid, ap->a_cred))) {
			error = priv_check(curthread, PRIV_VFS_CHOWN);
			if (error) {
				devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_setattr, erroring out -2-\n");
				goto out;
			}
		}
		node->gid = vap->va_gid;
	}

	if (vap->va_mode != (mode_t)VNOVAL) {
		if (ap->a_cred->cr_uid != node->uid) {
			error = priv_check(curthread, PRIV_VFS_ADMIN);
			if (error) {
				devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_setattr, erroring out -3-\n");
				goto out;
			}
		}
		node->mode = vap->va_mode;
	}

out:
	nanotime(&node->mtime);
	lockmgr(&devfs_lock, LK_RELEASE);
	return error;
}

static int
devfs_readlink(struct vop_readlink_args *ap)
{
	struct devfs_node *node = DEVFS_NODE(ap->a_vp);
	int ret;

	if (node == NULL)
		return (EINVAL);

	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_readlink() called!\n");

	lockmgr(&devfs_lock, LK_EXCLUSIVE);
	ret = uiomove(node->symlink_name, node->symlink_namelen, ap->a_uio);
	lockmgr(&devfs_lock, LK_RELEASE);

	return ret;
}

static int
devfs_print(struct vop_print_args *ap)
{
	/* struct devfs_node *node = DEVFS_NODE(ap->a_vp); */

	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_print() called!\n");

	/* XXX: print some useful debugging info about the node. */
	return (0);
}

static int
devfs_nsymlink(struct vop_nsymlink_args *ap)
{
	size_t targetlen = strlen(ap->a_target);

	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_nsymlink() called!\n");

	ap->a_vap->va_type = VLNK;

	if (DEVFS_NODE(ap->a_dvp) == NULL)
		return (ENOENT);

	if ((DEVFS_NODE(ap->a_dvp)->node_type != Proot) &&
	    (DEVFS_NODE(ap->a_dvp)->node_type != Pdir)) {
		devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_nsymlink: ap->a_dvp is not a dir!!!\n");
		return (ENOTDIR);
	}

	lockmgr(&devfs_lock, LK_EXCLUSIVE);
	devfs_allocvp(ap->a_dvp->v_mount, ap->a_vpp, Plink,
		      ap->a_nch->ncp->nc_name, DEVFS_NODE(ap->a_dvp), NULL);

	if (*ap->a_vpp) {
		DEVFS_NODE(*ap->a_vpp)->flags |= DEVFS_USER_CREATED;

		DEVFS_NODE(*ap->a_vpp)->symlink_namelen = targetlen;
		DEVFS_NODE(*ap->a_vpp)->symlink_name = kmalloc(targetlen + 1, M_DEVFS, M_WAITOK);
		memcpy(DEVFS_NODE(*ap->a_vpp)->symlink_name, ap->a_target, targetlen);
		DEVFS_NODE(*ap->a_vpp)->symlink_name[targetlen] = '\0';
		cache_setunresolved(ap->a_nch);
		/* XXX: problematic to use cache_* inside lockmgr()? Probably not... */
		cache_setvp(ap->a_nch, *ap->a_vpp);
	}
	lockmgr(&devfs_lock, LK_RELEASE);

	return ((*ap->a_vpp == NULL) ? ENOTDIR : 0);
}
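
/*
 * This path is reached through the normal name-resolution machinery:
 * e.g. a plain "ln -s /dev/null /dev/mylink" from userland ends up
 * here and produces a DEVFS_USER_CREATED node, which is the only kind
 * of node devfs_nremove() below is willing to delete.
 */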

static int
devfs_nremove(struct vop_nremove_args *ap)
{
	struct devfs_node *node;
	struct namecache *ncp;
	/* struct vnode *vp = NULL; */
	int error = ENOENT;

	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_nremove() called!\n");

	ncp = ap->a_nch->ncp;

	if (DEVFS_NODE(ap->a_dvp) == NULL)
		return (ENOENT);

	lockmgr(&devfs_lock, LK_EXCLUSIVE);

	if ((DEVFS_NODE(ap->a_dvp)->node_type != Proot) &&
	    (DEVFS_NODE(ap->a_dvp)->node_type != Pdir)) {
		devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_nremove: ap->a_dvp is not a dir!!!\n");
		error = ENOTDIR;
		goto out;
	}

	TAILQ_FOREACH(node, DEVFS_DENODE_HEAD(DEVFS_NODE(ap->a_dvp)), link) {
		if (ncp->nc_nlen == node->d_dir.d_namlen) {
			if (!memcmp(ncp->nc_name, node->d_dir.d_name, ncp->nc_nlen)) {
				/* allow only removal of user created stuff (e.g. symlinks) */
				if ((node->flags & DEVFS_USER_CREATED) == 0) {
					error = EPERM;
					goto out;
				}
				cache_inval_vp(node->v_node, CINV_DESTROY);
				devfs_unlinkp(node);
				devfs_freep(node);
				error = 0;
				break;
			}
		}
	}

	cache_setunresolved(ap->a_nch);
	cache_setvp(ap->a_nch, NULL);
	/* cache_inval_vp(node->v_node, CINV_DESTROY); */

out:
	lockmgr(&devfs_lock, LK_RELEASE);
	return error;
}

/*
 * spec_open(struct vnode *a_vp, int a_mode, struct ucred *a_cred,
 *	     struct file *a_fp)
 */
static int
devfs_spec_open(struct vop_open_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct vnode *orig_vp = NULL;
	cdev_t dev, ndev = NULL;
	struct devfs_node *node = NULL;
	int error = 0;
	size_t len;

	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_spec_open() called\n");

	if (DEVFS_NODE(vp)) {
		if (DEVFS_NODE(vp)->d_dev == NULL)
			return (ENXIO);
	}

	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_spec_open: -1-\n");

	if ((dev = vp->v_rdev) == NULL)
		return (ENXIO);

	if (DEVFS_NODE(vp) && ap->a_fp) {
		devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_spec_open: -1.1-\n");
		lockmgr(&devfs_lock, LK_EXCLUSIVE);
		len = DEVFS_NODE(vp)->d_dir.d_namlen;
		if (!(devfs_clone(DEVFS_NODE(vp)->d_dir.d_name, &len, &ndev, 1, ap->a_cred))) {
			devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_spec_open: -1.2- |%s|\n", ndev->si_name);

			dev = ndev;
			node = devfs_create_device_node(DEVFS_MNTDATA(vp->v_mount)->root_node, dev, NULL, NULL);

			devfs_debug(DEVFS_DEBUG_DEBUG, "parent here is: %s, node is: |%s|\n", (DEVFS_NODE(vp)->parent->node_type == Proot)?"ROOT!":DEVFS_NODE(vp)->parent->d_dir.d_name, node->d_dir.d_name);
			devfs_debug(DEVFS_DEBUG_DEBUG, "test: %s\n", ((struct devfs_node *)(TAILQ_LAST(DEVFS_DENODE_HEAD(DEVFS_NODE(vp)->parent), devfs_node_head)))->d_dir.d_name);

			/*
			 * orig_vp is set to the original vp if we cloned.
			 */
			/* node->flags |= DEVFS_CLONED; */
			devfs_allocv(&vp, node);
			orig_vp = ap->a_vp;
			ap->a_vp = vp;
		}
		lockmgr(&devfs_lock, LK_RELEASE);
	}

	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_spec_open() called on %s! \n", dev->si_name);

	/*
	 * Make this field valid before any I/O in ->d_open
	 */
	if (!dev->si_iosize_max)
		dev->si_iosize_max = DFLTPHYS;

	if (dev_dflags(dev) & D_TTY)
		vp->v_flag |= VISTTY;

	vn_unlock(vp);
	error = dev_dopen(dev, ap->a_mode, S_IFCHR, ap->a_cred);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);

	/*
	 * Clean up any cloned vp if we error out.
	 */
	if (error) {
		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "devfs_spec_open() error out: %x\n", error);
		if (orig_vp) {
			vput(vp);
			ap->a_vp = orig_vp;
			/* orig_vp = NULL; */
		}
		return error;
	}

	if (dev_dflags(dev) & D_TTY) {
		if (dev->si_tty) {
			struct tty *tp = dev->si_tty;
			if (!tp->t_stop) {
				devfs_debug(DEVFS_DEBUG_DEBUG, "devfs: no t_stop\n");
				tp->t_stop = nottystop;
			}
		}
	}

	if (vn_isdisk(vp, NULL)) {
		if (!dev->si_bsize_phys)
			dev->si_bsize_phys = DEV_BSIZE;
		vinitvmio(vp, IDX_TO_OFF(INT_MAX));
	}

	if (DEVFS_NODE(vp))
		nanotime(&DEVFS_NODE(vp)->atime);

	/* Ugly pty magic, to make pty devices appear once they are opened */
	if (DEVFS_NODE(vp) && ((DEVFS_NODE(vp)->flags & DEVFS_PTY) == DEVFS_PTY))
		DEVFS_NODE(vp)->flags &= ~DEVFS_INVISIBLE;

	if (ap->a_fp) {
		ap->a_fp->f_type = DTYPE_VNODE;
		ap->a_fp->f_flag = ap->a_mode & FMASK;
		ap->a_fp->f_ops = &devfs_dev_fileops;
		ap->a_fp->f_data = vp;
	}

	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_spec_open: -end:3-\n");
	return 0;
}
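
/*
 * Note that once the open above has installed devfs_dev_fileops into
 * fp->f_ops, subsequent read()/write()/ioctl() calls on the descriptor
 * are handled by the devfs_specf_* routines and bypass the vnode
 * operations vector entirely.
 */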

/*
 * spec_close(struct vnode *a_vp, int a_fflag)
 */
static int
devfs_spec_close(struct vop_close_args *ap)
{
	struct proc *p = curproc;
	struct vnode *vp = ap->a_vp;
	cdev_t dev = vp->v_rdev;
	int error = 0;

	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_spec_close() called on %s! \n", dev->si_name);

	/*
	 * A couple of hacks for devices and tty devices.  The
	 * vnode ref count cannot be used to figure out the
	 * last close, but we can use v_opencount now that
	 * revoke works properly.
	 *
	 * Detect the last close on a controlling terminal and clear
	 * the session (half-close).
	 */
	if (dev)
		reference_dev(dev);

	if (p && vp->v_opencount <= 1 && vp == p->p_session->s_ttyvp) {
		p->p_session->s_ttyvp = NULL;
		vrele(vp);
	}

	/*
	 * Vnodes can be opened and closed multiple times.  Do not really
	 * close the device unless (1) it is being closed forcibly,
	 * (2) the device wants to track closes, or (3) this is the last
	 * vnode doing its last close on the device.
	 *
	 * XXX the VXLOCK (force close) case can leave vnodes referencing
	 * a closed device.  This might not occur now that our revoke is
	 * fixed.
	 */
	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_spec_close() -1- \n");
	if (dev && ((vp->v_flag & VRECLAIMED) ||
	    (dev_dflags(dev) & D_TRACKCLOSE) ||
	    (vp->v_opencount == 1))) {
		if (vn_islocked(vp)) {
			vn_unlock(vp);
		}
		error = dev_dclose(dev, ap->a_fflag, S_IFCHR);

		if (DEVFS_NODE(vp) && (DEVFS_NODE(vp)->flags & DEVFS_CLONED) == DEVFS_CLONED) {
			devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_spec_close: last of the cloned ones, so delete node %s\n", dev->si_name);
			devfs_unlinkp(DEVFS_NODE(vp));
			devfs_freep(DEVFS_NODE(vp));
		}

		/* Ugly pty magic, to make pty devices disappear again once they are closed */
		if (DEVFS_NODE(vp) && ((DEVFS_NODE(vp)->flags & DEVFS_PTY) == DEVFS_PTY))
			DEVFS_NODE(vp)->flags |= DEVFS_INVISIBLE;

		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	}

	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_spec_close() -2- \n");
	/*
	 * Track the actual opens and closes on the vnode.  The last close
	 * disassociates the rdev.  If the rdev is already disassociated or
	 * the opencount is already 0, the vnode might have been revoked
	 * and no further opencount tracking occurs.
	 */
	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_spec_close() -3- \n");
	if (vp->v_opencount == 1) {
		devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_spec_close() -3.5- \n");
	}
	if (dev)
		release_dev(dev);
	if (vp->v_opencount > 0) {
		devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_spec_close() -4- \n");
		vop_stdclose(ap);
	}
	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_spec_close() -5- \n");

	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_spec_close() -end:6- \n");
	return (error);
}

/*
 * Device-optimized file table vnode close routine.
 */
static int
devfs_specf_close(struct file *fp)
{
	int error;
	struct vnode *vp = (struct vnode *)fp->f_data;

	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_specf_close() called! \n");

	fp->f_ops = &badfileops;
	error = vn_close(vp, fp->f_flag);

	return (error);
}

/*
 * Device-optimized file table vnode read routine.
 *
 * This bypasses the VOP table and talks directly to the device.  Most
 * filesystems just route to specfs and can make this optimization.
 *
 * MPALMOSTSAFE - acquires mplock
 */
static int
devfs_specf_read(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	struct vnode *vp;
	int ioflag;
	int error;
	cdev_t dev;

	/* devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_specf_read() called! \n"); */
	KASSERT(uio->uio_td == curthread,
		("uio_td %p is not td %p", uio->uio_td, curthread));

	vp = (struct vnode *)fp->f_data;
	if (vp == NULL || vp->v_type == VBAD) {
		error = EBADF;
		goto done;
	}

	if ((dev = vp->v_rdev) == NULL) {
		error = EBADF;
		goto done;
	}
	/* devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_specf_read() called! for dev %s\n", dev->si_name); */

	if (uio->uio_resid == 0) {
		error = 0;
		goto done;
	}
	if ((flags & O_FOFFSET) == 0)
		uio->uio_offset = fp->f_offset;

	ioflag = 0;
	if (flags & O_FBLOCKING) {
		/* ioflag &= ~IO_NDELAY; */
	} else if (flags & O_FNONBLOCKING) {
		ioflag |= IO_NDELAY;
	} else if (fp->f_flag & FNONBLOCK) {
		ioflag |= IO_NDELAY;
	}
	if (flags & O_FBUFFERED) {
		/* ioflag &= ~IO_DIRECT; */
	} else if (flags & O_FUNBUFFERED) {
		ioflag |= IO_DIRECT;
	} else if (fp->f_flag & O_DIRECT) {
		ioflag |= IO_DIRECT;
	}
	ioflag |= sequential_heuristic(uio, fp);

	error = dev_dread(dev, uio, ioflag);

	if (DEVFS_NODE(vp))
		nanotime(&DEVFS_NODE(vp)->atime);
	if ((flags & O_FOFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;
done:
	/* devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_specf_read finished\n"); */
	return (error);
}

/*
 * Device-optimized file table vnode write routine.
 *
 * MPALMOSTSAFE - acquires mplock
 */
static int
devfs_specf_write(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	struct vnode *vp;
	int ioflag;
	int error;
	cdev_t dev;

	/* devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_specf_write() called! \n"); */

	KASSERT(uio->uio_td == curthread,
		("uio_td %p is not p %p", uio->uio_td, curthread));

	vp = (struct vnode *)fp->f_data;
	if (vp == NULL || vp->v_type == VBAD) {
		error = EBADF;
		goto done;
	}
	if (vp->v_type == VREG)
		bwillwrite(uio->uio_resid);
	vp = (struct vnode *)fp->f_data;

	if ((dev = vp->v_rdev) == NULL) {
		error = EBADF;
		goto done;
	}
	/* devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_specf_write() called! for dev %s\n", dev->si_name); */

	if ((flags & O_FOFFSET) == 0)
		uio->uio_offset = fp->f_offset;

	ioflag = IO_UNIT;
	if (vp->v_type == VREG &&
	    ((fp->f_flag & O_APPEND) || (flags & O_FAPPEND))) {
		ioflag |= IO_APPEND;
	}

	if (flags & O_FBLOCKING) {
		/* ioflag &= ~IO_NDELAY; */
	} else if (flags & O_FNONBLOCKING) {
		ioflag |= IO_NDELAY;
	} else if (fp->f_flag & FNONBLOCK) {
		ioflag |= IO_NDELAY;
	}
	if (flags & O_FBUFFERED) {
		/* ioflag &= ~IO_DIRECT; */
	} else if (flags & O_FUNBUFFERED) {
		ioflag |= IO_DIRECT;
	} else if (fp->f_flag & O_DIRECT) {
		ioflag |= IO_DIRECT;
	}
	if (flags & O_FASYNCWRITE) {
		/* ioflag &= ~IO_SYNC; */
	} else if (flags & O_FSYNCWRITE) {
		ioflag |= IO_SYNC;
	} else if (fp->f_flag & O_FSYNC) {
		ioflag |= IO_SYNC;
	}

	if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS))
		ioflag |= IO_SYNC;
	ioflag |= sequential_heuristic(uio, fp);

	error = dev_dwrite(dev, uio, ioflag);

	if (DEVFS_NODE(vp))
		nanotime(&DEVFS_NODE(vp)->mtime);

	if ((flags & O_FOFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;
done:
	/* devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_specf_write done\n"); */
	return (error);
}

/*
 * Device-optimized file table vnode stat routine.
 */
static int
devfs_specf_stat(struct file *fp, struct stat *sb, struct ucred *cred)
{
	struct vnode *vp;
	struct vattr vattr;
	struct vattr *vap;
	mode_t mode;
	cdev_t dev;
	int error;

	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_specf_stat() called\n");

	vp = (struct vnode *)fp->f_data;
	error = vn_stat(vp, sb, cred);
	if (error)
		return (error);

	vap = &vattr;
	error = VOP_GETATTR(vp, vap);
	if (error)
		return (error);

	/*
	 * Zero the spare stat fields
	 */
	sb->st_lspare = 0;
	sb->st_qspare1 = 0;
	sb->st_qspare2 = 0;

	/*
	 * Copy from vattr table ... or not in case it's a cloned device
	 */
	if (vap->va_fsid != VNOVAL)
		sb->st_dev = vap->va_fsid;
	else
		sb->st_dev = vp->v_mount->mnt_stat.f_fsid.val[0];

	sb->st_ino = vap->va_fileid;

	mode = vap->va_mode;
	mode |= S_IFCHR;
	sb->st_mode = mode;

	if (vap->va_nlink > (nlink_t)-1)
		sb->st_nlink = (nlink_t)-1;
	else
		sb->st_nlink = vap->va_nlink;
	sb->st_uid = vap->va_uid;
	sb->st_gid = vap->va_gid;
	sb->st_rdev = dev2udev(DEVFS_NODE(vp)->d_dev);
	sb->st_size = vap->va_size;
	sb->st_atimespec = vap->va_atime;
	sb->st_mtimespec = vap->va_mtime;
	sb->st_ctimespec = vap->va_ctime;

	/*
	 * A VCHR and VBLK device may track the last access and last modified
	 * time independently of the filesystem.  This is particularly true
	 * because device read and write calls may bypass the filesystem.
	 */
	if (vp->v_type == VCHR || vp->v_type == VBLK) {
		dev = vp->v_rdev;
		if (dev != NULL) {
			if (dev->si_lastread) {
				sb->st_atimespec.tv_sec = dev->si_lastread;
				sb->st_atimespec.tv_nsec = 0;
			}
			if (dev->si_lastwrite) {
				sb->st_mtimespec.tv_sec = dev->si_lastwrite;
				sb->st_mtimespec.tv_nsec = 0;
			}
		}
	}

	/*
	 * According to www.opengroup.org, the meaning of st_blksize is
	 *   "a filesystem-specific preferred I/O block size for this
	 *    object.  In some filesystem types, this may vary from file
	 *    to file"
	 * Default to PAGE_SIZE after much discussion.
	 */
	sb->st_blksize = PAGE_SIZE;

	sb->st_flags = vap->va_flags;

	error = priv_check_cred(cred, PRIV_VFS_GENERATION, 0);
	if (error)
		sb->st_gen = 0;
	else
		sb->st_gen = (u_int32_t)vap->va_gen;

	sb->st_blocks = vap->va_bytes / S_BLKSIZE;
	sb->st_fsmid = vap->va_fsmid;

	return (0);
}

static int
devfs_specf_kqfilter(struct file *fp, struct knote *kn)
{
	struct vnode *vp;
	int error;
	cdev_t dev;

	/* devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_specf_kqfilter() called! \n"); */

	vp = (struct vnode *)fp->f_data;
	if (vp == NULL || vp->v_type == VBAD) {
		error = EBADF;
		goto done;
	}
	if ((dev = vp->v_rdev) == NULL) {
		error = EBADF;
		goto done;
	}

	error = dev_dkqfilter(dev, kn);

	if (DEVFS_NODE(vp))
		nanotime(&DEVFS_NODE(vp)->atime);
done:
	return (error);
}

static int
devfs_specf_poll(struct file *fp, int events, struct ucred *cred)
{
	struct vnode *vp;
	int error;
	cdev_t dev;

	/* devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_specf_poll() called! \n"); */

	vp = (struct vnode *)fp->f_data;
	if (vp == NULL || vp->v_type == VBAD) {
		error = EBADF;
		goto done;
	}
	if ((dev = vp->v_rdev) == NULL) {
		error = EBADF;
		goto done;
	}

	error = dev_dpoll(dev, events);

	if (DEVFS_NODE(vp))
		nanotime(&DEVFS_NODE(vp)->atime);
done:
	return (error);
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
devfs_specf_ioctl(struct file *fp, u_long com, caddr_t data, struct ucred *ucred)
{
	struct vnode *vp = ((struct vnode *)fp->f_data);
	struct vnode *ovp;
	/* struct vattr vattr; */
	cdev_t dev;
	int error;
	struct fiodname_args *name_args;
	size_t namlen;
	const char *name;

	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_specf_ioctl() called! \n");

	if ((dev = vp->v_rdev) == NULL) {
		error = EBADF;		/* device was revoked */
		goto out;
	}
	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_specf_ioctl() called! for dev %s\n", dev->si_name);

	if (!(dev_dflags(dev) & D_TTY))
		devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_specf_ioctl() called on %s! com is: %lx\n", dev->si_name, com);

	if (com == FIODTYPE) {
		*(int *)data = dev_dflags(dev) & D_TYPEMASK;
		error = 0;
		goto out;
	} else if (com == FIODNAME) {
		name_args = (struct fiodname_args *)data;
		name = dev->si_name;
		namlen = strlen(name) + 1;

		devfs_debug(DEVFS_DEBUG_DEBUG, "ioctl, got: FIODNAME for %s\n", name);

		if (namlen <= name_args->len)
			error = copyout(dev->si_name, name_args->name, namlen);
		else
			error = EINVAL;

		/* name_args->len = namlen; //need _IOWR to enable this */
		devfs_debug(DEVFS_DEBUG_DEBUG, "ioctl stuff: error: %d\n", error);
		goto out;
	}

	error = dev_dioctl(dev, com, data, fp->f_flag, ucred);

	if (DEVFS_NODE(vp)) {
		nanotime(&DEVFS_NODE(vp)->atime);
		nanotime(&DEVFS_NODE(vp)->mtime);
	}

	if (com == TIOCSCTTY)
		devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_specf_ioctl: got TIOCSCTTY on %s\n", dev->si_name);
	if (error == 0 && com == TIOCSCTTY) {
		struct proc *p = curthread->td_proc;
		struct session *sess;

		devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_specf_ioctl: dealing with TIOCSCTTY on %s\n", dev->si_name);
		if (p == NULL) {
			error = ENOTTY;
			goto out;
		}
		sess = p->p_session;
		/* Do nothing if reassigning same control tty */
		if (sess->s_ttyvp == vp) {
			error = 0;
			goto out;
		}
		/* Get rid of reference to old control tty */
		ovp = sess->s_ttyvp;
		vref(vp);
		sess->s_ttyvp = vp;
		if (ovp)
			vrele(ovp);
	}

out:
	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_specf_ioctl() finished! \n");
	return (error);
}
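
/*
 * Sketch of how FIODNAME is used from userland (hypothetical snippet,
 * not part of this file; error handling omitted):
 *
 *	struct fiodname_args fa;
 *	char buf[64];
 *
 *	fa.name = buf;
 *	fa.len = sizeof(buf);
 *	if (ioctl(fd, FIODNAME, &fa) == 0)
 *		printf("%s\n", buf);
 *
 * The copyout above only succeeds when the caller's buffer is large
 * enough for the device name including its terminating NUL.
 */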

/*
 * spec_fsync(struct vnode *a_vp, int a_waitfor)
 */
static int
devfs_spec_fsync(struct vop_fsync_args *ap)
{
	struct vnode *vp = ap->a_vp;
	int error;

	if (!vn_isdisk(vp, NULL))
		return (0);

	/*
	 * Flush all dirty buffers associated with a block device.
	 */
	error = vfsync(vp, ap->a_waitfor, 10000, NULL, NULL);
	return (error);
}

/*
 * Vnode op for read
 *
 * spec_read(struct vnode *a_vp, struct uio *a_uio, int a_ioflag,
 *	     struct ucred *a_cred)
 */
static int
devfs_spec_read(struct vop_read_args *ap)
{
	struct vnode *vp;
	struct uio *uio;
	cdev_t dev;
	int error;

	vp = ap->a_vp;
	uio = ap->a_uio;
	dev = vp->v_rdev;

	if (dev == NULL)		/* device was revoked */
		return (EBADF);
	if (uio->uio_resid == 0)
		return (0);

	vn_unlock(vp);
	error = dev_dread(dev, uio, ap->a_ioflag);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);

	if (DEVFS_NODE(vp))
		nanotime(&DEVFS_NODE(vp)->atime);

	return (error);
}

/*
 * Vnode op for write
 *
 * spec_write(struct vnode *a_vp, struct uio *a_uio, int a_ioflag,
 *	      struct ucred *a_cred)
 */
static int
devfs_spec_write(struct vop_write_args *ap)
{
	struct vnode *vp;
	struct uio *uio;
	cdev_t dev;
	int error;

	vp = ap->a_vp;
	uio = ap->a_uio;
	dev = vp->v_rdev;

	KKASSERT(uio->uio_segflg != UIO_NOCOPY);

	if (dev == NULL)		/* device was revoked */
		return (EBADF);

	vn_unlock(vp);
	error = dev_dwrite(dev, uio, ap->a_ioflag);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);

	if (DEVFS_NODE(vp))
		nanotime(&DEVFS_NODE(vp)->mtime);

	return (error);
}

/*
 * Device ioctl operation.
 *
 * spec_ioctl(struct vnode *a_vp, int a_command, caddr_t a_data,
 *	      int a_fflag, struct ucred *a_cred)
 */
static int
devfs_spec_ioctl(struct vop_ioctl_args *ap)
{
	cdev_t dev;
	struct vnode *vp = ap->a_vp;

	if ((dev = vp->v_rdev) == NULL)
		return (EBADF);		/* device was revoked */
	if (ap->a_command == TIOCSCTTY)
		devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_*SPEC*_ioctl: got TIOCSCTTY\n");

	if (DEVFS_NODE(vp)) {
		nanotime(&DEVFS_NODE(vp)->atime);
		nanotime(&DEVFS_NODE(vp)->mtime);
	}

	return (dev_dioctl(dev, ap->a_command, ap->a_data,
			   ap->a_fflag, ap->a_cred));
}

/*
 * spec_poll(struct vnode *a_vp, int a_events, struct ucred *a_cred)
 */
static int
devfs_spec_poll(struct vop_poll_args *ap)
{
	cdev_t dev;
	struct vnode *vp = ap->a_vp;

	if ((dev = vp->v_rdev) == NULL)
		return (EBADF);		/* device was revoked */

	if (DEVFS_NODE(vp))
		nanotime(&DEVFS_NODE(vp)->atime);

	return (dev_dpoll(dev, ap->a_events));
}

/*
 * spec_kqfilter(struct vnode *a_vp, struct knote *a_kn)
 */
static int
devfs_spec_kqfilter(struct vop_kqfilter_args *ap)
{
	cdev_t dev;
	struct vnode *vp = ap->a_vp;

	if ((dev = vp->v_rdev) == NULL)
		return (EBADF);		/* device was revoked */

	if (DEVFS_NODE(vp))
		nanotime(&DEVFS_NODE(vp)->atime);

	return (dev_dkqfilter(dev, ap->a_kn));
}

/*
 * Convert a vnode strategy call into a device strategy call.  Vnode strategy
 * calls are not limited to device DMA limits so we have to deal with the
 * case.
 *
 * spec_strategy(struct vnode *a_vp, struct bio *a_bio)
 */
static int
devfs_spec_strategy(struct vop_strategy_args *ap)
{
	struct bio *bio = ap->a_bio;
	struct buf *bp = bio->bio_buf;
	struct buf *nbp;
	struct vnode *vp;
	struct mount *mp;
	int chunksize;
	int maxiosize;

	if (bp->b_cmd != BUF_CMD_READ && LIST_FIRST(&bp->b_dep) != NULL)
		buf_start(bp);

	/*
	 * Collect statistics on synchronous and asynchronous read
	 * and write counts for disks that have associated filesystems.
	 */
	vp = ap->a_vp;
	KKASSERT(vp->v_rdev != NULL);	/* XXX */
	if (vn_isdisk(vp, NULL) && (mp = vp->v_rdev->si_mountpoint) != NULL) {
		if (bp->b_cmd == BUF_CMD_READ) {
			/* XXX: no idea what has changed here... */
			if (bp->b_flags & BIO_SYNC)
				mp->mnt_stat.f_syncreads++;
			else
				mp->mnt_stat.f_asyncreads++;
		} else {
			if (bp->b_flags & BIO_SYNC)
				mp->mnt_stat.f_syncwrites++;
			else
				mp->mnt_stat.f_asyncwrites++;
		}
	}

	/*
	 * Device iosize limitations only apply to read and write.  Shortcut
	 * the I/O if it fits.
	 */
	if ((maxiosize = vp->v_rdev->si_iosize_max) == 0) {
		devfs_debug(DEVFS_DEBUG_DEBUG, "%s: si_iosize_max not set!\n", dev_dname(vp->v_rdev));
		maxiosize = MAXPHYS;
	}
#if SPEC_CHAIN_DEBUG & 2
	maxiosize = 4096;
#endif
	if (bp->b_bcount <= maxiosize ||
	    (bp->b_cmd != BUF_CMD_READ && bp->b_cmd != BUF_CMD_WRITE)) {
		dev_dstrategy_chain(vp->v_rdev, bio);
		return (0);
	}

	/*
	 * Clone the buffer and set up an I/O chain to chunk up the I/O.
	 */
	nbp = kmalloc(sizeof(*bp), M_DEVBUF, M_INTWAIT|M_ZERO);
	initbufbio(nbp);
	BUF_LOCK(nbp, LK_EXCLUSIVE);
	BUF_KERNPROC(nbp);
	nbp->b_vp = vp;
	nbp->b_flags = B_PAGING | (bp->b_flags & B_BNOCLIP);
	nbp->b_data = bp->b_data;
	nbp->b_bio1.bio_done = devfs_spec_strategy_done;
	nbp->b_bio1.bio_offset = bio->bio_offset;
	nbp->b_bio1.bio_caller_info1.ptr = bio;

	/*
	 * Start the first transfer
	 */
	if (vn_isdisk(vp, NULL))
		chunksize = vp->v_rdev->si_bsize_phys;
	else
		chunksize = DEV_BSIZE;
	chunksize = maxiosize / chunksize * chunksize;
#if SPEC_CHAIN_DEBUG & 1
	devfs_debug(DEVFS_DEBUG_DEBUG, "spec_strategy chained I/O chunksize=%d\n", chunksize);
#endif
	nbp->b_cmd = bp->b_cmd;
	nbp->b_bcount = chunksize;
	nbp->b_bufsize = chunksize;	/* used to detect a short I/O */
	nbp->b_bio1.bio_caller_info2.index = chunksize;

#if SPEC_CHAIN_DEBUG & 1
	devfs_debug(DEVFS_DEBUG_DEBUG, "spec_strategy: chain %p offset %d/%d bcount %d\n",
		    bp, 0, bp->b_bcount, nbp->b_bcount);
#endif

	dev_dstrategy(vp->v_rdev, &nbp->b_bio1);

	if (DEVFS_NODE(vp)) {
		nanotime(&DEVFS_NODE(vp)->atime);
		nanotime(&DEVFS_NODE(vp)->mtime);
	}

	return (0);
}
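
/*
 * Example of the chunksize arithmetic above (illustrative numbers only):
 * with si_iosize_max = 131072 and si_bsize_phys = 512 the transfer is
 * split into 131072 / 512 * 512 = 131072 byte chunks; with a
 * hypothetical maxiosize of 100000 the integer division rounds the
 * chunk down to the largest multiple of the physical block size,
 * 100000 / 512 * 512 = 99840.
 */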

/*
 * Chunked up transfer completion routine - chain transfers until done
 */
static void
devfs_spec_strategy_done(struct bio *nbio)
{
	struct buf *nbp = nbio->bio_buf;
	struct bio *bio = nbio->bio_caller_info1.ptr;	/* original bio */
	struct buf *bp = bio->bio_buf;			/* original bp */
	int chunksize = nbio->bio_caller_info2.index;	/* chunking */
	int boffset = nbp->b_data - bp->b_data;

	if (nbp->b_flags & B_ERROR) {
		/*
		 * An error terminates the chain, propagate the error back
		 * to the original bp
		 */
		bp->b_flags |= B_ERROR;
		bp->b_error = nbp->b_error;
		bp->b_resid = bp->b_bcount - boffset +
			      (nbp->b_bcount - nbp->b_resid);
#if SPEC_CHAIN_DEBUG & 1
		devfs_debug(DEVFS_DEBUG_DEBUG, "spec_strategy: chain %p error %d bcount %d/%d\n",
			    bp, bp->b_error, bp->b_bcount,
			    bp->b_bcount - bp->b_resid);
#endif
		kfree(nbp, M_DEVBUF);
		biodone(bio);
	} else if (nbp->b_resid) {
		/*
		 * A short read or write terminates the chain
		 */
		bp->b_error = nbp->b_error;
		bp->b_resid = bp->b_bcount - boffset +
			      (nbp->b_bcount - nbp->b_resid);
#if SPEC_CHAIN_DEBUG & 1
		devfs_debug(DEVFS_DEBUG_DEBUG, "spec_strategy: chain %p short read(1) bcount %d/%d\n",
			    bp, bp->b_bcount - bp->b_resid, bp->b_bcount);
#endif
		kfree(nbp, M_DEVBUF);
		biodone(bio);
	} else if (nbp->b_bcount != nbp->b_bufsize) {
		/*
		 * A short read or write can also occur by truncating b_bcount
		 */
#if SPEC_CHAIN_DEBUG & 1
		devfs_debug(DEVFS_DEBUG_DEBUG, "spec_strategy: chain %p short read(2) bcount %d/%d\n",
			    bp, nbp->b_bcount + boffset, bp->b_bcount);
#endif
		bp->b_error = 0;
		bp->b_bcount = nbp->b_bcount + boffset;
		bp->b_resid = nbp->b_resid;
		kfree(nbp, M_DEVBUF);
		biodone(bio);
	} else if (nbp->b_bcount + boffset == bp->b_bcount) {
		/*
		 * No more data terminates the chain
		 */
#if SPEC_CHAIN_DEBUG & 1
		devfs_debug(DEVFS_DEBUG_DEBUG, "spec_strategy: chain %p finished bcount %d\n",
			    bp, bp->b_bcount);
#endif
		bp->b_error = 0;
		bp->b_resid = 0;
		kfree(nbp, M_DEVBUF);
		biodone(bio);
	} else {
		/*
		 * Continue the chain
		 */
		boffset += nbp->b_bcount;
		nbp->b_data = bp->b_data + boffset;
		nbp->b_bcount = bp->b_bcount - boffset;
		if (nbp->b_bcount > chunksize)
			nbp->b_bcount = chunksize;
		nbp->b_bio1.bio_done = devfs_spec_strategy_done;
		nbp->b_bio1.bio_offset = bio->bio_offset + boffset;

#if SPEC_CHAIN_DEBUG & 1
		devfs_debug(DEVFS_DEBUG_DEBUG, "spec_strategy: chain %p offset %d/%d bcount %d\n",
			    bp, boffset, bp->b_bcount, nbp->b_bcount);
#endif

		dev_dstrategy(nbp->b_vp->v_rdev, &nbp->b_bio1);
	}
}
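
/*
 * Illustration of the chain for a hypothetical 1 MiB write with a
 * 128 KiB chunk size: devfs_spec_strategy() issues the first 128 KiB,
 * and each completion above re-arms nbp for the next 128 KiB at
 * boffset 131072, 262144, ... until boffset + b_bcount reaches the
 * original b_bcount, for 8 sequential dev_dstrategy() calls in total.
 */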

/*
 * spec_freeblks(struct vnode *a_vp, daddr_t a_addr, daddr_t a_length)
 */
static int
devfs_spec_freeblks(struct vop_freeblks_args *ap)
{
	struct buf *bp;

	/*
	 * XXX: This assumes that strategy does the deed right away.
	 * XXX: this may not be TRTTD.
	 */
	KKASSERT(ap->a_vp->v_rdev != NULL);
	if ((dev_dflags(ap->a_vp->v_rdev) & D_CANFREE) == 0)
		return (0);
	bp = geteblk(ap->a_length);
	bp->b_cmd = BUF_CMD_FREEBLKS;
	bp->b_bio1.bio_offset = ap->a_offset;
	bp->b_bcount = ap->a_length;
	dev_dstrategy(ap->a_vp->v_rdev, &bp->b_bio1);
	return (0);
}

/*
 * Implement degenerate case where the block requested is the block
 * returned, and assume that the entire device is contiguous in regards
 * to the contiguous block range (runp and runb).
 *
 * spec_bmap(struct vnode *a_vp, off_t a_loffset,
 *	     off_t *a_doffsetp, int *a_runp, int *a_runb)
 */
static int
devfs_spec_bmap(struct vop_bmap_args *ap)
{
	if (ap->a_doffsetp != NULL)
		*ap->a_doffsetp = ap->a_loffset;
	if (ap->a_runp != NULL)
		*ap->a_runp = MAXBSIZE;
	if (ap->a_runb != NULL) {
		if (ap->a_loffset < MAXBSIZE)
			*ap->a_runb = (int)ap->a_loffset;
		else
			*ap->a_runb = MAXBSIZE;
	}
	return (0);
}

/*
 * Special device advisory byte-level locks.
 *
 * spec_advlock(struct vnode *a_vp, caddr_t a_id, int a_op,
 *		struct flock *a_fl, int a_flags)
 */
static int
devfs_spec_advlock(struct vop_advlock_args *ap)
{
	return ((ap->a_flags & F_POSIX) ? EINVAL : EOPNOTSUPP);
}

static void
devfs_spec_getpages_iodone(struct bio *bio)
{
	bio->bio_buf->b_cmd = BUF_CMD_DONE;
	wakeup(bio->bio_buf);
}

/*
 * spec_getpages() - get pages associated with device vnode.
 *
 * Note that spec_read and spec_write do not use the buffer cache, so we
 * must fully implement getpages here.
 */
static int
devfs_spec_getpages(struct vop_getpages_args *ap)
{
	vm_offset_t kva;
	int error;
	int i, pcount, size;
	struct buf *bp;
	vm_page_t m;
	vm_ooffset_t offset;
	int toff, nextoff, nread;
	struct vnode *vp = ap->a_vp;
	int blksiz;
	int gotreqpage;

	error = 0;
	pcount = round_page(ap->a_count) / PAGE_SIZE;

	/*
	 * Calculate the offset of the transfer and do sanity check.
	 */
	offset = IDX_TO_OFF(ap->a_m[0]->pindex) + ap->a_offset;

	/*
	 * Round up physical size for real devices.  We cannot round using
	 * v_mount's block size data because v_mount has nothing to do with
	 * the device.  i.e. it's usually '/dev'.  We need the physical block
	 * size for the device itself.
	 *
	 * We can't use v_rdev->si_mountpoint because it only exists when the
	 * block device is mounted.  However, we can use v_rdev.
	 */
	if (vn_isdisk(vp, NULL))
		blksiz = vp->v_rdev->si_bsize_phys;
	else
		blksiz = DEV_BSIZE;

	size = (ap->a_count + blksiz - 1) & ~(blksiz - 1);

	bp = getpbuf(NULL);
	kva = (vm_offset_t)bp->b_data;

	/*
	 * Map the pages to be read into the kva.
	 */
	pmap_qenter(kva, ap->a_m, pcount);

	/* Build a minimal buffer header. */
	bp->b_cmd = BUF_CMD_READ;
	bp->b_bcount = size;
	bp->b_resid = 0;
	bp->b_runningbufspace = size;
	if (size) {
		runningbufspace += bp->b_runningbufspace;
	}

	bp->b_bio1.bio_offset = offset;
	bp->b_bio1.bio_done = devfs_spec_getpages_iodone;

	mycpu->gd_cnt.v_vnodein++;
	mycpu->gd_cnt.v_vnodepgsin += pcount;

	/* Do the input. */
	vn_strategy(ap->a_vp, &bp->b_bio1);

	crit_enter();

	/* We definitely need to be at splbio here. */
	while (bp->b_cmd != BUF_CMD_DONE)
		tsleep(bp, 0, "spread", 0);

	crit_exit();

	if (bp->b_flags & B_ERROR) {
		if (bp->b_error)
			error = bp->b_error;
		else
			error = EIO;
	}

	/*
	 * If EOF is encountered we must zero-extend the result in order
	 * to ensure that the page does not contain garbage.  When no
	 * error occurs, an early EOF is indicated if b_bcount got truncated.
	 * b_resid is relative to b_bcount and should be 0, but some devices
	 * might indicate an EOF with b_resid instead of truncating b_bcount.
	 */
	nread = bp->b_bcount - bp->b_resid;
	if (nread < ap->a_count)
		bzero((caddr_t)kva + nread, ap->a_count - nread);
	pmap_qremove(kva, pcount);

	gotreqpage = 0;
	for (i = 0, toff = 0; i < pcount; i++, toff = nextoff) {
		nextoff = toff + PAGE_SIZE;
		m = ap->a_m[i];

		m->flags &= ~PG_ZERO;

		if (nextoff <= nread) {
			m->valid = VM_PAGE_BITS_ALL;
			vm_page_undirty(m);
		} else if (toff < nread) {
			/*
			 * Since this is a VM request, we have to supply the
			 * unaligned offset to allow vm_page_set_validclean()
			 * to zero sub-DEV_BSIZE'd portions of the page.
			 */
			vm_page_set_validclean(m, 0, nread - toff);
		} else {
			m->valid = 0;
			vm_page_undirty(m);
		}

		if (i != ap->a_reqpage) {
			/*
			 * Just in case someone was asking for this page we
			 * now tell them that it is ok to use.
			 */
			if (!error || (m->valid == VM_PAGE_BITS_ALL)) {
				if (m->valid) {
					if (m->flags & PG_WANTED) {
						vm_page_activate(m);
					} else {
						vm_page_deactivate(m);
					}
					vm_page_wakeup(m);
				} else {
					vm_page_free(m);
				}
			} else {
				vm_page_free(m);
			}
		} else if (m->valid) {
			gotreqpage = 1;
			/*
			 * Since this is a VM request, we need to make the
			 * entire page presentable by zeroing invalid sections.
			 */
			if (m->valid != VM_PAGE_BITS_ALL)
				vm_page_zero_invalid(m, FALSE);
		}
	}
	if (!gotreqpage) {
		m = ap->a_m[ap->a_reqpage];
		devfs_debug(DEVFS_DEBUG_WARNING,
		    "spec_getpages:(%s) I/O read failure: (error=%d) bp %p vp %p\n",
		    devtoname(vp->v_rdev), error, bp, bp->b_vp);
		devfs_debug(DEVFS_DEBUG_WARNING,
		    " size: %d, resid: %d, a_count: %d, valid: 0x%x\n",
		    size, bp->b_resid, ap->a_count, m->valid);
		devfs_debug(DEVFS_DEBUG_WARNING,
		    " nread: %d, reqpage: %d, pindex: %lu, pcount: %d\n",
		    nread, ap->a_reqpage, (u_long)m->pindex, pcount);
		/*
		 * Free the buffer header back to the swap buffer pool.
		 */
		relpbuf(bp, NULL);
		return VM_PAGER_ERROR;
	}
	/*
	 * Free the buffer header back to the swap buffer pool.
	 */
	relpbuf(bp, NULL);
	return VM_PAGER_OK;
}

static __inline int
sequential_heuristic(struct uio *uio, struct file *fp)
{
	/*
	 * Sequential heuristic - detect sequential operation
	 */
	if ((uio->uio_offset == 0 && fp->f_seqcount > 0) ||
	    uio->uio_offset == fp->f_nextoff) {
		int tmpseq = fp->f_seqcount;
		/*
		 * XXX we assume that the filesystem block size is
		 * the default.  Not true, but still gives us a pretty
		 * good indicator of how sequential the read operations
		 * are.
		 */
		tmpseq += (uio->uio_resid + BKVASIZE - 1) / BKVASIZE;
		if (tmpseq > IO_SEQMAX)
			tmpseq = IO_SEQMAX;
		fp->f_seqcount = tmpseq;
		return(fp->f_seqcount << IO_SEQSHIFT);
	}

	/*
	 * Not sequential, quick draw-down of seqcount
	 */
	if (fp->f_seqcount > 1)
		fp->f_seqcount = 1;
	else
		fp->f_seqcount = 0;
	return(0);
}
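
/*
 * Worked example (illustrative, assuming the stock BKVASIZE of 16384
 * and IO_SEQMAX of 0x7f): a process that just read up to f_nextoff and
 * now reads another 64 KiB sequentially bumps f_seqcount by
 * (65536 + 16383) / 16384 = 4, clamped at IO_SEQMAX; the result is
 * shifted by IO_SEQSHIFT so the device layer receives it in the upper
 * ioflag bits alongside flags such as IO_NDELAY and IO_DIRECT.
 */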