/*
 * Copyright (c) 2009 The DragonFly Project. All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Alex Hornung <ahornung@gmail.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/fcntl.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/fcntl.h>
#include <sys/namei.h>
#include <sys/dirent.h>
#include <sys/malloc.h>
#include <vm/vm_pager.h>
#include <vm/vm_zone.h>
#include <vm/vm_object.h>
#include <sys/filio.h>
#include <sys/ttycom.h>
#include <sys/sysref2.h>
#include <vfs/devfs/devfs.h>
#include <sys/pioctl.h>

#include <machine/limits.h>
MALLOC_DECLARE(M_DEVFS);
#define DEVFS_BADOP	(void *)devfs_badop
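/*
 * DEVFS_BADOP is plugged into the vop_ops tables below for any vnode
 * operation devfs does not support; devfs_badop() simply reports the
 * attempt via devfs_debug().
 */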
static int devfs_badop(struct vop_generic_args *);
static int devfs_access(struct vop_access_args *);
static int devfs_inactive(struct vop_inactive_args *);
static int devfs_reclaim(struct vop_reclaim_args *);
static int devfs_readdir(struct vop_readdir_args *);
static int devfs_getattr(struct vop_getattr_args *);
static int devfs_setattr(struct vop_setattr_args *);
static int devfs_readlink(struct vop_readlink_args *);
static int devfs_print(struct vop_print_args *);
static int devfs_nresolve(struct vop_nresolve_args *);
static int devfs_nlookupdotdot(struct vop_nlookupdotdot_args *);
static int devfs_nsymlink(struct vop_nsymlink_args *);
static int devfs_nremove(struct vop_nremove_args *);
static int devfs_spec_open(struct vop_open_args *);
static int devfs_spec_close(struct vop_close_args *);
static int devfs_spec_fsync(struct vop_fsync_args *);
static int devfs_spec_read(struct vop_read_args *);
static int devfs_spec_write(struct vop_write_args *);
static int devfs_spec_ioctl(struct vop_ioctl_args *);
static int devfs_spec_poll(struct vop_poll_args *);
static int devfs_spec_kqfilter(struct vop_kqfilter_args *);
static int devfs_spec_strategy(struct vop_strategy_args *);
static void devfs_spec_strategy_done(struct bio *);
static int devfs_spec_freeblks(struct vop_freeblks_args *);
static int devfs_spec_bmap(struct vop_bmap_args *);
static int devfs_spec_advlock(struct vop_advlock_args *);
static void devfs_spec_getpages_iodone(struct bio *);
static int devfs_spec_getpages(struct vop_getpages_args *);
static int devfs_specf_close(struct file *);
static int devfs_specf_read(struct file *, struct uio *, struct ucred *, int);
static int devfs_specf_write(struct file *, struct uio *, struct ucred *, int);
static int devfs_specf_stat(struct file *, struct stat *, struct ucred *);
static int devfs_specf_kqfilter(struct file *, struct knote *);
static int devfs_specf_poll(struct file *, int, struct ucred *);
static int devfs_specf_ioctl(struct file *, u_long, caddr_t, struct ucred *);
static __inline int sequential_heuristic(struct uio *, struct file *);
extern struct lock devfs_lock;
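/*
 * devfs_lock is the global devfs lock (declared extern above, defined in the
 * devfs core code); it is taken LK_EXCLUSIVE around every traversal or
 * modification of the devfs node tree in the operations below.
 */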
/*
 * devfs vnode operations for regular files
 */
struct vop_ops devfs_vnode_norm_vops = {
	.vop_default =		vop_defaultop,
	.vop_access =		devfs_access,
	.vop_advlock =		DEVFS_BADOP,
	.vop_bmap =		DEVFS_BADOP,
	.vop_close =		vop_stdclose,
	.vop_getattr =		devfs_getattr,
	.vop_inactive =		devfs_inactive,
	.vop_ncreate =		DEVFS_BADOP,
	.vop_nresolve =		devfs_nresolve,
	.vop_nlookupdotdot =	devfs_nlookupdotdot,
	.vop_nlink =		DEVFS_BADOP,
	.vop_nmkdir =		DEVFS_BADOP,
	.vop_nmknod =		DEVFS_BADOP,
	.vop_nremove =		devfs_nremove,
	.vop_nrename =		DEVFS_BADOP,
	.vop_nrmdir =		DEVFS_BADOP,
	.vop_nsymlink =		devfs_nsymlink,
	.vop_open =		vop_stdopen,
	.vop_pathconf =		vop_stdpathconf,
	.vop_print =		devfs_print,
	.vop_read =		DEVFS_BADOP,
	.vop_readdir =		devfs_readdir,
	.vop_readlink =		devfs_readlink,
	.vop_reclaim =		devfs_reclaim,
	.vop_setattr =		devfs_setattr,
	.vop_write =		DEVFS_BADOP,
	.vop_ioctl =		DEVFS_BADOP
};
/*
 * devfs vnode operations for character devices
 */
struct vop_ops devfs_vnode_dev_vops = {
	.vop_default =		vop_defaultop,
	.vop_access =		devfs_access,
	.vop_advlock =		devfs_spec_advlock,
	.vop_bmap =		devfs_spec_bmap,
	.vop_close =		devfs_spec_close,
	.vop_freeblks =		devfs_spec_freeblks,
	.vop_fsync =		devfs_spec_fsync,
	.vop_getattr =		devfs_getattr,
	.vop_getpages =		devfs_spec_getpages,
	.vop_inactive =		devfs_inactive,
	.vop_open =		devfs_spec_open,
	.vop_pathconf =		vop_stdpathconf,
	.vop_print =		devfs_print,
	.vop_poll =		devfs_spec_poll,
	.vop_kqfilter =		devfs_spec_kqfilter,
	.vop_read =		devfs_spec_read,
	.vop_readdir =		DEVFS_BADOP,
	.vop_readlink =		DEVFS_BADOP,
	.vop_reclaim =		devfs_reclaim,
	.vop_setattr =		devfs_setattr,
	.vop_strategy =		devfs_spec_strategy,
	.vop_write =		devfs_spec_write,
	.vop_ioctl =		devfs_spec_ioctl
};
struct vop_ops *devfs_vnode_dev_vops_p = &devfs_vnode_dev_vops;
struct fileops devfs_dev_fileops = {
	.fo_read =	devfs_specf_read,
	.fo_write =	devfs_specf_write,
	.fo_ioctl =	devfs_specf_ioctl,
	.fo_poll =	devfs_specf_poll,
	.fo_kqfilter =	devfs_specf_kqfilter,
	.fo_stat =	devfs_specf_stat,
	.fo_close =	devfs_specf_close,
	.fo_shutdown =	nofo_shutdown
};
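/*
 * devfs_dev_fileops is installed on the struct file in devfs_spec_open()
 * (f_ops = &devfs_dev_fileops), so read/write/ioctl/poll/kqfilter/stat/close
 * on an open device descriptor go through the devfs_specf_* routines below
 * rather than the generic vnode fileops.
 */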
/*
 * generic entry point for unsupported operations
 */
devfs_badop(struct vop_generic_args *ap)
	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs: specified vnode operation is not implemented (yet)\n");
devfs_access(struct vop_access_args *ap)
	struct devfs_node *node = DEVFS_NODE(ap->a_vp);
	if (!devfs_node_is_accessible(node))
	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_access() called!\n");
	error = vop_helper_access(ap, node->uid, node->gid,
				  node->mode, node->flags);
	/* XXX: consider possible special cases? terminal, ...? */
devfs_inactive(struct vop_inactive_args *ap)
	struct devfs_node *node = DEVFS_NODE(ap->a_vp);
	if (node == NULL || (node->flags & DEVFS_NODE_LINKED) == 0)
devfs_reclaim(struct vop_reclaim_args *ap)
	struct devfs_node *node;
	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_reclaim() called!\n");
	/*
	 * Check whether the devfs lock is already held; if not, acquire it.
	 */
	if (lockstatus(&devfs_lock, curthread) != LK_EXCLUSIVE) {
		lockmgr(&devfs_lock, LK_EXCLUSIVE);
	/*
	 * Get rid of the devfs_node if it is no longer linked into the
	 * topology.
	 */
	if ((node = DEVFS_NODE(vp)) != NULL) {
		if ((node->flags & DEVFS_NODE_LINKED) == 0) {
			/* NOTE: v_data is NULLed out by freep */
			/* vp->v_data = NULL; handled below */
	lockmgr(&devfs_lock, LK_RELEASE);
	/*
	 * v_rdev needs to be properly released using v_release_rdev;
	 * make sure v_data is NULL as well.
	 */
devfs_readdir(struct vop_readdir_args *ap)
	struct devfs_node *node;
	int error2 = 0, r, error = 0;
	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_readdir() called!\n");
	if (ap->a_uio->uio_offset < 0 || ap->a_uio->uio_offset > INT_MAX)
	if ((error = vn_lock(ap->a_vp, LK_EXCLUSIVE | LK_RETRY)) != 0)
	if (!devfs_node_is_accessible(DEVFS_NODE(ap->a_vp)))
	lockmgr(&devfs_lock, LK_EXCLUSIVE);
	saveoff = ap->a_uio->uio_offset;
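	/*
	 * Readdir cookies: each directory entry emitted below has its offset
	 * recorded in the cookies array, which is handed back to the caller
	 * (e.g. the NFS server) so it can resume the directory scan at an
	 * arbitrary entry later.
	 */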
	if (ap->a_ncookies) {
		ncookies = ap->a_uio->uio_resid / 16 + 1; /* Why / 16 ?? */
		cookies = kmalloc(256 * sizeof(off_t), M_TEMP, M_WAITOK);
	nanotime(&DEVFS_NODE(ap->a_vp)->atime);
	r = vop_write_dirent(&error, ap->a_uio, DEVFS_NODE(ap->a_vp)->d_dir.d_ino, DT_DIR, 1, ".");
	cookies[cookie_index] = saveoff;
	if (cookie_index == ncookies)
	if (DEVFS_NODE(ap->a_vp)->parent) {
		r = vop_write_dirent(&error, ap->a_uio,
				     DEVFS_NODE(ap->a_vp)->d_dir.d_ino,
		r = vop_write_dirent(&error, ap->a_uio,
				     DEVFS_NODE(ap->a_vp)->d_dir.d_ino, DT_DIR, 2, "..");
	cookies[cookie_index] = saveoff;
	if (cookie_index == ncookies)
	TAILQ_FOREACH(node, DEVFS_DENODE_HEAD(DEVFS_NODE(ap->a_vp)), link) {
		if ((node->flags & DEVFS_HIDDEN) || (node->flags & DEVFS_INVISIBLE))
		/*
		 * If the node type is a valid devfs alias, then we make sure
		 * that the target isn't hidden. If it is, we don't show the
		 * link in the directory listing.
		 */
		if ((node->node_type == Plink) && (node->link_target != NULL) &&
		    (node->link_target->flags & DEVFS_HIDDEN))
		if (node->cookie < saveoff)
		saveoff = node->cookie;
		error2 = vop_write_dirent(&error, ap->a_uio,
					  node->d_dir.d_ino, node->d_dir.d_type,
					  node->d_dir.d_namlen, node->d_dir.d_name);
		cookies[cookie_index] = node->cookie;
		if (cookie_index == ncookies)
	lockmgr(&devfs_lock, LK_RELEASE);
	ap->a_uio->uio_offset = saveoff;
	if (error && cookie_index == 0) {
		kfree(cookies, M_TEMP);
		*ap->a_cookies = NULL;
		*ap->a_ncookies = cookie_index;
		*ap->a_cookies = cookies;
devfs_nresolve(struct vop_nresolve_args *ap)
	struct devfs_node *node, *found = NULL;
	struct namecache *ncp;
	struct vnode *vp = NULL;
	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_nresolve() called!\n");
	ncp = ap->a_nch->ncp;
	if (!devfs_node_is_accessible(DEVFS_NODE(ap->a_dvp)))
	lockmgr(&devfs_lock, LK_EXCLUSIVE);
	if ((DEVFS_NODE(ap->a_dvp)->node_type != Proot) &&
	    (DEVFS_NODE(ap->a_dvp)->node_type != Pdir)) {
		devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_nresolve: ap->a_dvp is not a dir!!!\n");
		cache_setvp(ap->a_nch, NULL);
	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_nresolve -search- \n");
	TAILQ_FOREACH(node, DEVFS_DENODE_HEAD(DEVFS_NODE(ap->a_dvp)), link) {
		if (len == node->d_dir.d_namlen) {
			if (!memcmp(ncp->nc_name, node->d_dir.d_name, len)) {
				devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_nresolve: found: %s\n", ncp->nc_name);
	if ((found->node_type == Plink) && (found->link_target))
		found = found->link_target;
	if (!(found->flags & DEVFS_HIDDEN))
		devfs_allocv(/*ap->a_dvp->v_mount, */ &vp, found);
	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_nresolve -2- \n");
	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_nresolve vp==NULL \n");
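	/*
	 * No existing node matched the name. Before giving up, devfs_clone()
	 * below gets a chance to create the device on the fly (for clonable
	 * devices).
	 */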
	/* XXX: len is int, devfs_clone expects size_t*, not int* */
	if ((!hidden) && (!devfs_clone(ncp->nc_name, &len, NULL, 0, ap->a_cred))) {
		devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_nresolve -4- \n");
	cache_setvp(ap->a_nch, NULL);
	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_nresolve -5- \n");
	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_nresolve -6- \n");
	cache_setvp(ap->a_nch, vp);
	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_nresolve -9- \n");
	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_nresolve -end:10- failed? %s \n", (error)?"FAILED!":"OK!");
	lockmgr(&devfs_lock, LK_RELEASE);
devfs_nlookupdotdot(struct vop_nlookupdotdot_args *ap)
	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_nlookupdotdot() called!\n");
	if (!devfs_node_is_accessible(DEVFS_NODE(ap->a_dvp)))
	lockmgr(&devfs_lock, LK_EXCLUSIVE);
	if (DEVFS_NODE(ap->a_dvp)->parent != NULL) {
		devfs_allocv(/*ap->a_dvp->v_mount, */ap->a_vpp, DEVFS_NODE(ap->a_dvp)->parent);
		vn_unlock(*ap->a_vpp);
	lockmgr(&devfs_lock, LK_RELEASE);
	return ((*ap->a_vpp == NULL) ? ENOENT : 0);
devfs_getattr(struct vop_getattr_args *ap)
	struct vattr *vap = ap->a_vap;
	struct devfs_node *node = DEVFS_NODE(ap->a_vp);
	if (!devfs_node_is_accessible(node))
	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_getattr() called for %s!\n", DEVFS_NODE(ap->a_vp)->d_dir.d_name);
	lockmgr(&devfs_lock, LK_EXCLUSIVE);
	/* start by zeroing out the attributes */
	/* next do all the common fields */
	vap->va_type = ap->a_vp->v_type;
	vap->va_mode = node->mode;
	vap->va_fileid = DEVFS_NODE(ap->a_vp)->d_dir.d_ino;
	vap->va_flags = 0; /* XXX: what should this be? */
	vap->va_blocksize = DEV_BSIZE;
	vap->va_bytes = vap->va_size = sizeof(struct devfs_node);
	vap->va_fsid = ap->a_vp->v_mount->mnt_stat.f_fsid.val[0];
	vap->va_atime = node->atime;
	vap->va_mtime = node->mtime;
	vap->va_ctime = node->ctime;
	vap->va_nlink = 1; /* number of references to file */
	vap->va_uid = node->uid;
	vap->va_gid = node->gid;
	if ((DEVFS_NODE(ap->a_vp)->node_type == Pdev) &&
	    (DEVFS_NODE(ap->a_vp)->d_dev)) {
		devfs_debug(DEVFS_DEBUG_DEBUG, "getattr: dev is: %p\n", DEVFS_NODE(ap->a_vp)->d_dev);
		reference_dev(DEVFS_NODE(ap->a_vp)->d_dev);
		vap->va_rminor = DEVFS_NODE(ap->a_vp)->d_dev->si_uminor;
		release_dev(DEVFS_NODE(ap->a_vp)->d_dev);
	/* For a softlink the va_size is the length of the softlink */
	if (DEVFS_NODE(ap->a_vp)->symlink_name != 0) {
		vap->va_size = DEVFS_NODE(ap->a_vp)->symlink_namelen;
	nanotime(&node->atime);
	lockmgr(&devfs_lock, LK_RELEASE);
devfs_setattr(struct vop_setattr_args *ap)
	struct devfs_node *node;
	node = DEVFS_NODE(ap->a_vp);
	if (!devfs_node_is_accessible(node))
	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_setattr() called!\n");
	lockmgr(&devfs_lock, LK_EXCLUSIVE);
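	/*
	 * uid/gid/mode changes follow the usual chown/chmod rules: a
	 * credential that is neither the node's owner nor a member of its
	 * group needs PRIV_VFS_CHOWN to change ownership, and a non-owner
	 * needs PRIV_VFS_ADMIN to change the mode.
	 */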
	if (vap->va_uid != (uid_t)VNOVAL) {
		if ((ap->a_cred->cr_uid != node->uid) &&
		    (!groupmember(node->gid, ap->a_cred))) {
			error = priv_check(curthread, PRIV_VFS_CHOWN);
			devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_setattr, erroring out -1-\n");
		node->uid = vap->va_uid;
	if (vap->va_gid != (gid_t)VNOVAL) {
		if ((ap->a_cred->cr_uid != node->uid) &&
		    (!groupmember(node->gid, ap->a_cred))) {
			error = priv_check(curthread, PRIV_VFS_CHOWN);
			devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_setattr, erroring out -2-\n");
		node->gid = vap->va_gid;
	if (vap->va_mode != (mode_t)VNOVAL) {
		if (ap->a_cred->cr_uid != node->uid) {
			error = priv_check(curthread, PRIV_VFS_ADMIN);
			devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_setattr, erroring out -3-\n");
		node->mode = vap->va_mode;
	nanotime(&node->mtime);
	lockmgr(&devfs_lock, LK_RELEASE);
devfs_readlink(struct vop_readlink_args *ap)
	struct devfs_node *node = DEVFS_NODE(ap->a_vp);
	if (!devfs_node_is_accessible(node))
	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_readlink() called!\n");
	lockmgr(&devfs_lock, LK_EXCLUSIVE);
	ret = uiomove(node->symlink_name, node->symlink_namelen, ap->a_uio);
	lockmgr(&devfs_lock, LK_RELEASE);
devfs_print(struct vop_print_args *ap)
	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_print() called!\n");
	/* XXX: print some useful debugging about node. */
devfs_nsymlink(struct vop_nsymlink_args *ap)
	size_t targetlen = strlen(ap->a_target);
	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_nsymlink() called!\n");
	if (!devfs_node_is_accessible(DEVFS_NODE(ap->a_dvp)))
	ap->a_vap->va_type = VLNK;
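	/*
	 * Symlinks are the only node type userland can create in devfs
	 * (mkdir, mknod, create, etc. are wired to DEVFS_BADOP above). The
	 * new node is flagged DEVFS_USER_CREATED below, which is what
	 * devfs_nremove() later checks before allowing removal.
	 */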
	if ((DEVFS_NODE(ap->a_dvp)->node_type != Proot) &&
	    (DEVFS_NODE(ap->a_dvp)->node_type != Pdir)) {
		devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_nsymlink: ap->a_dvp is not a dir!!!\n");
	lockmgr(&devfs_lock, LK_EXCLUSIVE);
	devfs_allocvp(ap->a_dvp->v_mount, ap->a_vpp, Plink,
		      ap->a_nch->ncp->nc_name, DEVFS_NODE(ap->a_dvp), NULL);
	DEVFS_NODE(*ap->a_vpp)->flags |= DEVFS_USER_CREATED;
	DEVFS_NODE(*ap->a_vpp)->symlink_namelen = targetlen;
	DEVFS_NODE(*ap->a_vpp)->symlink_name = kmalloc(targetlen + 1, M_DEVFS, M_WAITOK);
	memcpy(DEVFS_NODE(*ap->a_vpp)->symlink_name, ap->a_target, targetlen);
	DEVFS_NODE(*ap->a_vpp)->symlink_name[targetlen] = '\0';
	cache_setunresolved(ap->a_nch);
	cache_setvp(ap->a_nch, *ap->a_vpp);
	lockmgr(&devfs_lock, LK_RELEASE);
	return ((*ap->a_vpp == NULL) ? ENOTDIR : 0);
devfs_nremove(struct vop_nremove_args *ap)
	struct devfs_node *node;
	struct namecache *ncp;
	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_nremove() called!\n");
	ncp = ap->a_nch->ncp;
	if (!devfs_node_is_accessible(DEVFS_NODE(ap->a_dvp)))
	lockmgr(&devfs_lock, LK_EXCLUSIVE);
	if ((DEVFS_NODE(ap->a_dvp)->node_type != Proot) &&
	    (DEVFS_NODE(ap->a_dvp)->node_type != Pdir)) {
		devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_nremove: ap->a_dvp is not a dir!!!\n");
	TAILQ_FOREACH(node, DEVFS_DENODE_HEAD(DEVFS_NODE(ap->a_dvp)), link) {
		if (ncp->nc_nlen == node->d_dir.d_namlen) {
			if (!memcmp(ncp->nc_name, node->d_dir.d_name, ncp->nc_nlen)) {
				/* only allow removal of user created stuff (e.g. symlinks) */
				if ((node->flags & DEVFS_USER_CREATED) == 0) {
				cache_inval_vp(node->v_node, CINV_DESTROY);
	cache_setunresolved(ap->a_nch);
	cache_setvp(ap->a_nch, NULL);
	lockmgr(&devfs_lock, LK_RELEASE);
devfs_spec_open(struct vop_open_args *ap)
	struct vnode *vp = ap->a_vp;
	struct vnode *orig_vp = NULL;
	cdev_t dev, ndev = NULL;
	struct devfs_node *node = NULL;
	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_spec_open() called\n");
	if (DEVFS_NODE(vp)) {
		if (DEVFS_NODE(vp)->d_dev == NULL)
		if (!devfs_node_is_accessible(DEVFS_NODE(vp)))
	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_spec_open: -1-\n");
	if ((dev = vp->v_rdev) == NULL)
	if (DEVFS_NODE(vp) && ap->a_fp) {
		devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_spec_open: -1.1-\n");
		lockmgr(&devfs_lock, LK_EXCLUSIVE);
		len = DEVFS_NODE(vp)->d_dir.d_namlen;
		if (!(devfs_clone(DEVFS_NODE(vp)->d_dir.d_name, &len, &ndev, 1, ap->a_cred))) {
			devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_spec_open: -1.2- |%s|\n", ndev->si_name);
			node = devfs_create_device_node(DEVFS_MNTDATA(vp->v_mount)->root_node, dev, NULL, NULL);
			devfs_debug(DEVFS_DEBUG_DEBUG, "parent here is: %s, node is: |%s|\n", (DEVFS_NODE(vp)->parent->node_type == Proot)?"ROOT!":DEVFS_NODE(vp)->parent->d_dir.d_name, node->d_dir.d_name);
			devfs_debug(DEVFS_DEBUG_DEBUG, "test: %s\n", ((struct devfs_node *)(TAILQ_LAST(DEVFS_DENODE_HEAD(DEVFS_NODE(vp)->parent), devfs_node_head)))->d_dir.d_name);
			/*
			 * orig_vp is set to the original vp if we cloned.
			 */
			/* node->flags |= DEVFS_CLONED; */
			devfs_allocv(&vp, node);
		lockmgr(&devfs_lock, LK_RELEASE);
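	/*
	 * If the open went through a clone handler above, vp now refers to
	 * the vnode of the freshly created per-open device node, while
	 * orig_vp remembers the vnode the caller passed in so it can be
	 * cleaned up if the open fails (see the error path below).
	 */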
	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_spec_open() called on %s! \n", dev->si_name);
	/*
	 * Make this field valid before any I/O in ->d_open
	 */
	if (!dev->si_iosize_max)
		dev->si_iosize_max = DFLTPHYS;
	if (dev_dflags(dev) & D_TTY)
		vp->v_flag |= VISTTY;
	error = dev_dopen(dev, ap->a_mode, S_IFCHR, ap->a_cred);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	/*
	 * Clean up any cloned vp if we error out.
	 */
		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "devfs_spec_open() error out: %x\n", error);
		/* orig_vp = NULL; */
	if (dev_dflags(dev) & D_TTY) {
			devfs_debug(DEVFS_DEBUG_DEBUG, "devfs: no t_stop\n");
			tp->t_stop = nottystop;
	if (vn_isdisk(vp, NULL)) {
		if (!dev->si_bsize_phys)
			dev->si_bsize_phys = DEV_BSIZE;
		vinitvmio(vp, IDX_TO_OFF(INT_MAX));
	nanotime(&DEVFS_NODE(vp)->atime);
	/* Ugly pty magic, to make pty devices appear once they are opened */
	if (DEVFS_NODE(vp) && ((DEVFS_NODE(vp)->flags & DEVFS_PTY) == DEVFS_PTY))
		DEVFS_NODE(vp)->flags &= ~DEVFS_INVISIBLE;
	ap->a_fp->f_type = DTYPE_VNODE;
	ap->a_fp->f_flag = ap->a_mode & FMASK;
	ap->a_fp->f_ops = &devfs_dev_fileops;
	ap->a_fp->f_data = vp;
	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_spec_open: -end:3-\n");
devfs_spec_close(struct vop_close_args *ap)
	struct proc *p = curproc;
	struct vnode *vp = ap->a_vp;
	cdev_t dev = vp->v_rdev;
	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_spec_close() called on %s! \n", dev->si_name);
	/*
	 * A couple of hacks for devices and tty devices. The
	 * vnode ref count cannot be used to figure out the
	 * last close, but we can use v_opencount now that
	 * revoke works properly.
	 *
	 * Detect the last close on a controlling terminal and clear
	 * the session (half-close).
	 */
	if (p && vp->v_opencount <= 1 && vp == p->p_session->s_ttyvp) {
		p->p_session->s_ttyvp = NULL;
	/*
	 * Vnodes can be opened and closed multiple times. Do not really
	 * close the device unless (1) it is being closed forcibly,
	 * (2) the device wants to track closes, or (3) this is the last
	 * vnode doing its last close on the device.
	 *
	 * XXX the VXLOCK (force close) case can leave vnodes referencing
	 * a closed device. This might not occur now that our revoke is
	 */
	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_spec_close() -1- \n");
	if (dev && ((vp->v_flag & VRECLAIMED) ||
		    (dev_dflags(dev) & D_TRACKCLOSE) ||
		    (vp->v_opencount == 1))) {
		if (vn_islocked(vp)) {
		error = dev_dclose(dev, ap->a_fflag, S_IFCHR);
		if (DEVFS_NODE(vp) && (DEVFS_NODE(vp)->flags & DEVFS_CLONED) == DEVFS_CLONED) {
			devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_spec_close: last of the cloned ones, so delete node %s\n", dev->si_name);
			devfs_unlinkp(DEVFS_NODE(vp));
			devfs_freep(DEVFS_NODE(vp));
		/* Ugly pty magic, to make pty devices disappear again once they are closed */
		if (DEVFS_NODE(vp) && ((DEVFS_NODE(vp)->flags & DEVFS_PTY) == DEVFS_PTY))
			DEVFS_NODE(vp)->flags |= DEVFS_INVISIBLE;
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_spec_close() -2- \n");
	/*
	 * Track the actual opens and closes on the vnode. The last close
	 * disassociates the rdev. If the rdev is already disassociated or the
	 * opencount is already 0, the vnode might have been revoked and no
	 * further opencount tracking occurs.
	 */
	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_spec_close() -3- \n");
	if (vp->v_opencount == 1) {
		devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_spec_close() -3.5- \n");
	if (vp->v_opencount > 0) {
		devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_spec_close() -4- \n");
	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_spec_close() -5- \n");
	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_spec_close() -end:6- \n");
devfs_specf_close(struct file *fp)
	struct vnode *vp = (struct vnode *)fp->f_data;
	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_specf_close() called! \n");
	fp->f_ops = &badfileops;
	error = vn_close(vp, fp->f_flag);
/*
 * Device-optimized file table vnode read routine.
 *
 * This bypasses the VOP table and talks directly to the device. Most
 * filesystems just route to specfs and can make this optimization.
 *
 * MPALMOSTSAFE - acquires mplock
 */
devfs_specf_read(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
	KASSERT(uio->uio_td == curthread,
		("uio_td %p is not td %p", uio->uio_td, curthread));
	vp = (struct vnode *)fp->f_data;
	if (vp == NULL || vp->v_type == VBAD) {
	if ((dev = vp->v_rdev) == NULL) {
	if (uio->uio_resid == 0) {
	if ((flags & O_FOFFSET) == 0)
		uio->uio_offset = fp->f_offset;
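	/*
	 * Without O_FOFFSET the descriptor's own file offset is used; it is
	 * written back (along with f_nextoff, which feeds the sequential
	 * heuristic) once the transfer below completes.
	 */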
	if (flags & O_FBLOCKING) {
		/* ioflag &= ~IO_NDELAY; */
	} else if (flags & O_FNONBLOCKING) {
		ioflag |= IO_NDELAY;
	} else if (fp->f_flag & FNONBLOCK) {
		ioflag |= IO_NDELAY;
	if (flags & O_FBUFFERED) {
		/* ioflag &= ~IO_DIRECT; */
	} else if (flags & O_FUNBUFFERED) {
		ioflag |= IO_DIRECT;
	} else if (fp->f_flag & O_DIRECT) {
		ioflag |= IO_DIRECT;
	ioflag |= sequential_heuristic(uio, fp);
	error = dev_dread(dev, uio, ioflag);
	nanotime(&DEVFS_NODE(vp)->atime);
	if ((flags & O_FOFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;
devfs_specf_write(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
	KASSERT(uio->uio_td == curthread,
		("uio_td %p is not td %p", uio->uio_td, curthread));
	vp = (struct vnode *)fp->f_data;
	if (vp == NULL || vp->v_type == VBAD) {
	if (vp->v_type == VREG)
		bwillwrite(uio->uio_resid);
	vp = (struct vnode *)fp->f_data;
	if ((dev = vp->v_rdev) == NULL) {
	if ((flags & O_FOFFSET) == 0)
		uio->uio_offset = fp->f_offset;
	if (vp->v_type == VREG &&
	    ((fp->f_flag & O_APPEND) || (flags & O_FAPPEND))) {
		ioflag |= IO_APPEND;
	if (flags & O_FBLOCKING) {
		/* ioflag &= ~IO_NDELAY; */
	} else if (flags & O_FNONBLOCKING) {
		ioflag |= IO_NDELAY;
	} else if (fp->f_flag & FNONBLOCK) {
		ioflag |= IO_NDELAY;
	if (flags & O_FBUFFERED) {
		/* ioflag &= ~IO_DIRECT; */
	} else if (flags & O_FUNBUFFERED) {
		ioflag |= IO_DIRECT;
	} else if (fp->f_flag & O_DIRECT) {
		ioflag |= IO_DIRECT;
	if (flags & O_FASYNCWRITE) {
		/* ioflag &= ~IO_SYNC; */
	} else if (flags & O_FSYNCWRITE) {
	} else if (fp->f_flag & O_FSYNC) {
	if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS))
	ioflag |= sequential_heuristic(uio, fp);
	error = dev_dwrite(dev, uio, ioflag);
	nanotime(&DEVFS_NODE(vp)->mtime);
	if ((flags & O_FOFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;
devfs_specf_stat(struct file *fp, struct stat *sb, struct ucred *cred)
	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_specf_stat() called\n");
	vp = (struct vnode *)fp->f_data;
	error = vn_stat(vp, sb, cred);
	error = VOP_GETATTR(vp, vap);
	/*
	 * Zero the spare stat fields
	 */
	/*
	 * Copy from vattr table ... or not in case it's a cloned device
	 */
	if (vap->va_fsid != VNOVAL)
		sb->st_dev = vap->va_fsid;
		sb->st_dev = vp->v_mount->mnt_stat.f_fsid.val[0];
	sb->st_ino = vap->va_fileid;
	mode = vap->va_mode;
	if (vap->va_nlink > (nlink_t)-1)
		sb->st_nlink = (nlink_t)-1;
		sb->st_nlink = vap->va_nlink;
	sb->st_uid = vap->va_uid;
	sb->st_gid = vap->va_gid;
	sb->st_rdev = dev2udev(DEVFS_NODE(vp)->d_dev);
	sb->st_size = vap->va_size;
	sb->st_atimespec = vap->va_atime;
	sb->st_mtimespec = vap->va_mtime;
	sb->st_ctimespec = vap->va_ctime;
	/*
	 * A VCHR and VBLK device may track the last access and last modified
	 * time independently of the filesystem. This is particularly true
	 * because device read and write calls may bypass the filesystem.
	 */
	if (vp->v_type == VCHR || vp->v_type == VBLK) {
		if (dev->si_lastread) {
			sb->st_atimespec.tv_sec = dev->si_lastread;
			sb->st_atimespec.tv_nsec = 0;
		if (dev->si_lastwrite) {
			sb->st_mtimespec.tv_sec = dev->si_lastwrite;
			sb->st_mtimespec.tv_nsec = 0;
	/*
	 * According to www.opengroup.org, the meaning of st_blksize is
	 * "a filesystem-specific preferred I/O block size for this
	 * object. In some filesystem types, this may vary from file
	 * to file."
	 *
	 * Default to PAGE_SIZE after much discussion.
	 */
	sb->st_blksize = PAGE_SIZE;
	sb->st_flags = vap->va_flags;
	error = priv_check_cred(cred, PRIV_VFS_GENERATION, 0);
	sb->st_gen = (u_int32_t)vap->va_gen;
	sb->st_blocks = vap->va_bytes / S_BLKSIZE;
	sb->st_fsmid = vap->va_fsmid;
devfs_specf_kqfilter(struct file *fp, struct knote *kn)
	vp = (struct vnode *)fp->f_data;
	if (vp == NULL || vp->v_type == VBAD) {
	if ((dev = vp->v_rdev) == NULL) {
	error = dev_dkqfilter(dev, kn);
	nanotime(&DEVFS_NODE(vp)->atime);
devfs_specf_poll(struct file *fp, int events, struct ucred *cred)
	vp = (struct vnode *)fp->f_data;
	if (vp == NULL || vp->v_type == VBAD) {
	if ((dev = vp->v_rdev) == NULL) {
	error = dev_dpoll(dev, events);
	nanotime(&DEVFS_NODE(vp)->atime);
/*
 * MPALMOSTSAFE - acquires mplock
 */
devfs_specf_ioctl(struct file *fp, u_long com, caddr_t data, struct ucred *ucred)
	struct vnode *vp = ((struct vnode *)fp->f_data);
	struct fiodname_args *name_args;
	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_specf_ioctl() called! \n");
	if ((dev = vp->v_rdev) == NULL) {
		error = EBADF;		/* device was revoked */
	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_specf_ioctl() called! for dev %s\n", dev->si_name);
	if (!(dev_dflags(dev) & D_TTY))
		devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_specf_ioctl() called on %s! com is: %x\n", dev->si_name, com);
	if (com == FIODTYPE) {
		*(int *)data = dev_dflags(dev) & D_TYPEMASK;
	} else if (com == FIODNAME) {
		name_args = (struct fiodname_args *)data;
		name = dev->si_name;
		namlen = strlen(name) + 1;
		devfs_debug(DEVFS_DEBUG_DEBUG, "ioctl, got: FIODNAME for %s\n", name);
		if (namlen <= name_args->len)
			error = copyout(dev->si_name, name_args->name, namlen);
	devfs_debug(DEVFS_DEBUG_DEBUG, "ioctl stuff: error: %d\n", error);
	error = dev_dioctl(dev, com, data, fp->f_flag, ucred);
	if (DEVFS_NODE(vp)) {
		nanotime(&DEVFS_NODE(vp)->atime);
		nanotime(&DEVFS_NODE(vp)->mtime);
	if (com == TIOCSCTTY)
		devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_specf_ioctl: got TIOCSCTTY on %s\n", dev->si_name);
	if (error == 0 && com == TIOCSCTTY) {
		devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_specf_ioctl: dealing with TIOCSCTTY on %s\n", dev->si_name);
		struct proc *p = curthread->td_proc;
		struct session *sess;
		sess = p->p_session;
		/* Do nothing if reassigning same control tty */
		if (sess->s_ttyvp == vp) {
		/* Get rid of reference to old control tty */
		ovp = sess->s_ttyvp;
	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_specf_ioctl() finished! \n");
devfs_spec_fsync(struct vop_fsync_args *ap)
	struct vnode *vp = ap->a_vp;
	if (!vn_isdisk(vp, NULL))
	/*
	 * Flush all dirty buffers associated with a block device.
	 */
	error = vfsync(vp, ap->a_waitfor, 10000, NULL, NULL);
devfs_spec_read(struct vop_read_args *ap)
	if (dev == NULL)		/* device was revoked */
	if (uio->uio_resid == 0)
	error = dev_dread(dev, uio, ap->a_ioflag);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	nanotime(&DEVFS_NODE(vp)->atime);
/*
 * Vnode op for write
 *
 * spec_write(struct vnode *a_vp, struct uio *a_uio, int a_ioflag,
 *	      struct ucred *a_cred)
 */
devfs_spec_write(struct vop_write_args *ap)
	KKASSERT(uio->uio_segflg != UIO_NOCOPY);
	if (dev == NULL)		/* device was revoked */
	error = dev_dwrite(dev, uio, ap->a_ioflag);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	nanotime(&DEVFS_NODE(vp)->mtime);
/*
 * Device ioctl operation.
 *
 * spec_ioctl(struct vnode *a_vp, int a_command, caddr_t a_data,
 *	      int a_fflag, struct ucred *a_cred)
 */
devfs_spec_ioctl(struct vop_ioctl_args *ap)
	struct vnode *vp = ap->a_vp;
	if ((dev = vp->v_rdev) == NULL)
		return (EBADF);		/* device was revoked */
	if (ap->a_command == TIOCSCTTY)
		devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_*SPEC*_ioctl: got TIOCSCTTY\n");
	if (DEVFS_NODE(vp)) {
		nanotime(&DEVFS_NODE(vp)->atime);
		nanotime(&DEVFS_NODE(vp)->mtime);
	return (dev_dioctl(dev, ap->a_command, ap->a_data,
			   ap->a_fflag, ap->a_cred));
/*
 * spec_poll(struct vnode *a_vp, int a_events, struct ucred *a_cred)
 */
devfs_spec_poll(struct vop_poll_args *ap)
	struct vnode *vp = ap->a_vp;
	if ((dev = vp->v_rdev) == NULL)
		return (EBADF);		/* device was revoked */
	nanotime(&DEVFS_NODE(vp)->atime);
	return (dev_dpoll(dev, ap->a_events));
/*
 * spec_kqfilter(struct vnode *a_vp, struct knote *a_kn)
 */
devfs_spec_kqfilter(struct vop_kqfilter_args *ap)
	struct vnode *vp = ap->a_vp;
	if ((dev = vp->v_rdev) == NULL)
		return (EBADF);		/* device was revoked */
	nanotime(&DEVFS_NODE(vp)->atime);
	return (dev_dkqfilter(dev, ap->a_kn));
/*
 * Convert a vnode strategy call into a device strategy call. Vnode strategy
 * calls are not limited to device DMA limits so we have to deal with the
 * case.
 *
 * spec_strategy(struct vnode *a_vp, struct bio *a_bio)
 */
devfs_spec_strategy(struct vop_strategy_args *ap)
	struct bio *bio = ap->a_bio;
	struct buf *bp = bio->bio_buf;
	if (bp->b_cmd != BUF_CMD_READ && LIST_FIRST(&bp->b_dep) != NULL)
	/*
	 * Collect statistics on synchronous and asynchronous read
	 * and write counts for disks that have associated filesystems.
	 */
	KKASSERT(vp->v_rdev != NULL);	/* XXX */
	if (vn_isdisk(vp, NULL) && (mp = vp->v_rdev->si_mountpoint) != NULL) {
		if (bp->b_cmd == BUF_CMD_READ) {
			if (bp->b_flags & BIO_SYNC)
				mp->mnt_stat.f_syncreads++;
				mp->mnt_stat.f_asyncreads++;
			if (bp->b_flags & BIO_SYNC)
				mp->mnt_stat.f_syncwrites++;
				mp->mnt_stat.f_asyncwrites++;
	/*
	 * Device iosize limitations only apply to read and write. Shortcut
	 * the I/O if it fits.
	 */
	if ((maxiosize = vp->v_rdev->si_iosize_max) == 0) {
		devfs_debug(DEVFS_DEBUG_DEBUG, "%s: si_iosize_max not set!\n", dev_dname(vp->v_rdev));
		maxiosize = MAXPHYS;
#if SPEC_CHAIN_DEBUG & 2
	if (bp->b_bcount <= maxiosize ||
	    (bp->b_cmd != BUF_CMD_READ && bp->b_cmd != BUF_CMD_WRITE)) {
		dev_dstrategy_chain(vp->v_rdev, bio);
	/*
	 * Clone the buffer and set up an I/O chain to chunk up the I/O.
	 */
	nbp = kmalloc(sizeof(*bp), M_DEVBUF, M_INTWAIT|M_ZERO);
	BUF_LOCK(nbp, LK_EXCLUSIVE);
	nbp->b_flags = B_PAGING | (bp->b_flags & B_BNOCLIP);
	nbp->b_data = bp->b_data;
	nbp->b_bio1.bio_done = devfs_spec_strategy_done;
	nbp->b_bio1.bio_offset = bio->bio_offset;
	nbp->b_bio1.bio_caller_info1.ptr = bio;
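	/*
	 * bio_caller_info1 carries a pointer back to the original bio and
	 * bio_caller_info2 (set below) the chunk size, so that
	 * devfs_spec_strategy_done() can find the parent request and keep
	 * issuing chunks until the whole transfer is covered.
	 */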
	/*
	 * Start the first transfer
	 */
	if (vn_isdisk(vp, NULL))
		chunksize = vp->v_rdev->si_bsize_phys;
		chunksize = DEV_BSIZE;
	chunksize = maxiosize / chunksize * chunksize;
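	/*
	 * chunksize is now maxiosize rounded down to a multiple of the
	 * device's (physical) block size, so every chunk issued below stays
	 * within the device's I/O limit and remains block aligned.
	 */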
#if SPEC_CHAIN_DEBUG & 1
	devfs_debug(DEVFS_DEBUG_DEBUG, "spec_strategy chained I/O chunksize=%d\n", chunksize);
	nbp->b_cmd = bp->b_cmd;
	nbp->b_bcount = chunksize;
	nbp->b_bufsize = chunksize;	/* used to detect a short I/O */
	nbp->b_bio1.bio_caller_info2.index = chunksize;
#if SPEC_CHAIN_DEBUG & 1
	devfs_debug(DEVFS_DEBUG_DEBUG, "spec_strategy: chain %p offset %d/%d bcount %d\n",
		    bp, 0, bp->b_bcount, nbp->b_bcount);
	dev_dstrategy(vp->v_rdev, &nbp->b_bio1);
	if (DEVFS_NODE(vp)) {
		nanotime(&DEVFS_NODE(vp)->atime);
		nanotime(&DEVFS_NODE(vp)->mtime);
/*
 * Chunked up transfer completion routine - chain transfers until done
 */
devfs_spec_strategy_done(struct bio *nbio)
	struct buf *nbp = nbio->bio_buf;
	struct bio *bio = nbio->bio_caller_info1.ptr;	/* original bio */
	struct buf *bp = bio->bio_buf;			/* original bp */
	int chunksize = nbio->bio_caller_info2.index;	/* chunking */
	int boffset = nbp->b_data - bp->b_data;
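	/*
	 * boffset is how far into the original buffer the current chunk
	 * started, recovered from the difference of the two data pointers.
	 */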
	if (nbp->b_flags & B_ERROR) {
		/*
		 * An error terminates the chain, propagate the error back
		 * to the original bp
		 */
		bp->b_flags |= B_ERROR;
		bp->b_error = nbp->b_error;
		bp->b_resid = bp->b_bcount - boffset +
			      (nbp->b_bcount - nbp->b_resid);
#if SPEC_CHAIN_DEBUG & 1
		devfs_debug(DEVFS_DEBUG_DEBUG, "spec_strategy: chain %p error %d bcount %d/%d\n",
			    bp, bp->b_error, bp->b_bcount,
			    bp->b_bcount - bp->b_resid);
		kfree(nbp, M_DEVBUF);
	} else if (nbp->b_resid) {
		/*
		 * A short read or write terminates the chain
		 */
		bp->b_error = nbp->b_error;
		bp->b_resid = bp->b_bcount - boffset +
			      (nbp->b_bcount - nbp->b_resid);
#if SPEC_CHAIN_DEBUG & 1
		devfs_debug(DEVFS_DEBUG_DEBUG, "spec_strategy: chain %p short read(1) bcount %d/%d\n",
			    bp, bp->b_bcount - bp->b_resid, bp->b_bcount);
		kfree(nbp, M_DEVBUF);
	} else if (nbp->b_bcount != nbp->b_bufsize) {
		/*
		 * A short read or write can also occur by truncating b_bcount
		 */
#if SPEC_CHAIN_DEBUG & 1
		devfs_debug(DEVFS_DEBUG_DEBUG, "spec_strategy: chain %p short read(2) bcount %d/%d\n",
			    bp, nbp->b_bcount + boffset, bp->b_bcount);
		bp->b_bcount = nbp->b_bcount + boffset;
		bp->b_resid = nbp->b_resid;
		kfree(nbp, M_DEVBUF);
	} else if (nbp->b_bcount + boffset == bp->b_bcount) {
		/*
		 * No more data terminates the chain
		 */
#if SPEC_CHAIN_DEBUG & 1
		devfs_debug(DEVFS_DEBUG_DEBUG, "spec_strategy: chain %p finished bcount %d\n",
		kfree(nbp, M_DEVBUF);
		/*
		 * Continue the chain
		 */
		boffset += nbp->b_bcount;
		nbp->b_data = bp->b_data + boffset;
		nbp->b_bcount = bp->b_bcount - boffset;
		if (nbp->b_bcount > chunksize)
			nbp->b_bcount = chunksize;
		nbp->b_bio1.bio_done = devfs_spec_strategy_done;
		nbp->b_bio1.bio_offset = bio->bio_offset + boffset;
#if SPEC_CHAIN_DEBUG & 1
		devfs_debug(DEVFS_DEBUG_DEBUG, "spec_strategy: chain %p offset %d/%d bcount %d\n",
			    bp, boffset, bp->b_bcount, nbp->b_bcount);
		dev_dstrategy(nbp->b_vp->v_rdev, &nbp->b_bio1);
/*
 * spec_freeblks(struct vnode *a_vp, daddr_t a_addr, daddr_t a_length)
 */
devfs_spec_freeblks(struct vop_freeblks_args *ap)
	/*
	 * XXX: This assumes that strategy does the deed right away.
	 * XXX: this may not be TRTTD.
	 */
	KKASSERT(ap->a_vp->v_rdev != NULL);
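	/*
	 * A throw-away buffer from geteblk() below is used purely as a
	 * carrier for the FREEBLKS command; only the offset and length
	 * matter to the driver, no data is transferred.
	 */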
	if ((dev_dflags(ap->a_vp->v_rdev) & D_CANFREE) == 0)
	bp = geteblk(ap->a_length);
	bp->b_cmd = BUF_CMD_FREEBLKS;
	bp->b_bio1.bio_offset = ap->a_offset;
	bp->b_bcount = ap->a_length;
	dev_dstrategy(ap->a_vp->v_rdev, &bp->b_bio1);
/*
 * Implement degenerate case where the block requested is the block
 * returned, and assume that the entire device is contiguous in regards
 * to the contiguous block range (runp and runb).
 *
 * spec_bmap(struct vnode *a_vp, off_t a_loffset,
 *	     off_t *a_doffsetp, int *a_runp, int *a_runb)
 */
devfs_spec_bmap(struct vop_bmap_args *ap)
	if (ap->a_doffsetp != NULL)
		*ap->a_doffsetp = ap->a_loffset;
	if (ap->a_runp != NULL)
		*ap->a_runp = MAXBSIZE;
	if (ap->a_runb != NULL) {
		if (ap->a_loffset < MAXBSIZE)
			*ap->a_runb = (int)ap->a_loffset;
			*ap->a_runb = MAXBSIZE;
/*
 * Special device advisory byte-level locks.
 *
 * spec_advlock(struct vnode *a_vp, caddr_t a_id, int a_op,
 *		struct flock *a_fl, int a_flags)
 */
devfs_spec_advlock(struct vop_advlock_args *ap)
	return ((ap->a_flags & F_POSIX) ? EINVAL : EOPNOTSUPP);
devfs_spec_getpages_iodone(struct bio *bio)
	bio->bio_buf->b_cmd = BUF_CMD_DONE;
	wakeup(bio->bio_buf);
/*
 * spec_getpages() - get pages associated with device vnode.
 *
 * Note that spec_read and spec_write do not use the buffer cache, so we
 * must fully implement getpages here.
 */
devfs_spec_getpages(struct vop_getpages_args *ap)
	int i, pcount, size;
	vm_ooffset_t offset;
	int toff, nextoff, nread;
	struct vnode *vp = ap->a_vp;
	pcount = round_page(ap->a_count) / PAGE_SIZE;
	/*
	 * Calculate the offset of the transfer and do sanity check.
	 */
	offset = IDX_TO_OFF(ap->a_m[0]->pindex) + ap->a_offset;
	/*
	 * Round up physical size for real devices. We cannot round using
	 * v_mount's block size data because v_mount has nothing to do with
	 * the device. i.e. it's usually '/dev'. We need the physical block
	 * size for the device itself.
	 *
	 * We can't use v_rdev->si_mountpoint because it only exists when the
	 * block device is mounted. However, we can use v_rdev.
	 */
	if (vn_isdisk(vp, NULL))
		blksiz = vp->v_rdev->si_bsize_phys;
	size = (ap->a_count + blksiz - 1) & ~(blksiz - 1);
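	/*
	 * size is a_count rounded up to a whole number of physical blocks;
	 * anything read beyond what the device returns is zero-filled after
	 * the transfer (see the zero-extend handling below).
	 */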
	kva = (vm_offset_t)bp->b_data;
	/*
	 * Map the pages to be read into the kva.
	 */
	pmap_qenter(kva, ap->a_m, pcount);
	/* Build a minimal buffer header. */
	bp->b_cmd = BUF_CMD_READ;
	bp->b_bcount = size;
	bp->b_runningbufspace = size;
	runningbufspace += bp->b_runningbufspace;
	bp->b_bio1.bio_offset = offset;
	bp->b_bio1.bio_done = devfs_spec_getpages_iodone;
	mycpu->gd_cnt.v_vnodein++;
	mycpu->gd_cnt.v_vnodepgsin += pcount;
	vn_strategy(ap->a_vp, &bp->b_bio1);
	/* We definitely need to be at splbio here. */
	while (bp->b_cmd != BUF_CMD_DONE)
		tsleep(bp, 0, "spread", 0);
	if (bp->b_flags & B_ERROR) {
		error = bp->b_error;
	/*
	 * If EOF is encountered we must zero-extend the result in order
	 * to ensure that the page does not contain garbage. When no
	 * error occurs, an early EOF is indicated if b_bcount got truncated.
	 * b_resid is relative to b_bcount and should be 0, but some devices
	 * might indicate an EOF with b_resid instead of truncating b_bcount.
	 */
	nread = bp->b_bcount - bp->b_resid;
	if (nread < ap->a_count)
		bzero((caddr_t)kva + nread, ap->a_count - nread);
	pmap_qremove(kva, pcount);
	for (i = 0, toff = 0; i < pcount; i++, toff = nextoff) {
		nextoff = toff + PAGE_SIZE;
		m->flags &= ~PG_ZERO;
		if (nextoff <= nread) {
			m->valid = VM_PAGE_BITS_ALL;
		} else if (toff < nread) {
			/*
			 * Since this is a VM request, we have to supply the
			 * unaligned offset to allow vm_page_set_validclean()
			 * to zero sub-DEV_BSIZE'd portions of the page.
			 */
			vm_page_set_validclean(m, 0, nread - toff);
		if (i != ap->a_reqpage) {
			/*
			 * Just in case someone was asking for this page we
			 * now tell them that it is ok to use.
			 */
			if (!error || (m->valid == VM_PAGE_BITS_ALL)) {
				if (m->flags & PG_WANTED) {
					vm_page_activate(m);
					vm_page_deactivate(m);
		} else if (m->valid) {
			/*
			 * Since this is a VM request, we need to make the
			 * entire page presentable by zeroing invalid sections.
			 */
			if (m->valid != VM_PAGE_BITS_ALL)
				vm_page_zero_invalid(m, FALSE);
	m = ap->a_m[ap->a_reqpage];
	devfs_debug(DEVFS_DEBUG_WARNING,
		    "spec_getpages:(%s) I/O read failure: (error=%d) bp %p vp %p\n",
		    devtoname(vp->v_rdev), error, bp, bp->b_vp);
	devfs_debug(DEVFS_DEBUG_WARNING,
		    "   size: %d, resid: %d, a_count: %d, valid: 0x%x\n",
		    size, bp->b_resid, ap->a_count, m->valid);
	devfs_debug(DEVFS_DEBUG_WARNING,
		    "   nread: %d, reqpage: %d, pindex: %lu, pcount: %d\n",
		    nread, ap->a_reqpage, (u_long)m->pindex, pcount);
	/*
	 * Free the buffer header back to the swap buffer pool.
	 */
	return VM_PAGER_ERROR;
	/*
	 * Free the buffer header back to the swap buffer pool.
	 */
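/*
 * sequential_heuristic() below estimates how sequential the recent I/O on a
 * file descriptor has been (by comparing the current uio offset against
 * fp->f_nextoff) and returns a hint, pre-shifted by IO_SEQSHIFT, that the
 * devfs_specf_read()/devfs_specf_write() paths OR directly into ioflag.
 */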
sequential_heuristic(struct uio *uio, struct file *fp)
	/*
	 * Sequential heuristic - detect sequential operation
	 */
	if ((uio->uio_offset == 0 && fp->f_seqcount > 0) ||
	    uio->uio_offset == fp->f_nextoff) {
		int tmpseq = fp->f_seqcount;
		/*
		 * XXX we assume that the filesystem block size is
		 * the default. Not true, but still gives us a pretty
		 * good indicator of how sequential the read operations
		 * are.
		 */
		tmpseq += (uio->uio_resid + BKVASIZE - 1) / BKVASIZE;
		if (tmpseq > IO_SEQMAX)
		fp->f_seqcount = tmpseq;
		return(fp->f_seqcount << IO_SEQSHIFT);
	/*
	 * Not sequential, quick draw-down of seqcount
	 */
	if (fp->f_seqcount > 1)