devfs - Introduce MPSAFE read/write/ioctl support
[dragonfly.git] / sys / vfs / devfs / devfs_vnops.c
/*
 * Copyright (c) 2009 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Alex Hornung <ahornung@gmail.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/time.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/fcntl.h>
#include <sys/proc.h>
#include <sys/priv.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>
#include <sys/uio.h>
#include <sys/mount.h>
#include <sys/file.h>
#include <sys/namei.h>
#include <sys/dirent.h>
#include <sys/malloc.h>
#include <sys/stat.h>
#include <sys/reg.h>
#include <vm/vm_pager.h>
#include <vm/vm_zone.h>
#include <vm/vm_object.h>
#include <sys/filio.h>
#include <sys/ttycom.h>
#include <sys/tty.h>
#include <sys/diskslice.h>
#include <sys/sysctl.h>
#include <sys/devfs.h>
#include <sys/pioctl.h>

#include <machine/limits.h>

#include <sys/buf2.h>
#include <sys/sysref2.h>
#include <sys/mplock2.h>
#include <vm/vm_page2.h>

MALLOC_DECLARE(M_DEVFS);
#define DEVFS_BADOP	(void *)devfs_badop

static int devfs_badop(struct vop_generic_args *);
static int devfs_access(struct vop_access_args *);
static int devfs_inactive(struct vop_inactive_args *);
static int devfs_reclaim(struct vop_reclaim_args *);
static int devfs_readdir(struct vop_readdir_args *);
static int devfs_getattr(struct vop_getattr_args *);
static int devfs_setattr(struct vop_setattr_args *);
static int devfs_readlink(struct vop_readlink_args *);
static int devfs_print(struct vop_print_args *);

static int devfs_nresolve(struct vop_nresolve_args *);
static int devfs_nlookupdotdot(struct vop_nlookupdotdot_args *);
static int devfs_nsymlink(struct vop_nsymlink_args *);
static int devfs_nremove(struct vop_nremove_args *);

static int devfs_spec_open(struct vop_open_args *);
static int devfs_spec_close(struct vop_close_args *);
static int devfs_spec_fsync(struct vop_fsync_args *);

static int devfs_spec_read(struct vop_read_args *);
static int devfs_spec_write(struct vop_write_args *);
static int devfs_spec_ioctl(struct vop_ioctl_args *);
static int devfs_spec_poll(struct vop_poll_args *);
static int devfs_spec_kqfilter(struct vop_kqfilter_args *);
static int devfs_spec_strategy(struct vop_strategy_args *);
static void devfs_spec_strategy_done(struct bio *);
static int devfs_spec_freeblks(struct vop_freeblks_args *);
static int devfs_spec_bmap(struct vop_bmap_args *);
static int devfs_spec_advlock(struct vop_advlock_args *);
static void devfs_spec_getpages_iodone(struct bio *);
static int devfs_spec_getpages(struct vop_getpages_args *);


static int devfs_specf_close(struct file *);
static int devfs_specf_read(struct file *, struct uio *, struct ucred *, int);
static int devfs_specf_write(struct file *, struct uio *, struct ucred *, int);
static int devfs_specf_stat(struct file *, struct stat *, struct ucred *);
static int devfs_specf_kqfilter(struct file *, struct knote *);
static int devfs_specf_poll(struct file *, int, struct ucred *);
static int devfs_specf_ioctl(struct file *, u_long, caddr_t,
		struct ucred *, struct sysmsg *);
static __inline int sequential_heuristic(struct uio *, struct file *);

extern struct lock devfs_lock;

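/*
 * Statistics for the MPSAFE I/O paths: counts of read/write calls
 * dispatched without the MP lock versus those that still had to
 * acquire it.  Exported read-only as vfs.devfs.* sysctls at the
 * bottom of this file.
 */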
static int mpsafe_reads, mpsafe_writes, mplock_reads, mplock_writes;

/*
 * devfs vnode operations for regular files
 */
struct vop_ops devfs_vnode_norm_vops = {
	.vop_default = vop_defaultop,
	.vop_access = devfs_access,
	.vop_advlock = DEVFS_BADOP,
	.vop_bmap = DEVFS_BADOP,
	.vop_close = vop_stdclose,
	.vop_getattr = devfs_getattr,
	.vop_inactive = devfs_inactive,
	.vop_ncreate = DEVFS_BADOP,
	.vop_nresolve = devfs_nresolve,
	.vop_nlookupdotdot = devfs_nlookupdotdot,
	.vop_nlink = DEVFS_BADOP,
	.vop_nmkdir = DEVFS_BADOP,
	.vop_nmknod = DEVFS_BADOP,
	.vop_nremove = devfs_nremove,
	.vop_nrename = DEVFS_BADOP,
	.vop_nrmdir = DEVFS_BADOP,
	.vop_nsymlink = devfs_nsymlink,
	.vop_open = vop_stdopen,
	.vop_pathconf = vop_stdpathconf,
	.vop_print = devfs_print,
	.vop_read = DEVFS_BADOP,
	.vop_readdir = devfs_readdir,
	.vop_readlink = devfs_readlink,
	.vop_reclaim = devfs_reclaim,
	.vop_setattr = devfs_setattr,
	.vop_write = DEVFS_BADOP,
	.vop_ioctl = DEVFS_BADOP
};

/*
 * devfs vnode operations for character devices
 */
struct vop_ops devfs_vnode_dev_vops = {
	.vop_default = vop_defaultop,
	.vop_access = devfs_access,
	.vop_advlock = devfs_spec_advlock,
	.vop_bmap = devfs_spec_bmap,
	.vop_close = devfs_spec_close,
	.vop_freeblks = devfs_spec_freeblks,
	.vop_fsync = devfs_spec_fsync,
	.vop_getattr = devfs_getattr,
	.vop_getpages = devfs_spec_getpages,
	.vop_inactive = devfs_inactive,
	.vop_open = devfs_spec_open,
	.vop_pathconf = vop_stdpathconf,
	.vop_print = devfs_print,
	.vop_poll = devfs_spec_poll,
	.vop_kqfilter = devfs_spec_kqfilter,
	.vop_read = devfs_spec_read,
	.vop_readdir = DEVFS_BADOP,
	.vop_readlink = DEVFS_BADOP,
	.vop_reclaim = devfs_reclaim,
	.vop_setattr = devfs_setattr,
	.vop_strategy = devfs_spec_strategy,
	.vop_write = devfs_spec_write,
	.vop_ioctl = devfs_spec_ioctl
};

struct vop_ops *devfs_vnode_dev_vops_p = &devfs_vnode_dev_vops;

struct fileops devfs_dev_fileops = {
	.fo_read = devfs_specf_read,
	.fo_write = devfs_specf_write,
	.fo_ioctl = devfs_specf_ioctl,
	.fo_poll = devfs_specf_poll,
	.fo_kqfilter = devfs_specf_kqfilter,
	.fo_stat = devfs_specf_stat,
	.fo_close = devfs_specf_close,
	.fo_shutdown = nofo_shutdown
};
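
/*
 * devfs_spec_open() installs devfs_dev_fileops on the struct file so
 * that subsequent read/write/ioctl calls bypass the vnode layer and
 * go straight to the device via the devfs_specf_*() fast paths.
 */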

/*
 * These two functions are possibly temporary hacks for
 * devices (aka the pty code) which want to control the
 * node attributes themselves.
 *
 * XXX we may ultimately desire to simply remove the uid/gid/mode
 * from the node entirely.
 */
static __inline void
node_sync_dev_get(struct devfs_node *node)
{
	cdev_t dev;

	if ((dev = node->d_dev) && (dev->si_flags & SI_OVERRIDE)) {
		node->uid = dev->si_uid;
		node->gid = dev->si_gid;
		node->mode = dev->si_perms;
	}
}

static __inline void
node_sync_dev_set(struct devfs_node *node)
{
	cdev_t dev;

	if ((dev = node->d_dev) && (dev->si_flags & SI_OVERRIDE)) {
		dev->si_uid = node->uid;
		dev->si_gid = node->gid;
		dev->si_perms = node->mode;
	}
}
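
/*
 * Illustrative sketch (not part of this file): a driver that manages
 * its own node attributes opts in by setting SI_OVERRIDE and seeding
 * the cdev fields, e.g.
 *
 *	dev->si_flags |= SI_OVERRIDE;
 *	dev->si_uid = 0;
 *	dev->si_gid = 0;
 *	dev->si_perms = 0600;
 *
 * after which the node_sync_dev_get()/node_sync_dev_set() pair above
 * keeps the devfs node and the cdev attributes in sync.
 */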

/*
 * generic entry point for unsupported operations
 */
static int
devfs_badop(struct vop_generic_args *ap)
{
	return (EIO);
}


static int
devfs_access(struct vop_access_args *ap)
{
	struct devfs_node *node = DEVFS_NODE(ap->a_vp);
	int error;

	if (!devfs_node_is_accessible(node))
		return ENOENT;
	node_sync_dev_get(node);
	error = vop_helper_access(ap, node->uid, node->gid,
				  node->mode, node->flags);

	return error;
}


static int
devfs_inactive(struct vop_inactive_args *ap)
{
	struct devfs_node *node = DEVFS_NODE(ap->a_vp);

	if (node == NULL || (node->flags & DEVFS_NODE_LINKED) == 0)
		vrecycle(ap->a_vp);
	return 0;
}


static int
devfs_reclaim(struct vop_reclaim_args *ap)
{
	struct devfs_node *node;
	struct vnode *vp;
	int locked;

	/*
	 * Check if the devfs_lock is already held; if not, acquire it.
	 */
	if (lockstatus(&devfs_lock, curthread) != LK_EXCLUSIVE) {
		lockmgr(&devfs_lock, LK_EXCLUSIVE);
		locked = 1;
	} else {
		locked = 0;
	}

	/*
	 * Get rid of the devfs_node if it is no longer linked into the
	 * topology.
	 */
	vp = ap->a_vp;
	if ((node = DEVFS_NODE(vp)) != NULL) {
		node->v_node = NULL;
		if ((node->flags & DEVFS_NODE_LINKED) == 0)
			devfs_freep(node);
	}

	if (locked)
		lockmgr(&devfs_lock, LK_RELEASE);

	/*
	 * v_rdev needs to be properly released using v_release_rdev
	 * Make sure v_data is NULL as well.
	 */
	vp->v_data = NULL;
	v_release_rdev(vp);
	return 0;
}


static int
devfs_readdir(struct vop_readdir_args *ap)
{
	struct devfs_node *dnode = DEVFS_NODE(ap->a_vp);
	struct devfs_node *node;
	int cookie_index;
	int ncookies;
	int error2;
	int error;
	int r;
	off_t *cookies;
	off_t saveoff;

	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_readdir() called!\n");

	if (ap->a_uio->uio_offset < 0 || ap->a_uio->uio_offset > INT_MAX)
		return (EINVAL);
	if ((error = vn_lock(ap->a_vp, LK_EXCLUSIVE | LK_RETRY)) != 0)
		return (error);

	if (!devfs_node_is_accessible(dnode)) {
		vn_unlock(ap->a_vp);
		return ENOENT;
	}

	lockmgr(&devfs_lock, LK_EXCLUSIVE);

	saveoff = ap->a_uio->uio_offset;

	if (ap->a_ncookies) {
		ncookies = ap->a_uio->uio_resid / 16 + 1; /* Why / 16 ?? */
		if (ncookies > 256)
			ncookies = 256;
		cookies = kmalloc(256 * sizeof(off_t), M_TEMP, M_WAITOK);
		cookie_index = 0;
	} else {
		ncookies = -1;
		cookies = NULL;
		cookie_index = 0;
	}

	nanotime(&dnode->atime);

	if (saveoff == 0) {
		r = vop_write_dirent(&error, ap->a_uio, dnode->d_dir.d_ino,
				     DT_DIR, 1, ".");
		if (r)
			goto done;
		if (cookies)
			cookies[cookie_index] = saveoff;
		saveoff++;
		cookie_index++;
		if (cookie_index == ncookies)
			goto done;
	}

	if (saveoff == 1) {
		if (dnode->parent) {
			r = vop_write_dirent(&error, ap->a_uio,
					     dnode->parent->d_dir.d_ino,
					     DT_DIR, 2, "..");
		} else {
			r = vop_write_dirent(&error, ap->a_uio,
					     dnode->d_dir.d_ino,
					     DT_DIR, 2, "..");
		}
		if (r)
			goto done;
		if (cookies)
			cookies[cookie_index] = saveoff;
		saveoff++;
		cookie_index++;
		if (cookie_index == ncookies)
			goto done;
	}

	TAILQ_FOREACH(node, DEVFS_DENODE_HEAD(dnode), link) {
		if ((node->flags & DEVFS_HIDDEN) ||
		    (node->flags & DEVFS_INVISIBLE)) {
			continue;
		}

		/*
		 * If the node is a devfs alias, make sure the target
		 * isn't hidden.  If it is, don't show the link in the
		 * directory listing.
		 */
		if ((node->node_type == Plink) && (node->link_target != NULL) &&
		    (node->link_target->flags & DEVFS_HIDDEN))
			continue;

		if (node->cookie < saveoff)
			continue;

		saveoff = node->cookie;

		error2 = vop_write_dirent(&error, ap->a_uio, node->d_dir.d_ino,
					  node->d_dir.d_type,
					  node->d_dir.d_namlen,
					  node->d_dir.d_name);

		if (error2)
			break;

		saveoff++;

		if (cookies)
			cookies[cookie_index] = node->cookie;
		++cookie_index;
		if (cookie_index == ncookies)
			break;
	}

done:
	lockmgr(&devfs_lock, LK_RELEASE);
	vn_unlock(ap->a_vp);

	ap->a_uio->uio_offset = saveoff;
	if (error && cookie_index == 0) {
		if (cookies) {
			kfree(cookies, M_TEMP);
			*ap->a_ncookies = 0;
			*ap->a_cookies = NULL;
		}
	} else {
		if (cookies) {
			*ap->a_ncookies = cookie_index;
			*ap->a_cookies = cookies;
		}
	}
	return (error);
}
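
/*
 * Directory offsets in devfs_readdir() above are synthetic: "." is
 * returned at offset 0, ".." at offset 1, and each node thereafter at
 * its node->cookie, which is also what gets handed back to userland
 * in the cookie array for seekdir()-style resumption.
 */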


static int
devfs_nresolve(struct vop_nresolve_args *ap)
{
	struct devfs_node *dnode = DEVFS_NODE(ap->a_dvp);
	struct devfs_node *node, *found = NULL;
	struct namecache *ncp;
	struct vnode *vp = NULL;
	int error = 0;
	int len;
	int depth;

	ncp = ap->a_nch->ncp;
	len = ncp->nc_nlen;

	if (!devfs_node_is_accessible(dnode))
		return ENOENT;

	lockmgr(&devfs_lock, LK_EXCLUSIVE);

	if ((dnode->node_type != Proot) && (dnode->node_type != Pdir)) {
		error = ENOENT;
		cache_setvp(ap->a_nch, NULL);
		goto out;
	}

	TAILQ_FOREACH(node, DEVFS_DENODE_HEAD(dnode), link) {
		if (len == node->d_dir.d_namlen) {
			if (!memcmp(ncp->nc_name, node->d_dir.d_name, len)) {
				found = node;
				break;
			}
		}
	}

	if (found) {
		depth = 0;
		while ((found->node_type == Plink) && (found->link_target)) {
			if (depth >= 8) {
				devfs_debug(DEVFS_DEBUG_SHOW,
					    "Recursive link or depth >= 8");
				break;
			}

			found = found->link_target;
			++depth;
		}

		if (!(found->flags & DEVFS_HIDDEN))
			devfs_allocv(/*ap->a_dvp->v_mount, */ &vp, found);
	}

	if (vp == NULL) {
		error = ENOENT;
		cache_setvp(ap->a_nch, NULL);
		goto out;
	}
	KKASSERT(vp);
	vn_unlock(vp);
	cache_setvp(ap->a_nch, vp);
	vrele(vp);
out:
	lockmgr(&devfs_lock, LK_RELEASE);

	return error;
}


static int
devfs_nlookupdotdot(struct vop_nlookupdotdot_args *ap)
{
	struct devfs_node *dnode = DEVFS_NODE(ap->a_dvp);

	*ap->a_vpp = NULL;
	if (!devfs_node_is_accessible(dnode))
		return ENOENT;

	lockmgr(&devfs_lock, LK_EXCLUSIVE);
	if (dnode->parent != NULL) {
		devfs_allocv(ap->a_vpp, dnode->parent);
		vn_unlock(*ap->a_vpp);
	}
	lockmgr(&devfs_lock, LK_RELEASE);

	return ((*ap->a_vpp == NULL) ? ENOENT : 0);
}


static int
devfs_getattr(struct vop_getattr_args *ap)
{
	struct devfs_node *node = DEVFS_NODE(ap->a_vp);
	struct vattr *vap = ap->a_vap;
	struct partinfo pinfo;
	int error = 0;

#if 0
	if (!devfs_node_is_accessible(node))
		return ENOENT;
#endif
	node_sync_dev_get(node);

	lockmgr(&devfs_lock, LK_EXCLUSIVE);

	/* start by zeroing out the attributes */
	VATTR_NULL(vap);

	/* next do all the common fields */
	vap->va_type = ap->a_vp->v_type;
	vap->va_mode = node->mode;
	vap->va_fileid = DEVFS_NODE(ap->a_vp)->d_dir.d_ino;
	vap->va_flags = 0;	/* XXX: what should this be? */
	vap->va_blocksize = DEV_BSIZE;
	vap->va_bytes = vap->va_size = sizeof(struct devfs_node);

	vap->va_fsid = ap->a_vp->v_mount->mnt_stat.f_fsid.val[0];

	vap->va_atime = node->atime;
	vap->va_mtime = node->mtime;
	vap->va_ctime = node->ctime;

	vap->va_nlink = 1;	/* number of references to file */

	vap->va_uid = node->uid;
	vap->va_gid = node->gid;

	vap->va_rmajor = 0;
	vap->va_rminor = 0;

	if ((node->node_type == Pdev) && node->d_dev) {
		reference_dev(node->d_dev);
		vap->va_rminor = node->d_dev->si_uminor;
		release_dev(node->d_dev);
	}

	/* For a softlink the va_size is the length of the softlink */
	if (node->symlink_name != NULL) {
		vap->va_bytes = vap->va_size = node->symlink_namelen;
	}

	/*
	 * For a disk-type device, va_size is the size of the underlying
	 * device, so that lseek() works properly.
	 */
	if ((node->d_dev) && (dev_dflags(node->d_dev) & D_DISK)) {
		bzero(&pinfo, sizeof(pinfo));
		error = dev_dioctl(node->d_dev, DIOCGPART, (void *)&pinfo,
				   0, proc0.p_ucred, NULL);
		if ((error == 0) && (pinfo.media_blksize != 0)) {
			vap->va_size = pinfo.media_size;
		} else {
			vap->va_size = 0;
			error = 0;
		}
	}

	lockmgr(&devfs_lock, LK_RELEASE);

	return (error);
}


static int
devfs_setattr(struct vop_setattr_args *ap)
{
	struct devfs_node *node = DEVFS_NODE(ap->a_vp);
	struct vattr *vap;
	int error = 0;

	if (!devfs_node_is_accessible(node))
		return ENOENT;
	node_sync_dev_get(node);

	lockmgr(&devfs_lock, LK_EXCLUSIVE);

	vap = ap->a_vap;

	if (vap->va_uid != (uid_t)VNOVAL) {
		if ((ap->a_cred->cr_uid != node->uid) &&
		    (!groupmember(node->gid, ap->a_cred))) {
			error = priv_check(curthread, PRIV_VFS_CHOWN);
			if (error)
				goto out;
		}
		node->uid = vap->va_uid;
	}

	if (vap->va_gid != (gid_t)VNOVAL) {
		if ((ap->a_cred->cr_uid != node->uid) &&
		    (!groupmember(node->gid, ap->a_cred))) {
			error = priv_check(curthread, PRIV_VFS_CHOWN);
			if (error)
				goto out;
		}
		node->gid = vap->va_gid;
	}

	if (vap->va_mode != (mode_t)VNOVAL) {
		if (ap->a_cred->cr_uid != node->uid) {
			error = priv_check(curthread, PRIV_VFS_ADMIN);
			if (error)
				goto out;
		}
		node->mode = vap->va_mode;
	}

out:
	node_sync_dev_set(node);
	nanotime(&node->ctime);
	lockmgr(&devfs_lock, LK_RELEASE);

	return error;
}


static int
devfs_readlink(struct vop_readlink_args *ap)
{
	struct devfs_node *node = DEVFS_NODE(ap->a_vp);
	int ret;

	if (!devfs_node_is_accessible(node))
		return ENOENT;

	lockmgr(&devfs_lock, LK_EXCLUSIVE);
	ret = uiomove(node->symlink_name, node->symlink_namelen, ap->a_uio);
	lockmgr(&devfs_lock, LK_RELEASE);

	return ret;
}


static int
devfs_print(struct vop_print_args *ap)
{
	return (0);
}


static int
devfs_nsymlink(struct vop_nsymlink_args *ap)
{
	struct devfs_node *dnode = DEVFS_NODE(ap->a_dvp);
	struct devfs_node *node;
	size_t targetlen;

	if (!devfs_node_is_accessible(dnode))
		return ENOENT;

	ap->a_vap->va_type = VLNK;

	if ((dnode->node_type != Proot) && (dnode->node_type != Pdir))
		goto out;

	lockmgr(&devfs_lock, LK_EXCLUSIVE);
	devfs_allocvp(ap->a_dvp->v_mount, ap->a_vpp, Plink,
		      ap->a_nch->ncp->nc_name, dnode, NULL);

	targetlen = strlen(ap->a_target);
	if (*ap->a_vpp) {
		node = DEVFS_NODE(*ap->a_vpp);
		node->flags |= DEVFS_USER_CREATED;
		node->symlink_namelen = targetlen;
		node->symlink_name = kmalloc(targetlen + 1, M_DEVFS, M_WAITOK);
		memcpy(node->symlink_name, ap->a_target, targetlen);
		node->symlink_name[targetlen] = '\0';
		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, *ap->a_vpp);
	}
	lockmgr(&devfs_lock, LK_RELEASE);
out:
	return ((*ap->a_vpp == NULL) ? ENOTDIR : 0);
}


static int
devfs_nremove(struct vop_nremove_args *ap)
{
	struct devfs_node *dnode = DEVFS_NODE(ap->a_dvp);
	struct devfs_node *node;
	struct namecache *ncp;
	int error = ENOENT;

	ncp = ap->a_nch->ncp;

	if (!devfs_node_is_accessible(dnode))
		return ENOENT;

	lockmgr(&devfs_lock, LK_EXCLUSIVE);

	if ((dnode->node_type != Proot) && (dnode->node_type != Pdir))
		goto out;

	TAILQ_FOREACH(node, DEVFS_DENODE_HEAD(dnode), link) {
		if (ncp->nc_nlen != node->d_dir.d_namlen)
			continue;
		if (memcmp(ncp->nc_name, node->d_dir.d_name, ncp->nc_nlen))
			continue;

		/*
		 * only allow removal of user created stuff (e.g. symlinks)
		 */
		if ((node->flags & DEVFS_USER_CREATED) == 0) {
			error = EPERM;
			goto out;
		} else {
			if (node->v_node)
				cache_inval_vp(node->v_node, CINV_DESTROY);
			devfs_unlinkp(node);
			error = 0;
			break;
		}
	}

	cache_setunresolved(ap->a_nch);
	cache_setvp(ap->a_nch, NULL);

out:
	lockmgr(&devfs_lock, LK_RELEASE);
	return error;
}


static int
devfs_spec_open(struct vop_open_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct vnode *orig_vp = NULL;
	struct devfs_node *node = DEVFS_NODE(vp);
	struct devfs_node *newnode;
	cdev_t dev, ndev = NULL;
	int error = 0;

	if (node) {
		if (node->d_dev == NULL)
			return ENXIO;
		if (!devfs_node_is_accessible(node))
			return ENOENT;
	}

	if ((dev = vp->v_rdev) == NULL)
		return ENXIO;

	if (node && ap->a_fp) {
		devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_spec_open: -1.1-\n");
		lockmgr(&devfs_lock, LK_EXCLUSIVE);

		ndev = devfs_clone(dev, node->d_dir.d_name, node->d_dir.d_namlen,
				   ap->a_mode, ap->a_cred);
		if (ndev != NULL) {
			newnode = devfs_create_device_node(
				DEVFS_MNTDATA(vp->v_mount)->root_node,
				ndev, NULL, NULL);
			/* XXX: possibly destroy device if this happens */

			if (newnode != NULL) {
				dev = ndev;
				devfs_link_dev(dev);

				devfs_debug(DEVFS_DEBUG_DEBUG,
					    "parent here is: %s, node is: |%s|\n",
					    ((node->parent->node_type == Proot) ?
					     "ROOT!" : node->parent->d_dir.d_name),
					    newnode->d_dir.d_name);
				devfs_debug(DEVFS_DEBUG_DEBUG,
					    "test: %s\n",
					    ((struct devfs_node *)(TAILQ_LAST(DEVFS_DENODE_HEAD(node->parent), devfs_node_head)))->d_dir.d_name);

				/*
				 * orig_vp is set to the original vp if we cloned.
				 */
				/* node->flags |= DEVFS_CLONED; */
				devfs_allocv(&vp, newnode);
				orig_vp = ap->a_vp;
				ap->a_vp = vp;
			}
		}
		lockmgr(&devfs_lock, LK_RELEASE);
	}

	devfs_debug(DEVFS_DEBUG_DEBUG,
		    "devfs_spec_open() called on %s! \n",
		    dev->si_name);

	/*
	 * Make this field valid before any I/O in ->d_open
	 */
	if (!dev->si_iosize_max)
		dev->si_iosize_max = DFLTPHYS;

	if (dev_dflags(dev) & D_TTY)
		vp->v_flag |= VISTTY;

	vn_unlock(vp);
	error = dev_dopen(dev, ap->a_mode, S_IFCHR, ap->a_cred);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);

	/*
	 * Clean up any cloned vp if we error out.
	 */
	if (error) {
		if (orig_vp) {
			vput(vp);
			ap->a_vp = orig_vp;
			/* orig_vp = NULL; */
		}
		return error;
	}

	/*
	 * Opening a disk device for writing is allowed only when the
	 * securelevel permits it and the device is not mounted R/W.
	 */
	if ((dev_dflags(dev) & D_DISK) && (ap->a_mode & FWRITE) &&
	    (ap->a_cred != FSCRED)) {

		/* Very secure mode. No open for writing allowed */
		if (securelevel >= 2)
			return EPERM;

		/*
		 * If it is mounted R/W, do not allow to open for writing.
		 * In the case it's mounted read-only but securelevel
		 * is >= 1, then do not allow opening for writing either.
		 */
		if (vfs_mountedon(vp)) {
			if (!(dev->si_mountpoint->mnt_flag & MNT_RDONLY))
				return EBUSY;
			else if (securelevel >= 1)
				return EPERM;
		}
	}

	if (dev_dflags(dev) & D_TTY) {
		if (dev->si_tty) {
			struct tty *tp;
			tp = dev->si_tty;
			if (!tp->t_stop) {
				devfs_debug(DEVFS_DEBUG_DEBUG,
					    "devfs: no t_stop\n");
				tp->t_stop = nottystop;
			}
		}
	}


	if (vn_isdisk(vp, NULL)) {
		if (!dev->si_bsize_phys)
			dev->si_bsize_phys = DEV_BSIZE;
		vinitvmio(vp, IDX_TO_OFF(INT_MAX));
	}

	vop_stdopen(ap);
#if 0
	if (node)
		nanotime(&node->atime);
#endif

	if (orig_vp)
		vn_unlock(vp);

	/* Ugly pty magic, to make pty devices appear once they are opened */
	if (node && (node->flags & DEVFS_PTY) == DEVFS_PTY)
		node->flags &= ~DEVFS_INVISIBLE;

	if (ap->a_fp) {
		ap->a_fp->f_type = DTYPE_VNODE;
		ap->a_fp->f_flag = ap->a_mode & FMASK;
		ap->a_fp->f_ops = &devfs_dev_fileops;
		ap->a_fp->f_data = vp;
	}

	return 0;
}
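
/*
 * Summary of the clone dance above: if the underlying driver clones on
 * open (devfs_clone() returns a new cdev), a fresh device node and a
 * fresh vnode are created and swapped into ap->a_vp, while orig_vp
 * remembers the vnode the caller originally opened so it can be
 * restored (and the clone vput()'d) if d_open fails.
 */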


static int
devfs_spec_close(struct vop_close_args *ap)
{
	struct devfs_node *node = DEVFS_NODE(ap->a_vp);
	struct proc *p = curproc;
	struct vnode *vp = ap->a_vp;
	cdev_t dev = vp->v_rdev;
	int error = 0;
	int needrelock;

	devfs_debug(DEVFS_DEBUG_DEBUG,
		    "devfs_spec_close() called on %s! \n",
		    dev->si_name);

	/*
	 * A couple of hacks for devices and tty devices.  The
	 * vnode ref count cannot be used to figure out the
	 * last close, but we can use v_opencount now that
	 * revoke works properly.
	 *
	 * Detect the last close on a controlling terminal and clear
	 * the session (half-close).
	 */
	if (dev)
		reference_dev(dev);

	if (p && vp->v_opencount <= 1 && vp == p->p_session->s_ttyvp) {
		p->p_session->s_ttyvp = NULL;
		vrele(vp);
	}

	/*
	 * Vnodes can be opened and closed multiple times.  Do not really
	 * close the device unless (1) it is being closed forcibly,
	 * (2) the device wants to track closes, or (3) this is the last
	 * vnode doing its last close on the device.
	 *
	 * XXX the VXLOCK (force close) case can leave vnodes referencing
	 * a closed device.  This might not occur now that our revoke is
	 * fixed.
	 */
	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_spec_close() -1- \n");
	if (dev && ((vp->v_flag & VRECLAIMED) ||
	    (dev_dflags(dev) & D_TRACKCLOSE) ||
	    (vp->v_opencount == 1))) {
		/*
		 * Unlock around dev_dclose()
		 */
		needrelock = 0;
		if (vn_islocked(vp)) {
			needrelock = 1;
			vn_unlock(vp);
		}
		error = dev_dclose(dev, ap->a_fflag, S_IFCHR);

		/*
		 * Ugly pty magic, to make pty devices disappear again once
		 * they are closed
		 */
		if (node && (node->flags & DEVFS_PTY) == DEVFS_PTY)
			node->flags |= DEVFS_INVISIBLE;

		if (needrelock)
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	} else {
		error = 0;
	}
	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_spec_close() -2- \n");

	/*
	 * Track the actual opens and closes on the vnode.  The last close
	 * disassociates the rdev.  If the rdev is already disassociated or
	 * the opencount is already 0, the vnode might have been revoked
	 * and no further opencount tracking occurs.
	 */
	if (dev)
		release_dev(dev);
	if (vp->v_opencount > 0)
		vop_stdclose(ap);
	return (error);
}


static int
devfs_specf_close(struct file *fp)
{
	struct vnode *vp = (struct vnode *)fp->f_data;
	int error;

	get_mplock();
	fp->f_ops = &badfileops;
	error = vn_close(vp, fp->f_flag);
	rel_mplock();

	return (error);
}


/*
 * Device-optimized file table vnode read routine.
 *
 * This bypasses the VOP table and talks directly to the device.  Most
 * filesystems just route to specfs and can make this optimization.
 *
 * MPALMOSTSAFE - acquires the mplock only for devices that do not
 * advertise D_MPSAFE_READ.
 */
static int
devfs_specf_read(struct file *fp, struct uio *uio,
		 struct ucred *cred, int flags)
{
	struct devfs_node *node;
	struct vnode *vp;
	int ioflag;
	int error;
	cdev_t dev;

	KASSERT(uio->uio_td == curthread,
		("uio_td %p is not td %p", uio->uio_td, curthread));

	if (uio->uio_resid == 0)
		return 0;

	vp = (struct vnode *)fp->f_data;
	if (vp == NULL || vp->v_type == VBAD)
		return EBADF;

	node = DEVFS_NODE(vp);

	if ((dev = vp->v_rdev) == NULL)
		return EBADF;

	/* only acquire mplock for devices that require it */
	if (!(dev_dflags(dev) & D_MPSAFE_READ)) {
		atomic_add_int(&mplock_reads, 1);
		get_mplock();
	} else {
		atomic_add_int(&mpsafe_reads, 1);
	}

	reference_dev(dev);

	if ((flags & O_FOFFSET) == 0)
		uio->uio_offset = fp->f_offset;

	ioflag = 0;
	if (flags & O_FBLOCKING) {
		/* ioflag &= ~IO_NDELAY; */
	} else if (flags & O_FNONBLOCKING) {
		ioflag |= IO_NDELAY;
	} else if (fp->f_flag & FNONBLOCK) {
		ioflag |= IO_NDELAY;
	}
	if (flags & O_FBUFFERED) {
		/* ioflag &= ~IO_DIRECT; */
	} else if (flags & O_FUNBUFFERED) {
		ioflag |= IO_DIRECT;
	} else if (fp->f_flag & O_DIRECT) {
		ioflag |= IO_DIRECT;
	}
	ioflag |= sequential_heuristic(uio, fp);

	error = dev_dread(dev, uio, ioflag);

	release_dev(dev);
	if (node)
		nanotime(&node->atime);
	if ((flags & O_FOFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;

	if (!(dev_dflags(dev) & D_MPSAFE_READ))
		rel_mplock();

	return (error);
}
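
/*
 * A driver opts into the MP-safe read/write paths above by setting the
 * D_MPSAFE_READ / D_MPSAFE_WRITE flags in its dev_ops.  A minimal,
 * illustrative sketch (the "foo" driver and its handlers are
 * hypothetical, not part of this file):
 *
 *	static struct dev_ops foo_ops = {
 *		{ "foo", 0, D_MPSAFE_READ | D_MPSAFE_WRITE },
 *		.d_open  = foo_open,
 *		.d_close = foo_close,
 *		.d_read  = foo_read,
 *		.d_write = foo_write,
 *	};
 *
 * Such a driver must do its own locking, since dev_dread()/dev_dwrite()
 * will then be entered without the MP lock held.
 */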


static int
devfs_specf_write(struct file *fp, struct uio *uio,
		  struct ucred *cred, int flags)
{
	struct devfs_node *node;
	struct vnode *vp;
	int ioflag;
	int error;
	cdev_t dev;

	KASSERT(uio->uio_td == curthread,
		("uio_td %p is not td %p", uio->uio_td, curthread));

	vp = (struct vnode *)fp->f_data;
	if (vp == NULL || vp->v_type == VBAD)
		return EBADF;

	node = DEVFS_NODE(vp);

	if (vp->v_type == VREG)
		bwillwrite(uio->uio_resid);

	if ((dev = vp->v_rdev) == NULL)
		return EBADF;

	/* only acquire mplock for devices that require it */
	if (!(dev_dflags(dev) & D_MPSAFE_WRITE)) {
		atomic_add_int(&mplock_writes, 1);
		get_mplock();
	} else {
		atomic_add_int(&mpsafe_writes, 1);
	}

	reference_dev(dev);

	if ((flags & O_FOFFSET) == 0)
		uio->uio_offset = fp->f_offset;

	ioflag = IO_UNIT;
	if (vp->v_type == VREG &&
	    ((fp->f_flag & O_APPEND) || (flags & O_FAPPEND))) {
		ioflag |= IO_APPEND;
	}

	if (flags & O_FBLOCKING) {
		/* ioflag &= ~IO_NDELAY; */
	} else if (flags & O_FNONBLOCKING) {
		ioflag |= IO_NDELAY;
	} else if (fp->f_flag & FNONBLOCK) {
		ioflag |= IO_NDELAY;
	}
	if (flags & O_FBUFFERED) {
		/* ioflag &= ~IO_DIRECT; */
	} else if (flags & O_FUNBUFFERED) {
		ioflag |= IO_DIRECT;
	} else if (fp->f_flag & O_DIRECT) {
		ioflag |= IO_DIRECT;
	}
	if (flags & O_FASYNCWRITE) {
		/* ioflag &= ~IO_SYNC; */
	} else if (flags & O_FSYNCWRITE) {
		ioflag |= IO_SYNC;
	} else if (fp->f_flag & O_FSYNC) {
		ioflag |= IO_SYNC;
	}

	if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS))
		ioflag |= IO_SYNC;
	ioflag |= sequential_heuristic(uio, fp);

	error = dev_dwrite(dev, uio, ioflag);

	release_dev(dev);
	if (node) {
		nanotime(&node->atime);
		nanotime(&node->mtime);
	}

	if ((flags & O_FOFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;

	if (!(dev_dflags(dev) & D_MPSAFE_WRITE))
		rel_mplock();
	return (error);
}


static int
devfs_specf_stat(struct file *fp, struct stat *sb, struct ucred *cred)
{
	struct vnode *vp;
	struct vattr vattr;
	struct vattr *vap;
	u_short mode;
	cdev_t dev;
	int error;

	vp = (struct vnode *)fp->f_data;
	if (vp == NULL || vp->v_type == VBAD)
		return EBADF;

	error = vn_stat(vp, sb, cred);
	if (error)
		return (error);

	vap = &vattr;
	error = VOP_GETATTR(vp, vap);
	if (error)
		return (error);

	/*
	 * Zero the spare stat fields
	 */
	sb->st_lspare = 0;
	sb->st_qspare1 = 0;
	sb->st_qspare2 = 0;

	/*
	 * Copy from vattr table ... or not in case it's a cloned device
	 */
	if (vap->va_fsid != VNOVAL)
		sb->st_dev = vap->va_fsid;
	else
		sb->st_dev = vp->v_mount->mnt_stat.f_fsid.val[0];

	sb->st_ino = vap->va_fileid;

	mode = vap->va_mode;
	mode |= S_IFCHR;
	sb->st_mode = mode;

	if (vap->va_nlink > (nlink_t)-1)
		sb->st_nlink = (nlink_t)-1;
	else
		sb->st_nlink = vap->va_nlink;

	sb->st_uid = vap->va_uid;
	sb->st_gid = vap->va_gid;
	sb->st_rdev = dev2udev(DEVFS_NODE(vp)->d_dev);
	sb->st_size = vap->va_bytes;
	sb->st_atimespec = vap->va_atime;
	sb->st_mtimespec = vap->va_mtime;
	sb->st_ctimespec = vap->va_ctime;

	/*
	 * A VCHR and VBLK device may track the last access and last
	 * modified time independently of the filesystem.  This is
	 * particularly true because device read and write calls may
	 * bypass the filesystem.
	 */
	if (vp->v_type == VCHR || vp->v_type == VBLK) {
		dev = vp->v_rdev;
		if (dev != NULL) {
			if (dev->si_lastread) {
				sb->st_atimespec.tv_sec = dev->si_lastread;
				sb->st_atimespec.tv_nsec = 0;
			}
			if (dev->si_lastwrite) {
				sb->st_mtimespec.tv_sec = dev->si_lastwrite;
				sb->st_mtimespec.tv_nsec = 0;
			}
		}
	}

	/*
	 * According to www.opengroup.org, the meaning of st_blksize is
	 * "a filesystem-specific preferred I/O block size for this
	 * object.  In some filesystem types, this may vary from file
	 * to file".
	 * Default to PAGE_SIZE after much discussion.
	 */

	sb->st_blksize = PAGE_SIZE;

	sb->st_flags = vap->va_flags;

	error = priv_check_cred(cred, PRIV_VFS_GENERATION, 0);
	if (error)
		sb->st_gen = 0;
	else
		sb->st_gen = (u_int32_t)vap->va_gen;

	sb->st_blocks = vap->va_bytes / S_BLKSIZE;

	return (0);
}


static int
devfs_specf_kqfilter(struct file *fp, struct knote *kn)
{
	struct vnode *vp;
	int error;
	cdev_t dev;

	get_mplock();

	vp = (struct vnode *)fp->f_data;
	if (vp == NULL || vp->v_type == VBAD) {
		error = EBADF;
		goto done;
	}
	if ((dev = vp->v_rdev) == NULL) {
		error = EBADF;
		goto done;
	}
	reference_dev(dev);

	error = dev_dkqfilter(dev, kn);

	release_dev(dev);

done:
	rel_mplock();
	return (error);
}


static int
devfs_specf_poll(struct file *fp, int events, struct ucred *cred)
{
	struct devfs_node *node;
	struct vnode *vp;
	int error;
	cdev_t dev;

	get_mplock();

	vp = (struct vnode *)fp->f_data;
	if (vp == NULL || vp->v_type == VBAD) {
		error = EBADF;
		goto done;
	}
	node = DEVFS_NODE(vp);

	if ((dev = vp->v_rdev) == NULL) {
		error = EBADF;
		goto done;
	}
	reference_dev(dev);
	error = dev_dpoll(dev, events);

	release_dev(dev);

#if 0
	if (node)
		nanotime(&node->atime);
#endif
done:
	rel_mplock();
	return (error);
}


/*
 * MPALMOSTSAFE - acquires the mplock only for devices that do not
 * advertise D_MPSAFE_IOCTL.
 */
static int
devfs_specf_ioctl(struct file *fp, u_long com, caddr_t data,
		  struct ucred *ucred, struct sysmsg *msg)
{
	struct devfs_node *node;
	struct vnode *vp;
	struct vnode *ovp;
	cdev_t dev;
	int error;
	struct fiodname_args *name_args;
	size_t namlen;
	const char *name;

	vp = ((struct vnode *)fp->f_data);

	if ((dev = vp->v_rdev) == NULL)
		return EBADF;		/* device was revoked */

	reference_dev(dev);

	node = DEVFS_NODE(vp);

	devfs_debug(DEVFS_DEBUG_DEBUG,
		    "devfs_specf_ioctl() called! for dev %s\n",
		    dev->si_name);

	if (com == FIODTYPE) {
		*(int *)data = dev_dflags(dev) & D_TYPEMASK;
		error = 0;
		goto out;
	} else if (com == FIODNAME) {
		name_args = (struct fiodname_args *)data;
		name = dev->si_name;
		namlen = strlen(name) + 1;

		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "ioctl, got: FIODNAME for %s\n", name);

		if (namlen <= name_args->len)
			error = copyout(dev->si_name, name_args->name, namlen);
		else
			error = EINVAL;

		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "ioctl stuff: error: %d\n", error);
		goto out;
	}

	/* only acquire mplock for devices that require it */
	if (!(dev_dflags(dev) & D_MPSAFE_IOCTL))
		get_mplock();

	error = dev_dioctl(dev, com, data, fp->f_flag, ucred, msg);

#if 0
	if (node) {
		nanotime(&node->atime);
		nanotime(&node->mtime);
	}
#endif

	if (!(dev_dflags(dev) & D_MPSAFE_IOCTL))
		rel_mplock();

	if (com == TIOCSCTTY) {
		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "devfs_specf_ioctl: got TIOCSCTTY on %s\n",
			    dev->si_name);
	}
	if (error == 0 && com == TIOCSCTTY) {
		struct proc *p = curthread->td_proc;
		struct session *sess;

		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "devfs_specf_ioctl: dealing with TIOCSCTTY on %s\n",
			    dev->si_name);
		if (p == NULL) {
			error = ENOTTY;
			goto out;
		}
		sess = p->p_session;

		/*
		 * Do nothing if reassigning same control tty
		 */
		if (sess->s_ttyvp == vp) {
			error = 0;
			goto out;
		}

		/*
		 * Get rid of reference to old control tty
		 */
		ovp = sess->s_ttyvp;
		vref(vp);
		sess->s_ttyvp = vp;
		if (ovp)
			vrele(ovp);
	}

out:
	release_dev(dev);
	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_specf_ioctl() finished! \n");
	return (error);
}
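
/*
 * Userland view of the FIODNAME handling above, as an illustrative
 * sketch (error handling elided; the buffer size is arbitrary):
 *
 *	char buf[256];
 *	struct fiodname_args fa;
 *
 *	fa.name = buf;
 *	fa.len = sizeof(buf);
 *	if (ioctl(fd, FIODNAME, &fa) == 0)
 *		printf("talking to /dev/%s\n", buf);
 *
 * FIODTYPE similarly returns dev_dflags(dev) & D_TYPEMASK in an int.
 */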


static int
devfs_spec_fsync(struct vop_fsync_args *ap)
{
	struct vnode *vp = ap->a_vp;
	int error;

	if (!vn_isdisk(vp, NULL))
		return (0);

	/*
	 * Flush all dirty buffers associated with a block device.
	 */
	error = vfsync(vp, ap->a_waitfor, 10000, NULL, NULL);
	return (error);
}

static int
devfs_spec_read(struct vop_read_args *ap)
{
	struct devfs_node *node;
	struct vnode *vp;
	struct uio *uio;
	cdev_t dev;
	int error;

	vp = ap->a_vp;
	dev = vp->v_rdev;
	uio = ap->a_uio;
	node = DEVFS_NODE(vp);

	if (dev == NULL)		/* device was revoked */
		return (EBADF);
	if (uio->uio_resid == 0)
		return (0);

	vn_unlock(vp);
	error = dev_dread(dev, uio, ap->a_ioflag);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);

	if (node)
		nanotime(&node->atime);

	return (error);
}

/*
 * Vnode op for write
 *
 * spec_write(struct vnode *a_vp, struct uio *a_uio, int a_ioflag,
 *	      struct ucred *a_cred)
 */
static int
devfs_spec_write(struct vop_write_args *ap)
{
	struct devfs_node *node;
	struct vnode *vp;
	struct uio *uio;
	cdev_t dev;
	int error;

	vp = ap->a_vp;
	dev = vp->v_rdev;
	uio = ap->a_uio;
	node = DEVFS_NODE(vp);

	KKASSERT(uio->uio_segflg != UIO_NOCOPY);

	if (dev == NULL)		/* device was revoked */
		return (EBADF);

	vn_unlock(vp);
	error = dev_dwrite(dev, uio, ap->a_ioflag);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);

	if (node) {
		nanotime(&node->atime);
		nanotime(&node->mtime);
	}

	return (error);
}

/*
 * Device ioctl operation.
 *
 * spec_ioctl(struct vnode *a_vp, int a_command, caddr_t a_data,
 *	      int a_fflag, struct ucred *a_cred, struct sysmsg *msg)
 */
static int
devfs_spec_ioctl(struct vop_ioctl_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct devfs_node *node;
	cdev_t dev;

	if ((dev = vp->v_rdev) == NULL)
		return (EBADF);		/* device was revoked */
	node = DEVFS_NODE(vp);

#if 0
	if (node) {
		nanotime(&node->atime);
		nanotime(&node->mtime);
	}
#endif

	return (dev_dioctl(dev, ap->a_command, ap->a_data, ap->a_fflag,
			   ap->a_cred, ap->a_sysmsg));
}

/*
 * spec_poll(struct vnode *a_vp, int a_events, struct ucred *a_cred)
 */
/* ARGSUSED */
static int
devfs_spec_poll(struct vop_poll_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct devfs_node *node;
	cdev_t dev;

	if ((dev = vp->v_rdev) == NULL)
		return (EBADF);		/* device was revoked */
	node = DEVFS_NODE(vp);

#if 0
	if (node)
		nanotime(&node->atime);
#endif

	return (dev_dpoll(dev, ap->a_events));
}

/*
 * spec_kqfilter(struct vnode *a_vp, struct knote *a_kn)
 */
/* ARGSUSED */
static int
devfs_spec_kqfilter(struct vop_kqfilter_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct devfs_node *node;
	cdev_t dev;

	if ((dev = vp->v_rdev) == NULL)
		return (EBADF);		/* device was revoked */
	node = DEVFS_NODE(vp);

#if 0
	if (node)
		nanotime(&node->atime);
#endif

	return (dev_dkqfilter(dev, ap->a_kn));
}

/*
 * Convert a vnode strategy call into a device strategy call.  Vnode
 * strategy calls are not limited to device DMA limits, so we may have
 * to break the request up into device-sized chunks.
 *
 * spec_strategy(struct vnode *a_vp, struct bio *a_bio)
 */
static int
devfs_spec_strategy(struct vop_strategy_args *ap)
{
	struct bio *bio = ap->a_bio;
	struct buf *bp = bio->bio_buf;
	struct buf *nbp;
	struct vnode *vp;
	struct mount *mp;
	int chunksize;
	int maxiosize;

	if (bp->b_cmd != BUF_CMD_READ && LIST_FIRST(&bp->b_dep) != NULL)
		buf_start(bp);

	/*
	 * Collect statistics on synchronous and asynchronous read
	 * and write counts for disks that have associated filesystems.
	 */
	vp = ap->a_vp;
	KKASSERT(vp->v_rdev != NULL);	/* XXX */
	if (vn_isdisk(vp, NULL) && (mp = vp->v_rdev->si_mountpoint) != NULL) {
		if (bp->b_cmd == BUF_CMD_READ) {
			if (bp->b_flags & BIO_SYNC)
				mp->mnt_stat.f_syncreads++;
			else
				mp->mnt_stat.f_asyncreads++;
		} else {
			if (bp->b_flags & BIO_SYNC)
				mp->mnt_stat.f_syncwrites++;
			else
				mp->mnt_stat.f_asyncwrites++;
		}
	}

	/*
	 * Device iosize limitations only apply to read and write.  Shortcut
	 * the I/O if it fits.
	 */
	if ((maxiosize = vp->v_rdev->si_iosize_max) == 0) {
		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "%s: si_iosize_max not set!\n",
			    dev_dname(vp->v_rdev));
		maxiosize = MAXPHYS;
	}
#if SPEC_CHAIN_DEBUG & 2
	maxiosize = 4096;
#endif
	if (bp->b_bcount <= maxiosize ||
	    (bp->b_cmd != BUF_CMD_READ && bp->b_cmd != BUF_CMD_WRITE)) {
		dev_dstrategy_chain(vp->v_rdev, bio);
		return (0);
	}

	/*
	 * Clone the buffer and set up an I/O chain to chunk up the I/O.
	 */
	nbp = kmalloc(sizeof(*bp), M_DEVBUF, M_INTWAIT|M_ZERO);
	initbufbio(nbp);
	buf_dep_init(nbp);
	BUF_LOCKINIT(nbp);
	BUF_LOCK(nbp, LK_EXCLUSIVE);
	BUF_KERNPROC(nbp);
	nbp->b_vp = vp;
	nbp->b_flags = B_PAGING | (bp->b_flags & B_BNOCLIP);
	nbp->b_data = bp->b_data;
	nbp->b_bio1.bio_done = devfs_spec_strategy_done;
	nbp->b_bio1.bio_offset = bio->bio_offset;
	nbp->b_bio1.bio_caller_info1.ptr = bio;

	/*
	 * Start the first transfer
	 */
	if (vn_isdisk(vp, NULL))
		chunksize = vp->v_rdev->si_bsize_phys;
	else
		chunksize = DEV_BSIZE;
	chunksize = maxiosize / chunksize * chunksize;
#if SPEC_CHAIN_DEBUG & 1
	devfs_debug(DEVFS_DEBUG_DEBUG,
		    "spec_strategy chained I/O chunksize=%d\n",
		    chunksize);
#endif
	nbp->b_cmd = bp->b_cmd;
	nbp->b_bcount = chunksize;
	nbp->b_bufsize = chunksize;	/* used to detect a short I/O */
	nbp->b_bio1.bio_caller_info2.index = chunksize;

#if SPEC_CHAIN_DEBUG & 1
	devfs_debug(DEVFS_DEBUG_DEBUG,
		    "spec_strategy: chain %p offset %d/%d bcount %d\n",
		    bp, 0, bp->b_bcount, nbp->b_bcount);
#endif

	dev_dstrategy(vp->v_rdev, &nbp->b_bio1);

	if (DEVFS_NODE(vp)) {
		nanotime(&DEVFS_NODE(vp)->atime);
		nanotime(&DEVFS_NODE(vp)->mtime);
	}

	return (0);
}
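
/*
 * Worked example of the chunking arithmetic above: for a disk with
 * si_bsize_phys = 512 and si_iosize_max = 131072, the chunk size is
 * rounded down to a multiple of the physical block size:
 *
 *	chunksize = 131072 / 512 * 512 = 131072
 *
 * so a 1 MB strategy call is issued as 8 chained 128 KB transfers.
 * With SPEC_CHAIN_DEBUG & 2, maxiosize is forced to 4096 to exercise
 * the chaining path with small chunks.
 */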

/*
 * Chunked up transfer completion routine - chain transfers until done
 */
static
void
devfs_spec_strategy_done(struct bio *nbio)
{
	struct buf *nbp = nbio->bio_buf;
	struct bio *bio = nbio->bio_caller_info1.ptr;	/* original bio */
	struct buf *bp = bio->bio_buf;			/* original bp */
	int chunksize = nbio->bio_caller_info2.index;	/* chunking */
	int boffset = nbp->b_data - bp->b_data;

	if (nbp->b_flags & B_ERROR) {
		/*
		 * An error terminates the chain, propagate the error back
		 * to the original bp
		 */
		bp->b_flags |= B_ERROR;
		bp->b_error = nbp->b_error;
		bp->b_resid = bp->b_bcount - boffset +
			      (nbp->b_bcount - nbp->b_resid);
#if SPEC_CHAIN_DEBUG & 1
		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "spec_strategy: chain %p error %d bcount %d/%d\n",
			    bp, bp->b_error, bp->b_bcount,
			    bp->b_bcount - bp->b_resid);
#endif
		kfree(nbp, M_DEVBUF);
		biodone(bio);
	} else if (nbp->b_resid) {
		/*
		 * A short read or write terminates the chain
		 */
		bp->b_error = nbp->b_error;
		bp->b_resid = bp->b_bcount - boffset +
			      (nbp->b_bcount - nbp->b_resid);
#if SPEC_CHAIN_DEBUG & 1
		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "spec_strategy: chain %p short read(1) "
			    "bcount %d/%d\n",
			    bp, bp->b_bcount - bp->b_resid, bp->b_bcount);
#endif
		kfree(nbp, M_DEVBUF);
		biodone(bio);
	} else if (nbp->b_bcount != nbp->b_bufsize) {
		/*
		 * A short read or write can also occur by truncating b_bcount
		 */
#if SPEC_CHAIN_DEBUG & 1
		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "spec_strategy: chain %p short read(2) "
			    "bcount %d/%d\n",
			    bp, nbp->b_bcount + boffset, bp->b_bcount);
#endif
		bp->b_error = 0;
		bp->b_bcount = nbp->b_bcount + boffset;
		bp->b_resid = nbp->b_resid;
		kfree(nbp, M_DEVBUF);
		biodone(bio);
	} else if (nbp->b_bcount + boffset == bp->b_bcount) {
		/*
		 * No more data terminates the chain
		 */
#if SPEC_CHAIN_DEBUG & 1
		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "spec_strategy: chain %p finished bcount %d\n",
			    bp, bp->b_bcount);
#endif
		bp->b_error = 0;
		bp->b_resid = 0;
		kfree(nbp, M_DEVBUF);
		biodone(bio);
	} else {
		/*
		 * Continue the chain
		 */
		boffset += nbp->b_bcount;
		nbp->b_data = bp->b_data + boffset;
		nbp->b_bcount = bp->b_bcount - boffset;
		if (nbp->b_bcount > chunksize)
			nbp->b_bcount = chunksize;
		nbp->b_bio1.bio_done = devfs_spec_strategy_done;
		nbp->b_bio1.bio_offset = bio->bio_offset + boffset;

#if SPEC_CHAIN_DEBUG & 1
		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "spec_strategy: chain %p offset %d/%d bcount %d\n",
			    bp, boffset, bp->b_bcount, nbp->b_bcount);
#endif

		dev_dstrategy(nbp->b_vp->v_rdev, &nbp->b_bio1);
	}
}

/*
 * spec_freeblks(struct vnode *a_vp, daddr_t a_addr, daddr_t a_length)
 */
static int
devfs_spec_freeblks(struct vop_freeblks_args *ap)
{
	struct buf *bp;

	/*
	 * XXX: This assumes that strategy does the deed right away.
	 * XXX: this may not be TRTTD.
	 */
	KKASSERT(ap->a_vp->v_rdev != NULL);
	if ((dev_dflags(ap->a_vp->v_rdev) & D_CANFREE) == 0)
		return (0);
	bp = geteblk(ap->a_length);
	bp->b_cmd = BUF_CMD_FREEBLKS;
	bp->b_bio1.bio_offset = ap->a_offset;
	bp->b_bcount = ap->a_length;
	dev_dstrategy(ap->a_vp->v_rdev, &bp->b_bio1);
	return (0);
}

/*
 * Implement degenerate case where the block requested is the block
 * returned, and assume that the entire device is contiguous in regards
 * to the contiguous block range (runp and runb).
 *
 * spec_bmap(struct vnode *a_vp, off_t a_loffset,
 *	     off_t *a_doffsetp, int *a_runp, int *a_runb)
 */
static int
devfs_spec_bmap(struct vop_bmap_args *ap)
{
	if (ap->a_doffsetp != NULL)
		*ap->a_doffsetp = ap->a_loffset;
	if (ap->a_runp != NULL)
		*ap->a_runp = MAXBSIZE;
	if (ap->a_runb != NULL) {
		if (ap->a_loffset < MAXBSIZE)
			*ap->a_runb = (int)ap->a_loffset;
		else
			*ap->a_runb = MAXBSIZE;
	}
	return (0);
}


/*
 * Special device advisory byte-level locks.
 *
 * spec_advlock(struct vnode *a_vp, caddr_t a_id, int a_op,
 *		struct flock *a_fl, int a_flags)
 */
/* ARGSUSED */
static int
devfs_spec_advlock(struct vop_advlock_args *ap)
{
	return ((ap->a_flags & F_POSIX) ? EINVAL : EOPNOTSUPP);
}

static void
devfs_spec_getpages_iodone(struct bio *bio)
{
	bio->bio_buf->b_cmd = BUF_CMD_DONE;
	wakeup(bio->bio_buf);
}

/*
 * spec_getpages() - get pages associated with device vnode.
 *
 * Note that spec_read and spec_write do not use the buffer cache, so we
 * must fully implement getpages here.
 */
static int
devfs_spec_getpages(struct vop_getpages_args *ap)
{
	vm_offset_t kva;
	int error;
	int i, pcount, size;
	struct buf *bp;
	vm_page_t m;
	vm_ooffset_t offset;
	int toff, nextoff, nread;
	struct vnode *vp = ap->a_vp;
	int blksiz;
	int gotreqpage;

	error = 0;
	pcount = round_page(ap->a_count) / PAGE_SIZE;

	/*
	 * Calculate the offset of the transfer and do sanity check.
	 */
	offset = IDX_TO_OFF(ap->a_m[0]->pindex) + ap->a_offset;

	/*
	 * Round up physical size for real devices.  We cannot round using
	 * v_mount's block size data because v_mount has nothing to do with
	 * the device.  i.e. it's usually '/dev'.  We need the physical block
	 * size for the device itself.
	 *
	 * We can't use v_rdev->si_mountpoint because it only exists when the
	 * block device is mounted.  However, we can use v_rdev.
	 */
	if (vn_isdisk(vp, NULL))
		blksiz = vp->v_rdev->si_bsize_phys;
	else
		blksiz = DEV_BSIZE;

	size = (ap->a_count + blksiz - 1) & ~(blksiz - 1);

	bp = getpbuf(NULL);
	kva = (vm_offset_t)bp->b_data;

	/*
	 * Map the pages to be read into the kva.
	 */
	pmap_qenter(kva, ap->a_m, pcount);

	/* Build a minimal buffer header. */
	bp->b_cmd = BUF_CMD_READ;
	bp->b_bcount = size;
	bp->b_resid = 0;
	bp->b_runningbufspace = size;
	if (size) {
		runningbufspace += bp->b_runningbufspace;
		++runningbufcount;
	}

	bp->b_bio1.bio_offset = offset;
	bp->b_bio1.bio_done = devfs_spec_getpages_iodone;

	mycpu->gd_cnt.v_vnodein++;
	mycpu->gd_cnt.v_vnodepgsin += pcount;

	/* Do the input. */
	vn_strategy(ap->a_vp, &bp->b_bio1);

	crit_enter();

	/* We definitely need to be at splbio here. */
	while (bp->b_cmd != BUF_CMD_DONE)
		tsleep(bp, 0, "spread", 0);

	crit_exit();

	if (bp->b_flags & B_ERROR) {
		if (bp->b_error)
			error = bp->b_error;
		else
			error = EIO;
	}

	/*
	 * If EOF is encountered we must zero-extend the result in order
	 * to ensure that the page does not contain garbage.  When no
	 * error occurs, an early EOF is indicated if b_bcount got truncated.
	 * b_resid is relative to b_bcount and should be 0, but some devices
	 * might indicate an EOF with b_resid instead of truncating b_bcount.
	 */
	nread = bp->b_bcount - bp->b_resid;
	if (nread < ap->a_count)
		bzero((caddr_t)kva + nread, ap->a_count - nread);
	pmap_qremove(kva, pcount);

	gotreqpage = 0;
	for (i = 0, toff = 0; i < pcount; i++, toff = nextoff) {
		nextoff = toff + PAGE_SIZE;
		m = ap->a_m[i];

		m->flags &= ~PG_ZERO;

		/*
		 * NOTE: vm_page_undirty/clear_dirty etc do not clear the
		 *	 pmap modified bit.  pmap modified bit should have
		 *	 already been cleared.
		 */
		if (nextoff <= nread) {
			m->valid = VM_PAGE_BITS_ALL;
			vm_page_undirty(m);
		} else if (toff < nread) {
			/*
			 * Since this is a VM request, we have to supply the
			 * unaligned offset to allow vm_page_set_valid()
			 * to zero sub-DEV_BSIZE'd portions of the page.
			 */
			vm_page_set_valid(m, 0, nread - toff);
			vm_page_clear_dirty_end_nonincl(m, 0, nread - toff);
		} else {
			m->valid = 0;
			vm_page_undirty(m);
		}

		if (i != ap->a_reqpage) {
			/*
			 * Just in case someone was asking for this page we
			 * now tell them that it is ok to use.
			 */
			if (!error || (m->valid == VM_PAGE_BITS_ALL)) {
				if (m->valid) {
					if (m->flags & PG_WANTED) {
						vm_page_activate(m);
					} else {
						vm_page_deactivate(m);
					}
					vm_page_wakeup(m);
				} else {
					vm_page_free(m);
				}
			} else {
				vm_page_free(m);
			}
		} else if (m->valid) {
			gotreqpage = 1;
			/*
			 * Since this is a VM request, we need to make the
			 * entire page presentable by zeroing invalid sections.
			 */
			if (m->valid != VM_PAGE_BITS_ALL)
				vm_page_zero_invalid(m, FALSE);
		}
	}
	if (!gotreqpage) {
		m = ap->a_m[ap->a_reqpage];
		devfs_debug(DEVFS_DEBUG_WARNING,
	    "spec_getpages:(%s) I/O read failure: (error=%d) bp %p vp %p\n",
			    devtoname(vp->v_rdev), error, bp, bp->b_vp);
		devfs_debug(DEVFS_DEBUG_WARNING,
	    "  size: %d, resid: %d, a_count: %d, valid: 0x%x\n",
			    size, bp->b_resid, ap->a_count, m->valid);
		devfs_debug(DEVFS_DEBUG_WARNING,
	    "  nread: %d, reqpage: %d, pindex: %lu, pcount: %d\n",
			    nread, ap->a_reqpage, (u_long)m->pindex, pcount);
		/*
		 * Free the buffer header back to the swap buffer pool.
		 */
		relpbuf(bp, NULL);
		return VM_PAGER_ERROR;
	}
	/*
	 * Free the buffer header back to the swap buffer pool.
	 */
	relpbuf(bp, NULL);
	if (DEVFS_NODE(ap->a_vp))
		nanotime(&DEVFS_NODE(ap->a_vp)->mtime);
	return VM_PAGER_OK;
}

static __inline
int
sequential_heuristic(struct uio *uio, struct file *fp)
{
	/*
	 * Sequential heuristic - detect sequential operation
	 */
	if ((uio->uio_offset == 0 && fp->f_seqcount > 0) ||
	    uio->uio_offset == fp->f_nextoff) {
		/*
		 * XXX we assume that the filesystem block size is
		 * the default.  Not true, but still gives us a pretty
		 * good indicator of how sequential the read operations
		 * are.
		 */
		int tmpseq = fp->f_seqcount;

		tmpseq += (uio->uio_resid + BKVASIZE - 1) / BKVASIZE;
		if (tmpseq > IO_SEQMAX)
			tmpseq = IO_SEQMAX;
		fp->f_seqcount = tmpseq;
		return (fp->f_seqcount << IO_SEQSHIFT);
	}

	/*
	 * Not sequential, quick draw-down of seqcount
	 */
	if (fp->f_seqcount > 1)
		fp->f_seqcount = 1;
	else
		fp->f_seqcount = 0;
	return (0);
}
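
/*
 * Example of the heuristic in action (assuming BKVASIZE is 16 KB):
 * back-to-back 64 KB reads bump f_seqcount by 4 per call until it
 * saturates at IO_SEQMAX.  The hint handed to the driver in ioflag is
 * f_seqcount << IO_SEQSHIFT, which a driver can recover as
 *
 *	seq = ioflag >> IO_SEQSHIFT;
 *
 * to tune read-ahead.
 */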

extern SYSCTL_NODE(_vfs, OID_AUTO, devfs, CTLFLAG_RW, 0, "devfs");

SYSCTL_INT(_vfs_devfs, OID_AUTO, mpsafe_writes, CTLFLAG_RD, &mpsafe_writes,
	   0, "mpsafe writes");
SYSCTL_INT(_vfs_devfs, OID_AUTO, mplock_writes, CTLFLAG_RD, &mplock_writes,
	   0, "non-mpsafe writes");
SYSCTL_INT(_vfs_devfs, OID_AUTO, mpsafe_reads, CTLFLAG_RD, &mpsafe_reads,
	   0, "mpsafe reads");
SYSCTL_INT(_vfs_devfs, OID_AUTO, mplock_reads, CTLFLAG_RD, &mplock_reads,
	   0, "non-mpsafe reads");
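
/*
 * These counters can be observed from userland to verify how much I/O
 * is going through the MP-safe paths; illustrative output (the values
 * shown are made up):
 *
 *	$ sysctl vfs.devfs
 *	vfs.devfs.mpsafe_writes: 0
 *	vfs.devfs.mplock_writes: 1036
 *	vfs.devfs.mpsafe_reads: 4187
 *	vfs.devfs.mplock_reads: 2762
 */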