/*
 * Copyright (c) 2003 Matthew Dillon <dillon@backplane.com> All rights reserved.
 * cdevsw from kern/kern_conf.c Copyright (c) 1995 Terrence R. Lambert
 * cdevsw from kern/kern_conf.c Copyright (c) 1995 Julian R. Elischer,
 *						All rights reserved.
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/vnode.h>
#include <sys/queue.h>
#include <sys/device.h>
#include <sys/syslink_rpc.h>
#include <sys/dsched.h>
#include <sys/devfs.h>

#include <machine/stdarg.h>

#include <sys/thread2.h>
#include <sys/mplock2.h>

/*
 * system link descriptors identify the command in the
 * arguments structure.
 */
#define DDESCNAME(name) __CONCAT(__CONCAT(dev_,name),_desc)

#define DEVOP_DESC_INIT(name)						\
	    struct syslink_desc DDESCNAME(name) = {			\
		__offsetof(struct dev_ops, __CONCAT(d_, name)),		\
	    #name }
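
/*
 * For reference (illustrative expansion, assuming the descriptor's second
 * member holds the operation name), DEVOP_DESC_INIT(open) produces
 * roughly:
 *
 *	struct syslink_desc dev_open_desc = {
 *		__offsetof(struct dev_ops, d_open),
 *		"open"
 *	};
 *
 * dev_doperate() later uses the recorded sd_offset to locate the d_open
 * handler inside a struct dev_ops at runtime.
 */
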
DEVOP_DESC_INIT(default);
DEVOP_DESC_INIT(open);
DEVOP_DESC_INIT(close);
DEVOP_DESC_INIT(read);
DEVOP_DESC_INIT(write);
DEVOP_DESC_INIT(ioctl);
DEVOP_DESC_INIT(dump);
DEVOP_DESC_INIT(psize);
DEVOP_DESC_INIT(mmap);
DEVOP_DESC_INIT(mmap_single);
DEVOP_DESC_INIT(strategy);
DEVOP_DESC_INIT(kqfilter);
DEVOP_DESC_INIT(revoke);
DEVOP_DESC_INIT(clone);

struct dev_ops dead_dev_ops;

static d_open_t		noopen;
static d_close_t	noclose;
static d_read_t		noread;
static d_write_t	nowrite;
static d_ioctl_t	noioctl;
static d_mmap_t		nommap;
static d_mmap_single_t	nommap_single;
static d_strategy_t	nostrategy;
static d_dump_t		nodump;
static d_psize_t	nopsize;
static d_kqfilter_t	nokqfilter;
static d_clone_t	noclone;
static d_revoke_t	norevoke;

struct dev_ops default_dev_ops = {
	{ "null" },
	.d_default = NULL,	/* must be NULL */
	.d_open = noopen,
	.d_close = noclose,
	.d_read = noread,
	.d_write = nowrite,
	.d_ioctl = noioctl,
	.d_mmap = nommap,
	.d_mmap_single = nommap_single,
	.d_strategy = nostrategy,
	.d_dump = nodump,
	.d_psize = nopsize,
	.d_kqfilter = nokqfilter,
	.d_revoke = norevoke,
	.d_clone = noclone
};

static __inline int
dev_needmplock(cdev_t dev)
{
	return((dev->si_ops->head.flags & D_MPSAFE) == 0);
}

/************************************************************************
 *		GENERAL DEVICE API FUNCTIONS				*
 ************************************************************************
 *
 * The MPSAFEness of these depends on dev->si_ops->head.flags.
 */
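
/*
 * Illustrative only (hypothetical foo_* names): a fully MP-safe driver
 * sets D_MPSAFE in its dev_ops head, so dev_needmplock() returns 0 and
 * these wrappers dispatch without taking the mplock:
 *
 *	static struct dev_ops foo_ops = {
 *		{ "foo", 0, D_MPSAFE },
 *		.d_open = foo_open,
 *		.d_read = foo_read,
 *		.d_write = foo_write
 *	};
 */
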
dev_dopen(cdev_t dev, int oflags, int devtype, struct ucred *cred, struct file *fp)
	struct dev_open_args ap;
	int needmplock = dev_needmplock(dev);

	ap.a_head.a_desc = &dev_open_desc;
	ap.a_head.a_dev = dev;
	ap.a_oflags = oflags;
	ap.a_devtype = devtype;

	error = dev->si_ops->d_open(&ap);

dev_dclose(cdev_t dev, int fflag, int devtype, struct file *fp)
	struct dev_close_args ap;
	int needmplock = dev_needmplock(dev);

	ap.a_head.a_desc = &dev_close_desc;
	ap.a_head.a_dev = dev;
	ap.a_devtype = devtype;

	error = dev->si_ops->d_close(&ap);

dev_dread(cdev_t dev, struct uio *uio, int ioflag, struct file *fp)
	struct dev_read_args ap;
	int needmplock = dev_needmplock(dev);

	ap.a_head.a_desc = &dev_read_desc;
	ap.a_head.a_dev = dev;
	ap.a_ioflag = ioflag;

	error = dev->si_ops->d_read(&ap);
	dev->si_lastread = time_uptime;

dev_dwrite(cdev_t dev, struct uio *uio, int ioflag, struct file *fp)
	struct dev_write_args ap;
	int needmplock = dev_needmplock(dev);

	dev->si_lastwrite = time_uptime;
	ap.a_head.a_desc = &dev_write_desc;
	ap.a_head.a_dev = dev;
	ap.a_ioflag = ioflag;

	error = dev->si_ops->d_write(&ap);

dev_dioctl(cdev_t dev, u_long cmd, caddr_t data, int fflag, struct ucred *cred,
	   struct sysmsg *msg, struct file *fp)
	struct dev_ioctl_args ap;
	int needmplock = dev_needmplock(dev);

	ap.a_head.a_desc = &dev_ioctl_desc;
	ap.a_head.a_dev = dev;

	error = dev->si_ops->d_ioctl(&ap);

dev_dmmap(cdev_t dev, vm_offset_t offset, int nprot, struct file *fp)
	struct dev_mmap_args ap;
	int needmplock = dev_needmplock(dev);

	ap.a_head.a_desc = &dev_mmap_desc;
	ap.a_head.a_dev = dev;
	ap.a_offset = offset;

	error = dev->si_ops->d_mmap(&ap);

dev_dmmap_single(cdev_t dev, vm_ooffset_t *offset, vm_size_t size,
		 struct vm_object **object, int nprot, struct file *fp)
	struct dev_mmap_single_args ap;
	int needmplock = dev_needmplock(dev);

	ap.a_head.a_desc = &dev_mmap_single_desc;
	ap.a_head.a_dev = dev;
	ap.a_offset = offset;
	ap.a_object = object;

	error = dev->si_ops->d_mmap_single(&ap);

dev_dclone(cdev_t dev)
	struct dev_clone_args ap;
	int needmplock = dev_needmplock(dev);

	ap.a_head.a_desc = &dev_clone_desc;
	ap.a_head.a_dev = dev;

	error = dev->si_ops->d_clone(&ap);

dev_drevoke(cdev_t dev)
	struct dev_revoke_args ap;
	int needmplock = dev_needmplock(dev);

	ap.a_head.a_desc = &dev_revoke_desc;
	ap.a_head.a_dev = dev;

	error = dev->si_ops->d_revoke(&ap);

/*
 * Core device strategy call, used to issue I/O on a device.  There are
 * two versions, a non-chained version and a chained version.  The chained
 * version reuses a BIO set up by vn_strategy().  The only difference is
 * that, for now, we do not push a new tracking structure when chaining
 * from vn_strategy.  XXX this will ultimately have to change.
 */
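
/*
 * Illustrative usage (hypothetical bp/nbio names): a caller starting a
 * fresh read or write goes through the non-chained entry point, while a
 * layer that already holds a bio tracked by vn_strategy() forwards it
 * with the chained form:
 *
 *	dev_dstrategy(dev, &bp->b_bio1);	(new I/O, gets a bio_track)
 *	dev_dstrategy_chain(dev, nbio);		(reuse the existing tracking)
 */
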
dev_dstrategy(cdev_t dev, struct bio *bio)
	struct dev_strategy_args ap;
	struct bio_track *track;
	int needmplock = dev_needmplock(dev);

	ap.a_head.a_desc = &dev_strategy_desc;
	ap.a_head.a_dev = dev;

	KKASSERT(bio->bio_track == NULL);
	KKASSERT(bio->bio_buf->b_cmd != BUF_CMD_DONE);
	if (bio->bio_buf->b_cmd == BUF_CMD_READ)
		track = &dev->si_track_read;
	else
		track = &dev->si_track_write;
	bio_track_ref(track);
	bio->bio_track = track;
	dsched_buf_enter(bio->bio_buf);	/* might stack */

	KKASSERT((bio->bio_flags & BIO_DONE) == 0);

	(void)dev->si_ops->d_strategy(&ap);

dev_dstrategy_chain(cdev_t dev, struct bio *bio)
	struct dev_strategy_args ap;
	int needmplock = dev_needmplock(dev);

	ap.a_head.a_desc = &dev_strategy_desc;
	ap.a_head.a_dev = dev;

	KKASSERT(bio->bio_track != NULL);
	KKASSERT((bio->bio_flags & BIO_DONE) == 0);

	(void)dev->si_ops->d_strategy(&ap);

/*
 * note: the disk layer is expected to set count, blkno, and secsize before
 * forwarding the message.
 */
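
/*
 * Sketch of such forwarding (illustrative only; the a_count/a_blkno/
 * a_secsize field names follow the note above and lower_dev is a
 * hypothetical lower device): a layered disk driver fills in the block
 * parameters and passes the same argument block down:
 *
 *	ap->a_count = count;
 *	ap->a_blkno = blkno;
 *	ap->a_secsize = secsize;
 *	ap->a_head.a_dev = lower_dev;
 *	return (dev_doperate(&ap->a_head));
 */
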
dev_ddump(cdev_t dev, void *virtual, vm_offset_t physical, off_t offset,
	struct dev_dump_args ap;
	int needmplock = dev_needmplock(dev);

	ap.a_head.a_desc = &dev_dump_desc;
	ap.a_head.a_dev = dev;
	ap.a_virtual = virtual;
	ap.a_physical = physical;
	ap.a_offset = offset;
	ap.a_length = length;

	error = dev->si_ops->d_dump(&ap);

dev_dpsize(cdev_t dev)
	struct dev_psize_args ap;
	int needmplock = dev_needmplock(dev);

	ap.a_head.a_desc = &dev_psize_desc;
	ap.a_head.a_dev = dev;

	error = dev->si_ops->d_psize(&ap);

	return (ap.a_result);

/*
 * Pass-thru to the device kqfilter.
 *
 * NOTE: We explicitly preset a_result to 0 so d_kqfilter() functions
 *	 which return 0 do not have to bother setting a_result.
 */
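
/*
 * Illustrative only (hypothetical foo names): because a_result is preset
 * to 0, a driver kqfilter that accepts the attach can hook up its
 * filterops and simply return 0:
 *
 *	static int
 *	foofilter(struct dev_kqfilter_args *ap)
 *	{
 *		ap->a_kn->kn_fop = &foo_filtops;
 *		return (0);
 *	}
 */
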
dev_dkqfilter(cdev_t dev, struct knote *kn, struct file *fp)
	struct dev_kqfilter_args ap;
	int needmplock = dev_needmplock(dev);

	ap.a_head.a_desc = &dev_kqfilter_desc;
	ap.a_head.a_dev = dev;

	error = dev->si_ops->d_kqfilter(&ap);

/************************************************************************
 *		DEVICE HELPER FUNCTIONS					*
 ************************************************************************/

dev_drefs(cdev_t dev)
	return(dev->si_sysref.refcnt);

dev_dname(cdev_t dev)
	return(dev->si_ops->head.name);

dev_dflags(cdev_t dev)
	return(dev->si_ops->head.flags);

dev_dmaj(cdev_t dev)
	return(dev->si_ops->head.maj);

/*
 * Used when forwarding a request through layers.  The caller adjusts
 * ap->a_head.a_dev and then calls this function.
 */
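
/*
 * Typical layering pattern (sketch; lower_dev is a hypothetical lower
 * device): a pass-through layer can forward any operation without
 * knowing which one it is, because the descriptor carries the offset of
 * the handler:
 *
 *	ap->a_head.a_dev = lower_dev;
 *	return (dev_doperate(&ap->a_head));
 */
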
dev_doperate(struct dev_generic_args *ap)
	int (*func)(struct dev_generic_args *);
	int needmplock = dev_needmplock(ap->a_dev);

	func = *(void **)((char *)ap->a_dev->si_ops + ap->a_desc->sd_offset);

/*
 * Used by the console intercept code only.  Issue an operation through
 * a foreign ops structure allowing the ops structure associated
 * with the device to remain intact.
 */
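
/*
 * Sketch of the intended call pattern (illustrative; my_console_ops is a
 * hypothetical private ops table): build the argument block as usual but
 * dispatch through the private ops instead of dev->si_ops:
 *
 *	struct dev_read_args ap;
 *
 *	ap.a_head.a_desc = &dev_read_desc;
 *	ap.a_head.a_dev = dev;
 *	error = dev_doperate_ops(&my_console_ops, &ap.a_head);
 */
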
dev_doperate_ops(struct dev_ops *ops, struct dev_generic_args *ap)
	int (*func)(struct dev_generic_args *);
	int needmplock = ((ops->head.flags & D_MPSAFE) == 0);

	func = *(void **)((char *)ops + ap->a_desc->sd_offset);

/*
 * Convert a template dev_ops into the real thing by filling in
 * uninitialized fields.
 */
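
/*
 * For example (hypothetical foo_* driver): a template usually supplies
 * only the handlers it cares about:
 *
 *	static struct dev_ops foo_ops = {
 *		{ "foo" },
 *		.d_open = foo_open,
 *		.d_read = foo_read
 *	};
 *
 * After compile_dev_ops(&foo_ops) the remaining d_* fields no longer
 * contain NULL; they are filled from d_default or, when no d_default is
 * given, from the matching default_dev_ops entry.
 */
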
compile_dev_ops(struct dev_ops *ops)
	for (offset = offsetof(struct dev_ops, dev_ops_first_field);
	     offset <= offsetof(struct dev_ops, dev_ops_last_field);
	     offset += sizeof(void *)
		void **func_p = (void **)((char *)ops + offset);
		void **def_p = (void **)((char *)&default_dev_ops + offset);
		if (*func_p == NULL) {
			*func_p = ops->d_default;

/************************************************************************
 *		MAJOR/MINOR SPACE FUNCTION				*
 ************************************************************************/

/*
 * This makes a dev_ops entry visible to userland (e.g. /dev/<blah>).
 *
 * Disk devices typically register their major, e.g. 'ad0', and then call
 * into the disk label management code which overloads its own onto e.g. 'ad0'
 * to support all the various slice and partition combinations.
 *
 * The mask/match supplied in this call are a full 32 bits and the same
 * mask and match must be specified in a later dev_ops_remove() call to
 * match this add.  However, the match value for the minor number should never
 * have any bits set in the major number's bit range (8-15).  The mask value
 * may be conveniently specified as -1 without creating any major number
 * conflicts.
 */
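
/*
 * Worked example of the convention above (illustrative values only):
 *
 *	mask  = 0xffffffff;	(-1: every bit participates)
 *	match = 0x00000003;	(minor 3; no bits fall in the 8-15 major range)
 *
 * The same mask/match pair would then be handed to the matching
 * dev_ops_remove() call.
 */
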
rb_dev_ops_compare(struct dev_ops_maj *a, struct dev_ops_maj *b)
	else if (a->maj > b->maj)

RB_GENERATE2(dev_ops_rb_tree, dev_ops_maj, rbnode, rb_dev_ops_compare, int, maj);

struct dev_ops_rb_tree dev_ops_rbhead = RB_INITIALIZER(dev_ops_rbhead);

dev_ops_remove_all(struct dev_ops *ops)
	return devfs_destroy_dev_by_ops(ops, -1);

dev_ops_remove_minor(struct dev_ops *ops, int minor)
	return devfs_destroy_dev_by_ops(ops, minor);
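
/*
 * Swap in an intercept ops vector for a device: the intercept ops inherit
 * the original major, data and flags, the device is flagged
 * SI_INTERCEPTED, and dev_ops_restore() undoes the substitution.  See
 * dev_doperate_ops() above for how the console intercept code issues
 * operations through such a foreign ops structure.
 */
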
dev_ops_intercept(cdev_t dev, struct dev_ops *iops)
	struct dev_ops *oops = dev->si_ops;

	compile_dev_ops(iops);
	iops->head.maj = oops->head.maj;
	iops->head.data = oops->head.data;
	iops->head.flags = oops->head.flags;

	dev->si_flags |= SI_INTERCEPTED;

dev_ops_restore(cdev_t dev, struct dev_ops *oops)
	struct dev_ops *iops = dev->si_ops;

	dev->si_flags &= ~SI_INTERCEPTED;

	iops->head.data = NULL;
	iops->head.flags = 0;

/************************************************************************
 *		DEFAULT DEV OPS FUNCTIONS				*
 ************************************************************************/

/*
 * Unsupported devswitch functions (e.g. for writing to read-only device).
 * XXX may belong elsewhere.
 */
norevoke(struct dev_revoke_args *ap)

noclone(struct dev_clone_args *ap)
	return (0);	/* allow the clone */

noopen(struct dev_open_args *ap)

noclose(struct dev_close_args *ap)

noread(struct dev_read_args *ap)

nowrite(struct dev_write_args *ap)

noioctl(struct dev_ioctl_args *ap)

nokqfilter(struct dev_kqfilter_args *ap)

nommap(struct dev_mmap_args *ap)

nommap_single(struct dev_mmap_single_args *ap)

nostrategy(struct dev_strategy_args *ap)
	struct bio *bio = ap->a_bio;

	bio->bio_buf->b_flags |= B_ERROR;
	bio->bio_buf->b_error = EOPNOTSUPP;

nopsize(struct dev_psize_args *ap)

nodump(struct dev_dump_args *ap)

 * XXX this is probably bogus.  Any device that uses it isn't checking the

nullopen(struct dev_open_args *ap)

nullclose(struct dev_close_args *ap)