2 * Copyright (c) 2003 Matthew Dillon <dillon@backplane.com> All rights reserved.
3 * cdevsw from kern/kern_conf.c Copyright (c) 1995 Terrence R. Lambert
4 * cdevsw from kern/kern_conf.c Copyright (c) 1995 Julian R. Elishcer,
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * $DragonFly: src/sys/kern/kern_device.c,v 1.9 2004/04/20 01:52:22 dillon Exp $
30 #include <sys/param.h>
31 #include <sys/kernel.h>
32 #include <sys/sysctl.h>
33 #include <sys/systm.h>
34 #include <sys/module.h>
35 #include <sys/malloc.h>
37 #include <sys/vnode.h>
38 #include <sys/queue.h>
39 #include <sys/msgport.h>
40 #include <sys/device.h>
41 #include <machine/stdarg.h>
43 #include <sys/thread2.h>
44 #include <sys/msgport2.h>
/*
 * Legacy device-switch tables, indexed by device major number.
 * cdevport[] optionally overrides the message port used for a given
 * major number (see cdevsw_add_override/cdevsw_dev_override below).
 */
46 static struct cdevsw *cdevsw[NUMCDEVSW];
47 static struct lwkt_port *cdevport[NUMCDEVSW];
/* Forward declaration: translator from LWKT messages to old cdevsw calls. */
49 static int cdevsw_putport(lwkt_port_t port, lwkt_msg_t msg);
52 * Initialize a message port to serve as the default message-handling port
53 * for device operations. This message port provides compatibility with
54 * traditional cdevsw dispatch functions. There are two primary modes:
56 * mp_td is NULL: The d_autoq mask is ignored and all messages are translated
57 * into direct, synchronous cdevsw calls.
59 * mp_td not NULL: The d_autoq mask is used to determine which messages should
60 * be queued and which should be handled synchronously.
62 * Don't worry too much about optimizing this code, the critical devices
63 * will implement their own port messaging functions directly.
65 * YYY NOTE: ms_cmd can now hold a function pointer, should this code be
66 * converted from an integer op to a function pointer with a flag to
67 * indicate legacy operation?
/*
 * init_default_cdevsw_port - initialize a message port whose putport
 * function translates device messages into legacy cdevsw calls.
 * NOTE(review): the return type line and braces of this function appear
 * to be elided from this view of the file.
 */
70 init_default_cdevsw_port(lwkt_port_t port)
72 	lwkt_initport(port, NULL);
73 	port->mp_putport = cdevsw_putport;
/*
 * cdevsw_putport - message-port putport handler that dispatches a device
 * message either asynchronously (queued, if the command bit is set in
 * csw->d_autoq) or synchronously by calling the matching old_* cdevsw
 * function directly in the caller's context.
 * NOTE(review): many interior lines (case labels, breaks, braces) appear
 * to be elided from this view of the file.
 */
78 cdevsw_putport(lwkt_port_t port, lwkt_msg_t lmsg)
80 cdevallmsg_t msg = (cdevallmsg_t)lmsg;
81 struct cdevsw *csw = msg->am_msg.csw;
85 * If queueable then officially queue the message
/* One bit per sub-command; compare against the driver's auto-queue mask. */
88 int mask = (1 << (msg->am_lmsg.ms_cmd.cm_op & MSG_SUBCMD_MASK));
89 if (csw->d_autoq & mask)
90 return(lwkt_beginmsg(port, &msg->am_lmsg));
94 * Run the device switch function synchronously in the context of the
95 * caller and return a synchronous error code (anything not EASYNC).
97 switch(msg->am_lmsg.ms_cmd.cm_op) {
99 error = csw->old_open(
100 msg->am_open.msg.dev,
102 msg->am_open.devtype,
106 error = csw->old_close(
107 msg->am_close.msg.dev,
109 msg->am_close.devtype,
112 case CDEV_CMD_STRATEGY:
/* old_strategy returns no status; completion is reported via the buf. */
113 csw->old_strategy(msg->am_strategy.bp);
117 error = csw->old_ioctl(
118 msg->am_ioctl.msg.dev,
/* NOTE(review): DUMP reads the dev through am_ioctl, not am_dump — verify
 * against the cdevallmsg union layout. */
125 error = csw->old_dump(msg->am_ioctl.msg.dev);
128 msg->am_psize.result = csw->old_psize(msg->am_psize.msg.dev);
132 error = csw->old_read(
133 msg->am_read.msg.dev,
135 msg->am_read.ioflag);
/* NOTE(review): WRITE reuses the am_read member — presumably the read and
 * write message layouts are identical; verify. */
138 error = csw->old_write(
139 msg->am_read.msg.dev,
141 msg->am_read.ioflag);
144 msg->am_poll.events = csw->old_poll(
145 msg->am_poll.msg.dev,
150 case CDEV_CMD_KQFILTER:
151 msg->am_kqfilter.result = csw->old_kqfilter(
152 msg->am_kqfilter.msg.dev,
153 msg->am_kqfilter.kn);
157 msg->am_mmap.result = csw->old_mmap(
158 msg->am_mmap.msg.dev,
/* Synchronous path must never report EASYNC to the caller. */
167 KKASSERT(error != EASYNC);
172 * These device dispatch functions provide convenient entry points for
173 * any code wishing to make a dev call.
175 * YYY we ought to be able to optimize the port lookup by caching it in
176 * the dev_t structure itself.
/*
 * Tail of _devsw(): prefer the cdevsw cached in the dev_t itself,
 * otherwise fall back to the major-number lookup table.
 * NOTE(review): the function header and the si_devsw guard condition
 * appear to be elided from this view of the file.
 */
185 return (dev->si_devsw);
186 return(cdevsw[major(dev)]);
/*
 * _init_cdevmsg - initialize a device message for command 'cmd' and
 * return the message port it should be sent to: the per-major override
 * port if one is registered, otherwise (presumably, in elided lines)
 * the driver's own d_port.  Returns the port, or NULL-ish behavior when
 * no cdevsw is found (elided from this view).
 */
191 _init_cdevmsg(dev_t dev, cdevmsg_t msg, int cmd)
195 lwkt_initmsg_simple(&msg->msg, cmd);
197 msg->csw = csw = _devsw(dev);
198 if (csw != NULL) { /* YYY too hackish */
199 KKASSERT(csw->d_port); /* YYY too hackish */
/* Per-major port override takes precedence over the driver's port. */
200 if (cdevport[major(dev)]) /* YYY too hackish */
201 return(cdevport[major(dev)]);
/*
 * dev_dopen - open a device: build a CDEV_CMD_OPEN message and send it
 * synchronously to the device's message port.
 */
208 dev_dopen(dev_t dev, int oflags, int devtype, thread_t td)
210 struct cdevmsg_open msg;
213 port = _init_cdevmsg(dev, &msg.msg, CDEV_CMD_OPEN);
217 msg.devtype = devtype;
219 return(lwkt_domsg(port, &msg.msg.msg));
/*
 * dev_dclose - close a device via a synchronous CDEV_CMD_CLOSE message.
 */
223 dev_dclose(dev_t dev, int fflag, int devtype, thread_t td)
225 struct cdevmsg_close msg;
228 port = _init_cdevmsg(dev, &msg.msg, CDEV_CMD_CLOSE);
232 msg.devtype = devtype;
234 return(lwkt_domsg(port, &msg.msg.msg));
/*
 * dev_dstrategy - issue a strategy (block I/O) request.  No status is
 * returned; completion is signalled through the buf.
 */
238 dev_dstrategy(dev_t dev, struct buf *bp)
240 struct cdevmsg_strategy msg;
243 port = _init_cdevmsg(dev, &msg.msg, CDEV_CMD_STRATEGY);
244 KKASSERT(port); /* 'nostrategy' function is NULL YYY */
246 lwkt_domsg(port, &msg.msg.msg);
/*
 * dev_dioctl - perform a device ioctl via a synchronous message.
 * (cmd/data/fflag/td assignments are elided from this view.)
 */
250 dev_dioctl(dev_t dev, u_long cmd, caddr_t data, int fflag, thread_t td)
252 struct cdevmsg_ioctl msg;
255 port = _init_cdevmsg(dev, &msg.msg, CDEV_CMD_IOCTL);
262 return(lwkt_domsg(port, &msg.msg.msg));
/*
 * Body of dev_ddump (function header elided from this view): request a
 * crash dump via a synchronous CDEV_CMD_DUMP message.
 */
268 struct cdevmsg_dump msg;
271 port = _init_cdevmsg(dev, &msg.msg, CDEV_CMD_DUMP);
274 return(lwkt_domsg(port, &msg.msg.msg));
/*
 * dev_dpsize - query the device (partition) size via CDEV_CMD_PSIZE.
 * The result/error handling after lwkt_domsg is elided from this view.
 */
278 dev_dpsize(dev_t dev)
280 struct cdevmsg_psize msg;
284 port = _init_cdevmsg(dev, &msg.msg, CDEV_CMD_PSIZE);
287 error = lwkt_domsg(port, &msg.msg.msg);
/*
 * dev_dread - read from a device via a synchronous CDEV_CMD_READ message.
 */
294 dev_dread(dev_t dev, struct uio *uio, int ioflag)
296 struct cdevmsg_read msg;
299 port = _init_cdevmsg(dev, &msg.msg, CDEV_CMD_READ);
304 return(lwkt_domsg(port, &msg.msg.msg));
/*
 * dev_dwrite - write to a device via a synchronous CDEV_CMD_WRITE message.
 */
308 dev_dwrite(dev_t dev, struct uio *uio, int ioflag)
310 struct cdevmsg_write msg;
313 port = _init_cdevmsg(dev, &msg.msg, CDEV_CMD_WRITE);
318 return(lwkt_domsg(port, &msg.msg.msg));
/*
 * dev_dpoll - poll a device via CDEV_CMD_POLL; on error falls back to
 * seltrue() (device treated as always ready).
 */
322 dev_dpoll(dev_t dev, int events, thread_t td)
324 struct cdevmsg_poll msg;
328 port = _init_cdevmsg(dev, &msg.msg, CDEV_CMD_POLL);
333 error = lwkt_domsg(port, &msg.msg.msg);
336 return(seltrue(dev, msg.events, td));
/*
 * dev_dkqfilter - attach a kqueue knote via CDEV_CMD_KQFILTER.
 * Result extraction after lwkt_domsg is elided from this view.
 */
340 dev_dkqfilter(dev_t dev, struct knote *kn)
342 struct cdevmsg_kqfilter msg;
346 port = _init_cdevmsg(dev, &msg.msg, CDEV_CMD_KQFILTER);
350 error = lwkt_domsg(port, &msg.msg.msg);
/*
 * dev_dmmap - memory-map a device page via CDEV_CMD_MMAP.
 * Result extraction after lwkt_domsg is elided from this view.
 */
357 dev_dmmap(dev_t dev, vm_offset_t offset, int nprot)
359 struct cdevmsg_mmap msg;
363 port = _init_cdevmsg(dev, &msg.msg, CDEV_CMD_MMAP);
368 error = lwkt_domsg(port, &msg.msg.msg);
/*
 * dev_port_dopen - like dev_dopen() but sends the message to an
 * explicitly supplied port instead of the one looked up from the dev.
 */
375 dev_port_dopen(lwkt_port_t port, dev_t dev, int oflags, int devtype, thread_t td)
377 struct cdevmsg_open msg;
379 _init_cdevmsg(dev, &msg.msg, CDEV_CMD_OPEN);
383 msg.devtype = devtype;
385 return(lwkt_domsg(port, &msg.msg.msg));
/*
 * dev_port_dclose - dev_dclose() variant targeting a caller-supplied port.
 */
389 dev_port_dclose(lwkt_port_t port, dev_t dev, int fflag, int devtype, thread_t td)
391 struct cdevmsg_close msg;
393 _init_cdevmsg(dev, &msg.msg, CDEV_CMD_CLOSE);
397 msg.devtype = devtype;
399 return(lwkt_domsg(port, &msg.msg.msg));
/*
 * dev_port_dstrategy - dev_dstrategy() variant targeting a caller-supplied
 * port.  No status is returned; completion is signalled through the buf.
 */
403 dev_port_dstrategy(lwkt_port_t port, dev_t dev, struct buf *bp)
405 struct cdevmsg_strategy msg;
407 _init_cdevmsg(dev, &msg.msg, CDEV_CMD_STRATEGY);
408 KKASSERT(port); /* 'nostrategy' function is NULL YYY */
410 lwkt_domsg(port, &msg.msg.msg);
/*
 * dev_port_dioctl - dev_dioctl() variant targeting a caller-supplied port.
 */
414 dev_port_dioctl(lwkt_port_t port, dev_t dev, u_long cmd, caddr_t data, int fflag, thread_t td)
416 struct cdevmsg_ioctl msg;
418 _init_cdevmsg(dev, &msg.msg, CDEV_CMD_IOCTL);
425 return(lwkt_domsg(port, &msg.msg.msg));
/*
 * dev_port_ddump - dev_ddump() variant targeting a caller-supplied port.
 */
429 dev_port_ddump(lwkt_port_t port, dev_t dev)
431 struct cdevmsg_dump msg;
433 _init_cdevmsg(dev, &msg.msg, CDEV_CMD_DUMP);
436 return(lwkt_domsg(port, &msg.msg.msg));
/*
 * dev_port_dpsize - dev_dpsize() variant targeting a caller-supplied port.
 * Result/error handling after lwkt_domsg is elided from this view.
 */
440 dev_port_dpsize(lwkt_port_t port, dev_t dev)
442 struct cdevmsg_psize msg;
445 _init_cdevmsg(dev, &msg.msg, CDEV_CMD_PSIZE);
448 error = lwkt_domsg(port, &msg.msg.msg);
/*
 * dev_port_dread - dev_dread() variant targeting a caller-supplied port.
 */
455 dev_port_dread(lwkt_port_t port, dev_t dev, struct uio *uio, int ioflag)
457 struct cdevmsg_read msg;
459 _init_cdevmsg(dev, &msg.msg, CDEV_CMD_READ);
464 return(lwkt_domsg(port, &msg.msg.msg));
/*
 * dev_port_dwrite - dev_dwrite() variant targeting a caller-supplied port.
 */
468 dev_port_dwrite(lwkt_port_t port, dev_t dev, struct uio *uio, int ioflag)
470 struct cdevmsg_write msg;
472 _init_cdevmsg(dev, &msg.msg, CDEV_CMD_WRITE);
477 return(lwkt_domsg(port, &msg.msg.msg));
/*
 * dev_port_dpoll - dev_dpoll() variant targeting a caller-supplied port;
 * falls back to seltrue() on error.
 */
481 dev_port_dpoll(lwkt_port_t port, dev_t dev, int events, thread_t td)
483 struct cdevmsg_poll msg;
486 _init_cdevmsg(dev, &msg.msg, CDEV_CMD_POLL);
491 error = lwkt_domsg(port, &msg.msg.msg);
494 return(seltrue(dev, msg.events, td));
/*
 * dev_port_dkqfilter - dev_dkqfilter() variant targeting a caller-supplied
 * port.  Result extraction after lwkt_domsg is elided from this view.
 */
498 dev_port_dkqfilter(lwkt_port_t port, dev_t dev, struct knote *kn)
500 struct cdevmsg_kqfilter msg;
503 _init_cdevmsg(dev, &msg.msg, CDEV_CMD_KQFILTER);
507 error = lwkt_domsg(port, &msg.msg.msg);
/*
 * dev_port_dmmap - dev_dmmap() variant targeting a caller-supplied port.
 * Result extraction after lwkt_domsg is elided from this view.
 */
514 dev_port_dmmap(lwkt_port_t port, dev_t dev, vm_offset_t offset, int nprot)
516 struct cdevmsg_mmap msg;
519 _init_cdevmsg(dev, &msg.msg, CDEV_CMD_MMAP);
524 error = lwkt_domsg(port, &msg.msg.msg);
/*
 * NOTE(review): fragment of an accessor (presumably dev_dname()) whose
 * header and return statements are elided from this view.
 */
535 if ((csw = _devsw(dev)) != NULL)
/*
 * dev_dflags - return the d_flags of the device's cdevsw entry.
 * The no-cdevsw fallback return is elided from this view.
 */
541 dev_dflags(dev_t dev)
545 if ((csw = _devsw(dev)) != NULL)
546 return(csw->d_flags);
/*
 * NOTE(review): fragment of another accessor (presumably dev_dmaj())
 * whose header and return statements are elided from this view.
 */
555 if ((csw = _devsw(dev)) != NULL)
/*
 * Body of dev_dport (header elided from this view): return the message
 * port for a device, preferring the per-major override port.
 */
565 if ((csw = _devsw(dev)) != NULL) {
566 if (cdevport[major(dev)]) /* YYY too hackish */
567 return(cdevport[major(dev)]);
575 * cdevsw[] array functions, moved from kern/kern_conf.c
585 * Convert a cdevsw template into the real thing, filling in fields the
586 * device left empty with appropriate defaults.
/*
 * compile_devsw - fill in any NULL entry points of a cdevsw template with
 * the standard no-op defaults (noopen, noclose, ...), and default d_port
 * to the shared compatibility port, which is lazily initialized on first
 * use so legacy drivers work without registering their own port.
 */
589 compile_devsw(struct cdevsw *devsw)
591 static lwkt_port devsw_compat_port;
/* Lazy one-time init of the shared legacy-dispatch port. */
593 if (devsw_compat_port.mp_putport == NULL)
594 init_default_cdevsw_port(&devsw_compat_port);
596 if (devsw->old_open == NULL)
597 devsw->old_open = noopen;
598 if (devsw->old_close == NULL)
599 devsw->old_close = noclose;
600 if (devsw->old_read == NULL)
601 devsw->old_read = noread;
602 if (devsw->old_write == NULL)
603 devsw->old_write = nowrite;
604 if (devsw->old_ioctl == NULL)
605 devsw->old_ioctl = noioctl;
606 if (devsw->old_poll == NULL)
607 devsw->old_poll = nopoll;
608 if (devsw->old_mmap == NULL)
609 devsw->old_mmap = nommap;
610 if (devsw->old_strategy == NULL)
611 devsw->old_strategy = nostrategy;
612 if (devsw->old_dump == NULL)
613 devsw->old_dump = nodump;
614 if (devsw->old_psize == NULL)
615 devsw->old_psize = nopsize;
616 if (devsw->old_kqfilter == NULL)
617 devsw->old_kqfilter = nokqfilter;
/* Drivers without their own port get the shared compatibility port. */
619 if (devsw->d_port == NULL)
620 devsw->d_port = &devsw_compat_port;
/*
 * cdevsw_add - register a cdevsw entry in the major-number table after
 * compiling in default entry points.  Rejects out-of-range major numbers
 * and warns (but proceeds) when replacing an existing entry.
 * The error-return statements are elided from this view.
 */
627 cdevsw_add(struct cdevsw *newentry)
629 compile_devsw(newentry);
630 if (newentry->d_maj < 0 || newentry->d_maj >= NUMCDEVSW) {
631 printf("%s: ERROR: driver has bogus cdevsw->d_maj = %d\n",
632 newentry->d_name, newentry->d_maj);
/* Overwriting an existing major is allowed but noisy. */
635 if (cdevsw[newentry->d_maj]) {
636 printf("WARNING: \"%s\" is usurping \"%s\"'s cdevsw[]\n",
637 newentry->d_name, cdevsw[newentry->d_maj]->d_name);
639 cdevsw[newentry->d_maj] = newentry;
644 * Add a cdevsw entry and override the port.
/*
 * cdevsw_add_override - register a cdevsw entry and, on success, install
 * 'port' as the message-port override for that major number.
 * NOTE(review): returns newentry->d_port rather than the cdevsw_add()
 * error code — verify this is the intended contract for callers.
 */
647 cdevsw_add_override(struct cdevsw *newentry, lwkt_port_t port)
651 if ((error = cdevsw_add(newentry)) == 0)
652 cdevport[newentry->d_maj] = port;
653 return(newentry->d_port);
/*
 * cdevsw_dev_override - install a message-port override for the major
 * number of an existing device; asserts the major number is in range.
 * The return path is elided from this view.
 */
657 cdevsw_dev_override(dev_t dev, lwkt_port_t port)
661 KKASSERT(major(dev) >= 0 && major(dev) < NUMCDEVSW);
662 if ((csw = _devsw(dev)) != NULL) {
663 cdevport[major(dev)] = port;
670 * Remove a cdevsw entry
/*
 * cdevsw_remove - unregister a cdevsw entry: clear both the cdevsw[]
 * slot and any port override for its major number.  Rejects bogus major
 * numbers (the error-return statement is elided from this view).
 */
673 cdevsw_remove(struct cdevsw *oldentry)
675 if (oldentry->d_maj < 0 || oldentry->d_maj >= NUMCDEVSW) {
676 printf("%s: ERROR: driver has bogus cdevsw->d_maj = %d\n",
677 oldentry->d_name, oldentry->d_maj);
680 cdevsw[oldentry->d_maj] = NULL;
681 cdevport[oldentry->d_maj] = NULL;