Allow a NULL dev to be passed to _devsw(). This should close any remaining
[dragonfly.git] / sys / kern / kern_device.c
CommitLineData
335dda38
MD
1/*
2 * Copyright (c) 2003 Matthew Dillon <dillon@backplane.com> All rights reserved.
3 * cdevsw from kern/kern_conf.c Copyright (c) 1995 Terrence R. Lambert
4 * cdevsw from kern/kern_conf.c Copyright (c) 1995 Julian R. Elishcer,
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 *
49b21ad6 28 * $DragonFly: src/sys/kern/kern_device.c,v 1.5 2003/08/23 16:58:36 dillon Exp $
335dda38
MD
29 */
30#include <sys/param.h>
31#include <sys/kernel.h>
32#include <sys/sysctl.h>
33#include <sys/systm.h>
34#include <sys/module.h>
35#include <sys/malloc.h>
36#include <sys/conf.h>
37#include <sys/vnode.h>
38#include <sys/queue.h>
39#include <sys/msgport.h>
40#include <sys/device.h>
41#include <machine/stdarg.h>
42#include <sys/proc.h>
43#include <sys/thread2.h>
44#include <sys/msgport2.h>
45
/*
 * Compatibility tables indexed by major device number.  cdevsw[] holds the
 * registered device switch for each major number; cdevport[] holds an
 * optional override message port installed via cdevsw_add_override() or
 * cdevsw_dev_override().
 */
static struct cdevsw *cdevsw[NUMCDEVSW];
static struct lwkt_port *cdevport[NUMCDEVSW];

/* default mp_beginmsg handler for the compatibility port, defined below */
static int cdevsw_putport(lwkt_port_t port, lwkt_msg_t msg);
50
51/*
52 * Initialize a message port to serve as the default message-handling port
53 * for device operations. This message port provides compatibility with
54 * traditional cdevsw dispatch functions. There are two primary modes:
55 *
56 * mp_td is NULL: The d_autoq mask is ignored and all messages are translated
57 * into directly, synchronous cdevsw calls.
58 *
59 * mp_td not NULL: The d_autoq mask is used to determine which messages should
60 * be queued and which should be handled synchronously.
61 *
62 * Don't worry too much about optimizing this code, the critical devices
63 * will implement their own port messaging functions directly.
64 */
static void
init_default_cdevsw_port(lwkt_port_t port)
{
        /* no owning thread: cdevsw_putport runs messages synchronously */
        lwkt_init_port(port, NULL);
        port->mp_beginmsg = cdevsw_putport;
}
71
72static
73int
74cdevsw_putport(lwkt_port_t port, lwkt_msg_t lmsg)
75{
76 cdevallmsg_t msg = (cdevallmsg_t)lmsg;
77 struct cdevsw *csw = msg->am_msg.csw;
78 int error;
79
80 /*
81 * If queueable then officially queue the message
82 */
83 if (port->mp_td) {
84 int mask = (1 << (msg->am_lmsg.ms_cmd & MSG_SUBCMD_MASK));
85 if (csw->d_autoq & mask)
86 return(lwkt_putport(port, &msg->am_lmsg));
87 }
88
89 /*
90 * Run the device switch function synchronously in the context of the
4fd10eb6 91 * caller and return a synchronous error code (anything not EASYNC).
335dda38
MD
92 */
93 switch(msg->am_lmsg.ms_cmd) {
94 case CDEV_CMD_OPEN:
95 error = csw->old_open(
96 msg->am_open.msg.dev,
97 msg->am_open.oflags,
98 msg->am_open.devtype,
99 msg->am_open.td);
100 break;
101 case CDEV_CMD_CLOSE:
102 error = csw->old_close(
103 msg->am_close.msg.dev,
104 msg->am_close.fflag,
105 msg->am_close.devtype,
106 msg->am_close.td);
107 break;
108 case CDEV_CMD_STRATEGY:
109 csw->old_strategy(msg->am_strategy.bp);
110 error = 0;
111 break;
112 case CDEV_CMD_IOCTL:
113 error = csw->old_ioctl(
114 msg->am_ioctl.msg.dev,
115 msg->am_ioctl.cmd,
116 msg->am_ioctl.data,
117 msg->am_ioctl.fflag,
118 msg->am_ioctl.td);
119 break;
120 case CDEV_CMD_DUMP:
121 error = csw->old_dump(msg->am_ioctl.msg.dev);
122 break;
123 case CDEV_CMD_PSIZE:
124 msg->am_psize.result = csw->old_psize(msg->am_psize.msg.dev);
125 error = 0; /* XXX */
126 break;
127 case CDEV_CMD_READ:
128 error = csw->old_read(
129 msg->am_read.msg.dev,
130 msg->am_read.uio,
131 msg->am_read.ioflag);
132 break;
133 case CDEV_CMD_WRITE:
134 error = csw->old_write(
135 msg->am_read.msg.dev,
136 msg->am_read.uio,
137 msg->am_read.ioflag);
138 break;
139 case CDEV_CMD_POLL:
140 msg->am_poll.events = csw->old_poll(
141 msg->am_poll.msg.dev,
142 msg->am_poll.events,
143 msg->am_poll.td);
144 error = 0;
145 break;
146 case CDEV_CMD_KQFILTER:
147 msg->am_kqfilter.result = csw->old_kqfilter(
148 msg->am_kqfilter.msg.dev,
149 msg->am_kqfilter.kn);
150 error = 0;
151 break;
152 case CDEV_CMD_MMAP:
153 msg->am_mmap.result = csw->old_mmap(
154 msg->am_mmap.msg.dev,
155 msg->am_mmap.offset,
156 msg->am_mmap.nprot);
157 error = 0; /* XXX */
158 break;
159 default:
160 error = ENOSYS;
161 break;
162 }
4fd10eb6 163 KKASSERT(error != EASYNC);
335dda38
MD
164 return(error);
165}
166
167/*
168 * These device dispatch functions provide convenient entry points for
169 * any code wishing to make a dev call.
170 *
171 * YYY we ought to be able to optimize the port lookup by caching it in
172 * the dev_t structure itself.
173 */
174static __inline
175struct cdevsw *
176_devsw(dev_t dev)
177{
49b21ad6
MD
178 if (dev == NULL)
179 return(NULL);
335dda38
MD
180 if (dev->si_devsw)
181 return (dev->si_devsw);
182 return(cdevsw[major(dev)]);
183}
184
185static __inline
186lwkt_port_t
187_init_cdevmsg(dev_t dev, cdevmsg_t msg, int cmd)
188{
189 struct cdevsw *csw;
190
245e4f17 191 lwkt_initmsg(&msg->msg, &curthread->td_msgport, cmd);
335dda38
MD
192 msg->dev = dev;
193 msg->csw = csw = _devsw(dev);
194 if (csw != NULL) { /* YYY too hackish */
195 KKASSERT(csw->d_port); /* YYY too hackish */
196 if (cdevport[major(dev)]) /* YYY too hackish */
197 return(cdevport[major(dev)]);
198 return(csw->d_port);
199 }
200 return(NULL);
201}
202
203int
204dev_dopen(dev_t dev, int oflags, int devtype, thread_t td)
205{
206 struct cdevmsg_open msg;
207 lwkt_port_t port;
208
209 port = _init_cdevmsg(dev, &msg.msg, CDEV_CMD_OPEN);
210 if (port == NULL)
211 return(ENXIO);
212 msg.oflags = oflags;
213 msg.devtype = devtype;
214 msg.td = td;
215 return(lwkt_domsg(port, &msg.msg.msg));
216}
217
218int
219dev_dclose(dev_t dev, int fflag, int devtype, thread_t td)
220{
221 struct cdevmsg_close msg;
222 lwkt_port_t port;
223
224 port = _init_cdevmsg(dev, &msg.msg, CDEV_CMD_CLOSE);
225 if (port == NULL)
226 return(ENXIO);
227 msg.fflag = fflag;
228 msg.devtype = devtype;
229 msg.td = td;
230 return(lwkt_domsg(port, &msg.msg.msg));
231}
232
233void
234dev_dstrategy(dev_t dev, struct buf *bp)
235{
236 struct cdevmsg_strategy msg;
237 lwkt_port_t port;
238
239 port = _init_cdevmsg(dev, &msg.msg, CDEV_CMD_STRATEGY);
240 KKASSERT(port); /* 'nostrategy' function is NULL YYY */
241 msg.bp = bp;
242 lwkt_domsg(port, &msg.msg.msg);
243}
244
245int
246dev_dioctl(dev_t dev, u_long cmd, caddr_t data, int fflag, thread_t td)
247{
248 struct cdevmsg_ioctl msg;
249 lwkt_port_t port;
250
251 port = _init_cdevmsg(dev, &msg.msg, CDEV_CMD_IOCTL);
252 if (port == NULL)
253 return(ENXIO);
254 msg.cmd = cmd;
255 msg.data = data;
256 msg.fflag = fflag;
257 msg.td = td;
258 return(lwkt_domsg(port, &msg.msg.msg));
259}
260
261int
262dev_ddump(dev_t dev)
263{
264 struct cdevmsg_dump msg;
265 lwkt_port_t port;
266
267 port = _init_cdevmsg(dev, &msg.msg, CDEV_CMD_DUMP);
268 if (port == NULL)
269 return(ENXIO);
270 return(lwkt_domsg(port, &msg.msg.msg));
271}
272
273int
274dev_dpsize(dev_t dev)
275{
276 struct cdevmsg_psize msg;
277 lwkt_port_t port;
278 int error;
279
280 port = _init_cdevmsg(dev, &msg.msg, CDEV_CMD_PSIZE);
281 if (port == NULL)
282 return(-1);
283 error = lwkt_domsg(port, &msg.msg.msg);
284 if (error == 0)
285 return(msg.result);
286 return(-1);
287}
288
289int
290dev_dread(dev_t dev, struct uio *uio, int ioflag)
291{
292 struct cdevmsg_read msg;
293 lwkt_port_t port;
294
295 port = _init_cdevmsg(dev, &msg.msg, CDEV_CMD_READ);
296 if (port == NULL)
297 return(ENXIO);
298 msg.uio = uio;
299 msg.ioflag = ioflag;
300 return(lwkt_domsg(port, &msg.msg.msg));
301}
302
303int
304dev_dwrite(dev_t dev, struct uio *uio, int ioflag)
305{
306 struct cdevmsg_write msg;
307 lwkt_port_t port;
308
309 port = _init_cdevmsg(dev, &msg.msg, CDEV_CMD_WRITE);
310 if (port == NULL)
311 return(ENXIO);
312 msg.uio = uio;
313 msg.ioflag = ioflag;
314 return(lwkt_domsg(port, &msg.msg.msg));
315}
316
317int
318dev_dpoll(dev_t dev, int events, thread_t td)
319{
320 struct cdevmsg_poll msg;
321 lwkt_port_t port;
322 int error;
323
324 port = _init_cdevmsg(dev, &msg.msg, CDEV_CMD_POLL);
325 if (port == NULL)
326 return(ENXIO);
327 msg.events = events;
328 msg.td = td;
329 error = lwkt_domsg(port, &msg.msg.msg);
330 if (error == 0)
331 return(msg.events);
332 return(seltrue(dev, msg.events, td));
333}
334
335int
336dev_dkqfilter(dev_t dev, struct knote *kn)
337{
338 struct cdevmsg_kqfilter msg;
339 lwkt_port_t port;
340 int error;
341
342 port = _init_cdevmsg(dev, &msg.msg, CDEV_CMD_KQFILTER);
343 if (port == NULL)
344 return(ENXIO);
345 msg.kn = kn;
346 error = lwkt_domsg(port, &msg.msg.msg);
347 if (error == 0)
348 return(msg.result);
349 return(ENODEV);
350}
351
352int
353dev_dmmap(dev_t dev, vm_offset_t offset, int nprot)
354{
355 struct cdevmsg_mmap msg;
356 lwkt_port_t port;
357 int error;
358
359 port = _init_cdevmsg(dev, &msg.msg, CDEV_CMD_MMAP);
360 if (port == NULL)
361 return(-1);
362 msg.offset = offset;
363 msg.nprot = nprot;
364 error = lwkt_domsg(port, &msg.msg.msg);
365 if (error == 0)
366 return(msg.result);
367 return(-1);
368}
369
370int
371dev_port_dopen(lwkt_port_t port, dev_t dev, int oflags, int devtype, thread_t td)
372{
373 struct cdevmsg_open msg;
374
375 _init_cdevmsg(dev, &msg.msg, CDEV_CMD_OPEN);
376 if (port == NULL)
377 return(ENXIO);
378 msg.oflags = oflags;
379 msg.devtype = devtype;
380 msg.td = td;
381 return(lwkt_domsg(port, &msg.msg.msg));
382}
383
384int
385dev_port_dclose(lwkt_port_t port, dev_t dev, int fflag, int devtype, thread_t td)
386{
387 struct cdevmsg_close msg;
388
389 _init_cdevmsg(dev, &msg.msg, CDEV_CMD_CLOSE);
390 if (port == NULL)
391 return(ENXIO);
392 msg.fflag = fflag;
393 msg.devtype = devtype;
394 msg.td = td;
395 return(lwkt_domsg(port, &msg.msg.msg));
396}
397
398void
399dev_port_dstrategy(lwkt_port_t port, dev_t dev, struct buf *bp)
400{
401 struct cdevmsg_strategy msg;
402
403 _init_cdevmsg(dev, &msg.msg, CDEV_CMD_STRATEGY);
404 KKASSERT(port); /* 'nostrategy' function is NULL YYY */
405 msg.bp = bp;
406 lwkt_domsg(port, &msg.msg.msg);
407}
408
409int
410dev_port_dioctl(lwkt_port_t port, dev_t dev, u_long cmd, caddr_t data, int fflag, thread_t td)
411{
412 struct cdevmsg_ioctl msg;
413
414 _init_cdevmsg(dev, &msg.msg, CDEV_CMD_IOCTL);
415 if (port == NULL)
416 return(ENXIO);
417 msg.cmd = cmd;
418 msg.data = data;
419 msg.fflag = fflag;
420 msg.td = td;
421 return(lwkt_domsg(port, &msg.msg.msg));
422}
423
424int
425dev_port_ddump(lwkt_port_t port, dev_t dev)
426{
427 struct cdevmsg_dump msg;
428
429 _init_cdevmsg(dev, &msg.msg, CDEV_CMD_DUMP);
430 if (port == NULL)
431 return(ENXIO);
432 return(lwkt_domsg(port, &msg.msg.msg));
433}
434
435int
436dev_port_dpsize(lwkt_port_t port, dev_t dev)
437{
438 struct cdevmsg_psize msg;
439 int error;
440
441 _init_cdevmsg(dev, &msg.msg, CDEV_CMD_PSIZE);
442 if (port == NULL)
443 return(-1);
444 error = lwkt_domsg(port, &msg.msg.msg);
445 if (error == 0)
446 return(msg.result);
447 return(-1);
448}
449
450int
451dev_port_dread(lwkt_port_t port, dev_t dev, struct uio *uio, int ioflag)
452{
453 struct cdevmsg_read msg;
454
455 _init_cdevmsg(dev, &msg.msg, CDEV_CMD_READ);
456 if (port == NULL)
457 return(ENXIO);
458 msg.uio = uio;
459 msg.ioflag = ioflag;
460 return(lwkt_domsg(port, &msg.msg.msg));
461}
462
463int
464dev_port_dwrite(lwkt_port_t port, dev_t dev, struct uio *uio, int ioflag)
465{
466 struct cdevmsg_write msg;
467
468 _init_cdevmsg(dev, &msg.msg, CDEV_CMD_WRITE);
469 if (port == NULL)
470 return(ENXIO);
471 msg.uio = uio;
472 msg.ioflag = ioflag;
473 return(lwkt_domsg(port, &msg.msg.msg));
474}
475
476int
477dev_port_dpoll(lwkt_port_t port, dev_t dev, int events, thread_t td)
478{
479 struct cdevmsg_poll msg;
480 int error;
481
482 _init_cdevmsg(dev, &msg.msg, CDEV_CMD_POLL);
483 if (port == NULL)
484 return(ENXIO);
485 msg.events = events;
486 msg.td = td;
487 error = lwkt_domsg(port, &msg.msg.msg);
488 if (error == 0)
489 return(msg.events);
490 return(seltrue(dev, msg.events, td));
491}
492
493int
494dev_port_dkqfilter(lwkt_port_t port, dev_t dev, struct knote *kn)
495{
496 struct cdevmsg_kqfilter msg;
497 int error;
498
499 _init_cdevmsg(dev, &msg.msg, CDEV_CMD_KQFILTER);
500 if (port == NULL)
501 return(ENXIO);
502 msg.kn = kn;
503 error = lwkt_domsg(port, &msg.msg.msg);
504 if (error == 0)
505 return(msg.result);
506 return(ENODEV);
507}
508
509int
510dev_port_dmmap(lwkt_port_t port, dev_t dev, vm_offset_t offset, int nprot)
511{
512 struct cdevmsg_mmap msg;
513 int error;
514
515 _init_cdevmsg(dev, &msg.msg, CDEV_CMD_MMAP);
516 if (port == NULL)
517 return(-1);
518 msg.offset = offset;
519 msg.nprot = nprot;
520 error = lwkt_domsg(port, &msg.msg.msg);
521 if (error == 0)
522 return(msg.result);
523 return(-1);
524}
525
526const char *
527dev_dname(dev_t dev)
528{
529 struct cdevsw *csw;
530
531 if ((csw = _devsw(dev)) != NULL)
532 return(csw->d_name);
533 return(NULL);
534}
535
536int
537dev_dflags(dev_t dev)
538{
539 struct cdevsw *csw;
540
541 if ((csw = _devsw(dev)) != NULL)
542 return(csw->d_flags);
f15db79e 543 return(0);
335dda38
MD
544}
545
546int
547dev_dmaj(dev_t dev)
548{
549 struct cdevsw *csw;
550
551 if ((csw = _devsw(dev)) != NULL)
552 return(csw->d_maj);
f15db79e 553 return(0);
335dda38
MD
554}
555
556lwkt_port_t
557dev_dport(dev_t dev)
558{
559 struct cdevsw *csw;
560
561 if ((csw = _devsw(dev)) != NULL) {
562 if (cdevport[major(dev)]) /* YYY too hackish */
563 return(cdevport[major(dev)]);
564 return(csw->d_port);
565 }
566 return(NULL);
567}
568
#if 0
/*
 * cdevsw[] array functions, moved from kern/kern_conf.c
 *
 * NOTE: currently compiled out; callers use _devsw() directly.
 */
struct cdevsw *
devsw(dev_t dev)
{
        return(_devsw(dev));
}
#endif
579
/*
 * Convert a cdevsw template into the real thing, filling in fields the
 * device left empty with appropriate defaults.  Any old-style entry point
 * the driver did not supply is pointed at the corresponding no-op/default
 * function, and a device without its own message port is given the shared
 * synchronous compatibility port.
 */
void
compile_devsw(struct cdevsw *devsw)
{
        static lwkt_port devsw_compat_port;

        /* lazily initialize the shared compatibility port on first use */
        if (devsw_compat_port.mp_beginmsg == NULL)
                init_default_cdevsw_port(&devsw_compat_port);

        if (devsw->old_open == NULL)
                devsw->old_open = noopen;
        if (devsw->old_close == NULL)
                devsw->old_close = noclose;
        if (devsw->old_read == NULL)
                devsw->old_read = noread;
        if (devsw->old_write == NULL)
                devsw->old_write = nowrite;
        if (devsw->old_ioctl == NULL)
                devsw->old_ioctl = noioctl;
        if (devsw->old_poll == NULL)
                devsw->old_poll = nopoll;
        if (devsw->old_mmap == NULL)
                devsw->old_mmap = nommap;
        if (devsw->old_strategy == NULL)
                devsw->old_strategy = nostrategy;
        if (devsw->old_dump == NULL)
                devsw->old_dump = nodump;
        if (devsw->old_psize == NULL)
                devsw->old_psize = nopsize;
        if (devsw->old_kqfilter == NULL)
                devsw->old_kqfilter = nokqfilter;

        /* devices without their own port dispatch via cdevsw_putport() */
        if (devsw->d_port == NULL)
                devsw->d_port = &devsw_compat_port;
}
618
619/*
620 * Add a cdevsw entry
621 */
622int
623cdevsw_add(struct cdevsw *newentry)
624{
625 compile_devsw(newentry);
626 if (newentry->d_maj < 0 || newentry->d_maj >= NUMCDEVSW) {
627 printf("%s: ERROR: driver has bogus cdevsw->d_maj = %d\n",
628 newentry->d_name, newentry->d_maj);
629 return (EINVAL);
630 }
631 if (cdevsw[newentry->d_maj]) {
632 printf("WARNING: \"%s\" is usurping \"%s\"'s cdevsw[]\n",
633 newentry->d_name, cdevsw[newentry->d_maj]->d_name);
634 }
635 cdevsw[newentry->d_maj] = newentry;
636 return (0);
637}
638
/*
 * Add a cdevsw entry and override the port.
 *
 * NOTE(review): the port override is installed only when cdevsw_add()
 * succeeds, but newentry->d_port is returned unconditionally -- even on a
 * failed add.  Looks intentional (callers get the compat port regardless);
 * confirm against callers.
 */
lwkt_port_t
cdevsw_add_override(struct cdevsw *newentry, lwkt_port_t port)
{
        int error;

        if ((error = cdevsw_add(newentry)) == 0)
                cdevport[newentry->d_maj] = port;
        return(newentry->d_port);
}
651
652lwkt_port_t
653cdevsw_dev_override(dev_t dev, lwkt_port_t port)
654{
655 struct cdevsw *csw;
656
657 KKASSERT(major(dev) >= 0 && major(dev) < NUMCDEVSW);
658 if ((csw = _devsw(dev)) != NULL) {
659 cdevport[major(dev)] = port;
660 return(csw->d_port);
661 }
662 return(NULL);
663}
664
665/*
666 * Remove a cdevsw entry
667 */
668int
669cdevsw_remove(struct cdevsw *oldentry)
670{
671 if (oldentry->d_maj < 0 || oldentry->d_maj >= NUMCDEVSW) {
672 printf("%s: ERROR: driver has bogus cdevsw->d_maj = %d\n",
673 oldentry->d_name, oldentry->d_maj);
674 return EINVAL;
675 }
676 cdevsw[oldentry->d_maj] = NULL;
677 cdevport[oldentry->d_maj] = NULL;
678 return 0;
679}
680