1/*
2 * Copyright (c) 2009 The DragonFly Project. All rights reserved.
3 *
4 * This code is derived from software contributed to The DragonFly Project
5 * by Alex Hornung <ahornung@gmail.com>
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
16 * distribution.
17 * 3. Neither the name of The DragonFly Project nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific, prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/types.h>
#include <sys/lock.h>
#include <sys/msgport.h>
#include <sys/msgport2.h>
#include <sys/spinlock2.h>
#include <sys/sysctl.h>
#include <sys/ucred.h>
#include <sys/sysref2.h>
#include <vfs/devfs/devfs.h>
#include <vfs/devfs/devfs_rules.h>

MALLOC_DEFINE(M_DEVFS, "devfs", "Device File System (devfs) allocations");
DEVFS_DECLARE_CLONE_BITMAP(ops_id);
/*
 * SYSREF Integration - reference counting, allocation,
 * sysid and syslink integration.
 */
static void devfs_cdev_terminate(cdev_t dev);
static struct sysref_class cdev_sysref_class = {
	.name = "cdev",
	.mtype = M_DEVFS,
	.proto = SYSREF_PROTO_DEV,
	.offset = offsetof(struct cdev, si_sysref),
	.objsize = sizeof(struct cdev),
	.mag_capacity = 32,
	.flags = 0,
	.ops = {
		.terminate = (sysref_terminate_func_t)devfs_cdev_terminate
	}
};

static struct objcache *devfs_node_cache;
static struct objcache *devfs_msg_cache;
static struct objcache *devfs_dev_cache;

static struct objcache_malloc_args devfs_node_malloc_args = {
	sizeof(struct devfs_node), M_DEVFS };
struct objcache_malloc_args devfs_msg_malloc_args = {
	sizeof(struct devfs_msg), M_DEVFS };
struct objcache_malloc_args devfs_dev_malloc_args = {
	sizeof(struct cdev), M_DEVFS };

static struct devfs_dev_head devfs_dev_list =
		TAILQ_HEAD_INITIALIZER(devfs_dev_list);
static struct devfs_mnt_head devfs_mnt_list =
		TAILQ_HEAD_INITIALIZER(devfs_mnt_list);
static struct devfs_chandler_head devfs_chandler_list =
		TAILQ_HEAD_INITIALIZER(devfs_chandler_list);
static struct devfs_alias_head devfs_alias_list =
		TAILQ_HEAD_INITIALIZER(devfs_alias_list);

struct lock devfs_lock;
static struct lwkt_port devfs_dispose_port;
static struct lwkt_port devfs_msg_port;
static struct thread *td_core;

static struct spinlock ino_lock;
static ino_t d_ino;
static int devfs_debug_enable;
static int devfs_run;

static ino_t devfs_fetch_ino(void);
static int devfs_gc_dirs(struct devfs_node *);
static int devfs_gc_links(struct devfs_node *, struct devfs_node *, size_t);
static int devfs_create_all_dev_worker(struct devfs_node *);
static int devfs_create_dev_worker(cdev_t, uid_t, gid_t, int);
static int devfs_destroy_dev_worker(cdev_t);
static int devfs_destroy_subnames_worker(char *);
static int devfs_destroy_dev_by_ops_worker(struct dev_ops *, int);
static int devfs_propagate_dev(cdev_t, int);
static int devfs_unlink_dev(cdev_t dev);
static void devfs_msg_exec(devfs_msg_t msg);

static int devfs_chandler_add_worker(char *, d_clone_t *);
static int devfs_chandler_del_worker(char *);

static void devfs_msg_autofree_reply(lwkt_port_t, lwkt_msg_t);
static void devfs_msg_core(void *);

static int devfs_find_device_by_name_worker(devfs_msg_t);
static int devfs_find_device_by_udev_worker(devfs_msg_t);

static struct vnode *devfs_inode_to_vnode_worker(struct devfs_node *, ino_t);

static int devfs_apply_reset_rules_caller(char *, int);
static int devfs_apply_reset_rules_worker(struct devfs_node *, int);

static int devfs_scan_callback_worker(devfs_scan_t *);

static struct devfs_node *devfs_resolve_or_create_dir(struct devfs_node *,
		char *, size_t, int);

static int devfs_make_alias_worker(struct devfs_alias *);
static int devfs_alias_remove(cdev_t);
static int devfs_alias_reap(void);
static int devfs_alias_propagate(struct devfs_alias *);
static int devfs_alias_apply(struct devfs_node *, struct devfs_alias *);
static int devfs_alias_check_create(struct devfs_node *);

static int devfs_clr_subnames_flag_worker(char *, uint32_t);
static int devfs_destroy_subnames_without_flag_worker(char *, uint32_t);

/*
 * devfs_debug() is a debug output function controlled by a sysctl and
 * tunable (devfs_debug_enable).  A message is passed to kvprintf() only
 * when its level is at or below that setting.
 */
147int
148devfs_debug(int level, char *fmt, ...)
149{
150 __va_list ap;
151
152 __va_start(ap, fmt);
153 if (level <= devfs_debug_enable)
154 kvprintf(fmt, ap);
155 __va_end(ap);
156
157 return 0;
158}
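
/*
 * Illustrative sketch (editor's example, not part of the original code):
 * a message is only printed when its level does not exceed the current
 * devfs_debug_enable setting, e.g.
 *
 *	devfs_debug(DEVFS_DEBUG_DEBUG, "allocated node for %s\n", name);
 *
 * is silent unless the debug sysctl/tunable has been raised to
 * DEVFS_DEBUG_DEBUG or higher; "name" here is a hypothetical variable.
 */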
159
/*
 * devfs_allocp() allocates a new devfs node with the specified
 * parameters.  The node is automatically linked into the topology if a
 * parent is specified.  Matching rules and aliases are also applied to
 * the new node.
 */
166struct devfs_node *
ca8d7677
MD
167devfs_allocp(devfs_nodetype devfsnodetype, char *name,
168 struct devfs_node *parent, struct mount *mp, cdev_t dev)
21864bc5
MD
169{
170 struct devfs_node *node = NULL;
171 size_t namlen = strlen(name);
21864bc5
MD
172
173 node = objcache_get(devfs_node_cache, M_WAITOK);
ca8d7677
MD
174 bzero(node, sizeof(*node));
175
21864bc5
MD
176 atomic_add_int(&(DEVFS_MNTDATA(mp)->leak_count), 1);
177
ca8d7677 178 node->d_dev = NULL;
21864bc5
MD
179 node->nchildren = 1;
180 node->mp = mp;
181 node->d_dir.d_ino = devfs_fetch_ino();
21864bc5 182
bc185c5a
AH
183 /*
184 * Cookie jar for children. Leave 0 and 1 for '.' and '..' entries
185 * respectively.
186 */
187 node->cookie_jar = 2;
188
189 /*
190 * Access Control members
191 */
192 node->mode = DEVFS_DEFAULT_MODE;
193 node->uid = DEVFS_DEFAULT_UID;
194 node->gid = DEVFS_DEFAULT_GID;
21864bc5 195
21864bc5
MD
196 switch (devfsnodetype) {
197 case Proot:
bc185c5a
AH
198 /*
199 * Ensure that we don't recycle the root vnode by marking it as
200 * linked into the topology.
201 */
894bbb25 202 node->flags |= DEVFS_NODE_LINKED;
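		/* FALLTHROUGH: the root node is also a directory */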
21864bc5
MD
203 case Pdir:
204 TAILQ_INIT(DEVFS_DENODE_HEAD(node));
205 node->d_dir.d_type = DT_DIR;
206 node->nchildren = 2;
207 break;
208
209 case Plink:
210 node->d_dir.d_type = DT_LNK;
211 break;
212
213 case Preg:
214 node->d_dir.d_type = DT_REG;
215 break;
216
217 case Pdev:
218 if (dev != NULL) {
219 node->d_dir.d_type = DT_CHR;
220 node->d_dev = dev;
21864bc5 221
bc185c5a
AH
222 node->mode = dev->si_perms;
223 node->uid = dev->si_uid;
224 node->gid = dev->si_gid;
21864bc5
MD
225
226 devfs_alias_check_create(node);
227 }
228 break;
229
230 default:
231 panic("devfs_allocp: unknown node type");
232 }
233
234 node->v_node = NULL;
235 node->node_type = devfsnodetype;
236
bc185c5a 237 /* Initialize the dirent structure of each devfs vnode */
ca8d7677 238 KKASSERT(namlen < 256);
21864bc5 239 node->d_dir.d_namlen = namlen;
ca8d7677 240 node->d_dir.d_name = kmalloc(namlen+1, M_DEVFS, M_WAITOK);
21864bc5
MD
241 memcpy(node->d_dir.d_name, name, namlen);
242 node->d_dir.d_name[namlen] = '\0';
243
244 /* Initialize the parent node element */
245 node->parent = parent;
246
247 /* Apply rules */
248 devfs_rule_check_apply(node);
249
bc185c5a 250 /* Initialize *time members */
ca8d7677
MD
251 nanotime(&node->atime);
252 node->mtime = node->ctime = node->atime;
253
254 /*
255 * Associate with parent as last step, clean out namecache
256 * reference.
257 */
21864bc5 258 if ((parent != NULL) &&
ca8d7677 259 ((parent->node_type == Proot) || (parent->node_type == Pdir))) {
21864bc5
MD
260 parent->nchildren++;
261 node->cookie = parent->cookie_jar++;
262 node->flags |= DEVFS_NODE_LINKED;
ca8d7677 263 TAILQ_INSERT_TAIL(DEVFS_DENODE_HEAD(parent), node, link);
21864bc5 264
ca8d7677
MD
265 /* This forces negative namecache lookups to clear */
266 ++mp->mnt_namecache_gen;
267 }
21864bc5 268
21864bc5
MD
269 return node;
270}
271
272/*
273 * devfs_allocv() allocates a new vnode based on a devfs node.
274 */
275int
276devfs_allocv(struct vnode **vpp, struct devfs_node *node)
277{
278 struct vnode *vp;
279 int error = 0;
280
281 KKASSERT(node);
282
21864bc5
MD
283try_again:
284 while ((vp = node->v_node) != NULL) {
285 error = vget(vp, LK_EXCLUSIVE);
286 if (error != ENOENT) {
287 *vpp = vp;
21864bc5
MD
288 goto out;
289 }
290 }
21864bc5 291
21864bc5
MD
292 if ((error = getnewvnode(VT_DEVFS, node->mp, vpp, 0, 0)) != 0)
293 goto out;
294
295 vp = *vpp;
296
21864bc5
MD
297 if (node->v_node != NULL) {
298 vp->v_type = VBAD;
299 vx_put(vp);
300 goto try_again;
301 }
302
303 vp->v_data = node;
304 node->v_node = vp;
21864bc5
MD
305
306 switch (node->node_type) {
307 case Proot:
308 vp->v_flag |= VROOT;
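		/* FALLTHROUGH: the root vnode is also a directory */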
309 case Pdir:
310 vp->v_type = VDIR;
311 break;
312
313 case Plink:
314 vp->v_type = VLNK;
315 break;
316
317 case Preg:
318 vp->v_type = VREG;
319 break;
320
321 case Pdev:
322 vp->v_type = VCHR;
21864bc5
MD
323 KKASSERT(node->d_dev);
324
bc185c5a
AH
325 vp->v_uminor = node->d_dev->si_uminor;
326 vp->v_umajor = 0;
327
328 v_associate_rdev(vp, node->d_dev);
329 vp->v_ops = &node->mp->mnt_vn_spec_ops;
21864bc5
MD
330 break;
331
332 default:
333 panic("devfs_allocv: unknown node type");
334 }
335
336out:
21864bc5
MD
337 return error;
338}
339
340/*
341 * devfs_allocvp allocates both a devfs node (with the given settings) and a vnode
342 * based on the newly created devfs node.
343 */
344int
345devfs_allocvp(struct mount *mp, struct vnode **vpp, devfs_nodetype devfsnodetype,
bc185c5a 346 char *name, struct devfs_node *parent, cdev_t dev)
21864bc5
MD
347{
348 struct devfs_node *node;
349
21864bc5 350 node = devfs_allocp(devfsnodetype, name, parent, mp, dev);
bc185c5a 351
21864bc5
MD
352 if (node != NULL)
353 devfs_allocv(vpp, node);
354 else
355 *vpp = NULL;
356
21864bc5
MD
357 return 0;
358}
359
360/*
ca8d7677
MD
361 * Destroy the devfs_node. The node must be unlinked from the topology.
362 *
363 * This function will also destroy any vnode association with the node
364 * and device.
365 *
366 * The cdev_t itself remains intact.
21864bc5
MD
367 */
368int
369devfs_freep(struct devfs_node *node)
370{
ca8d7677
MD
371 struct vnode *vp;
372
21864bc5 373 KKASSERT(node);
ca8d7677
MD
374 KKASSERT(((node->flags & DEVFS_NODE_LINKED) == 0) ||
375 (node->node_type == Proot));
376 KKASSERT((node->flags & DEVFS_DESTROYED) == 0);
21864bc5
MD
377
378 atomic_subtract_int(&(DEVFS_MNTDATA(node->mp)->leak_count), 1);
379 if (node->symlink_name) {
380 kfree(node->symlink_name, M_DEVFS);
381 node->symlink_name = NULL;
382 }
383
ca8d7677
MD
384 /*
385 * Remove the node from the orphan list if it is still on it.
386 */
387 if (node->flags & DEVFS_ORPHANED)
21864bc5
MD
388 devfs_tracer_del_orphan(node);
389
ca8d7677
MD
390 /*
391 * Disassociate the vnode from the node. This also prevents the
392 * vnode's reclaim code from double-freeing the node.
e23485a5
MD
393 *
394 * The vget is needed to safely modify the vp. It also serves
395 * to cycle the refs and terminate the vnode if it happens to
396 * be inactive, otherwise namecache references may not get cleared.
ca8d7677 397 */
e23485a5
MD
398 while ((vp = node->v_node) != NULL) {
399 if (vget(vp, LK_EXCLUSIVE | LK_RETRY) != 0)
400 break;
9b823501 401 v_release_rdev(vp);
ca8d7677
MD
402 vp->v_data = NULL;
403 node->v_node = NULL;
e23485a5 404 vput(vp);
ca8d7677
MD
405 }
406 if (node->d_dir.d_name)
407 kfree(node->d_dir.d_name, M_DEVFS);
408 node->flags |= DEVFS_DESTROYED;
409
21864bc5
MD
410 objcache_put(devfs_node_cache, node);
411
412 return 0;
413}
414
415/*
ca8d7677
MD
416 * Unlink the devfs node from the topology and add it to the orphan list.
417 * The node will later be destroyed by freep.
418 *
419 * Any vnode association, including the v_rdev and v_data, remains intact
420 * until the freep.
21864bc5
MD
421 */
422int
423devfs_unlinkp(struct devfs_node *node)
424{
425 struct devfs_node *parent;
426 KKASSERT(node);
427
	/*
	 * Add the node to the orphan list so it is referenced somewhere
	 * and we don't leak it.
	 */
21864bc5 432 devfs_tracer_add_orphan(node);
bc185c5a 433
21864bc5
MD
434 parent = node->parent;
435
ca8d7677
MD
436 /*
437 * If the parent is known we can unlink the node out of the topology
438 */
21864bc5
MD
439 if (parent) {
440 TAILQ_REMOVE(DEVFS_DENODE_HEAD(parent), node, link);
441 parent->nchildren--;
442 KKASSERT((parent->nchildren >= 0));
443 node->flags &= ~DEVFS_NODE_LINKED;
444 }
445 node->parent = NULL;
21864bc5
MD
446 return 0;
447}
448
/*
 * devfs_reaperp() is a recursive function that walks the whole topology,
 * unlinking and freeing all devfs nodes.
 */
453int
454devfs_reaperp(struct devfs_node *node)
455{
456 struct devfs_node *node1, *node2;
457
21864bc5 458 if ((node->node_type == Proot) || (node->node_type == Pdir)) {
21864bc5 459 if (node->nchildren > 2) {
ca8d7677 460 TAILQ_FOREACH_MUTABLE(node1, DEVFS_DENODE_HEAD(node),
bc185c5a 461 link, node2) {
21864bc5
MD
462 devfs_reaperp(node1);
463 }
464 }
465 }
21864bc5
MD
466 devfs_unlinkp(node);
467 devfs_freep(node);
468
469 return 0;
470}
471
/*
 * devfs_gc() is the devfs garbage collector.  It takes care of unlinking
 * and freeing a node, but also removes empty directories and any links
 * that point to the node being deleted via the devfs auto-link mechanism.
 */
477int
478devfs_gc(struct devfs_node *node)
479{
480 struct devfs_node *root_node = DEVFS_MNTDATA(node->mp)->root_node;
481
482 devfs_gc_links(root_node, node, node->nlinks);
483 devfs_unlinkp(node);
484 devfs_gc_dirs(root_node);
485
486 devfs_freep(node);
487
488 return 0;
489}
490
491/*
492 * devfs_gc_dirs() is a helper function for devfs_gc, unlinking and freeing
493 * empty directories.
494 */
495static int
496devfs_gc_dirs(struct devfs_node *node)
497{
498 struct devfs_node *node1, *node2;
499
21864bc5 500 if ((node->node_type == Proot) || (node->node_type == Pdir)) {
21864bc5 501 if (node->nchildren > 2) {
ca8d7677 502 TAILQ_FOREACH_MUTABLE(node1, DEVFS_DENODE_HEAD(node),
bc185c5a 503 link, node2) {
21864bc5
MD
504 devfs_gc_dirs(node1);
505 }
506 }
507
508 if (node->nchildren == 2) {
21864bc5
MD
509 devfs_unlinkp(node);
510 devfs_freep(node);
511 }
512 }
513
514 return 0;
515}
516
/*
 * devfs_gc_links() is a helper function for devfs_gc, unlinking and freeing
 * auto-linked nodes that link to the node being deleted.
 */
521static int
ca8d7677
MD
522devfs_gc_links(struct devfs_node *node, struct devfs_node *target,
523 size_t nlinks)
21864bc5
MD
524{
525 struct devfs_node *node1, *node2;
526
527 if (nlinks > 0) {
528 if ((node->node_type == Proot) || (node->node_type == Pdir)) {
21864bc5 529 if (node->nchildren > 2) {
bc185c5a
AH
530 TAILQ_FOREACH_MUTABLE(node1, DEVFS_DENODE_HEAD(node),
531 link, node2) {
21864bc5
MD
532 nlinks = devfs_gc_links(node1, target, nlinks);
533 }
534 }
535 } else if (node->link_target == target) {
536 nlinks--;
537 devfs_unlinkp(node);
538 devfs_freep(node);
539 }
540 }
541
542 KKASSERT(nlinks >= 0);
543
544 return nlinks;
545}
546
547/*
ca8d7677
MD
548 * devfs_create_dev() is the asynchronous entry point for device creation.
549 * It just sends a message with the relevant details to the devfs core.
550 *
551 * This function will reference the passed device. The reference is owned
552 * by devfs and represents all of the device's node associations.
21864bc5
MD
553 */
554int
555devfs_create_dev(cdev_t dev, uid_t uid, gid_t gid, int perms)
556{
ca8d7677 557 reference_dev(dev);
bc185c5a
AH
558 devfs_msg_send_dev(DEVFS_DEVICE_CREATE, dev, uid, gid, perms);
559
21864bc5
MD
560 return 0;
561}
562
563/*
ca8d7677
MD
564 * devfs_destroy_dev() is the asynchronous entry point for device destruction.
565 * It just sends a message with the relevant details to the devfs core.
21864bc5
MD
566 */
567int
568devfs_destroy_dev(cdev_t dev)
569{
570 devfs_msg_send_dev(DEVFS_DEVICE_DESTROY, dev, 0, 0, 0);
571 return 0;
572}
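
/*
 * Illustrative sketch (editor's example, not part of the original file):
 * a driver holding a cdev_t, typically obtained from make_dev(), would
 * go through these asynchronous entry points roughly as follows.  The
 * uid/gid/permission values are arbitrary.
 */
#if 0
	devfs_create_dev(dev, 0, 0, 0600);
	/* ... device is usable, nodes appear on all devfs mounts ... */
	devfs_destroy_dev(dev);
#endif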
573
574/*
ca8d7677
MD
575 * devfs_mount_add() is the synchronous entry point for adding a new devfs
576 * mount. It sends a synchronous message with the relevant details to the
577 * devfs core.
21864bc5
MD
578 */
579int
580devfs_mount_add(struct devfs_mnt_data *mnt)
581{
582 devfs_msg_t msg;
583
584 msg = devfs_msg_get();
ca8d7677 585 msg->mdv_mnt = mnt;
21864bc5
MD
586 msg = devfs_msg_send_sync(DEVFS_MOUNT_ADD, msg);
587 devfs_msg_put(msg);
588
589 return 0;
590}
591
592/*
593 * devfs_mount_del() is the synchronous entry point for removing a devfs mount.
594 * It sends a synchronous message with the relevant details to the devfs core.
595 */
596int
597devfs_mount_del(struct devfs_mnt_data *mnt)
598{
599 devfs_msg_t msg;
600
601 msg = devfs_msg_get();
ca8d7677 602 msg->mdv_mnt = mnt;
21864bc5
MD
603 msg = devfs_msg_send_sync(DEVFS_MOUNT_DEL, msg);
604 devfs_msg_put(msg);
605
606 return 0;
607}
608
609/*
bc185c5a
AH
610 * devfs_destroy_subnames() is the synchronous entry point for device
611 * destruction by subname. It just sends a message with the relevant details to
612 * the devfs core.
21864bc5
MD
613 */
614int
615devfs_destroy_subnames(char *name)
616{
ca8d7677
MD
617 devfs_msg_t msg;
618
619 msg = devfs_msg_get();
620 msg->mdv_load = name;
621 msg = devfs_msg_send_sync(DEVFS_DESTROY_SUBNAMES, msg);
622 devfs_msg_put(msg);
623 return 0;
624}
625
626int
627devfs_clr_subnames_flag(char *name, uint32_t flag)
628{
629 devfs_msg_t msg;
630
631 msg = devfs_msg_get();
632 msg->mdv_flags.name = name;
633 msg->mdv_flags.flag = flag;
634 msg = devfs_msg_send_sync(DEVFS_CLR_SUBNAMES_FLAG, msg);
635 devfs_msg_put(msg);
636
637 return 0;
638}
639
640int
641devfs_destroy_subnames_without_flag(char *name, uint32_t flag)
642{
643 devfs_msg_t msg;
644
645 msg = devfs_msg_get();
646 msg->mdv_flags.name = name;
647 msg->mdv_flags.flag = flag;
648 msg = devfs_msg_send_sync(DEVFS_DESTROY_SUBNAMES_WO_FLAG, msg);
649 devfs_msg_put(msg);
650
21864bc5
MD
651 return 0;
652}
653
654/*
ca8d7677
MD
655 * devfs_create_all_dev is the asynchronous entry point to trigger device
656 * node creation. It just sends a message with the relevant details to
657 * the devfs core.
21864bc5
MD
658 */
659int
660devfs_create_all_dev(struct devfs_node *root)
661{
662 devfs_msg_send_generic(DEVFS_CREATE_ALL_DEV, root);
663 return 0;
664}
665
666/*
ca8d7677
MD
667 * devfs_destroy_dev_by_ops is the asynchronous entry point to destroy all
668 * devices with a specific set of dev_ops and minor. It just sends a
669 * message with the relevant details to the devfs core.
21864bc5
MD
670 */
671int
672devfs_destroy_dev_by_ops(struct dev_ops *ops, int minor)
673{
674 devfs_msg_send_ops(DEVFS_DESTROY_DEV_BY_OPS, ops, minor);
675 return 0;
676}
677
678/*
ca8d7677
MD
679 * devfs_clone_handler_add is the synchronous entry point to add a new
680 * clone handler. It just sends a message with the relevant details to
681 * the devfs core.
21864bc5
MD
682 */
683int
684devfs_clone_handler_add(char *name, d_clone_t *nhandler)
685{
ca8d7677
MD
686 devfs_msg_t msg;
687
688 msg = devfs_msg_get();
d0fe8596 689 msg->mdv_chandler.name = name;
ca8d7677
MD
690 msg->mdv_chandler.nhandler = nhandler;
691 msg = devfs_msg_send_sync(DEVFS_CHANDLER_ADD, msg);
692 devfs_msg_put(msg);
21864bc5
MD
693 return 0;
694}
695
696/*
ca8d7677
MD
697 * devfs_clone_handler_del is the synchronous entry point to remove a
698 * clone handler. It just sends a message with the relevant details to
699 * the devfs core.
21864bc5
MD
700 */
701int
702devfs_clone_handler_del(char *name)
703{
ca8d7677
MD
704 devfs_msg_t msg;
705
706 msg = devfs_msg_get();
d0fe8596 707 msg->mdv_chandler.name = name;
ca8d7677
MD
708 msg->mdv_chandler.nhandler = NULL;
709 msg = devfs_msg_send_sync(DEVFS_CHANDLER_DEL, msg);
710 devfs_msg_put(msg);
21864bc5
MD
711 return 0;
712}
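
/*
 * Illustrative sketch (hypothetical driver code, not from this file):
 * registering and removing a clone handler for a "foo" device class.
 * The handler signature mirrors the way devfs_clone() below invokes it,
 * i.e. it receives a struct dev_clone_args and hands the new device
 * back through ap->a_dev.
 */
#if 0
static int
foo_clone(struct dev_clone_args *ap)
{
	/* create or look up the per-open cdev for ap->a_name here */
	ap->a_dev = NULL;		/* driver-specific */
	return 0;
}

	devfs_clone_handler_add("foo", foo_clone);
	/* ... */
	devfs_clone_handler_del("foo");
#endif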
713
714/*
ca8d7677
MD
715 * devfs_find_device_by_name is the synchronous entry point to find a
716 * device given its name. It sends a synchronous message with the
717 * relevant details to the devfs core and returns the answer.
21864bc5
MD
718 */
719cdev_t
720devfs_find_device_by_name(const char *fmt, ...)
721{
722 cdev_t found = NULL;
723 devfs_msg_t msg;
724 char target[PATH_MAX+1];
725 __va_list ap;
726 int i;
727
728 if (fmt == NULL)
729 return NULL;
730
21864bc5
MD
731 __va_start(ap, fmt);
732 i = kvcprintf(fmt, NULL, target, 10, ap);
733 target[i] = '\0';
734 __va_end(ap);
735
21864bc5 736 msg = devfs_msg_get();
ca8d7677 737 msg->mdv_name = target;
21864bc5 738 msg = devfs_msg_send_sync(DEVFS_FIND_DEVICE_BY_NAME, msg);
ca8d7677 739 found = msg->mdv_cdev;
21864bc5
MD
740 devfs_msg_put(msg);
741
21864bc5
MD
742 return found;
743}
744
745/*
ca8d7677
MD
746 * devfs_find_device_by_udev is the synchronous entry point to find a
747 * device given its udev number. It sends a synchronous message with
748 * the relevant details to the devfs core and returns the answer.
21864bc5
MD
749 */
750cdev_t
751devfs_find_device_by_udev(udev_t udev)
752{
753 cdev_t found = NULL;
754 devfs_msg_t msg;
755
756 msg = devfs_msg_get();
ca8d7677 757 msg->mdv_udev = udev;
21864bc5 758 msg = devfs_msg_send_sync(DEVFS_FIND_DEVICE_BY_UDEV, msg);
ca8d7677 759 found = msg->mdv_cdev;
21864bc5
MD
760 devfs_msg_put(msg);
761
ca8d7677
MD
762 devfs_debug(DEVFS_DEBUG_DEBUG,
763 "devfs_find_device_by_udev found? %s -end:3-\n",
764 ((found) ? found->si_name:"NO"));
21864bc5
MD
765 return found;
766}
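
/*
 * Illustrative sketch (editor's example): because the name argument is a
 * printf-style format, a lookup can be composed on the fly.  The device
 * name used here is only an example.
 */
#if 0
	cdev_t dev;

	dev = devfs_find_device_by_name("ttyv%d", 0);
	if (dev == NULL)
		return (ENOENT);
#endif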
767
fa7e6f37
AH
768struct vnode *
769devfs_inode_to_vnode(struct mount *mp, ino_t target)
770{
771 struct vnode *vp = NULL;
772 devfs_msg_t msg;
773
774 if (mp == NULL)
775 return NULL;
776
777 msg = devfs_msg_get();
778 msg->mdv_ino.mp = mp;
779 msg->mdv_ino.ino = target;
780 msg = devfs_msg_send_sync(DEVFS_INODE_TO_VNODE, msg);
781 vp = msg->mdv_ino.vp;
782 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
783 devfs_msg_put(msg);
784
785 return vp;
786}
787
21864bc5 788/*
ca8d7677
MD
789 * devfs_make_alias is the asynchronous entry point to register an alias
790 * for a device. It just sends a message with the relevant details to the
791 * devfs core.
21864bc5
MD
792 */
793int
794devfs_make_alias(char *name, cdev_t dev_target)
795{
ca8d7677 796 struct devfs_alias *alias;
5298e788
AH
797 size_t len;
798
799 len = strlen(name);
ca8d7677
MD
800
801 alias = kmalloc(sizeof(struct devfs_alias), M_DEVFS, M_WAITOK);
5298e788
AH
802 alias->name = kmalloc(len + 1, M_DEVFS, M_WAITOK);
803 memcpy(alias->name, name, len + 1);
804 alias->namlen = len;
21864bc5
MD
805 alias->dev_target = dev_target;
806
807 devfs_msg_send_generic(DEVFS_MAKE_ALIAS, alias);
808 return 0;
809}
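
/*
 * Illustrative sketch (hypothetical): publish an additional name for a
 * device the caller already holds, e.g. a "cdrom" alias for an optical
 * drive's cdev.
 */
#if 0
	devfs_make_alias("cdrom", cd_dev);	/* cd_dev is hypothetical */
#endif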
810
811/*
ca8d7677
MD
812 * devfs_apply_rules is the asynchronous entry point to trigger application
813 * of all rules. It just sends a message with the relevant details to the
814 * devfs core.
21864bc5
MD
815 */
816int
817devfs_apply_rules(char *mntto)
818{
819 char *new_name;
820 size_t namelen;
821
822 namelen = strlen(mntto) + 1;
21864bc5 823 new_name = kmalloc(namelen, M_DEVFS, M_WAITOK);
21864bc5 824 memcpy(new_name, mntto, namelen);
21864bc5 825 devfs_msg_send_name(DEVFS_APPLY_RULES, new_name);
bc185c5a 826
21864bc5
MD
827 return 0;
828}
829
830/*
bc185c5a
AH
831 * devfs_reset_rules is the asynchronous entry point to trigger reset of all
832 * rules. It just sends a message with the relevant details to the devfs core.
21864bc5
MD
833 */
834int
835devfs_reset_rules(char *mntto)
836{
837 char *new_name;
838 size_t namelen;
839
840 namelen = strlen(mntto) + 1;
21864bc5 841 new_name = kmalloc(namelen, M_DEVFS, M_WAITOK);
21864bc5 842 memcpy(new_name, mntto, namelen);
21864bc5 843 devfs_msg_send_name(DEVFS_RESET_RULES, new_name);
bc185c5a 844
21864bc5
MD
845 return 0;
846}
847
848
/*
 * devfs_scan_callback is the synchronous entry point to call a callback
 * on all cdevs.
 * It just sends a message with the relevant details to the devfs core.
 */
854int
855devfs_scan_callback(devfs_scan_t *callback)
856{
857 devfs_msg_t msg;
858
21864bc5
MD
859 KKASSERT(sizeof(callback) == sizeof(void *));
860
861 msg = devfs_msg_get();
ca8d7677 862 msg->mdv_load = callback;
21864bc5
MD
863 msg = devfs_msg_send_sync(DEVFS_SCAN_CALLBACK, msg);
864 devfs_msg_put(msg);
865
866 return 0;
867}
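
/*
 * Illustrative sketch (hypothetical callback): judging by the worker at
 * the bottom of this file, a devfs_scan_t simply receives each cdev_t on
 * the global device list; a void return is assumed here.
 */
#if 0
static void
example_scan(cdev_t dev)
{
	kprintf("devfs: saw %s\n", dev->si_name);
}

	devfs_scan_callback(example_scan);
#endif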
868
869
870/*
bc185c5a
AH
871 * Acts as a message drain. Any message that is replied to here gets destroyed
872 * and the memory freed.
21864bc5
MD
873 */
874static void
875devfs_msg_autofree_reply(lwkt_port_t port, lwkt_msg_t msg)
876{
877 devfs_msg_put((devfs_msg_t)msg);
878}
879
880/*
881 * devfs_msg_get allocates a new devfs msg and returns it.
882 */
devfs_msg_t
devfs_msg_get(void)
885{
886 return objcache_get(devfs_msg_cache, M_WAITOK);
887}
888
889/*
890 * devfs_msg_put deallocates a given devfs msg.
891 */
892int
893devfs_msg_put(devfs_msg_t msg)
894{
895 objcache_put(devfs_msg_cache, msg);
896 return 0;
897}
898
899/*
900 * devfs_msg_send is the generic asynchronous message sending facility
901 * for devfs. By default the reply port is the automatic disposal port.
d0fe8596
MD
902 *
903 * If the current thread is the devfs_msg_port thread we execute the
904 * operation synchronously.
21864bc5 905 */
d0fe8596 906void
21864bc5
MD
907devfs_msg_send(uint32_t cmd, devfs_msg_t devfs_msg)
908{
909 lwkt_port_t port = &devfs_msg_port;
910
d0fe8596 911 lwkt_initmsg(&devfs_msg->hdr, &devfs_dispose_port, 0);
21864bc5 912
d0fe8596 913 devfs_msg->hdr.u.ms_result = cmd;
21864bc5 914
d0fe8596
MD
915 if (port->mpu_td == curthread) {
916 devfs_msg_exec(devfs_msg);
917 lwkt_replymsg(&devfs_msg->hdr, 0);
918 } else {
919 lwkt_sendmsg(port, (lwkt_msg_t)devfs_msg);
920 }
21864bc5
MD
921}
922
923/*
924 * devfs_msg_send_sync is the generic synchronous message sending
925 * facility for devfs. It initializes a local reply port and waits
926 * for the core's answer. This answer is then returned.
927 */
928devfs_msg_t
929devfs_msg_send_sync(uint32_t cmd, devfs_msg_t devfs_msg)
930{
931 struct lwkt_port rep_port;
932 devfs_msg_t msg_incoming;
933 lwkt_port_t port = &devfs_msg_port;
934
935 lwkt_initport_thread(&rep_port, curthread);
d0fe8596 936 lwkt_initmsg(&devfs_msg->hdr, &rep_port, 0);
21864bc5 937
d0fe8596 938 devfs_msg->hdr.u.ms_result = cmd;
21864bc5 939
d0fe8596 940 lwkt_sendmsg(port, (lwkt_msg_t)devfs_msg);
21864bc5
MD
941 msg_incoming = lwkt_waitport(&rep_port, 0);
942
943 return msg_incoming;
944}
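
/*
 * Illustrative sketch (mirrors the pattern used by the entry points in
 * this file): a synchronous request allocates a message, sends it to the
 * core, waits for the reply and releases the message again.  DEVFS_SYNC
 * is used here only because it is a no-op in the core.
 */
#if 0
	devfs_msg_t msg;

	msg = devfs_msg_get();
	msg = devfs_msg_send_sync(DEVFS_SYNC, msg);
	devfs_msg_put(msg);
#endif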
945
946/*
947 * sends a message with a generic argument.
948 */
d0fe8596 949void
21864bc5
MD
950devfs_msg_send_generic(uint32_t cmd, void *load)
951{
d0fe8596 952 devfs_msg_t devfs_msg = devfs_msg_get();
21864bc5 953
d0fe8596
MD
954 devfs_msg->mdv_load = load;
955 devfs_msg_send(cmd, devfs_msg);
21864bc5
MD
956}
957
958/*
959 * sends a message with a name argument.
960 */
d0fe8596 961void
21864bc5
MD
962devfs_msg_send_name(uint32_t cmd, char *name)
963{
d0fe8596 964 devfs_msg_t devfs_msg = devfs_msg_get();
21864bc5 965
d0fe8596
MD
966 devfs_msg->mdv_name = name;
967 devfs_msg_send(cmd, devfs_msg);
21864bc5
MD
968}
969
970/*
971 * sends a message with a mount argument.
972 */
d0fe8596 973void
21864bc5
MD
974devfs_msg_send_mount(uint32_t cmd, struct devfs_mnt_data *mnt)
975{
d0fe8596 976 devfs_msg_t devfs_msg = devfs_msg_get();
21864bc5 977
d0fe8596
MD
978 devfs_msg->mdv_mnt = mnt;
979 devfs_msg_send(cmd, devfs_msg);
21864bc5
MD
980}
981
982/*
983 * sends a message with an ops argument.
984 */
d0fe8596 985void
21864bc5
MD
986devfs_msg_send_ops(uint32_t cmd, struct dev_ops *ops, int minor)
987{
d0fe8596 988 devfs_msg_t devfs_msg = devfs_msg_get();
21864bc5 989
d0fe8596
MD
990 devfs_msg->mdv_ops.ops = ops;
991 devfs_msg->mdv_ops.minor = minor;
992 devfs_msg_send(cmd, devfs_msg);
21864bc5
MD
993}
994
995/*
996 * sends a message with a clone handler argument.
997 */
d0fe8596 998void
21864bc5
MD
999devfs_msg_send_chandler(uint32_t cmd, char *name, d_clone_t handler)
1000{
d0fe8596 1001 devfs_msg_t devfs_msg = devfs_msg_get();
21864bc5 1002
d0fe8596
MD
1003 devfs_msg->mdv_chandler.name = name;
1004 devfs_msg->mdv_chandler.nhandler = handler;
1005 devfs_msg_send(cmd, devfs_msg);
21864bc5
MD
1006}
1007
1008/*
1009 * sends a message with a device argument.
1010 */
d0fe8596 1011void
21864bc5
MD
1012devfs_msg_send_dev(uint32_t cmd, cdev_t dev, uid_t uid, gid_t gid, int perms)
1013{
d0fe8596
MD
1014 devfs_msg_t devfs_msg = devfs_msg_get();
1015
1016 devfs_msg->mdv_dev.dev = dev;
ca8d7677
MD
1017 devfs_msg->mdv_dev.uid = uid;
1018 devfs_msg->mdv_dev.gid = gid;
1019 devfs_msg->mdv_dev.perms = perms;
21864bc5 1020
d0fe8596 1021 devfs_msg_send(cmd, devfs_msg);
21864bc5
MD
1022}
1023
1024/*
1025 * sends a message with a link argument.
1026 */
d0fe8596 1027void
21864bc5
MD
1028devfs_msg_send_link(uint32_t cmd, char *name, char *target, struct mount *mp)
1029{
d0fe8596
MD
1030 devfs_msg_t devfs_msg = devfs_msg_get();
1031
1032 devfs_msg->mdv_link.name = name;
ca8d7677
MD
1033 devfs_msg->mdv_link.target = target;
1034 devfs_msg->mdv_link.mp = mp;
d0fe8596 1035 devfs_msg_send(cmd, devfs_msg);
21864bc5
MD
1036}
1037
1038/*
1039 * devfs_msg_core is the main devfs thread. It handles all incoming messages
1040 * and calls the relevant worker functions. By using messages it's assured
1041 * that events occur in the correct order.
1042 */
1043static void
1044devfs_msg_core(void *arg)
1045{
ca8d7677 1046 devfs_msg_t msg;
21864bc5 1047
d0fe8596 1048 devfs_run = 1;
21864bc5 1049 lwkt_initport_thread(&devfs_msg_port, curthread);
bc185c5a 1050 wakeup(td_core);
21864bc5 1051
d0fe8596 1052 while (devfs_run) {
ca8d7677 1053 msg = (devfs_msg_t)lwkt_waitport(&devfs_msg_port, 0);
bc185c5a 1054 devfs_debug(DEVFS_DEBUG_DEBUG,
d0fe8596
MD
1055 "devfs_msg_core, new msg: %x\n",
1056 (unsigned int)msg->hdr.u.ms_result);
1057 devfs_msg_exec(msg);
1058 lwkt_replymsg(&msg->hdr, 0);
1059 }
1060 wakeup(td_core);
1061 lwkt_exit();
1062}
21864bc5 1063
d0fe8596
MD
1064static void
1065devfs_msg_exec(devfs_msg_t msg)
1066{
1067 struct devfs_mnt_data *mnt;
1068 struct devfs_node *node;
1069 cdev_t dev;
fa7e6f37 1070
d0fe8596
MD
1071 /*
1072 * Acquire the devfs lock to ensure safety of all called functions
1073 */
1074 lockmgr(&devfs_lock, LK_EXCLUSIVE);
1075
1076 switch (msg->hdr.u.ms_result) {
1077 case DEVFS_DEVICE_CREATE:
1078 dev = msg->mdv_dev.dev;
1079 devfs_create_dev_worker(dev,
1080 msg->mdv_dev.uid,
1081 msg->mdv_dev.gid,
1082 msg->mdv_dev.perms);
1083 break;
1084 case DEVFS_DEVICE_DESTROY:
1085 dev = msg->mdv_dev.dev;
1086 devfs_destroy_dev_worker(dev);
1087 break;
1088 case DEVFS_DESTROY_SUBNAMES:
1089 devfs_destroy_subnames_worker(msg->mdv_load);
1090 break;
1091 case DEVFS_DESTROY_DEV_BY_OPS:
1092 devfs_destroy_dev_by_ops_worker(msg->mdv_ops.ops,
1093 msg->mdv_ops.minor);
1094 break;
1095 case DEVFS_CREATE_ALL_DEV:
1096 node = (struct devfs_node *)msg->mdv_load;
1097 devfs_create_all_dev_worker(node);
1098 break;
1099 case DEVFS_MOUNT_ADD:
1100 mnt = msg->mdv_mnt;
1101 TAILQ_INSERT_TAIL(&devfs_mnt_list, mnt, link);
1102 devfs_create_all_dev_worker(mnt->root_node);
1103 break;
1104 case DEVFS_MOUNT_DEL:
1105 mnt = msg->mdv_mnt;
1106 TAILQ_REMOVE(&devfs_mnt_list, mnt, link);
1107 devfs_reaperp(mnt->root_node);
1108 if (mnt->leak_count) {
1109 devfs_debug(DEVFS_DEBUG_SHOW,
1110 "Leaked %d devfs_node elements!\n",
1111 mnt->leak_count);
ca8d7677 1112 }
d0fe8596
MD
1113 break;
1114 case DEVFS_CHANDLER_ADD:
1115 devfs_chandler_add_worker(msg->mdv_chandler.name,
1116 msg->mdv_chandler.nhandler);
1117 break;
1118 case DEVFS_CHANDLER_DEL:
1119 devfs_chandler_del_worker(msg->mdv_chandler.name);
1120 break;
1121 case DEVFS_FIND_DEVICE_BY_NAME:
1122 devfs_find_device_by_name_worker(msg);
1123 break;
1124 case DEVFS_FIND_DEVICE_BY_UDEV:
1125 devfs_find_device_by_udev_worker(msg);
1126 break;
1127 case DEVFS_MAKE_ALIAS:
1128 devfs_make_alias_worker((struct devfs_alias *)msg->mdv_load);
1129 break;
1130 case DEVFS_APPLY_RULES:
1131 devfs_apply_reset_rules_caller(msg->mdv_name, 1);
1132 break;
1133 case DEVFS_RESET_RULES:
1134 devfs_apply_reset_rules_caller(msg->mdv_name, 0);
1135 break;
1136 case DEVFS_SCAN_CALLBACK:
1137 devfs_scan_callback_worker((devfs_scan_t *)msg->mdv_load);
1138 break;
1139 case DEVFS_CLR_SUBNAMES_FLAG:
1140 devfs_clr_subnames_flag_worker(msg->mdv_flags.name,
1141 msg->mdv_flags.flag);
1142 break;
1143 case DEVFS_DESTROY_SUBNAMES_WO_FLAG:
1144 devfs_destroy_subnames_without_flag_worker(msg->mdv_flags.name,
1145 msg->mdv_flags.flag);
1146 break;
1147 case DEVFS_INODE_TO_VNODE:
1148 msg->mdv_ino.vp = devfs_inode_to_vnode_worker(
1149 DEVFS_MNTDATA(msg->mdv_ino.mp)->root_node,
1150 msg->mdv_ino.ino);
1151 break;
1152 case DEVFS_TERMINATE_CORE:
1153 devfs_run = 0;
1154 break;
1155 case DEVFS_SYNC:
1156 break;
1157 default:
1158 devfs_debug(DEVFS_DEBUG_WARNING,
1159 "devfs_msg_core: unknown message "
1160 "received at core\n");
1161 break;
ca8d7677 1162 }
d0fe8596 1163 lockmgr(&devfs_lock, LK_RELEASE);
21864bc5
MD
1164}
1165
1166/*
1167 * Worker function to insert a new dev into the dev list and initialize its
1168 * permissions. It also calls devfs_propagate_dev which in turn propagates
1169 * the change to all mount points.
ca8d7677
MD
1170 *
1171 * The passed dev is already referenced. This reference is eaten by this
1172 * function and represents the dev's linkage into devfs_dev_list.
21864bc5
MD
1173 */
1174static int
1175devfs_create_dev_worker(cdev_t dev, uid_t uid, gid_t gid, int perms)
1176{
1177 KKASSERT(dev);
21864bc5
MD
1178
1179 dev->si_uid = uid;
1180 dev->si_gid = gid;
1181 dev->si_perms = perms;
1182
1183 devfs_link_dev(dev);
21864bc5
MD
1184 devfs_propagate_dev(dev, 1);
1185
21864bc5
MD
1186 return 0;
1187}
1188
1189/*
1190 * Worker function to delete a dev from the dev list and free the cdev.
1191 * It also calls devfs_propagate_dev which in turn propagates the change
1192 * to all mount points.
1193 */
1194static int
1195devfs_destroy_dev_worker(cdev_t dev)
1196{
ca8d7677
MD
1197 int error;
1198
21864bc5
MD
1199 KKASSERT(dev);
1200 KKASSERT((lockstatus(&devfs_lock, curthread)) == LK_EXCLUSIVE);
1201
ca8d7677 1202 error = devfs_unlink_dev(dev);
21864bc5 1203 devfs_propagate_dev(dev, 0);
ca8d7677
MD
1204 if (error == 0)
1205 release_dev(dev); /* link ref */
21864bc5
MD
1206 release_dev(dev);
1207 release_dev(dev);
21864bc5 1208
21864bc5
MD
1209 return 0;
1210}
1211
1212/*
1213 * Worker function to destroy all devices with a certain basename.
1214 * Calls devfs_destroy_dev_worker for the actual destruction.
1215 */
1216static int
1217devfs_destroy_subnames_worker(char *name)
1218{
1219 cdev_t dev, dev1;
21864bc5
MD
1220 size_t len = strlen(name);
1221
ca8d7677 1222 TAILQ_FOREACH_MUTABLE(dev, &devfs_dev_list, link, dev1) {
bc185c5a
AH
1223 if ((!strncmp(dev->si_name, name, len)) &&
1224 (dev->si_name[len] != '\0')) {
1225 devfs_destroy_dev_worker(dev);
21864bc5 1226 }
ca8d7677
MD
1227 }
1228 return 0;
1229}
1230
1231static int
1232devfs_clr_subnames_flag_worker(char *name, uint32_t flag)
1233{
1234 cdev_t dev, dev1;
1235 size_t len = strlen(name);
1236
1237 TAILQ_FOREACH_MUTABLE(dev, &devfs_dev_list, link, dev1) {
bc185c5a
AH
1238 if ((!strncmp(dev->si_name, name, len)) &&
1239 (dev->si_name[len] != '\0')) {
1240 dev->si_flags &= ~flag;
ca8d7677
MD
1241 }
1242 }
1243
1244 return 0;
1245}
1246
1247static int
1248devfs_destroy_subnames_without_flag_worker(char *name, uint32_t flag)
1249{
1250 cdev_t dev, dev1;
1251 size_t len = strlen(name);
1252
1253 TAILQ_FOREACH_MUTABLE(dev, &devfs_dev_list, link, dev1) {
bc185c5a
AH
1254 if ((!strncmp(dev->si_name, name, len)) &&
1255 (dev->si_name[len] != '\0')) {
1256 if (!(dev->si_flags & flag)) {
1257 devfs_destroy_dev_worker(dev);
ca8d7677
MD
1258 }
1259 }
1260 }
21864bc5
MD
1261
1262 return 0;
1263}
1264
1265/*
1266 * Worker function that creates all device nodes on top of a devfs
1267 * root node.
1268 */
1269static int
1270devfs_create_all_dev_worker(struct devfs_node *root)
1271{
1272 cdev_t dev;
1273
1274 KKASSERT(root);
21864bc5 1275
d0fe8596 1276 TAILQ_FOREACH(dev, &devfs_dev_list, link) {
21864bc5 1277 devfs_create_device_node(root, dev, NULL, NULL);
d0fe8596 1278 }
bc185c5a 1279
21864bc5
MD
1280 return 0;
1281}
1282
1283/*
1284 * Worker function that destroys all devices that match a specific
1285 * dev_ops and/or minor. If minor is less than 0, it is not matched
1286 * against. It also propagates all changes.
1287 */
1288static int
1289devfs_destroy_dev_by_ops_worker(struct dev_ops *ops, int minor)
1290{
1291 cdev_t dev, dev1;
1292
1293 KKASSERT(ops);
ca8d7677
MD
1294
1295 TAILQ_FOREACH_MUTABLE(dev, &devfs_dev_list, link, dev1) {
1296 if (dev->si_ops != ops)
1297 continue;
1298 if ((minor < 0) || (dev->si_uminor == minor)) {
ca8d7677 1299 devfs_destroy_dev_worker(dev);
21864bc5 1300 }
ca8d7677 1301 }
bc185c5a 1302
21864bc5
MD
1303 return 0;
1304}
1305
1306/*
1307 * Worker function that registers a new clone handler in devfs.
1308 */
1309static int
1310devfs_chandler_add_worker(char *name, d_clone_t *nhandler)
1311{
1312 struct devfs_clone_handler *chandler = NULL;
1313 u_char len = strlen(name);
1314
ca8d7677 1315 if (len == 0)
21864bc5
MD
1316 return 1;
1317
ca8d7677 1318 TAILQ_FOREACH(chandler, &devfs_chandler_list, link) {
bc185c5a
AH
1319 if (chandler->namlen != len)
1320 continue;
1321
1322 if (!memcmp(chandler->name, name, len)) {
1323 /* Clonable basename already exists */
1324 return 1;
21864bc5
MD
1325 }
1326 }
1327
ca8d7677 1328 chandler = kmalloc(sizeof(*chandler), M_DEVFS, M_WAITOK | M_ZERO);
5298e788 1329 chandler->name = kmalloc(len+1, M_DEVFS, M_WAITOK);
21864bc5
MD
1330 memcpy(chandler->name, name, len+1);
1331 chandler->namlen = len;
1332 chandler->nhandler = nhandler;
1333
1334 TAILQ_INSERT_TAIL(&devfs_chandler_list, chandler, link);
1335 return 0;
1336}
1337
1338/*
1339 * Worker function that removes a given clone handler from the
1340 * clone handler list.
1341 */
1342static int
1343devfs_chandler_del_worker(char *name)
1344{
1345 struct devfs_clone_handler *chandler, *chandler2;
1346 u_char len = strlen(name);
1347
ca8d7677 1348 if (len == 0)
21864bc5
MD
1349 return 1;
1350
ca8d7677
MD
1351 TAILQ_FOREACH_MUTABLE(chandler, &devfs_chandler_list, link, chandler2) {
1352 if (chandler->namlen != len)
1353 continue;
1354 if (memcmp(chandler->name, name, len))
1355 continue;
bc185c5a 1356
ca8d7677 1357 TAILQ_REMOVE(&devfs_chandler_list, chandler, link);
5298e788 1358 kfree(chandler->name, M_DEVFS);
ca8d7677 1359 kfree(chandler, M_DEVFS);
5298e788 1360 break;
21864bc5
MD
1361 }
1362
1363 return 0;
1364}
1365
1366/*
1367 * Worker function that finds a given device name and changes
1368 * the message received accordingly so that when replied to,
1369 * the answer is returned to the caller.
1370 */
1371static int
1372devfs_find_device_by_name_worker(devfs_msg_t devfs_msg)
1373{
6507240b
MD
1374 struct devfs_alias *alias;
1375 cdev_t dev;
21864bc5 1376 cdev_t found = NULL;
21864bc5 1377
6507240b
MD
1378 TAILQ_FOREACH(dev, &devfs_dev_list, link) {
1379 if (strcmp(devfs_msg->mdv_name, dev->si_name) == 0) {
21864bc5
MD
1380 found = dev;
1381 break;
1382 }
ca8d7677 1383 }
6507240b
MD
1384 if (found == NULL) {
1385 TAILQ_FOREACH(alias, &devfs_alias_list, link) {
1386 if (strcmp(devfs_msg->mdv_name, alias->name) == 0) {
1387 found = alias->dev_target;
1388 break;
1389 }
1390 }
1391 }
ca8d7677 1392 devfs_msg->mdv_cdev = found;
21864bc5
MD
1393
1394 return 0;
1395}
1396
1397/*
1398 * Worker function that finds a given device udev and changes
1399 * the message received accordingly so that when replied to,
1400 * the answer is returned to the caller.
1401 */
1402static int
1403devfs_find_device_by_udev_worker(devfs_msg_t devfs_msg)
1404{
1405 cdev_t dev, dev1;
1406 cdev_t found = NULL;
21864bc5 1407
ca8d7677
MD
1408 TAILQ_FOREACH_MUTABLE(dev, &devfs_dev_list, link, dev1) {
1409 if (((udev_t)dev->si_inode) == devfs_msg->mdv_udev) {
21864bc5
MD
1410 found = dev;
1411 break;
1412 }
ca8d7677
MD
1413 }
1414 devfs_msg->mdv_cdev = found;
21864bc5
MD
1415
1416 return 0;
1417}
1418
1419/*
1420 * Worker function that inserts a given alias into the
1421 * alias list, and propagates the alias to all mount
1422 * points.
1423 */
1424static int
1425devfs_make_alias_worker(struct devfs_alias *alias)
1426{
1427 struct devfs_alias *alias2;
1428 size_t len = strlen(alias->name);
1429 int found = 0;
1430
1431 TAILQ_FOREACH(alias2, &devfs_alias_list, link) {
bc185c5a
AH
1432 if (len != alias2->namlen)
1433 continue;
1434
1435 if (!memcmp(alias->name, alias2->name, len)) {
1436 found = 1;
1437 break;
21864bc5
MD
1438 }
1439 }
1440
1441 if (!found) {
bc185c5a
AH
1442 /*
1443 * The alias doesn't exist yet, so we add it to the alias list
1444 */
21864bc5
MD
1445 TAILQ_INSERT_TAIL(&devfs_alias_list, alias, link);
1446 devfs_alias_propagate(alias);
1447 } else {
5298e788 1448 devfs_debug(DEVFS_DEBUG_WARNING,
ca8d7677
MD
1449 "Warning: duplicate devfs_make_alias for %s\n",
1450 alias->name);
5298e788 1451 kfree(alias->name, M_DEVFS);
21864bc5
MD
1452 kfree(alias, M_DEVFS);
1453 }
1454
1455 return 0;
1456}
1457
1458/*
1459 * Function that removes and frees all aliases.
1460 */
1461static int
1462devfs_alias_reap(void)
1463{
1464 struct devfs_alias *alias, *alias2;
1465
1466 TAILQ_FOREACH_MUTABLE(alias, &devfs_alias_list, link, alias2) {
1467 TAILQ_REMOVE(&devfs_alias_list, alias, link);
		kfree(alias->name, M_DEVFS);
		kfree(alias, M_DEVFS);
1469 }
1470 return 0;
1471}
1472
1473/*
1474 * Function that removes an alias matching a specific cdev and frees
1475 * it accordingly.
1476 */
1477static int
1478devfs_alias_remove(cdev_t dev)
1479{
1480 struct devfs_alias *alias, *alias2;
1481
1482 TAILQ_FOREACH_MUTABLE(alias, &devfs_alias_list, link, alias2) {
1483 if (alias->dev_target == dev) {
1484 TAILQ_REMOVE(&devfs_alias_list, alias, link);
			kfree(alias->name, M_DEVFS);
			kfree(alias, M_DEVFS);
1486 }
1487 }
1488 return 0;
1489}
1490
1491/*
1492 * This function propagates a new alias to all mount points.
1493 */
1494static int
1495devfs_alias_propagate(struct devfs_alias *alias)
1496{
1497 struct devfs_mnt_data *mnt;
1498
1499 TAILQ_FOREACH(mnt, &devfs_mnt_list, link) {
1500 devfs_alias_apply(mnt->root_node, alias);
1501 }
1502 return 0;
1503}
1504
1505/*
1506 * This function is a recursive function iterating through
1507 * all device nodes in the topology and, if applicable,
1508 * creating the relevant alias for a device node.
1509 */
1510static int
1511devfs_alias_apply(struct devfs_node *node, struct devfs_alias *alias)
1512{
1513 struct devfs_node *node1, *node2;
1514
1515 KKASSERT(alias != NULL);
1516
1517 if ((node->node_type == Proot) || (node->node_type == Pdir)) {
21864bc5 1518 if (node->nchildren > 2) {
ca8d7677 1519 TAILQ_FOREACH_MUTABLE(node1, DEVFS_DENODE_HEAD(node), link, node2) {
21864bc5
MD
1520 devfs_alias_apply(node1, alias);
1521 }
1522 }
1523 } else {
1524 if (node->d_dev == alias->dev_target)
1cb12919 1525 devfs_alias_create(alias->name, node, 0);
21864bc5
MD
1526 }
1527 return 0;
1528}
1529
1530/*
1531 * This function checks if any alias possibly is applicable
1532 * to the given node. If so, the alias is created.
1533 */
1534static int
1535devfs_alias_check_create(struct devfs_node *node)
1536{
1537 struct devfs_alias *alias;
1538
1539 TAILQ_FOREACH(alias, &devfs_alias_list, link) {
1540 if (node->d_dev == alias->dev_target)
1cb12919 1541 devfs_alias_create(alias->name, node, 0);
21864bc5
MD
1542 }
1543 return 0;
1544}
1545
1546/*
1547 * This function creates an alias with a given name
1548 * linking to a given devfs node. It also increments
1549 * the link count on the target node.
1550 */
1551int
1cb12919 1552devfs_alias_create(char *name_orig, struct devfs_node *target, int rule_based)
21864bc5
MD
1553{
1554 struct mount *mp = target->mp;
1555 struct devfs_node *parent = DEVFS_MNTDATA(mp)->root_node;
1556 struct devfs_node *linknode;
21864bc5
MD
1557 char *create_path = NULL;
1558 char *name, name_buf[PATH_MAX];
1559
21864bc5
MD
1560 KKASSERT((lockstatus(&devfs_lock, curthread)) == LK_EXCLUSIVE);
1561
1562 devfs_resolve_name_path(name_orig, name_buf, &create_path, &name);
1563
1564 if (create_path)
1565 parent = devfs_resolve_or_create_path(parent, create_path, 1);
1566
1567
1568 if (devfs_find_device_node_by_name(parent, name)) {
bc185c5a 1569 devfs_debug(DEVFS_DEBUG_WARNING,
ca8d7677
MD
1570 "Node already exists: %s "
1571 "(devfs_make_alias_worker)!\n",
1572 name);
21864bc5
MD
1573 return 1;
1574 }
1575
1576
1577 linknode = devfs_allocp(Plink, name, parent, mp, NULL);
1578 if (linknode == NULL)
1579 return 1;
1580
1581 linknode->link_target = target;
1582 target->nlinks++;
21864bc5 1583
1cb12919
AH
1584 if (rule_based)
1585 linknode->flags |= DEVFS_RULE_CREATED;
1586
21864bc5
MD
1587 return 0;
1588}
1589
1590/*
 * This function is called by the core and handles mount point
 * strings. It either calls the relevant worker
 * (devfs_apply_reset_rules_worker) on all mountpoints or only a
 * specific one.
1595 */
1596static int
1597devfs_apply_reset_rules_caller(char *mountto, int apply)
1598{
21864bc5
MD
1599 struct devfs_mnt_data *mnt;
1600 size_t len = strlen(mountto);
1601
bc185c5a 1602 if (mountto[0] == '*') {
21864bc5 1603 TAILQ_FOREACH(mnt, &devfs_mnt_list, link) {
bc185c5a 1604 devfs_apply_reset_rules_worker(mnt->root_node, apply);
21864bc5
MD
1605 }
1606 } else {
1607 TAILQ_FOREACH(mnt, &devfs_mnt_list, link) {
bc185c5a
AH
1608 if ((len != mnt->mntonnamelen))
1609 continue;
1610
1611 if (!memcmp(mnt->mp->mnt_stat.f_mntonname, mountto, len)) {
1612 devfs_apply_reset_rules_worker(mnt->root_node, apply);
1613 break;
1614 }
21864bc5
MD
1615 }
1616 }
1617
1618 kfree(mountto, M_DEVFS);
1619 return 0;
1620}
1621
1622/*
1623 * This worker function applies or resets, depending on the arguments, a rule
1624 * to the whole given topology. *RECURSIVE*
1625 */
1626static int
1627devfs_apply_reset_rules_worker(struct devfs_node *node, int apply)
1628{
1629 struct devfs_node *node1, *node2;
1630
1631 if ((node->node_type == Proot) || (node->node_type == Pdir)) {
21864bc5 1632 if (node->nchildren > 2) {
bc185c5a 1633 TAILQ_FOREACH_MUTABLE(node1, DEVFS_DENODE_HEAD(node), link, node2) {
21864bc5
MD
1634 devfs_apply_reset_rules_worker(node1, apply);
1635 }
1636 }
1637 }
1638
1639 if (apply)
1640 devfs_rule_check_apply(node);
1641 else
1642 devfs_rule_reset_node(node);
1643
1644 return 0;
1645}
1646
1647
1648/*
1649 * This function calls a given callback function for
1650 * every dev node in the devfs dev list.
1651 */
1652static int
1653devfs_scan_callback_worker(devfs_scan_t *callback)
1654{
1655 cdev_t dev, dev1;
1656
d0fe8596 1657 TAILQ_FOREACH_MUTABLE(dev, &devfs_dev_list, link, dev1) {
21864bc5 1658 callback(dev);
d0fe8596 1659 }
21864bc5 1660
21864bc5
MD
1661 return 0;
1662}
1663
1664
1665/*
1666 * This function tries to resolve a given directory, or if not
1667 * found and creation requested, creates the given directory.
1668 */
1669static struct devfs_node *
ca8d7677
MD
1670devfs_resolve_or_create_dir(struct devfs_node *parent, char *dir_name,
1671 size_t name_len, int create)
21864bc5
MD
1672{
1673 struct devfs_node *node, *found = NULL;
1674
1675 TAILQ_FOREACH(node, DEVFS_DENODE_HEAD(parent), link) {
bc185c5a
AH
1676 if (name_len != node->d_dir.d_namlen)
1677 continue;
1678
1679 if (!memcmp(dir_name, node->d_dir.d_name, name_len)) {
1680 found = node;
1681 break;
21864bc5
MD
1682 }
1683 }
1684
1685 if ((found == NULL) && (create)) {
1686 found = devfs_allocp(Pdir, dir_name, parent, parent->mp, NULL);
1687 }
1688
1689 return found;
1690}
1691
1692/*
 * This function tries to resolve a complete path. If creation is requested,
 * any part of the path that cannot be resolved (because it doesn't exist)
 * is created.
1696 */
1697struct devfs_node *
1698devfs_resolve_or_create_path(struct devfs_node *parent, char *path, int create)
1699{
1700 struct devfs_node *node = parent;
1701 char buf[PATH_MAX];
1702 size_t idx = 0;
1703
1704
1705 if (path == NULL)
1706 return parent;
1707
1708
1709 for (; *path != '\0' ; path++) {
1710 if (*path != '/') {
1711 buf[idx++] = *path;
1712 } else {
1713 buf[idx] = '\0';
1714 node = devfs_resolve_or_create_dir(node, buf, idx, create);
1715 if (node == NULL)
1716 return NULL;
1717 idx = 0;
1718 }
1719 }
1720 buf[idx] = '\0';
1721 return devfs_resolve_or_create_dir(node, buf, idx, create);
1722}
1723
1724/*
1725 * Takes a full path and strips it into a directory path and a name.
1726 * For a/b/c/foo, it returns foo in namep and a/b/c in pathp. It
1727 * requires a working buffer with enough size to keep the whole
1728 * fullpath.
1729 */
1730int
1731devfs_resolve_name_path(char *fullpath, char *buf, char **pathp, char **namep)
1732{
1733 char *name = NULL;
1734 char *path = NULL;
1735 size_t len = strlen(fullpath) + 1;
1736 int i;
1737
bc185c5a
AH
1738 KKASSERT((fullpath != NULL) && (buf != NULL));
1739 KKASSERT((pathp != NULL) && (namep != NULL));
21864bc5
MD
1740
1741 memcpy(buf, fullpath, len);
1742
1743 for (i = len-1; i>= 0; i--) {
1744 if (buf[i] == '/') {
1745 buf[i] = '\0';
1746 name = &(buf[i+1]);
1747 path = buf;
1748 break;
1749 }
1750 }
1751
1752 *pathp = path;
1753
1754 if (name) {
1755 *namep = name;
1756 } else {
1757 *namep = buf;
1758 }
1759
1760 return 0;
1761}
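
/*
 * Illustrative sketch, using the example from the comment above: with
 * fullpath "a/b/c/foo", buf receives a working copy and afterwards
 * *pathp points at "a/b/c" while *namep points at "foo".
 */
#if 0
	char buf[PATH_MAX];
	char *path, *name;

	devfs_resolve_name_path("a/b/c/foo", buf, &path, &name);
#endif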
1762
1763/*
ca8d7677 1764 * This function creates a new devfs node for a given device. It can
21864bc5
MD
1765 * handle a complete path as device name, and accordingly creates
1766 * the path and the final device node.
ca8d7677
MD
1767 *
1768 * The reference count on the passed dev remains unchanged.
21864bc5
MD
1769 */
1770struct devfs_node *
ca8d7677
MD
1771devfs_create_device_node(struct devfs_node *root, cdev_t dev,
1772 char *dev_name, char *path_fmt, ...)
21864bc5
MD
1773{
1774 struct devfs_node *parent, *node = NULL;
1775 char *path = NULL;
1776 char *name, name_buf[PATH_MAX];
1777 __va_list ap;
1778 int i, found;
1779
1780 char *create_path = NULL;
1781 char *names = "pqrsPQRS";
1782
21864bc5
MD
1783 if (path_fmt != NULL) {
1784 path = kmalloc(PATH_MAX+1, M_DEVFS, M_WAITOK);
1785
1786 __va_start(ap, path_fmt);
1787 i = kvcprintf(path_fmt, NULL, path, 10, ap);
1788 path[i] = '\0';
1789 __va_end(ap);
1790 }
1791
1792 parent = devfs_resolve_or_create_path(root, path, 1);
1793 KKASSERT(parent);
1794
bc185c5a
AH
1795 devfs_resolve_name_path(
1796 ((dev_name == NULL) && (dev))?(dev->si_name):(dev_name),
1797 name_buf, &create_path, &name);
21864bc5
MD
1798
1799 if (create_path)
1800 parent = devfs_resolve_or_create_path(parent, create_path, 1);
1801
1802
1803 if (devfs_find_device_node_by_name(parent, name)) {
bc185c5a 1804 devfs_debug(DEVFS_DEBUG_WARNING, "devfs_create_device_node: "
894bbb25 1805 "DEVICE %s ALREADY EXISTS!!! Ignoring creation request.\n", name);
21864bc5
MD
1806 goto out;
1807 }
bc185c5a 1808
21864bc5 1809 node = devfs_allocp(Pdev, name, parent, parent->mp, dev);
21864bc5 1810
894bbb25 1811#if 0
bc185c5a
AH
1812 /*
1813 * Ugly unix98 pty magic, to hide pty master (ptm) devices and their
1814 * directory
1815 */
1816 if ((dev) && (strlen(dev->si_name) >= 4) &&
1817 (!memcmp(dev->si_name, "ptm/", 4))) {
894bbb25
AH
1818 node->parent->flags |= DEVFS_HIDDEN;
1819 node->flags |= DEVFS_HIDDEN;
21864bc5 1820 }
894bbb25 1821#endif
bc185c5a
AH
1822
1823 /*
1824 * Ugly pty magic, to tag pty devices as such and hide them if needed.
1825 */
21864bc5
MD
1826 if ((strlen(name) >= 3) && (!memcmp(name, "pty", 3)))
1827 node->flags |= (DEVFS_PTY | DEVFS_INVISIBLE);
1828
21864bc5
MD
1829 if ((strlen(name) >= 3) && (!memcmp(name, "tty", 3))) {
1830 found = 0;
1831 for (i = 0; i < strlen(names); i++) {
1832 if (name[3] == names[i]) {
1833 found = 1;
1834 break;
1835 }
1836 }
1837 if (found)
1838 node->flags |= (DEVFS_PTY | DEVFS_INVISIBLE);
1839 }
21864bc5
MD
1840
1841out:
1842 if (path_fmt != NULL)
1843 kfree(path, M_DEVFS);
21864bc5
MD
1844
1845 return node;
1846}
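
/*
 * Illustrative sketch (hypothetical): create a node for dev in a
 * subdirectory of the given root, letting path_fmt build the directory
 * part; "busnum" is a made-up variable.
 */
#if 0
	devfs_create_device_node(root, dev, NULL, "bus%d", busnum);
#endif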
1847
1848/*
1849 * This function finds a given device node in the topology with a given
1850 * cdev.
1851 */
1852struct devfs_node *
1853devfs_find_device_node(struct devfs_node *node, cdev_t target)
1854{
1855 struct devfs_node *node1, *node2, *found = NULL;
1856
1857 if ((node->node_type == Proot) || (node->node_type == Pdir)) {
21864bc5 1858 if (node->nchildren > 2) {
bc185c5a 1859 TAILQ_FOREACH_MUTABLE(node1, DEVFS_DENODE_HEAD(node), link, node2) {
21864bc5
MD
1860 if ((found = devfs_find_device_node(node1, target)))
1861 return found;
1862 }
1863 }
1864 } else if (node->node_type == Pdev) {
1865 if (node->d_dev == target)
1866 return node;
1867 }
21864bc5
MD
1868
1869 return NULL;
1870}
1871
1872/*
1873 * This function finds a device node in the topology by its
1874 * name and returns it.
1875 */
1876struct devfs_node *
1877devfs_find_device_node_by_name(struct devfs_node *parent, char *target)
1878{
1879 struct devfs_node *node, *found = NULL;
1880 size_t len = strlen(target);
1881
1882 TAILQ_FOREACH(node, DEVFS_DENODE_HEAD(parent), link) {
bc185c5a
AH
1883 if (len != node->d_dir.d_namlen)
1884 continue;
1885
1886 if (!memcmp(node->d_dir.d_name, target, len)) {
21864bc5
MD
1887 found = node;
1888 break;
1889 }
1890 }
1891
1892 return found;
1893}
1894
fa7e6f37
AH
1895static struct vnode*
1896devfs_inode_to_vnode_worker(struct devfs_node *node, ino_t target)
1897{
1898 struct devfs_node *node1, *node2;
1899 struct vnode* vp;
1900
1901 if ((node->node_type == Proot) || (node->node_type == Pdir)) {
fa7e6f37 1902 if (node->nchildren > 2) {
bc185c5a 1903 TAILQ_FOREACH_MUTABLE(node1, DEVFS_DENODE_HEAD(node), link, node2) {
fa7e6f37
AH
1904 if ((vp = devfs_inode_to_vnode_worker(node1, target)))
1905 return vp;
1906 }
1907 }
1908 }
bc185c5a 1909
fa7e6f37
AH
1910 if (node->d_dir.d_ino == target) {
1911 if (node->v_node) {
1912 vp = node->v_node;
1913 vget(vp, LK_EXCLUSIVE | LK_RETRY);
1914 vn_unlock(vp);
1915 } else {
1916 devfs_allocv(&vp, node);
1917 vn_unlock(vp);
1918 }
1919 return vp;
1920 }
1921
1922 return NULL;
1923}
1924
21864bc5 1925/*
ca8d7677
MD
1926 * This function takes a cdev and removes its devfs node in the
1927 * given topology. The cdev remains intact.
21864bc5
MD
1928 */
1929int
1930devfs_destroy_device_node(struct devfs_node *root, cdev_t target)
1931{
1932 struct devfs_node *node, *parent;
21864bc5 1933 char *name, name_buf[PATH_MAX];
21864bc5
MD
1934 char *create_path = NULL;
1935
1936 KKASSERT(target);
1937
21864bc5
MD
1938 memcpy(name_buf, target->si_name, strlen(target->si_name)+1);
1939
1940 devfs_resolve_name_path(target->si_name, name_buf, &create_path, &name);
21864bc5
MD
1941
1942 if (create_path)
1943 parent = devfs_resolve_or_create_path(root, create_path, 0);
1944 else
1945 parent = root;
bc185c5a 1946
21864bc5
MD
1947 if (parent == NULL)
1948 return 1;
bc185c5a 1949
21864bc5 1950 node = devfs_find_device_node_by_name(parent, name);
bc185c5a 1951
ca8d7677 1952 if (node)
21864bc5 1953 devfs_gc(node);
21864bc5
MD
1954
1955 return 0;
1956}
1957
1958/*
1959 * Just set perms and ownership for given node.
1960 */
1961int
bc185c5a
AH
1962devfs_set_perms(struct devfs_node *node, uid_t uid, gid_t gid,
1963 u_short mode, u_long flags)
21864bc5 1964{
bc185c5a
AH
1965 node->mode = mode;
1966 node->uid = uid;
1967 node->gid = gid;
21864bc5
MD
1968
1969 return 0;
1970}
1971
1972/*
1973 * Propagates a device attach/detach to all mount
1974 * points. Also takes care of automatic alias removal
1975 * for a deleted cdev.
1976 */
1977static int
1978devfs_propagate_dev(cdev_t dev, int attach)
1979{
1980 struct devfs_mnt_data *mnt;
1981
21864bc5 1982 TAILQ_FOREACH(mnt, &devfs_mnt_list, link) {
21864bc5
MD
1983 if (attach) {
1984 /* Device is being attached */
ca8d7677
MD
1985 devfs_create_device_node(mnt->root_node, dev,
1986 NULL, NULL );
21864bc5
MD
1987 } else {
1988 /* Device is being detached */
21864bc5
MD
1989 devfs_alias_remove(dev);
1990 devfs_destroy_device_node(mnt->root_node, dev);
1991 }
1992 }
21864bc5
MD
1993 return 0;
1994}
1995
1996/*
1997 * devfs_node_to_path takes a node and a buffer of at least
1998 * PATH_MAX bytes, resolves the full path from the root
1999 * node and writes it in human-readable form into the
2000 * buffer.
2001 * If DEVFS_STASH_DEPTH is less than the directory level up
2002 * to the root node, only the last DEVFS_STASH_DEPTH levels
2003 * of the path are resolved.
2004 */
2005int
2006devfs_node_to_path(struct devfs_node *node, char *buffer)
2007{
2008#define DEVFS_STASH_DEPTH 32
2009 struct devfs_node *node_stash[DEVFS_STASH_DEPTH];
2010 int i, offset;
2011 memset(buffer, 0, PATH_MAX);
2012
2013 for (i = 0; (i < DEVFS_STASH_DEPTH) && (node->node_type != Proot); i++) {
2014 node_stash[i] = node;
2015 node = node->parent;
2016 }
2017 i--;
2018
2019 for (offset = 0; i >= 0; i--) {
bc185c5a
AH
2020 memcpy(buffer+offset, node_stash[i]->d_dir.d_name,
2021 node_stash[i]->d_dir.d_namlen);
21864bc5
MD
2022 offset += node_stash[i]->d_dir.d_namlen;
2023 if (i > 0) {
2024 *(buffer+offset) = '/';
2025 offset++;
2026 }
2027 }
2028#undef DEVFS_STASH_DEPTH
2029 return 0;
2030}
2031
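/*
 * A minimal usage sketch (illustrative only): resolving a node's path
 * into a PATH_MAX-sized buffer and logging it.  "example_log_node_path"
 * is hypothetical.
 */
#if 0
static void
example_log_node_path(struct devfs_node *node)
{
	char *buf;

	buf = kmalloc(PATH_MAX, M_DEVFS, M_WAITOK);
	devfs_node_to_path(node, buf);
	devfs_debug(DEVFS_DEBUG_DEBUG, "node resolves to: %s\n", buf);
	kfree(buf, M_DEVFS);
}
#endif
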
2032/*
2033 * devfs_clone either derives a base name from a complete name by
2034 * returning, in *namlenp, the length of the name without trailing
2035 * digits, or, if clone != 0, calls the device's clone handler to get
2036 * a new device, which in turn is returned in *devp.
2037 */
2038int
bc185c5a
AH
2039devfs_clone(char *name, size_t *namlenp, cdev_t *devp, int clone,
2040 struct ucred *cred)
21864bc5
MD
2041{
2042 KKASSERT(namlenp);
2043
2044 size_t len = *namlenp;
2045 int error = 1;
2046 struct devfs_clone_handler *chandler;
2047 struct dev_clone_args ap;
2048
2049 if (!clone) {
2050 for (; (len > 0) && (DEVFS_ISDIGIT(name[len-1])); len--);
2051 }
2052
d0fe8596 2053 TAILQ_FOREACH(chandler, &devfs_chandler_list, link) {
21864bc5 2054 if ((chandler->namlen == len) &&
d0fe8596
MD
2055 (!memcmp(chandler->name, name, len)) &&
2056 (chandler->nhandler)) {
21864bc5
MD
2057 if (clone) {
2058 ap.a_dev = NULL;
2059 ap.a_name = name;
2060 ap.a_namelen = len;
2061 ap.a_cred = cred;
2062 error = (chandler->nhandler)(&ap);
2063 KKASSERT(devp);
2064 *devp = ap.a_dev;
2065 } else {
2066 *namlenp = len;
2067 error = 0;
2068 }
2069
2070 break;
2071 }
2072 }
2073
2074 return error;
2075}
2076
2077
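/*
 * A minimal usage sketch (illustrative only): with clone == 0 the call
 * only trims trailing digits, so for "ttyv12" namlen ends up as 4
 * ("ttyv"), provided a clone handler of that name is registered.
 * "example_basename" is hypothetical.
 */
#if 0
static void
example_basename(void)
{
	char name[] = "ttyv12";
	size_t namlen = strlen(name);

	if (devfs_clone(name, &namlen, NULL, 0, NULL) == 0)
		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "base name length: %lu\n", (unsigned long)namlen);
}
#endif
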
2078/*
2079 * Registers a new orphan in the orphan list.
2080 */
2081void
2082devfs_tracer_add_orphan(struct devfs_node *node)
2083{
2084 struct devfs_orphan *orphan;
2085
2086 KKASSERT(node);
2087 orphan = kmalloc(sizeof(struct devfs_orphan), M_DEVFS, M_WAITOK);
2088 orphan->node = node;
2089
ca8d7677
MD
2090 KKASSERT((node->flags & DEVFS_ORPHANED) == 0);
2091 node->flags |= DEVFS_ORPHANED;
21864bc5
MD
2092 TAILQ_INSERT_TAIL(DEVFS_ORPHANLIST(node->mp), orphan, link);
2093}
2094
2095/*
2096 * Removes an orphan from the orphan list.
2097 */
2098void
2099devfs_tracer_del_orphan(struct devfs_node *node)
2100{
2101 struct devfs_orphan *orphan;
2102
2103 KKASSERT(node);
2104
2105 TAILQ_FOREACH(orphan, DEVFS_ORPHANLIST(node->mp), link) {
2106 if (orphan->node == node) {
ca8d7677 2107 node->flags &= ~DEVFS_ORPHANED;
21864bc5
MD
2108 TAILQ_REMOVE(DEVFS_ORPHANLIST(node->mp), orphan, link);
2109 kfree(orphan, M_DEVFS);
2110 break;
2111 }
2112 }
2113}
2114
2115/*
2116 * Counts the orphans in the orphan list and, if cleanup
2117 * is specified, also frees each orphan and removes it from
2118 * the list.
2119 */
2120size_t
2121devfs_tracer_orphan_count(struct mount *mp, int cleanup)
2122{
2123 struct devfs_orphan *orphan, *orphan2;
2124 size_t count = 0;
2125
2126 TAILQ_FOREACH_MUTABLE(orphan, DEVFS_ORPHANLIST(mp), link, orphan2) {
2127 count++;
bc185c5a
AH
2128 /*
2129 * If we are instructed to clean up, we do so.
2130 */
21864bc5 2131 if (cleanup) {
21864bc5 2132 TAILQ_REMOVE(DEVFS_ORPHANLIST(mp), orphan, link);
ca8d7677
MD
2133 orphan->node->flags &= ~DEVFS_ORPHANED;
2134 devfs_freep(orphan->node);
21864bc5
MD
2135 kfree(orphan, M_DEVFS);
2136 }
2137 }
2138
2139 return count;
2140}
2141
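/*
 * A minimal usage sketch (illustrative only): counting and immediately
 * reaping whatever is still parked on a mount's orphan list, e.g. at
 * unmount time.  "example_reap_orphans" is hypothetical.
 */
#if 0
static void
example_reap_orphans(struct mount *mp)
{
	size_t leaked;

	leaked = devfs_tracer_orphan_count(mp, 1);
	if (leaked)
		devfs_debug(DEVFS_DEBUG_WARNING,
			    "reaped %lu orphaned node(s)\n",
			    (unsigned long)leaked);
}
#endif
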
2142/*
2143 * Fetches a new ino_t by incrementing the global d_ino
2144 * counter while holding its spinlock.
2145 */
2146static ino_t
2147devfs_fetch_ino(void)
2148{
2149 ino_t ret;
2150
2151 spin_lock_wr(&ino_lock);
2152 ret = d_ino++;
2153 spin_unlock_wr(&ino_lock);
2154
2155 return ret;
2156}
2157
2158/*
2159 * Allocates a new cdev and initializes its most basic
2160 * fields.
2161 */
2162cdev_t
2163devfs_new_cdev(struct dev_ops *ops, int minor)
2164{
21864bc5
MD
2165 cdev_t dev = sysref_alloc(&cdev_sysref_class);
2166 sysref_activate(&dev->si_sysref);
2167 reference_dev(dev);
21864bc5
MD
2168 memset(dev, 0, offsetof(struct cdev, si_sysref));
2169
2170 dev->si_uid = 0;
2171 dev->si_gid = 0;
2172 dev->si_perms = 0;
2173 dev->si_drv1 = NULL;
2174 dev->si_drv2 = NULL;
2175 dev->si_lastread = 0; /* time_second */
2176 dev->si_lastwrite = 0; /* time_second */
2177
2178 dev->si_ops = ops;
894bbb25 2179 dev->si_flags = 0;
21864bc5
MD
2180 dev->si_umajor = 0;
2181 dev->si_uminor = minor;
7cbab9da 2182 dev->si_inode = makeudev(devfs_reference_ops(ops), minor);
21864bc5
MD
2183
2184 return dev;
2185}
2186
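/*
 * A minimal usage sketch (illustrative only): allocating a cdev by
 * hand for a hypothetical "foo_ops" dev_ops and linking it into the
 * dev list.  The caller would still have to fill in si_name and create
 * the topology nodes; real drivers normally go through the higher-level
 * device helpers instead of calling these routines directly.
 */
#if 0
static cdev_t
example_alloc_foo(struct dev_ops *foo_ops, int unit)
{
	cdev_t dev;

	dev = devfs_new_cdev(foo_ops, unit);
	devfs_link_dev(dev);
	return (dev);
}
#endif
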
ca8d7677
MD
2187static void
2188devfs_cdev_terminate(cdev_t dev)
21864bc5
MD
2189{
2190 int locked = 0;
2191
2192 /* Check if the devfs lock is already held; if not, acquire it */
2193 if (lockstatus(&devfs_lock, curthread) != LK_EXCLUSIVE) {
2194 lockmgr(&devfs_lock, LK_EXCLUSIVE);
2195 locked = 1;
2196 }
2197
21864bc5
MD
2198 /* Propagate destruction, just in case */
2199 devfs_propagate_dev(dev, 0);
2200
2201 /* If we acquired the lock, we also get rid of it */
2202 if (locked)
2203 lockmgr(&devfs_lock, LK_RELEASE);
2204
7cbab9da
AH
2205 devfs_release_ops(dev->si_ops);
2206
21864bc5
MD
2207 /* Finally destroy the device */
2208 sysref_put(&dev->si_sysref);
2209}
2210
21864bc5
MD
2211/*
2212 * Links a given cdev into the dev list.
2213 */
2214int
2215devfs_link_dev(cdev_t dev)
2216{
ca8d7677 2217 KKASSERT((dev->si_flags & SI_DEVFS_LINKED) == 0);
21864bc5
MD
2218 dev->si_flags |= SI_DEVFS_LINKED;
2219 TAILQ_INSERT_TAIL(&devfs_dev_list, dev, link);
2220
2221 return 0;
2222}
2223
2224/*
ca8d7677
MD
2225 * Removes a given cdev from the dev list. The caller is responsible for
2226 * releasing the reference on the device associated with the linkage.
2227 *
2228 * Returns EALREADY if the dev has already been unlinked.
21864bc5 2229 */
ca8d7677 2230static int
21864bc5
MD
2231devfs_unlink_dev(cdev_t dev)
2232{
2233 if ((dev->si_flags & SI_DEVFS_LINKED)) {
2234 TAILQ_REMOVE(&devfs_dev_list, dev, link);
2235 dev->si_flags &= ~SI_DEVFS_LINKED;
ca8d7677 2236 return (0);
21864bc5 2237 }
ca8d7677 2238 return (EALREADY);
21864bc5
MD
2239}
2240
894bbb25
AH
2241int
2242devfs_node_is_accessible(struct devfs_node *node)
2243{
2244 if ((node) && (!(node->flags & DEVFS_HIDDEN)))
2245 return 1;
2246 else
2247 return 0;
2248}
2249
7cbab9da
AH
2250int
2251devfs_reference_ops(struct dev_ops *ops)
2252{
2253 int unit;
2254
2255 if (ops->head.refs == 0) {
2256 ops->head.id = devfs_clone_bitmap_get(&DEVFS_CLONE_BITMAP(ops_id), 255);
2257 if (ops->head.id == -1) {
2258 /* Ran out of unique ids */
bc185c5a
AH
2259 devfs_debug(DEVFS_DEBUG_WARNING,
2260 "devfs_reference_ops: WARNING: ran out of unique ids\n");
7cbab9da
AH
2261 }
2262 }
2263 unit = ops->head.id;
2264 ++ops->head.refs;
2265
2266 return unit;
2267}
2268
2269void
2270devfs_release_ops(struct dev_ops *ops)
2271{
2272 --ops->head.refs;
2273
2274 if (ops->head.refs == 0) {
2275 devfs_clone_bitmap_put(&DEVFS_CLONE_BITMAP(ops_id), ops->head.id);
2276 }
2277}
2278
21864bc5 2279void
d0fe8596 2280devfs_config(void)
21864bc5
MD
2281{
2282 devfs_msg_t msg;
2283
2284 msg = devfs_msg_get();
21864bc5
MD
2285 msg = devfs_msg_send_sync(DEVFS_SYNC, msg);
2286 devfs_msg_put(msg);
2287}
2288
2289/*
2290 * Called on init of devfs; creates the objcaches and
2291 * spawns off the devfs core thread. Also initializes
2292 * locks.
2293 */
2294static void
2295devfs_init(void)
2296{
2297 devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_init() called\n");
2298 /* Create objcaches for nodes, msgs and devs */
d0fe8596
MD
2299 devfs_node_cache = objcache_create("devfs-node-cache", 0, 0,
2300 NULL, NULL, NULL,
2301 objcache_malloc_alloc,
2302 objcache_malloc_free,
2303 &devfs_node_malloc_args );
2304
2305 devfs_msg_cache = objcache_create("devfs-msg-cache", 0, 0,
2306 NULL, NULL, NULL,
2307 objcache_malloc_alloc,
2308 objcache_malloc_free,
2309 &devfs_msg_malloc_args );
2310
2311 devfs_dev_cache = objcache_create("devfs-dev-cache", 0, 0,
2312 NULL, NULL, NULL,
2313 objcache_malloc_alloc,
2314 objcache_malloc_free,
2315 &devfs_dev_malloc_args );
21864bc5 2316
7cbab9da 2317 devfs_clone_bitmap_init(&DEVFS_CLONE_BITMAP(ops_id));
7cbab9da 2318
21864bc5
MD
2319 /* Initialize the reply-only port which acts as a message drain */
2320 lwkt_initport_replyonly(&devfs_dispose_port, devfs_msg_autofree_reply);
2321
2322 /* Initialize *THE* devfs lock */
2323 lockinit(&devfs_lock, "devfs_core lock", 0, 0);
2324
2325
2326 lwkt_create(devfs_msg_core, /*args*/NULL, &td_core, NULL,
2327 0, 0, "devfs_msg_core");
2328
2329 tsleep(td_core/*devfs_id*/, 0, "devfsc", 0);
2330
2331 devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_init finished\n");
2332}
2333
2334/*
2335 * Called on unload of devfs; takes care of destroying the core
2336 * and the objcaches. Also removes aliases that are no longer needed.
2337 */
2338static void
2339devfs_uninit(void)
2340{
2341 devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_uninit() called\n");
2342
2343 devfs_msg_send(DEVFS_TERMINATE_CORE, NULL);
2344
2345 tsleep(td_core/*devfs_id*/, 0, "devfsc", 0);
2346 tsleep(td_core/*devfs_id*/, 0, "devfsc", 10000);
2347
7cbab9da
AH
2348 devfs_clone_bitmap_uninit(&DEVFS_CLONE_BITMAP(ops_id));
2349
21864bc5
MD
2350 /* Destroy the objcaches */
2351 objcache_destroy(devfs_msg_cache);
2352 objcache_destroy(devfs_node_cache);
2353 objcache_destroy(devfs_dev_cache);
2354
2355 devfs_alias_reap();
2356}
2357
2358/*
2359 * This is a sysctl handler to assist userland devname(3) in
2360 * finding the device name for a given udev.
2361 */
2362static int
2363devfs_sysctl_devname_helper(SYSCTL_HANDLER_ARGS)
2364{
2365 udev_t udev;
2366 cdev_t found;
2367 int error;
2368
2369
2370 if ((error = SYSCTL_IN(req, &udev, sizeof(udev_t))))
2371 return (error);
2372
2373 devfs_debug(DEVFS_DEBUG_DEBUG, "devfs sysctl, received udev: %d\n", udev);
2374
2375 if (udev == NOUDEV)
2376 return(EINVAL);
2377
2378 if ((found = devfs_find_device_by_udev(udev)) == NULL)
2379 return(ENOENT);
2380
2381 return(SYSCTL_OUT(req, found->si_name, strlen(found->si_name) + 1));
2382}
2383
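/*
 * A minimal userland sketch (illustrative only) of how devname(3) or a
 * similar consumer could query this handler: the udev is passed as the
 * "new" value and the device name comes back as the "old" value.
 * "example_print_devname" is hypothetical.
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>
#include <limits.h>
#include <stdio.h>

static void
example_print_devname(udev_t udev)
{
	char name[PATH_MAX];
	size_t len = sizeof(name);

	if (sysctlbyname("kern.devname", name, &len, &udev,
			 sizeof(udev)) == 0)
		printf("udev %d -> /dev/%s\n", (int)udev, name);
}
#endif
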
2384
2385SYSCTL_PROC(_kern, OID_AUTO, devname, CTLTYPE_OPAQUE|CTLFLAG_RW|CTLFLAG_ANYBODY,
2386 NULL, 0, devfs_sysctl_devname_helper, "", "helper for devname(3)");
2387
2388static SYSCTL_NODE(_vfs, OID_AUTO, devfs, CTLFLAG_RW, 0, "devfs");
2389TUNABLE_INT("vfs.devfs.debug", &devfs_debug_enable);
bc185c5a
AH
2390SYSCTL_INT(_vfs_devfs, OID_AUTO, debug, CTLFLAG_RW, &devfs_debug_enable,
2391 0, "Enable DevFS debugging");
21864bc5 2392
bc185c5a
AH
2393SYSINIT(vfs_devfs_register, SI_SUB_PRE_DRIVERS, SI_ORDER_FIRST,
2394 devfs_init, NULL);
2395SYSUNINIT(vfs_devfs_register, SI_SUB_PRE_DRIVERS, SI_ORDER_ANY,
2396 devfs_uninit, NULL);