Kernel mtx - Add mtxsleep(), interlocked tsleep w/ mutexes
[dragonfly.git] / sys / vfs / devfs / devfs_core.c
1/*
2 * Copyright (c) 2009 The DragonFly Project. All rights reserved.
3 *
4 * This code is derived from software contributed to The DragonFly Project
5 * by Alex Hornung <ahornung@gmail.com>
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
16 * distribution.
17 * 3. Neither the name of The DragonFly Project nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific, prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 */
34#include <sys/param.h>
35#include <sys/systm.h>
36#include <sys/kernel.h>
37#include <sys/mount.h>
38#include <sys/vnode.h>
39#include <sys/types.h>
40#include <sys/lock.h>
41#include <sys/msgport.h>
42#include <sys/msgport2.h>
43#include <sys/spinlock2.h>
44#include <sys/sysctl.h>
45#include <sys/ucred.h>
46#include <sys/param.h>
47#include <sys/sysref2.h>
48#include <sys/systm.h>
49#include <vfs/devfs/devfs.h>
50#include <vfs/devfs/devfs_rules.h>
51
52MALLOC_DEFINE(M_DEVFS, "devfs", "Device File System (devfs) allocations");
53DEVFS_DECLARE_CLONE_BITMAP(ops_id);
54/*
55 * SYSREF Integration - reference counting, allocation,
56 * sysid and syslink integration.
57 */
58static void devfs_cdev_terminate(cdev_t dev);
59static struct sysref_class cdev_sysref_class = {
60 .name = "cdev",
61 .mtype = M_DEVFS,
62 .proto = SYSREF_PROTO_DEV,
63 .offset = offsetof(struct cdev, si_sysref),
64 .objsize = sizeof(struct cdev),
65 .mag_capacity = 32,
66 .flags = 0,
67 .ops = {
68 .terminate = (sysref_terminate_func_t)devfs_cdev_terminate
69 }
70};
71
72static struct objcache *devfs_node_cache;
73static struct objcache *devfs_msg_cache;
74static struct objcache *devfs_dev_cache;
75
76static struct objcache_malloc_args devfs_node_malloc_args = {
77 sizeof(struct devfs_node), M_DEVFS };
78struct objcache_malloc_args devfs_msg_malloc_args = {
79 sizeof(struct devfs_msg), M_DEVFS };
80struct objcache_malloc_args devfs_dev_malloc_args = {
81 sizeof(struct cdev), M_DEVFS };
82
83static struct devfs_dev_head devfs_dev_list =
84 TAILQ_HEAD_INITIALIZER(devfs_dev_list);
85static struct devfs_mnt_head devfs_mnt_list =
86 TAILQ_HEAD_INITIALIZER(devfs_mnt_list);
87static struct devfs_chandler_head devfs_chandler_list =
88 TAILQ_HEAD_INITIALIZER(devfs_chandler_list);
89static struct devfs_alias_head devfs_alias_list =
90 TAILQ_HEAD_INITIALIZER(devfs_alias_list);
91
92struct lock devfs_lock;
93static struct lwkt_port devfs_dispose_port;
94static struct lwkt_port devfs_msg_port;
95static struct thread *td_core;
96
97static ino_t d_ino = 0;
98static __uint32_t msg_id = 0;
99static struct spinlock ino_lock;
100static int devfs_debug_enable = 0;
101
102static ino_t devfs_fetch_ino(void);
103static int devfs_gc_dirs(struct devfs_node *);
104static int devfs_gc_links(struct devfs_node *, struct devfs_node *, size_t);
105static int devfs_create_all_dev_worker(struct devfs_node *);
106static int devfs_create_dev_worker(cdev_t, uid_t, gid_t, int);
107static int devfs_destroy_dev_worker(cdev_t);
108static int devfs_destroy_subnames_worker(char *);
109static int devfs_destroy_dev_by_ops_worker(struct dev_ops *, int);
110static int devfs_propagate_dev(cdev_t, int);
111static int devfs_unlink_dev(cdev_t dev);
112
113static int devfs_chandler_add_worker(char *, d_clone_t *);
114static int devfs_chandler_del_worker(char *);
115
116static void devfs_msg_autofree_reply(lwkt_port_t, lwkt_msg_t);
117static void devfs_msg_core(void *);
118
119static int devfs_find_device_by_name_worker(devfs_msg_t);
120static int devfs_find_device_by_udev_worker(devfs_msg_t);
121
122static struct vnode *devfs_inode_to_vnode_worker(struct devfs_node *, ino_t);
123
124static int devfs_apply_reset_rules_caller(char *, int);
125static int devfs_apply_reset_rules_worker(struct devfs_node *, int);
126
127static int devfs_scan_callback_worker(devfs_scan_t *);
128
129static struct devfs_node *devfs_resolve_or_create_dir(struct devfs_node *,
130 char *, size_t, int);
131
132static int devfs_make_alias_worker(struct devfs_alias *);
133static int devfs_alias_remove(cdev_t);
134static int devfs_alias_reap(void);
135static int devfs_alias_propagate(struct devfs_alias *);
136static int devfs_alias_apply(struct devfs_node *, struct devfs_alias *);
137static int devfs_alias_check_create(struct devfs_node *);
138
139static int devfs_clr_subnames_flag_worker(char *, uint32_t);
140static int devfs_destroy_subnames_without_flag_worker(char *, uint32_t);
141
142/*
143 * devfs_debug() is a debug output function using kvprintf(); its output
144 * is gated by the SYSCTL- and TUNABLE-controlled devfs_debug_enable level.
145 */
146int
147devfs_debug(int level, char *fmt, ...)
148{
149 __va_list ap;
150
151 __va_start(ap, fmt);
152 if (level <= devfs_debug_enable)
153 kvprintf(fmt, ap);
154 __va_end(ap);
155
156 return 0;
157}
158
159/*
160 * devfs_allocp() allocates a new devfs node with the specified
161 * parameters. The node is automatically linked into the topology
162 * if a parent is specified. Device rules and aliases are also
163 * applied to the new node.
164 */
165struct devfs_node *
166devfs_allocp(devfs_nodetype devfsnodetype, char *name,
167 struct devfs_node *parent, struct mount *mp, cdev_t dev)
168{
169 struct devfs_node *node = NULL;
170 size_t namlen = strlen(name);
171
172 node = objcache_get(devfs_node_cache, M_WAITOK);
173 bzero(node, sizeof(*node));
174
175 atomic_add_int(&(DEVFS_MNTDATA(mp)->leak_count), 1);
176
177 node->d_dev = NULL;
178 node->nchildren = 1;
179 node->mp = mp;
180 node->d_dir.d_ino = devfs_fetch_ino();
181
182 /*
183 * Cookie jar for children. Leave 0 and 1 for '.' and '..' entries
184 * respectively.
185 */
186 node->cookie_jar = 2;
187
188 /*
189 * Access Control members
190 */
191 node->mode = DEVFS_DEFAULT_MODE;
192 node->uid = DEVFS_DEFAULT_UID;
193 node->gid = DEVFS_DEFAULT_GID;
194
195 switch (devfsnodetype) {
196 case Proot:
197 /*
198 * Ensure that we don't recycle the root vnode by marking it as
199 * linked into the topology.
200 */
201 node->flags |= DEVFS_NODE_LINKED;
202 case Pdir:
203 TAILQ_INIT(DEVFS_DENODE_HEAD(node));
204 node->d_dir.d_type = DT_DIR;
205 node->nchildren = 2;
206 break;
207
208 case Plink:
209 node->d_dir.d_type = DT_LNK;
210 break;
211
212 case Preg:
213 node->d_dir.d_type = DT_REG;
214 break;
215
216 case Pdev:
217 if (dev != NULL) {
218 node->d_dir.d_type = DT_CHR;
219 node->d_dev = dev;
220
221 node->mode = dev->si_perms;
222 node->uid = dev->si_uid;
223 node->gid = dev->si_gid;
224
225 devfs_alias_check_create(node);
226 }
227 break;
228
229 default:
230 panic("devfs_allocp: unknown node type");
231 }
232
233 node->v_node = NULL;
234 node->node_type = devfsnodetype;
235
236 /* Initialize the dirent structure of each devfs vnode */
237 KKASSERT(namlen < 256);
238 node->d_dir.d_namlen = namlen;
239 node->d_dir.d_name = kmalloc(namlen+1, M_DEVFS, M_WAITOK);
240 memcpy(node->d_dir.d_name, name, namlen);
241 node->d_dir.d_name[namlen] = '\0';
242
243 /* Initialize the parent node element */
244 node->parent = parent;
245
246 /* Apply rules */
247 devfs_rule_check_apply(node);
248
249 /* Initialize *time members */
250 nanotime(&node->atime);
251 node->mtime = node->ctime = node->atime;
252
253 /*
254 * Associate with parent as last step, clean out namecache
255 * reference.
256 */
257 if ((parent != NULL) &&
258 ((parent->node_type == Proot) || (parent->node_type == Pdir))) {
259 parent->nchildren++;
260 node->cookie = parent->cookie_jar++;
261 node->flags |= DEVFS_NODE_LINKED;
262 TAILQ_INSERT_TAIL(DEVFS_DENODE_HEAD(parent), node, link);
263
264 /* This forces negative namecache lookups to clear */
265 ++mp->mnt_namecache_gen;
266 }
267
268 return node;
269}
270
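/*
 * Illustrative sketch (not part of the original source): allocating an extra
 * directory node below an existing parent from code that already runs in the
 * devfs core thread, i.e. with devfs_lock held. The example_* function name
 * and the "exampledir" directory name are assumptions for illustration only.
 */
static struct devfs_node *
example_make_subdir(struct devfs_node *parent)
{
	/* The new Pdir node is linked under parent and inherits its mount */
	return devfs_allocp(Pdir, "exampledir", parent, parent->mp, NULL);
}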
271/*
272 * devfs_allocv() allocates a new vnode based on a devfs node.
273 */
274int
275devfs_allocv(struct vnode **vpp, struct devfs_node *node)
276{
277 struct vnode *vp;
278 int error = 0;
279
280 KKASSERT(node);
281
21864bc5
MD
282try_again:
283 while ((vp = node->v_node) != NULL) {
284 error = vget(vp, LK_EXCLUSIVE);
285 if (error != ENOENT) {
286 *vpp = vp;
21864bc5
MD
287 goto out;
288 }
289 }
21864bc5 290
21864bc5
MD
291 if ((error = getnewvnode(VT_DEVFS, node->mp, vpp, 0, 0)) != 0)
292 goto out;
293
294 vp = *vpp;
295
21864bc5
MD
296 if (node->v_node != NULL) {
297 vp->v_type = VBAD;
298 vx_put(vp);
299 goto try_again;
300 }
301
302 vp->v_data = node;
303 node->v_node = vp;
21864bc5
MD
304
305 switch (node->node_type) {
306 case Proot:
307 vp->v_flag |= VROOT;
308 case Pdir:
309 vp->v_type = VDIR;
310 break;
311
312 case Plink:
313 vp->v_type = VLNK;
314 break;
315
316 case Preg:
317 vp->v_type = VREG;
318 break;
319
320 case Pdev:
321 vp->v_type = VCHR;
21864bc5
MD
322 KKASSERT(node->d_dev);
323
bc185c5a
AH
324 vp->v_uminor = node->d_dev->si_uminor;
325 vp->v_umajor = 0;
326
327 v_associate_rdev(vp, node->d_dev);
328 vp->v_ops = &node->mp->mnt_vn_spec_ops;
21864bc5
MD
329 break;
330
331 default:
332 panic("devfs_allocv: unknown node type");
333 }
334
335out:
21864bc5
MD
336 return error;
337}
338
339/*
340 * devfs_allocvp allocates both a devfs node (with the given settings) and a vnode
341 * based on the newly created devfs node.
342 */
343int
344devfs_allocvp(struct mount *mp, struct vnode **vpp, devfs_nodetype devfsnodetype,
bc185c5a 345 char *name, struct devfs_node *parent, cdev_t dev)
21864bc5
MD
346{
347 struct devfs_node *node;
348
21864bc5 349 node = devfs_allocp(devfsnodetype, name, parent, mp, dev);
bc185c5a 350
21864bc5
MD
351 if (node != NULL)
352 devfs_allocv(vpp, node);
353 else
354 *vpp = NULL;
355
21864bc5
MD
356 return 0;
357}
358
359/*
360 * Destroy the devfs_node. The node must be unlinked from the topology.
361 *
362 * This function will also destroy any vnode association with the node
363 * and device.
364 *
365 * The cdev_t itself remains intact.
366 */
367int
368devfs_freep(struct devfs_node *node)
369{
ca8d7677
MD
370 struct vnode *vp;
371
21864bc5 372 KKASSERT(node);
ca8d7677
MD
373 KKASSERT(((node->flags & DEVFS_NODE_LINKED) == 0) ||
374 (node->node_type == Proot));
375 KKASSERT((node->flags & DEVFS_DESTROYED) == 0);
21864bc5
MD
376
377 atomic_subtract_int(&(DEVFS_MNTDATA(node->mp)->leak_count), 1);
378 if (node->symlink_name) {
379 kfree(node->symlink_name, M_DEVFS);
380 node->symlink_name = NULL;
381 }
382
ca8d7677
MD
383 /*
384 * Remove the node from the orphan list if it is still on it.
385 */
386 if (node->flags & DEVFS_ORPHANED)
21864bc5
MD
387 devfs_tracer_del_orphan(node);
388
ca8d7677
MD
389 /*
390 * Disassociate the vnode from the node. This also prevents the
391 * vnode's reclaim code from double-freeing the node.
392 */
393 if ((vp = node->v_node) != NULL) {
9b823501 394 v_release_rdev(vp);
ca8d7677
MD
395 vp->v_data = NULL;
396 node->v_node = NULL;
397 }
398 if (node->d_dir.d_name)
399 kfree(node->d_dir.d_name, M_DEVFS);
400 node->flags |= DEVFS_DESTROYED;
401
21864bc5
MD
402 objcache_put(devfs_node_cache, node);
403
404 return 0;
405}
406
407/*
408 * Unlink the devfs node from the topology and add it to the orphan list.
409 * The node will later be destroyed by devfs_freep().
410 *
411 * Any vnode association, including the v_rdev and v_data, remains intact
412 * until devfs_freep() is called.
413 */
414int
415devfs_unlinkp(struct devfs_node *node)
416{
417 struct devfs_node *parent;
418 KKASSERT(node);
419
bc185c5a
AH
420 /*
421 * Add the node to the orphan list so it is referenced somewhere and
422 * we don't leak it.
423 */
21864bc5 424 devfs_tracer_add_orphan(node);
bc185c5a 425
21864bc5
MD
426 parent = node->parent;
427
ca8d7677
MD
428 /*
429 * If the parent is known we can unlink the node out of the topology
430 */
21864bc5
MD
431 if (parent) {
432 TAILQ_REMOVE(DEVFS_DENODE_HEAD(parent), node, link);
433 parent->nchildren--;
434 KKASSERT((parent->nchildren >= 0));
435 node->flags &= ~DEVFS_NODE_LINKED;
436 }
437 node->parent = NULL;
21864bc5
MD
438 return 0;
439}
440
441/*
442 * devfs_reaperp() is a recursive function that iterates through the
443 * whole topology, unlinking and freeing all devfs nodes.
444 */
445int
446devfs_reaperp(struct devfs_node *node)
447{
448 struct devfs_node *node1, *node2;
449
21864bc5 450 if ((node->node_type == Proot) || (node->node_type == Pdir)) {
21864bc5 451 if (node->nchildren > 2) {
ca8d7677 452 TAILQ_FOREACH_MUTABLE(node1, DEVFS_DENODE_HEAD(node),
bc185c5a 453 link, node2) {
21864bc5
MD
454 devfs_reaperp(node1);
455 }
456 }
457 }
21864bc5
MD
458 devfs_unlinkp(node);
459 devfs_freep(node);
460
461 return 0;
462}
463
464/*
465 * devfs_gc() is the devfs garbage collector. It takes care of unlinking
466 * and freeing a node, but also removes empty directories and links that
467 * point, via the devfs auto-link mechanism, to the node being deleted.
468 */
469int
470devfs_gc(struct devfs_node *node)
471{
472 struct devfs_node *root_node = DEVFS_MNTDATA(node->mp)->root_node;
473
474 devfs_gc_links(root_node, node, node->nlinks);
475 devfs_unlinkp(node);
476 devfs_gc_dirs(root_node);
477
478 devfs_freep(node);
479
480 return 0;
481}
482
483/*
484 * devfs_gc_dirs() is a helper function for devfs_gc, unlinking and freeing
485 * empty directories.
486 */
487static int
488devfs_gc_dirs(struct devfs_node *node)
489{
490 struct devfs_node *node1, *node2;
491
21864bc5 492 if ((node->node_type == Proot) || (node->node_type == Pdir)) {
21864bc5 493 if (node->nchildren > 2) {
ca8d7677 494 TAILQ_FOREACH_MUTABLE(node1, DEVFS_DENODE_HEAD(node),
bc185c5a 495 link, node2) {
21864bc5
MD
496 devfs_gc_dirs(node1);
497 }
498 }
499
500 if (node->nchildren == 2) {
21864bc5
MD
501 devfs_unlinkp(node);
502 devfs_freep(node);
503 }
504 }
505
506 return 0;
507}
508
509/*
510 * devfs_gc_links() is a helper function for devfs_gc, unlinking and freeing
511 * auto-linked nodes linking to the node being deleted.
512 */
513static int
ca8d7677
MD
514devfs_gc_links(struct devfs_node *node, struct devfs_node *target,
515 size_t nlinks)
21864bc5
MD
516{
517 struct devfs_node *node1, *node2;
518
519 if (nlinks > 0) {
520 if ((node->node_type == Proot) || (node->node_type == Pdir)) {
21864bc5 521 if (node->nchildren > 2) {
bc185c5a
AH
522 TAILQ_FOREACH_MUTABLE(node1, DEVFS_DENODE_HEAD(node),
523 link, node2) {
21864bc5
MD
524 nlinks = devfs_gc_links(node1, target, nlinks);
525 }
526 }
527 } else if (node->link_target == target) {
528 nlinks--;
529 devfs_unlinkp(node);
530 devfs_freep(node);
531 }
532 }
533
534 KKASSERT(nlinks >= 0);
535
536 return nlinks;
537}
538
539/*
540 * devfs_create_dev() is the asynchronous entry point for device creation.
541 * It just sends a message with the relevant details to the devfs core.
542 *
543 * This function will reference the passed device. The reference is owned
544 * by devfs and represents all of the device's node associations.
545 */
546int
547devfs_create_dev(cdev_t dev, uid_t uid, gid_t gid, int perms)
548{
ca8d7677 549 reference_dev(dev);
bc185c5a
AH
550 devfs_msg_send_dev(DEVFS_DEVICE_CREATE, dev, uid, gid, perms);
551
21864bc5
MD
552 return 0;
553}
554
555/*
556 * devfs_destroy_dev() is the asynchronous entry point for device destruction.
557 * It just sends a message with the relevant details to the devfs core.
558 */
559int
560devfs_destroy_dev(cdev_t dev)
561{
562 devfs_msg_send_dev(DEVFS_DEVICE_DESTROY, dev, 0, 0, 0);
563 return 0;
564}
565
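/*
 * Illustrative sketch (not part of the original source): the device life
 * cycle as seen from devfs. A fully constructed cdev is announced with
 * devfs_create_dev(), which references it and asynchronously creates its
 * nodes on all devfs mounts; devfs_destroy_dev() later tears those nodes
 * down again. The example_* name and 0600 permissions are assumptions for
 * illustration only.
 */
static void
example_device_lifecycle(cdev_t dev)
{
	/* Node creation happens later, in the devfs core thread */
	devfs_create_dev(dev, DEVFS_DEFAULT_UID, DEVFS_DEFAULT_GID, 0600);

	/* ... the device is in service here ... */

	/* Asynchronously remove all devfs nodes belonging to the cdev */
	devfs_destroy_dev(dev);
}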
566/*
567 * devfs_mount_add() is the synchronous entry point for adding a new devfs
568 * mount. It sends a synchronous message with the relevant details to the
569 * devfs core.
570 */
571int
572devfs_mount_add(struct devfs_mnt_data *mnt)
573{
574 devfs_msg_t msg;
575
576 msg = devfs_msg_get();
ca8d7677 577 msg->mdv_mnt = mnt;
21864bc5
MD
578 msg = devfs_msg_send_sync(DEVFS_MOUNT_ADD, msg);
579 devfs_msg_put(msg);
580
581 return 0;
582}
583
584/*
585 * devfs_mount_del() is the synchronous entry point for removing a devfs mount.
586 * It sends a synchronous message with the relevant details to the devfs core.
587 */
588int
589devfs_mount_del(struct devfs_mnt_data *mnt)
590{
591 devfs_msg_t msg;
592
593 msg = devfs_msg_get();
ca8d7677 594 msg->mdv_mnt = mnt;
21864bc5
MD
595 msg = devfs_msg_send_sync(DEVFS_MOUNT_DEL, msg);
596 devfs_msg_put(msg);
597
598 return 0;
599}
600
601/*
602 * devfs_destroy_subnames() is the synchronous entry point for device
603 * destruction by subname. It just sends a message with the relevant details to
604 * the devfs core.
605 */
606int
607devfs_destroy_subnames(char *name)
608{
ca8d7677
MD
609 devfs_msg_t msg;
610
611 msg = devfs_msg_get();
612 msg->mdv_load = name;
613 msg = devfs_msg_send_sync(DEVFS_DESTROY_SUBNAMES, msg);
614 devfs_msg_put(msg);
615 return 0;
616}
617
618int
619devfs_clr_subnames_flag(char *name, uint32_t flag)
620{
621 devfs_msg_t msg;
622
623 msg = devfs_msg_get();
624 msg->mdv_flags.name = name;
625 msg->mdv_flags.flag = flag;
626 msg = devfs_msg_send_sync(DEVFS_CLR_SUBNAMES_FLAG, msg);
627 devfs_msg_put(msg);
628
629 return 0;
630}
631
632int
633devfs_destroy_subnames_without_flag(char *name, uint32_t flag)
634{
635 devfs_msg_t msg;
636
637 msg = devfs_msg_get();
638 msg->mdv_flags.name = name;
639 msg->mdv_flags.flag = flag;
640 msg = devfs_msg_send_sync(DEVFS_DESTROY_SUBNAMES_WO_FLAG, msg);
641 devfs_msg_put(msg);
642
21864bc5
MD
643 return 0;
644}
645
646/*
ca8d7677
MD
647 * devfs_create_all_dev is the asynchronous entry point to trigger device
648 * node creation. It just sends a message with the relevant details to
649 * the devfs core.
21864bc5
MD
650 */
651int
652devfs_create_all_dev(struct devfs_node *root)
653{
654 devfs_msg_send_generic(DEVFS_CREATE_ALL_DEV, root);
655 return 0;
656}
657
658/*
ca8d7677
MD
659 * devfs_destroy_dev_by_ops is the asynchronous entry point to destroy all
660 * devices with a specific set of dev_ops and minor. It just sends a
661 * message with the relevant details to the devfs core.
21864bc5
MD
662 */
663int
664devfs_destroy_dev_by_ops(struct dev_ops *ops, int minor)
665{
666 devfs_msg_send_ops(DEVFS_DESTROY_DEV_BY_OPS, ops, minor);
667 return 0;
668}
669
670/*
ca8d7677
MD
671 * devfs_clone_handler_add is the synchronous entry point to add a new
672 * clone handler. It just sends a message with the relevant details to
673 * the devfs core.
21864bc5
MD
674 */
675int
676devfs_clone_handler_add(char *name, d_clone_t *nhandler)
677{
ca8d7677
MD
678 devfs_msg_t msg;
679
680 msg = devfs_msg_get();
681 msg->mdv_chandler.name = name;
682 msg->mdv_chandler.nhandler = nhandler;
683 msg = devfs_msg_send_sync(DEVFS_CHANDLER_ADD, msg);
684 devfs_msg_put(msg);
21864bc5
MD
685 return 0;
686}
687
688/*
ca8d7677
MD
689 * devfs_clone_handler_del is the synchronous entry point to remove a
690 * clone handler. It just sends a message with the relevant details to
691 * the devfs core.
21864bc5
MD
692 */
693int
694devfs_clone_handler_del(char *name)
695{
ca8d7677
MD
696 devfs_msg_t msg;
697
698 msg = devfs_msg_get();
699 msg->mdv_chandler.name = name;
700 msg->mdv_chandler.nhandler = NULL;
701 msg = devfs_msg_send_sync(DEVFS_CHANDLER_DEL, msg);
702 devfs_msg_put(msg);
21864bc5
MD
703 return 0;
704}
705
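/*
 * Illustrative sketch (not part of the original source): registering and
 * unregistering a clone handler for a clonable basename. The handler shape
 * mirrors the dev_clone_args usage in devfs_clone() further down; all
 * example_* names are assumptions for illustration only.
 */
static int
example_clone(struct dev_clone_args *ap)
{
	/* A real handler would allocate a new cdev and store it in a_dev */
	ap->a_dev = NULL;
	return 1;
}

static void
example_register_clone_handler(void)
{
	devfs_clone_handler_add("example", example_clone);
	/* ... */
	devfs_clone_handler_del("example");
}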
706/*
707 * devfs_find_device_by_name is the synchronous entry point to find a
708 * device given its name. It sends a synchronous message with the
709 * relevant details to the devfs core and returns the answer.
21864bc5
MD
710 */
711cdev_t
712devfs_find_device_by_name(const char *fmt, ...)
713{
714 cdev_t found = NULL;
715 devfs_msg_t msg;
716 char target[PATH_MAX+1];
717 __va_list ap;
718 int i;
719
720 if (fmt == NULL)
721 return NULL;
722
21864bc5
MD
723 __va_start(ap, fmt);
724 i = kvcprintf(fmt, NULL, target, 10, ap);
725 target[i] = '\0';
726 __va_end(ap);
727
21864bc5 728 msg = devfs_msg_get();
ca8d7677 729 msg->mdv_name = target;
21864bc5 730 msg = devfs_msg_send_sync(DEVFS_FIND_DEVICE_BY_NAME, msg);
ca8d7677 731 found = msg->mdv_cdev;
21864bc5
MD
732 devfs_msg_put(msg);
733
21864bc5
MD
734 return found;
735}
736
737/*
ca8d7677
MD
738 * devfs_find_device_by_udev is the synchronous entry point to find a
739 * device given its udev number. It sends a synchronous message with
740 * the relevant details to the devfs core and returns the answer.
21864bc5
MD
741 */
742cdev_t
743devfs_find_device_by_udev(udev_t udev)
744{
745 cdev_t found = NULL;
746 devfs_msg_t msg;
747
748 msg = devfs_msg_get();
ca8d7677 749 msg->mdv_udev = udev;
21864bc5 750 msg = devfs_msg_send_sync(DEVFS_FIND_DEVICE_BY_UDEV, msg);
ca8d7677 751 found = msg->mdv_cdev;
21864bc5
MD
752 devfs_msg_put(msg);
753
ca8d7677
MD
754 devfs_debug(DEVFS_DEBUG_DEBUG,
755 "devfs_find_device_by_udev found? %s -end:3-\n",
756 ((found) ? found->si_name:"NO"));
21864bc5
MD
757 return found;
758}
759
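/*
 * Illustrative sketch (not part of the original source): looking a device up
 * by name and then again by its udev number via the synchronous entry points
 * above. The example_* name and the "kbd0" device name are assumptions for
 * illustration only.
 */
static void
example_lookup(void)
{
	cdev_t dev;

	dev = devfs_find_device_by_name("kbd%d", 0);
	if (dev != NULL) {
		/* The udev of a cdev is derived from its si_inode */
		dev = devfs_find_device_by_udev((udev_t)dev->si_inode);
	}
}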
760struct vnode *
761devfs_inode_to_vnode(struct mount *mp, ino_t target)
762{
763 struct vnode *vp = NULL;
764 devfs_msg_t msg;
765
766 if (mp == NULL)
767 return NULL;
768
769 msg = devfs_msg_get();
770 msg->mdv_ino.mp = mp;
771 msg->mdv_ino.ino = target;
772 msg = devfs_msg_send_sync(DEVFS_INODE_TO_VNODE, msg);
773 vp = msg->mdv_ino.vp;
774 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
775 devfs_msg_put(msg);
776
777 return vp;
778}
779
21864bc5 780/*
ca8d7677
MD
781 * devfs_make_alias is the asynchronous entry point to register an alias
782 * for a device. It just sends a message with the relevant details to the
783 * devfs core.
21864bc5
MD
784 */
785int
786devfs_make_alias(char *name, cdev_t dev_target)
787{
ca8d7677 788 struct devfs_alias *alias;
5298e788
AH
789 size_t len;
790
791 len = strlen(name);
ca8d7677
MD
792
793 alias = kmalloc(sizeof(struct devfs_alias), M_DEVFS, M_WAITOK);
5298e788
AH
794 alias->name = kmalloc(len + 1, M_DEVFS, M_WAITOK);
795 memcpy(alias->name, name, len + 1);
796 alias->namlen = len;
21864bc5
MD
797 alias->dev_target = dev_target;
798
799 devfs_msg_send_generic(DEVFS_MAKE_ALIAS, alias);
800 return 0;
801}
802
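/*
 * Illustrative sketch (not part of the original source): publishing an
 * additional name for an existing device. The devfs core asynchronously
 * creates a Plink node for it on every mount; a path component in the alias
 * name causes the intermediate directories to be created as well. The
 * example_* name and the alias string are assumptions for illustration only.
 */
static void
example_publish_alias(cdev_t dev)
{
	devfs_make_alias("example/alias0", dev);
}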
803/*
804 * devfs_apply_rules is the asynchronous entry point to trigger application
805 * of all rules. It just sends a message with the relevant details to the
806 * devfs core.
807 */
808int
809devfs_apply_rules(char *mntto)
810{
811 char *new_name;
812 size_t namelen;
813
814 namelen = strlen(mntto) + 1;
21864bc5 815 new_name = kmalloc(namelen, M_DEVFS, M_WAITOK);
21864bc5 816 memcpy(new_name, mntto, namelen);
21864bc5 817 devfs_msg_send_name(DEVFS_APPLY_RULES, new_name);
bc185c5a 818
21864bc5
MD
819 return 0;
820}
821
822/*
bc185c5a
AH
823 * devfs_reset_rules is the asynchronous entry point to trigger reset of all
824 * rules. It just sends a message with the relevant details to the devfs core.
21864bc5
MD
825 */
826int
827devfs_reset_rules(char *mntto)
828{
829 char *new_name;
830 size_t namelen;
831
832 namelen = strlen(mntto) + 1;
21864bc5 833 new_name = kmalloc(namelen, M_DEVFS, M_WAITOK);
21864bc5 834 memcpy(new_name, mntto, namelen);
21864bc5 835 devfs_msg_send_name(DEVFS_RESET_RULES, new_name);
bc185c5a 836
21864bc5
MD
837 return 0;
838}
839
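/*
 * Illustrative sketch (not part of the original source): re-applying or
 * resetting rules on every devfs mount at once by passing the wildcard
 * mount name "*", which devfs_apply_reset_rules_caller() treats specially.
 * The example_* name is an assumption for illustration only.
 */
static void
example_reapply_all_rules(void)
{
	devfs_apply_rules("*");
}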
840
841/*
842 * devfs_scan_callback is the asynchronous entry point to call a callback
843 * on all cdevs.
844 * It just sends a message with the relevant details to the devfs core.
845 */
846int
847devfs_scan_callback(devfs_scan_t *callback)
848{
849 devfs_msg_t msg;
850
21864bc5
MD
851 KKASSERT(sizeof(callback) == sizeof(void *));
852
853 msg = devfs_msg_get();
ca8d7677 854 msg->mdv_load = callback;
21864bc5
MD
855 msg = devfs_msg_send_sync(DEVFS_SCAN_CALLBACK, msg);
856 devfs_msg_put(msg);
857
858 return 0;
859}
860
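/*
 * Illustrative sketch (not part of the original source): a scan callback as
 * consumed by devfs_scan_callback(). The core thread invokes it once per
 * registered cdev; the single-argument, void-returning shape follows the
 * call in devfs_scan_callback_worker() below and is otherwise an assumption.
 * The example_* names are for illustration only.
 */
static void
example_scan_cb(cdev_t dev)
{
	devfs_debug(DEVFS_DEBUG_DEBUG, "scanned: %s\n", dev->si_name);
}

static void
example_scan_all(void)
{
	devfs_scan_callback(example_scan_cb);
}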
861
862/*
863 * Acts as a message drain. Any message that is replied to here gets destroyed
864 * and the memory freed.
865 */
866static void
867devfs_msg_autofree_reply(lwkt_port_t port, lwkt_msg_t msg)
868{
869 devfs_msg_put((devfs_msg_t)msg);
870}
871
872/*
873 * devfs_msg_get allocates a new devfs msg and returns it.
874 */
875devfs_msg_t
876devfs_msg_get(void)
877{
878 return objcache_get(devfs_msg_cache, M_WAITOK);
879}
880
881/*
882 * devfs_msg_put deallocates a given devfs msg.
883 */
884int
885devfs_msg_put(devfs_msg_t msg)
886{
887 objcache_put(devfs_msg_cache, msg);
888 return 0;
889}
890
891/*
892 * devfs_msg_send is the generic asynchronous message sending facility
893 * for devfs. By default the reply port is the automatic disposal port.
894 */
895__uint32_t
896devfs_msg_send(uint32_t cmd, devfs_msg_t devfs_msg)
897{
898 lwkt_port_t port = &devfs_msg_port;
899
900 lwkt_initmsg(&devfs_msg->hdr, &devfs_dispose_port, 0);
901
902 devfs_msg->hdr.u.ms_result = cmd;
903 devfs_msg->id = atomic_fetchadd_int(&msg_id, 1);
904
905 lwkt_sendmsg(port, (lwkt_msg_t)devfs_msg);
906
907 return devfs_msg->id;
908}
909
910/*
911 * devfs_msg_send_sync is the generic synchronous message sending
912 * facility for devfs. It initializes a local reply port and waits
913 * for the core's answer. This answer is then returned.
914 */
915devfs_msg_t
916devfs_msg_send_sync(uint32_t cmd, devfs_msg_t devfs_msg)
917{
918 struct lwkt_port rep_port;
919 devfs_msg_t msg_incoming;
920 lwkt_port_t port = &devfs_msg_port;
921
922 lwkt_initport_thread(&rep_port, curthread);
923 lwkt_initmsg(&devfs_msg->hdr, &rep_port, 0);
924
925 devfs_msg->hdr.u.ms_result = cmd;
926 devfs_msg->id = atomic_fetchadd_int(&msg_id, 1);
927
928 lwkt_sendmsg(port, (lwkt_msg_t)devfs_msg);
929 msg_incoming = lwkt_waitport(&rep_port, 0);
930
931 return msg_incoming;
932}
933
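/*
 * Illustrative sketch (not part of the original source): the synchronous
 * round-trip pattern used by the entry points above. DEVFS_SYNC is handled
 * as a no-op by devfs_msg_core(), so waiting for its reply effectively waits
 * for all previously queued messages to be processed. The example_* name is
 * an assumption for illustration only.
 */
static void
example_sync_with_core(void)
{
	devfs_msg_t msg;

	msg = devfs_msg_get();
	msg = devfs_msg_send_sync(DEVFS_SYNC, msg);
	devfs_msg_put(msg);
}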
934/*
935 * sends a message with a generic argument.
936 */
937__uint32_t
938devfs_msg_send_generic(uint32_t cmd, void *load)
939{
940 devfs_msg_t devfs_msg = devfs_msg_get();
ca8d7677 941 devfs_msg->mdv_load = load;
21864bc5 942
21864bc5
MD
943 return devfs_msg_send(cmd, devfs_msg);
944}
945
946/*
947 * sends a message with a name argument.
948 */
949__uint32_t
950devfs_msg_send_name(uint32_t cmd, char *name)
951{
952 devfs_msg_t devfs_msg = devfs_msg_get();
ca8d7677 953 devfs_msg->mdv_name = name;
21864bc5
MD
954
955 return devfs_msg_send(cmd, devfs_msg);
956}
957
958/*
959 * sends a message with a mount argument.
960 */
961__uint32_t
962devfs_msg_send_mount(uint32_t cmd, struct devfs_mnt_data *mnt)
963{
964 devfs_msg_t devfs_msg = devfs_msg_get();
ca8d7677 965 devfs_msg->mdv_mnt = mnt;
21864bc5 966
21864bc5
MD
967 return devfs_msg_send(cmd, devfs_msg);
968}
969
970/*
971 * sends a message with an ops argument.
972 */
973__uint32_t
974devfs_msg_send_ops(uint32_t cmd, struct dev_ops *ops, int minor)
975{
976 devfs_msg_t devfs_msg = devfs_msg_get();
ca8d7677
MD
977 devfs_msg->mdv_ops.ops = ops;
978 devfs_msg->mdv_ops.minor = minor;
21864bc5
MD
979
980 return devfs_msg_send(cmd, devfs_msg);
981}
982
983/*
984 * sends a message with a clone handler argument.
985 */
986__uint32_t
987devfs_msg_send_chandler(uint32_t cmd, char *name, d_clone_t handler)
988{
989 devfs_msg_t devfs_msg = devfs_msg_get();
ca8d7677
MD
990 devfs_msg->mdv_chandler.name = name;
991 devfs_msg->mdv_chandler.nhandler = handler;
21864bc5
MD
992
993 return devfs_msg_send(cmd, devfs_msg);
994}
995
996/*
997 * sends a message with a device argument.
998 */
999__uint32_t
1000devfs_msg_send_dev(uint32_t cmd, cdev_t dev, uid_t uid, gid_t gid, int perms)
1001{
1002 devfs_msg_t devfs_msg = devfs_msg_get();
ca8d7677
MD
1003 devfs_msg->mdv_dev.dev = dev;
1004 devfs_msg->mdv_dev.uid = uid;
1005 devfs_msg->mdv_dev.gid = gid;
1006 devfs_msg->mdv_dev.perms = perms;
21864bc5 1007
21864bc5
MD
1008 return devfs_msg_send(cmd, devfs_msg);
1009}
1010
1011/*
1012 * sends a message with a link argument.
1013 */
21864bc5
MD
1014__uint32_t
1015devfs_msg_send_link(uint32_t cmd, char *name, char *target, struct mount *mp)
1016{
1017 devfs_msg_t devfs_msg = devfs_msg_get();
ca8d7677
MD
1018 devfs_msg->mdv_link.name = name;
1019 devfs_msg->mdv_link.target = target;
1020 devfs_msg->mdv_link.mp = mp;
21864bc5 1021
21864bc5
MD
1022 return devfs_msg_send(cmd, devfs_msg);
1023}
1024
1025/*
1026 * devfs_msg_core is the main devfs thread. It handles all incoming messages
1027 * and calls the relevant worker functions. Funneling all events through
1028 * messages ensures that they are processed in the correct order.
1029 */
1030static void
1031devfs_msg_core(void *arg)
1032{
ca8d7677
MD
1033 uint8_t run = 1;
1034 devfs_msg_t msg;
21864bc5
MD
1035 cdev_t dev;
1036 struct devfs_mnt_data *mnt;
1037 struct devfs_node *node;
1038
21864bc5 1039 lwkt_initport_thread(&devfs_msg_port, curthread);
bc185c5a 1040 wakeup(td_core);
21864bc5 1041
ca8d7677 1042 while (run) {
ca8d7677 1043 msg = (devfs_msg_t)lwkt_waitport(&devfs_msg_port, 0);
bc185c5a
AH
1044 devfs_debug(DEVFS_DEBUG_DEBUG,
1045 "devfs_msg_core, new msg: %x (unique id: %x)\n",
1046 (unsigned int)msg->hdr.u.ms_result, msg->id);
1047
1048 /*
1049 * Acquire the devfs lock to ensure safety of all called functions
1050 */
21864bc5 1051 lockmgr(&devfs_lock, LK_EXCLUSIVE);
ca8d7677
MD
1052 switch (msg->hdr.u.ms_result) {
1053
1054 case DEVFS_DEVICE_CREATE:
1055 dev = msg->mdv_dev.dev;
ca8d7677
MD
1056 devfs_create_dev_worker(dev,
1057 msg->mdv_dev.uid,
1058 msg->mdv_dev.gid,
1059 msg->mdv_dev.perms);
21864bc5
MD
1060 break;
1061
1062 case DEVFS_DEVICE_DESTROY:
ca8d7677 1063 dev = msg->mdv_dev.dev;
21864bc5 1064 devfs_destroy_dev_worker(dev);
ca8d7677 1065 break;
21864bc5
MD
1066
1067 case DEVFS_DESTROY_SUBNAMES:
ca8d7677 1068 devfs_destroy_subnames_worker(msg->mdv_load);
21864bc5
MD
1069 break;
1070
1071 case DEVFS_DESTROY_DEV_BY_OPS:
ca8d7677
MD
1072 devfs_destroy_dev_by_ops_worker(msg->mdv_ops.ops,
1073 msg->mdv_ops.minor);
21864bc5
MD
1074 break;
1075
1076 case DEVFS_CREATE_ALL_DEV:
ca8d7677 1077 node = (struct devfs_node *)msg->mdv_load;
21864bc5
MD
1078 devfs_create_all_dev_worker(node);
1079 break;
1080
1081 case DEVFS_MOUNT_ADD:
ca8d7677 1082 mnt = msg->mdv_mnt;
21864bc5
MD
1083 TAILQ_INSERT_TAIL(&devfs_mnt_list, mnt, link);
1084 devfs_create_all_dev_worker(mnt->root_node);
1085 break;
1086
1087 case DEVFS_MOUNT_DEL:
ca8d7677 1088 mnt = msg->mdv_mnt;
21864bc5 1089 TAILQ_REMOVE(&devfs_mnt_list, mnt, link);
21864bc5 1090 devfs_reaperp(mnt->root_node);
0709f681
MD
1091 if (mnt->leak_count) {
1092 devfs_debug(DEVFS_DEBUG_SHOW,
1093 "Leaked %d devfs_node elements!\n",
1094 mnt->leak_count);
1095 }
21864bc5
MD
1096 break;
1097
1098 case DEVFS_CHANDLER_ADD:
bc185c5a
AH
1099 devfs_chandler_add_worker(msg->mdv_chandler.name,
1100 msg->mdv_chandler.nhandler);
21864bc5
MD
1101 break;
1102
1103 case DEVFS_CHANDLER_DEL:
ca8d7677 1104 devfs_chandler_del_worker(msg->mdv_chandler.name);
21864bc5
MD
1105 break;
1106
1107 case DEVFS_FIND_DEVICE_BY_NAME:
1108 devfs_find_device_by_name_worker(msg);
1109 break;
1110
1111 case DEVFS_FIND_DEVICE_BY_UDEV:
1112 devfs_find_device_by_udev_worker(msg);
1113 break;
1114
1115 case DEVFS_MAKE_ALIAS:
ca8d7677 1116 devfs_make_alias_worker((struct devfs_alias *)msg->mdv_load);
21864bc5
MD
1117 break;
1118
1119 case DEVFS_APPLY_RULES:
ca8d7677 1120 devfs_apply_reset_rules_caller(msg->mdv_name, 1);
21864bc5
MD
1121 break;
1122
1123 case DEVFS_RESET_RULES:
ca8d7677 1124 devfs_apply_reset_rules_caller(msg->mdv_name, 0);
21864bc5
MD
1125 break;
1126
1127 case DEVFS_SCAN_CALLBACK:
ca8d7677 1128 devfs_scan_callback_worker((devfs_scan_t *)msg->mdv_load);
21864bc5
MD
1129 break;
1130
ca8d7677
MD
1131 case DEVFS_CLR_SUBNAMES_FLAG:
1132 devfs_clr_subnames_flag_worker(msg->mdv_flags.name,
bc185c5a 1133 msg->mdv_flags.flag);
ca8d7677 1134 break;
21864bc5 1135
ca8d7677
MD
1136 case DEVFS_DESTROY_SUBNAMES_WO_FLAG:
1137 devfs_destroy_subnames_without_flag_worker(msg->mdv_flags.name,
bc185c5a 1138 msg->mdv_flags.flag);
21864bc5
MD
1139 break;
1140
fa7e6f37
AH
1141 case DEVFS_INODE_TO_VNODE:
1142 msg->mdv_ino.vp = devfs_inode_to_vnode_worker(
1143 DEVFS_MNTDATA(msg->mdv_ino.mp)->root_node,
1144 msg->mdv_ino.ino);
1145 break;
1146
ca8d7677
MD
1147 case DEVFS_TERMINATE_CORE:
1148 run = 0;
1149 break;
1150 case DEVFS_SYNC:
1151 break;
1152 default:
bc185c5a 1153 devfs_debug(DEVFS_DEBUG_WARNING,
ca8d7677
MD
1154 "devfs_msg_core: unknown message "
1155 "received at core\n");
1156 break;
1157 }
21864bc5
MD
1158 lockmgr(&devfs_lock, LK_RELEASE);
1159
ca8d7677
MD
1160 lwkt_replymsg((lwkt_msg_t)msg, 0);
1161 }
bc185c5a 1162 wakeup(td_core);
21864bc5
MD
1163 lwkt_exit();
1164}
1165
1166/*
1167 * Worker function to insert a new dev into the dev list and initialize its
1168 * permissions. It also calls devfs_propagate_dev which in turn propagates
1169 * the change to all mount points.
ca8d7677
MD
1170 *
1171 * The passed dev is already referenced. This reference is eaten by this
1172 * function and represents the dev's linkage into devfs_dev_list.
21864bc5
MD
1173 */
1174static int
1175devfs_create_dev_worker(cdev_t dev, uid_t uid, gid_t gid, int perms)
1176{
1177 KKASSERT(dev);
21864bc5
MD
1178
1179 dev->si_uid = uid;
1180 dev->si_gid = gid;
1181 dev->si_perms = perms;
1182
1183 devfs_link_dev(dev);
21864bc5
MD
1184 devfs_propagate_dev(dev, 1);
1185
21864bc5
MD
1186 return 0;
1187}
1188
1189/*
1190 * Worker function to delete a dev from the dev list and free the cdev.
1191 * It also calls devfs_propagate_dev which in turn propagates the change
1192 * to all mount points.
1193 */
1194static int
1195devfs_destroy_dev_worker(cdev_t dev)
1196{
ca8d7677
MD
1197 int error;
1198
21864bc5
MD
1199 KKASSERT(dev);
1200 KKASSERT((lockstatus(&devfs_lock, curthread)) == LK_EXCLUSIVE);
1201
ca8d7677 1202 error = devfs_unlink_dev(dev);
21864bc5 1203 devfs_propagate_dev(dev, 0);
ca8d7677
MD
1204 if (error == 0)
1205 release_dev(dev); /* link ref */
21864bc5
MD
1206 release_dev(dev);
1207 release_dev(dev);
21864bc5 1208
21864bc5
MD
1209 return 0;
1210}
1211
1212/*
1213 * Worker function to destroy all devices with a certain basename.
1214 * Calls devfs_destroy_dev_worker for the actual destruction.
1215 */
1216static int
1217devfs_destroy_subnames_worker(char *name)
1218{
1219 cdev_t dev, dev1;
21864bc5
MD
1220 size_t len = strlen(name);
1221
ca8d7677 1222 TAILQ_FOREACH_MUTABLE(dev, &devfs_dev_list, link, dev1) {
bc185c5a
AH
1223 if ((!strncmp(dev->si_name, name, len)) &&
1224 (dev->si_name[len] != '\0')) {
1225 devfs_destroy_dev_worker(dev);
21864bc5 1226 }
ca8d7677
MD
1227 }
1228 return 0;
1229}
1230
1231static int
1232devfs_clr_subnames_flag_worker(char *name, uint32_t flag)
1233{
1234 cdev_t dev, dev1;
1235 size_t len = strlen(name);
1236
1237 TAILQ_FOREACH_MUTABLE(dev, &devfs_dev_list, link, dev1) {
bc185c5a
AH
1238 if ((!strncmp(dev->si_name, name, len)) &&
1239 (dev->si_name[len] != '\0')) {
1240 dev->si_flags &= ~flag;
ca8d7677
MD
1241 }
1242 }
1243
1244 return 0;
1245}
1246
1247static int
1248devfs_destroy_subnames_without_flag_worker(char *name, uint32_t flag)
1249{
1250 cdev_t dev, dev1;
1251 size_t len = strlen(name);
1252
1253 TAILQ_FOREACH_MUTABLE(dev, &devfs_dev_list, link, dev1) {
bc185c5a
AH
1254 if ((!strncmp(dev->si_name, name, len)) &&
1255 (dev->si_name[len] != '\0')) {
1256 if (!(dev->si_flags & flag)) {
1257 devfs_destroy_dev_worker(dev);
ca8d7677
MD
1258 }
1259 }
1260 }
21864bc5
MD
1261
1262 return 0;
1263}
1264
1265/*
1266 * Worker function that creates all device nodes on top of a devfs
1267 * root node.
1268 */
1269static int
1270devfs_create_all_dev_worker(struct devfs_node *root)
1271{
1272 cdev_t dev;
1273
1274 KKASSERT(root);
21864bc5
MD
1275
1276 TAILQ_FOREACH(dev, &devfs_dev_list, link) {
21864bc5
MD
1277 devfs_create_device_node(root, dev, NULL, NULL);
1278 }
bc185c5a 1279
21864bc5
MD
1280 return 0;
1281}
1282
1283/*
1284 * Worker function that destroys all devices that match a specific
1285 * dev_ops and/or minor. If minor is less than 0, it is not matched
1286 * against. It also propagates all changes.
1287 */
1288static int
1289devfs_destroy_dev_by_ops_worker(struct dev_ops *ops, int minor)
1290{
1291 cdev_t dev, dev1;
1292
1293 KKASSERT(ops);
ca8d7677
MD
1294
1295 TAILQ_FOREACH_MUTABLE(dev, &devfs_dev_list, link, dev1) {
1296 if (dev->si_ops != ops)
1297 continue;
1298 if ((minor < 0) || (dev->si_uminor == minor)) {
ca8d7677 1299 devfs_destroy_dev_worker(dev);
21864bc5 1300 }
ca8d7677 1301 }
bc185c5a 1302
21864bc5
MD
1303 return 0;
1304}
1305
1306/*
1307 * Worker function that registers a new clone handler in devfs.
1308 */
1309static int
1310devfs_chandler_add_worker(char *name, d_clone_t *nhandler)
1311{
1312 struct devfs_clone_handler *chandler = NULL;
1313 u_char len = strlen(name);
1314
ca8d7677 1315 if (len == 0)
21864bc5
MD
1316 return 1;
1317
ca8d7677 1318 TAILQ_FOREACH(chandler, &devfs_chandler_list, link) {
bc185c5a
AH
1319 if (chandler->namlen != len)
1320 continue;
1321
1322 if (!memcmp(chandler->name, name, len)) {
1323 /* Clonable basename already exists */
1324 return 1;
21864bc5
MD
1325 }
1326 }
1327
ca8d7677 1328 chandler = kmalloc(sizeof(*chandler), M_DEVFS, M_WAITOK | M_ZERO);
5298e788 1329 chandler->name = kmalloc(len+1, M_DEVFS, M_WAITOK);
21864bc5
MD
1330 memcpy(chandler->name, name, len+1);
1331 chandler->namlen = len;
1332 chandler->nhandler = nhandler;
1333
1334 TAILQ_INSERT_TAIL(&devfs_chandler_list, chandler, link);
1335 return 0;
1336}
1337
1338/*
1339 * Worker function that removes a given clone handler from the
1340 * clone handler list.
1341 */
1342static int
1343devfs_chandler_del_worker(char *name)
1344{
1345 struct devfs_clone_handler *chandler, *chandler2;
1346 u_char len = strlen(name);
1347
ca8d7677 1348 if (len == 0)
21864bc5
MD
1349 return 1;
1350
ca8d7677
MD
1351 TAILQ_FOREACH_MUTABLE(chandler, &devfs_chandler_list, link, chandler2) {
1352 if (chandler->namlen != len)
1353 continue;
1354 if (memcmp(chandler->name, name, len))
1355 continue;
bc185c5a 1356
ca8d7677 1357 TAILQ_REMOVE(&devfs_chandler_list, chandler, link);
5298e788 1358 kfree(chandler->name, M_DEVFS);
ca8d7677 1359 kfree(chandler, M_DEVFS);
5298e788 1360 break;
21864bc5
MD
1361 }
1362
1363 return 0;
1364}
1365
1366/*
1367 * Worker function that finds a given device name and changes
1368 * the message received accordingly so that when replied to,
1369 * the answer is returned to the caller.
1370 */
1371static int
1372devfs_find_device_by_name_worker(devfs_msg_t devfs_msg)
1373{
6507240b
MD
1374 struct devfs_alias *alias;
1375 cdev_t dev;
21864bc5 1376 cdev_t found = NULL;
21864bc5 1377
6507240b
MD
1378 TAILQ_FOREACH(dev, &devfs_dev_list, link) {
1379 if (strcmp(devfs_msg->mdv_name, dev->si_name) == 0) {
21864bc5
MD
1380 found = dev;
1381 break;
1382 }
ca8d7677 1383 }
6507240b
MD
1384 if (found == NULL) {
1385 TAILQ_FOREACH(alias, &devfs_alias_list, link) {
1386 if (strcmp(devfs_msg->mdv_name, alias->name) == 0) {
1387 found = alias->dev_target;
1388 break;
1389 }
1390 }
1391 }
ca8d7677 1392 devfs_msg->mdv_cdev = found;
21864bc5
MD
1393
1394 return 0;
1395}
1396
1397/*
1398 * Worker function that finds a given device udev and changes
1399 * the message received accordingly so that when replied to,
1400 * the answer is returned to the caller.
1401 */
1402static int
1403devfs_find_device_by_udev_worker(devfs_msg_t devfs_msg)
1404{
1405 cdev_t dev, dev1;
1406 cdev_t found = NULL;
21864bc5 1407
ca8d7677
MD
1408 TAILQ_FOREACH_MUTABLE(dev, &devfs_dev_list, link, dev1) {
1409 if (((udev_t)dev->si_inode) == devfs_msg->mdv_udev) {
21864bc5
MD
1410 found = dev;
1411 break;
1412 }
ca8d7677
MD
1413 }
1414 devfs_msg->mdv_cdev = found;
21864bc5
MD
1415
1416 return 0;
1417}
1418
1419/*
1420 * Worker function that inserts a given alias into the
1421 * alias list, and propagates the alias to all mount
1422 * points.
1423 */
1424static int
1425devfs_make_alias_worker(struct devfs_alias *alias)
1426{
1427 struct devfs_alias *alias2;
1428 size_t len = strlen(alias->name);
1429 int found = 0;
1430
1431 TAILQ_FOREACH(alias2, &devfs_alias_list, link) {
bc185c5a
AH
1432 if (len != alias2->namlen)
1433 continue;
1434
1435 if (!memcmp(alias->name, alias2->name, len)) {
1436 found = 1;
1437 break;
21864bc5
MD
1438 }
1439 }
1440
1441 if (!found) {
bc185c5a
AH
1442 /*
1443 * The alias doesn't exist yet, so we add it to the alias list
1444 */
21864bc5
MD
1445 TAILQ_INSERT_TAIL(&devfs_alias_list, alias, link);
1446 devfs_alias_propagate(alias);
1447 } else {
5298e788 1448 devfs_debug(DEVFS_DEBUG_WARNING,
ca8d7677
MD
1449 "Warning: duplicate devfs_make_alias for %s\n",
1450 alias->name);
5298e788 1451 kfree(alias->name, M_DEVFS);
21864bc5
MD
1452 kfree(alias, M_DEVFS);
1453 }
1454
1455 return 0;
1456}
1457
1458/*
1459 * Function that removes and frees all aliases.
1460 */
1461static int
1462devfs_alias_reap(void)
1463{
1464 struct devfs_alias *alias, *alias2;
1465
1466 TAILQ_FOREACH_MUTABLE(alias, &devfs_alias_list, link, alias2) {
1467 TAILQ_REMOVE(&devfs_alias_list, alias, link);
1468 kfree(alias, M_DEVFS);
1469 }
1470 return 0;
1471}
1472
1473/*
1474 * Function that removes an alias matching a specific cdev and frees
1475 * it accordingly.
1476 */
1477static int
1478devfs_alias_remove(cdev_t dev)
1479{
1480 struct devfs_alias *alias, *alias2;
1481
1482 TAILQ_FOREACH_MUTABLE(alias, &devfs_alias_list, link, alias2) {
1483 if (alias->dev_target == dev) {
1484 TAILQ_REMOVE(&devfs_alias_list, alias, link);
1485 kfree(alias, M_DEVFS);
1486 }
1487 }
1488 return 0;
1489}
1490
1491/*
1492 * This function propagates a new alias to all mount points.
1493 */
1494static int
1495devfs_alias_propagate(struct devfs_alias *alias)
1496{
1497 struct devfs_mnt_data *mnt;
1498
1499 TAILQ_FOREACH(mnt, &devfs_mnt_list, link) {
1500 devfs_alias_apply(mnt->root_node, alias);
1501 }
1502 return 0;
1503}
1504
1505/*
1506 * This function is a recursive function iterating through
1507 * all device nodes in the topology and, if applicable,
1508 * creating the relevant alias for a device node.
1509 */
1510static int
1511devfs_alias_apply(struct devfs_node *node, struct devfs_alias *alias)
1512{
1513 struct devfs_node *node1, *node2;
1514
1515 KKASSERT(alias != NULL);
1516
1517 if ((node->node_type == Proot) || (node->node_type == Pdir)) {
21864bc5 1518 if (node->nchildren > 2) {
ca8d7677 1519 TAILQ_FOREACH_MUTABLE(node1, DEVFS_DENODE_HEAD(node), link, node2) {
21864bc5
MD
1520 devfs_alias_apply(node1, alias);
1521 }
1522 }
1523 } else {
1524 if (node->d_dev == alias->dev_target)
1525 devfs_alias_create(alias->name, node);
1526 }
1527 return 0;
1528}
1529
1530/*
1531 * This function checks if any alias possibly is applicable
1532 * to the given node. If so, the alias is created.
1533 */
1534static int
1535devfs_alias_check_create(struct devfs_node *node)
1536{
1537 struct devfs_alias *alias;
1538
1539 TAILQ_FOREACH(alias, &devfs_alias_list, link) {
1540 if (node->d_dev == alias->dev_target)
1541 devfs_alias_create(alias->name, node);
1542 }
1543 return 0;
1544}
1545
1546/*
1547 * This function creates an alias with a given name
1548 * linking to a given devfs node. It also increments
1549 * the link count on the target node.
1550 */
1551int
1552devfs_alias_create(char *name_orig, struct devfs_node *target)
1553{
1554 struct mount *mp = target->mp;
1555 struct devfs_node *parent = DEVFS_MNTDATA(mp)->root_node;
1556 struct devfs_node *linknode;
21864bc5
MD
1557 char *create_path = NULL;
1558 char *name, name_buf[PATH_MAX];
1559
21864bc5
MD
1560 KKASSERT((lockstatus(&devfs_lock, curthread)) == LK_EXCLUSIVE);
1561
1562 devfs_resolve_name_path(name_orig, name_buf, &create_path, &name);
1563
1564 if (create_path)
1565 parent = devfs_resolve_or_create_path(parent, create_path, 1);
1566
1567
1568 if (devfs_find_device_node_by_name(parent, name)) {
bc185c5a 1569 devfs_debug(DEVFS_DEBUG_WARNING,
ca8d7677
MD
1570 "Node already exists: %s "
1571 "(devfs_make_alias_worker)!\n",
1572 name);
21864bc5
MD
1573 return 1;
1574 }
1575
1576
1577 linknode = devfs_allocp(Plink, name, parent, mp, NULL);
1578 if (linknode == NULL)
1579 return 1;
1580
1581 linknode->link_target = target;
1582 target->nlinks++;
21864bc5
MD
1583
1584 return 0;
1585}
1586
1587/*
1588 * This function is called by the core and handles mount point
1589 * strings. It either calls the relevant worker,
1590 * devfs_apply_reset_rules_worker(), on all mount points or only
1591 * on a specific one.
1592 */
1593static int
1594devfs_apply_reset_rules_caller(char *mountto, int apply)
1595{
21864bc5
MD
1596 struct devfs_mnt_data *mnt;
1597 size_t len = strlen(mountto);
1598
bc185c5a 1599 if (mountto[0] == '*') {
21864bc5 1600 TAILQ_FOREACH(mnt, &devfs_mnt_list, link) {
bc185c5a 1601 devfs_apply_reset_rules_worker(mnt->root_node, apply);
21864bc5
MD
1602 }
1603 } else {
1604 TAILQ_FOREACH(mnt, &devfs_mnt_list, link) {
bc185c5a
AH
1605 if ((len != mnt->mntonnamelen))
1606 continue;
1607
1608 if (!memcmp(mnt->mp->mnt_stat.f_mntonname, mountto, len)) {
1609 devfs_apply_reset_rules_worker(mnt->root_node, apply);
1610 break;
1611 }
21864bc5
MD
1612 }
1613 }
1614
1615 kfree(mountto, M_DEVFS);
1616 return 0;
1617}
1618
1619/*
1620 * This worker function applies or resets, depending on the arguments, a rule
1621 * to the whole given topology. *RECURSIVE*
1622 */
1623static int
1624devfs_apply_reset_rules_worker(struct devfs_node *node, int apply)
1625{
1626 struct devfs_node *node1, *node2;
1627
1628 if ((node->node_type == Proot) || (node->node_type == Pdir)) {
21864bc5 1629 if (node->nchildren > 2) {
bc185c5a 1630 TAILQ_FOREACH_MUTABLE(node1, DEVFS_DENODE_HEAD(node), link, node2) {
21864bc5
MD
1631 devfs_apply_reset_rules_worker(node1, apply);
1632 }
1633 }
1634 }
1635
1636 if (apply)
1637 devfs_rule_check_apply(node);
1638 else
1639 devfs_rule_reset_node(node);
1640
1641 return 0;
1642}
1643
1644
1645/*
1646 * This function calls a given callback function for
1647 * every dev node in the devfs dev list.
1648 */
1649static int
1650devfs_scan_callback_worker(devfs_scan_t *callback)
1651{
1652 cdev_t dev, dev1;
1653
21864bc5
MD
1654 TAILQ_FOREACH_MUTABLE(dev, &devfs_dev_list, link, dev1) {
1655 callback(dev);
1656 }
1657
21864bc5
MD
1658 return 0;
1659}
1660
1661
1662/*
1663 * This function tries to resolve a given directory, or if not
1664 * found and creation requested, creates the given directory.
1665 */
1666static struct devfs_node *
ca8d7677
MD
1667devfs_resolve_or_create_dir(struct devfs_node *parent, char *dir_name,
1668 size_t name_len, int create)
21864bc5
MD
1669{
1670 struct devfs_node *node, *found = NULL;
1671
1672 TAILQ_FOREACH(node, DEVFS_DENODE_HEAD(parent), link) {
bc185c5a
AH
1673 if (name_len != node->d_dir.d_namlen)
1674 continue;
1675
1676 if (!memcmp(dir_name, node->d_dir.d_name, name_len)) {
1677 found = node;
1678 break;
21864bc5
MD
1679 }
1680 }
1681
1682 if ((found == NULL) && (create)) {
1683 found = devfs_allocp(Pdir, dir_name, parent, parent->mp, NULL);
1684 }
1685
1686 return found;
1687}
1688
1689/*
1690 * This function tries to resolve a complete path. If creation is requested,
1691 * if a given part of the path cannot be resolved (because it doesn't exist),
1692 * it is created.
1693 */
1694struct devfs_node *
1695devfs_resolve_or_create_path(struct devfs_node *parent, char *path, int create)
1696{
1697 struct devfs_node *node = parent;
1698 char buf[PATH_MAX];
1699 size_t idx = 0;
1700
1701
1702 if (path == NULL)
1703 return parent;
1704
1705
1706 for (; *path != '\0' ; path++) {
1707 if (*path != '/') {
1708 buf[idx++] = *path;
1709 } else {
1710 buf[idx] = '\0';
1711 node = devfs_resolve_or_create_dir(node, buf, idx, create);
1712 if (node == NULL)
1713 return NULL;
1714 idx = 0;
1715 }
1716 }
1717 buf[idx] = '\0';
1718 return devfs_resolve_or_create_dir(node, buf, idx, create);
1719}
1720
1721/*
1722 * Takes a full path and strips it into a directory path and a name.
1723 * For a/b/c/foo, it returns foo in namep and a/b/c in pathp. It
1724 * requires a working buffer large enough to hold the whole
1725 * fullpath.
1726 */
1727int
1728devfs_resolve_name_path(char *fullpath, char *buf, char **pathp, char **namep)
1729{
1730 char *name = NULL;
1731 char *path = NULL;
1732 size_t len = strlen(fullpath) + 1;
1733 int i;
1734
bc185c5a
AH
1735 KKASSERT((fullpath != NULL) && (buf != NULL));
1736 KKASSERT((pathp != NULL) && (namep != NULL));
21864bc5
MD
1737
1738 memcpy(buf, fullpath, len);
1739
1740 for (i = len-1; i>= 0; i--) {
1741 if (buf[i] == '/') {
1742 buf[i] = '\0';
1743 name = &(buf[i+1]);
1744 path = buf;
1745 break;
1746 }
1747 }
1748
1749 *pathp = path;
1750
1751 if (name) {
1752 *namep = name;
1753 } else {
1754 *namep = buf;
1755 }
1756
1757 return 0;
1758}
1759
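/*
 * Illustrative sketch (not part of the original source): splitting a device
 * name that contains path components. After the call, path points at "usb"
 * and name at "ugen0", both inside the caller-supplied buffer. The
 * example_* name is an assumption for illustration only.
 */
static void
example_split_name(void)
{
	char full[] = "usb/ugen0";
	char buf[PATH_MAX];
	char *path, *name;

	devfs_resolve_name_path(full, buf, &path, &name);
}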
1760/*
1761 * This function creates a new devfs node for a given device. It can
1762 * handle a complete path as device name, and accordingly creates
1763 * the path and the final device node.
1764 *
1765 * The reference count on the passed dev remains unchanged.
21864bc5
MD
1766 */
1767struct devfs_node *
ca8d7677
MD
1768devfs_create_device_node(struct devfs_node *root, cdev_t dev,
1769 char *dev_name, char *path_fmt, ...)
21864bc5
MD
1770{
1771 struct devfs_node *parent, *node = NULL;
1772 char *path = NULL;
1773 char *name, name_buf[PATH_MAX];
1774 __va_list ap;
1775 int i, found;
1776
1777 char *create_path = NULL;
1778 char *names = "pqrsPQRS";
1779
21864bc5
MD
1780 if (path_fmt != NULL) {
1781 path = kmalloc(PATH_MAX+1, M_DEVFS, M_WAITOK);
1782
1783 __va_start(ap, path_fmt);
1784 i = kvcprintf(path_fmt, NULL, path, 10, ap);
1785 path[i] = '\0';
1786 __va_end(ap);
1787 }
1788
1789 parent = devfs_resolve_or_create_path(root, path, 1);
1790 KKASSERT(parent);
1791
bc185c5a
AH
1792 devfs_resolve_name_path(
1793 ((dev_name == NULL) && (dev))?(dev->si_name):(dev_name),
1794 name_buf, &create_path, &name);
21864bc5
MD
1795
1796 if (create_path)
1797 parent = devfs_resolve_or_create_path(parent, create_path, 1);
1798
1799
1800 if (devfs_find_device_node_by_name(parent, name)) {
bc185c5a 1801 devfs_debug(DEVFS_DEBUG_WARNING, "devfs_create_device_node: "
894bbb25 1802 "DEVICE %s ALREADY EXISTS!!! Ignoring creation request.\n", name);
21864bc5
MD
1803 goto out;
1804 }
bc185c5a 1805
21864bc5 1806 node = devfs_allocp(Pdev, name, parent, parent->mp, dev);
21864bc5 1807
894bbb25 1808#if 0
bc185c5a
AH
1809 /*
1810 * Ugly unix98 pty magic, to hide pty master (ptm) devices and their
1811 * directory
1812 */
1813 if ((dev) && (strlen(dev->si_name) >= 4) &&
1814 (!memcmp(dev->si_name, "ptm/", 4))) {
894bbb25
AH
1815 node->parent->flags |= DEVFS_HIDDEN;
1816 node->flags |= DEVFS_HIDDEN;
21864bc5 1817 }
894bbb25 1818#endif
bc185c5a
AH
1819
1820 /*
1821 * Ugly pty magic, to tag pty devices as such and hide them if needed.
1822 */
21864bc5
MD
1823 if ((strlen(name) >= 3) && (!memcmp(name, "pty", 3)))
1824 node->flags |= (DEVFS_PTY | DEVFS_INVISIBLE);
1825
21864bc5
MD
1826 if ((strlen(name) >= 3) && (!memcmp(name, "tty", 3))) {
1827 found = 0;
1828 for (i = 0; i < strlen(names); i++) {
1829 if (name[3] == names[i]) {
1830 found = 1;
1831 break;
1832 }
1833 }
1834 if (found)
1835 node->flags |= (DEVFS_PTY | DEVFS_INVISIBLE);
1836 }
21864bc5
MD
1837
1838out:
1839 if (path_fmt != NULL)
1840 kfree(path, M_DEVFS);
21864bc5
MD
1841
1842 return node;
1843}
1844
1845/*
1846 * This function finds a given device node in the topology with a given
1847 * cdev.
1848 */
1849struct devfs_node *
1850devfs_find_device_node(struct devfs_node *node, cdev_t target)
1851{
1852 struct devfs_node *node1, *node2, *found = NULL;
1853
1854 if ((node->node_type == Proot) || (node->node_type == Pdir)) {
21864bc5 1855 if (node->nchildren > 2) {
bc185c5a 1856 TAILQ_FOREACH_MUTABLE(node1, DEVFS_DENODE_HEAD(node), link, node2) {
21864bc5
MD
1857 if ((found = devfs_find_device_node(node1, target)))
1858 return found;
1859 }
1860 }
1861 } else if (node->node_type == Pdev) {
1862 if (node->d_dev == target)
1863 return node;
1864 }
21864bc5
MD
1865
1866 return NULL;
1867}
1868
1869/*
1870 * This function finds a device node in the topology by its
1871 * name and returns it.
1872 */
1873struct devfs_node *
1874devfs_find_device_node_by_name(struct devfs_node *parent, char *target)
1875{
1876 struct devfs_node *node, *found = NULL;
1877 size_t len = strlen(target);
1878
1879 TAILQ_FOREACH(node, DEVFS_DENODE_HEAD(parent), link) {
bc185c5a
AH
1880 if (len != node->d_dir.d_namlen)
1881 continue;
1882
1883 if (!memcmp(node->d_dir.d_name, target, len)) {
21864bc5
MD
1884 found = node;
1885 break;
1886 }
1887 }
1888
1889 return found;
1890}
1891
fa7e6f37
AH
1892static struct vnode*
1893devfs_inode_to_vnode_worker(struct devfs_node *node, ino_t target)
1894{
1895 struct devfs_node *node1, *node2;
1896 struct vnode* vp;
1897
1898 if ((node->node_type == Proot) || (node->node_type == Pdir)) {
fa7e6f37 1899 if (node->nchildren > 2) {
bc185c5a 1900 TAILQ_FOREACH_MUTABLE(node1, DEVFS_DENODE_HEAD(node), link, node2) {
fa7e6f37
AH
1901 if ((vp = devfs_inode_to_vnode_worker(node1, target)))
1902 return vp;
1903 }
1904 }
1905 }
bc185c5a 1906
fa7e6f37
AH
1907 if (node->d_dir.d_ino == target) {
1908 if (node->v_node) {
1909 vp = node->v_node;
1910 vget(vp, LK_EXCLUSIVE | LK_RETRY);
1911 vn_unlock(vp);
1912 } else {
1913 devfs_allocv(&vp, node);
1914 vn_unlock(vp);
1915 }
1916 return vp;
1917 }
1918
1919 return NULL;
1920}
1921
21864bc5 1922/*
ca8d7677
MD
1923 * This function takes a cdev and removes its devfs node in the
1924 * given topology. The cdev remains intact.
21864bc5
MD
1925 */
1926int
1927devfs_destroy_device_node(struct devfs_node *root, cdev_t target)
1928{
1929 struct devfs_node *node, *parent;
21864bc5 1930 char *name, name_buf[PATH_MAX];
21864bc5
MD
1931 char *create_path = NULL;
1932
1933 KKASSERT(target);
1934
21864bc5
MD
1935 memcpy(name_buf, target->si_name, strlen(target->si_name)+1);
1936
1937 devfs_resolve_name_path(target->si_name, name_buf, &create_path, &name);
21864bc5
MD
1938
1939 if (create_path)
1940 parent = devfs_resolve_or_create_path(root, create_path, 0);
1941 else
1942 parent = root;
bc185c5a 1943
21864bc5
MD
1944 if (parent == NULL)
1945 return 1;
bc185c5a 1946
21864bc5 1947 node = devfs_find_device_node_by_name(parent, name);
bc185c5a 1948
ca8d7677 1949 if (node)
21864bc5 1950 devfs_gc(node);
21864bc5
MD
1951
1952 return 0;
1953}
1954
1955/*
1956 * Just set perms and ownership for given node.
1957 */
1958int
bc185c5a
AH
1959devfs_set_perms(struct devfs_node *node, uid_t uid, gid_t gid,
1960 u_short mode, u_long flags)
21864bc5 1961{
bc185c5a
AH
1962 node->mode = mode;
1963 node->uid = uid;
1964 node->gid = gid;
21864bc5
MD
1965
1966 return 0;
1967}
1968
1969/*
1970 * Propagates a device attach/detach to all mount
1971 * points. Also takes care of automatic alias removal
1972 * for a deleted cdev.
1973 */
1974static int
1975devfs_propagate_dev(cdev_t dev, int attach)
1976{
1977 struct devfs_mnt_data *mnt;
1978
21864bc5 1979 TAILQ_FOREACH(mnt, &devfs_mnt_list, link) {
21864bc5
MD
1980 if (attach) {
1981 /* Device is being attached */
ca8d7677
MD
1982 devfs_create_device_node(mnt->root_node, dev,
1983 NULL, NULL );
21864bc5
MD
1984 } else {
1985 /* Device is being detached */
21864bc5
MD
1986 devfs_alias_remove(dev);
1987 devfs_destroy_device_node(mnt->root_node, dev);
1988 }
1989 }
21864bc5
MD
1990 return 0;
1991}
1992
1993/*
1994 * devfs_node_to_path takes a node and a buffer of at least
1995 * PATH_MAX bytes, resolves the full path from the root
1996 * node and writes it in a human-readable format into the
1997 * buffer.
1998 * If DEVFS_STASH_DEPTH is less than the directory level up
1999 * to the root node, only the last DEVFS_STASH_DEPTH levels
2000 * of the path are resolved.
2001 */
2002int
2003devfs_node_to_path(struct devfs_node *node, char *buffer)
2004{
2005#define DEVFS_STASH_DEPTH 32
2006 struct devfs_node *node_stash[DEVFS_STASH_DEPTH];
2007 int i, offset;
2008 memset(buffer, 0, PATH_MAX);
2009
2010 for (i = 0; (i < DEVFS_STASH_DEPTH) && (node->node_type != Proot); i++) {
2011 node_stash[i] = node;
2012 node = node->parent;
2013 }
2014 i--;
2015
2016 for (offset = 0; i >= 0; i--) {
2017 memcpy(buffer+offset, node_stash[i]->d_dir.d_name,
2018 node_stash[i]->d_dir.d_namlen);
2019 offset += node_stash[i]->d_dir.d_namlen;
2020 if (i > 0) {
2021 *(buffer+offset) = '/';
2022 offset++;
2023 }
2024 }
2025#undef DEVFS_STASH_DEPTH
2026 return 0;
2027}
2028
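/*
 * A minimal usage sketch (buffer sizing per the comment above; the
 * debug call is just an assumed consumer of the resolved path):
 *
 *	char path[PATH_MAX];
 *
 *	devfs_node_to_path(node, path);
 *	devfs_debug(DEVFS_DEBUG_DEBUG, "node resolves to %s\n", path);
 */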
2029/*
 2030 * devfs_clone either derives the basename of a device name by
 2031 * storing its length without trailing digits in *namlenp, or,
 2032 * if clone != 0, calls the device's clone handler to obtain a
 2033 * new device, which is returned in *devp.
2034 */
2035int
2036devfs_clone(char *name, size_t *namlenp, cdev_t *devp, int clone,
2037 struct ucred *cred)
2038{
 2039	struct devfs_clone_handler *chandler;
 2040	struct dev_clone_args ap;
 2041	size_t len;
 2042	int error = 1;
 2043
 2044	KKASSERT(namlenp);
 2045	len = *namlenp;
2046 if (!clone) {
2047 for (; (len > 0) && (DEVFS_ISDIGIT(name[len-1])); len--);
2048 }
2049
2050 TAILQ_FOREACH(chandler, &devfs_chandler_list, link) {
2051 if ((chandler->namlen == len) &&
2052 (!memcmp(chandler->name, name, len)) &&
2053 (chandler->nhandler)) {
2054 if (clone) {
2055 ap.a_dev = NULL;
2056 ap.a_name = name;
2057 ap.a_namelen = len;
2058 ap.a_cred = cred;
2059 error = (chandler->nhandler)(&ap);
2060 KKASSERT(devp);
2061 *devp = ap.a_dev;
2062 } else {
2063 *namlenp = len;
2064 error = 0;
2065 }
2066
2067 break;
2068 }
2069 }
2070
2071 return error;
2072}
2073
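/*
 * Two usage sketches for devfs_clone(); the device names are made up
 * and both assume a matching clone handler has been registered:
 *
 * Basename mode (clone == 0), only *namlenp is adjusted; for "ptyp12"
 * the length shrinks from 6 to 4, i.e. "ptyp":
 *
 *	size_t namlen = 6;
 *
 *	if (devfs_clone("ptyp12", &namlen, NULL, 0, NULL) == 0)
 *		;	now namlen == 4
 *
 * Clone mode (clone != 0), the handler produces a new cdev in *devp:
 *
 *	size_t namlen = 5;
 *	cdev_t ndev;
 *
 *	if (devfs_clone("mydev", &namlen, &ndev, 1, cred) == 0)
 *		;	ndev is the freshly cloned device
 */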
2074
2075/*
2076 * Registers a new orphan in the orphan list.
2077 */
2078void
2079devfs_tracer_add_orphan(struct devfs_node *node)
2080{
2081 struct devfs_orphan *orphan;
2082
2083 KKASSERT(node);
2084 orphan = kmalloc(sizeof(struct devfs_orphan), M_DEVFS, M_WAITOK);
2085 orphan->node = node;
2086
2087 KKASSERT((node->flags & DEVFS_ORPHANED) == 0);
2088 node->flags |= DEVFS_ORPHANED;
2089 TAILQ_INSERT_TAIL(DEVFS_ORPHANLIST(node->mp), orphan, link);
2090}
2091
2092/*
2093 * Removes an orphan from the orphan list.
2094 */
2095void
2096devfs_tracer_del_orphan(struct devfs_node *node)
2097{
2098 struct devfs_orphan *orphan;
2099
2100 KKASSERT(node);
2101
2102 TAILQ_FOREACH(orphan, DEVFS_ORPHANLIST(node->mp), link) {
2103 if (orphan->node == node) {
ca8d7677 2104 node->flags &= ~DEVFS_ORPHANED;
2105 TAILQ_REMOVE(DEVFS_ORPHANLIST(node->mp), orphan, link);
2106 kfree(orphan, M_DEVFS);
2107 break;
2108 }
2109 }
2110}
2111
2112/*
2113 * Counts the orphans in the orphan list, and if cleanup
 2114 * is specified, also frees each orphan and removes it
 2115 * from the list.
2116 */
2117size_t
2118devfs_tracer_orphan_count(struct mount *mp, int cleanup)
2119{
2120 struct devfs_orphan *orphan, *orphan2;
2121 size_t count = 0;
2122
2123 TAILQ_FOREACH_MUTABLE(orphan, DEVFS_ORPHANLIST(mp), link, orphan2) {
2124 count++;
2125 /*
2126 * If we are instructed to clean up, we do so.
2127 */
21864bc5 2128 if (cleanup) {
21864bc5 2129 TAILQ_REMOVE(DEVFS_ORPHANLIST(mp), orphan, link);
2130 orphan->node->flags &= ~DEVFS_ORPHANED;
2131 devfs_freep(orphan->node);
2132 kfree(orphan, M_DEVFS);
2133 }
2134 }
2135
2136 return count;
2137}
2138
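/*
 * A usage sketch (the unmount context is an assumption): counting is
 * non-destructive, while passing cleanup != 0 also reaps the orphans,
 * which is what an unmount would want before tearing the mount down:
 *
 *	size_t orphans;
 *
 *	orphans = devfs_tracer_orphan_count(mp, 0);
 *	if (orphans > 0)
 *		devfs_tracer_orphan_count(mp, 1);
 */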
2139/*
 2140 * Fetch a new ino_t by incrementing the global d_ino
 2141 * counter while holding its spinlock.
2142 */
2143static ino_t
2144devfs_fetch_ino(void)
2145{
2146 ino_t ret;
2147
2148 spin_lock_wr(&ino_lock);
2149 ret = d_ino++;
2150 spin_unlock_wr(&ino_lock);
2151
2152 return ret;
2153}
2154
2155/*
 2156 * Allocates a new cdev and initializes its most basic
 2157 * fields.
2158 */
2159cdev_t
2160devfs_new_cdev(struct dev_ops *ops, int minor)
2161{
2162 cdev_t dev = sysref_alloc(&cdev_sysref_class);
2163 sysref_activate(&dev->si_sysref);
2164 reference_dev(dev);
2165 memset(dev, 0, offsetof(struct cdev, si_sysref));
2166
2167 dev->si_uid = 0;
2168 dev->si_gid = 0;
2169 dev->si_perms = 0;
2170 dev->si_drv1 = NULL;
2171 dev->si_drv2 = NULL;
2172 dev->si_lastread = 0; /* time_second */
2173 dev->si_lastwrite = 0; /* time_second */
2174
2175 dev->si_ops = ops;
894bbb25 2176 dev->si_flags = 0;
2177 dev->si_umajor = 0;
2178 dev->si_uminor = minor;
7cbab9da 2179 dev->si_inode = makeudev(devfs_reference_ops(ops), minor);
2180
2181 return dev;
2182}
2183
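/*
 * A minimal allocation sketch ("mydrv_ops", "unit" and the follow-up
 * steps are assumptions): devfs_new_cdev() only fills in the basics,
 * so a caller still has to name the device and make it visible, e.g.
 * by linking it and propagating it to the mounts under devfs_lock:
 *
 *	cdev_t dev;
 *
 *	dev = devfs_new_cdev(&mydrv_ops, unit);
 *	... set si_name, then devfs_link_dev(dev) and
 *	    devfs_propagate_dev(dev, 1) with devfs_lock held ...
 */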
2184static void
2185devfs_cdev_terminate(cdev_t dev)
2186{
2187 int locked = 0;
2188
 2189	/* Check if it is locked already. If not, we acquire the devfs lock */
 2190	if (lockstatus(&devfs_lock, curthread) != LK_EXCLUSIVE) {
2191 lockmgr(&devfs_lock, LK_EXCLUSIVE);
2192 locked = 1;
2193 }
2194
21864bc5
MD
2195 /* Propagate destruction, just in case */
2196 devfs_propagate_dev(dev, 0);
2197
2198 /* If we acquired the lock, we also get rid of it */
2199 if (locked)
2200 lockmgr(&devfs_lock, LK_RELEASE);
2201
2202 devfs_release_ops(dev->si_ops);
2203
2204 /* Finally destroy the device */
2205 sysref_put(&dev->si_sysref);
2206}
2207
2208/*
2209 * Links a given cdev into the dev list.
2210 */
2211int
2212devfs_link_dev(cdev_t dev)
2213{
ca8d7677 2214 KKASSERT((dev->si_flags & SI_DEVFS_LINKED) == 0);
2215 dev->si_flags |= SI_DEVFS_LINKED;
2216 TAILQ_INSERT_TAIL(&devfs_dev_list, dev, link);
2217
2218 return 0;
2219}
2220
2221/*
2222 * Removes a given cdev from the dev list. The caller is responsible for
2223 * releasing the reference on the device associated with the linkage.
2224 *
2225 * Returns EALREADY if the dev has already been unlinked.
21864bc5 2226 */
ca8d7677 2227static int
2228devfs_unlink_dev(cdev_t dev)
2229{
2230 if ((dev->si_flags & SI_DEVFS_LINKED)) {
2231 TAILQ_REMOVE(&devfs_dev_list, dev, link);
2232 dev->si_flags &= ~SI_DEVFS_LINKED;
ca8d7677 2233 return (0);
21864bc5 2234 }
ca8d7677 2235 return (EALREADY);
2236}
2237
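/*
 * A pairing sketch for the two helpers above, as used within this file
 * (the locking shown is an assumption): linking asserts the dev is not
 * yet on the list, unlinking tolerates repeats by returning EALREADY:
 *
 *	lockmgr(&devfs_lock, LK_EXCLUSIVE);
 *	devfs_link_dev(dev);
 *	...
 *	if (devfs_unlink_dev(dev) == EALREADY)
 *		devfs_debug(DEVFS_DEBUG_DEBUG,
 *			    "%s was already unlinked\n", dev->si_name);
 *	lockmgr(&devfs_lock, LK_RELEASE);
 */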
2238int
2239devfs_node_is_accessible(struct devfs_node *node)
2240{
2241 if ((node) && (!(node->flags & DEVFS_HIDDEN)))
2242 return 1;
2243 else
2244 return 0;
2245}
2246
2247int
2248devfs_reference_ops(struct dev_ops *ops)
2249{
2250 int unit;
2251
2252 if (ops->head.refs == 0) {
2253 ops->head.id = devfs_clone_bitmap_get(&DEVFS_CLONE_BITMAP(ops_id), 255);
2254 if (ops->head.id == -1) {
2255 /* Ran out of unique ids */
2256 devfs_debug(DEVFS_DEBUG_WARNING,
2257 "devfs_reference_ops: WARNING: ran out of unique ids\n");
2258 }
2259 }
2260 unit = ops->head.id;
2261 ++ops->head.refs;
2262
2263 return unit;
2264}
2265
2266void
2267devfs_release_ops(struct dev_ops *ops)
2268{
2269 --ops->head.refs;
2270
2271 if (ops->head.refs == 0) {
2272 devfs_clone_bitmap_put(&DEVFS_CLONE_BITMAP(ops_id), ops->head.id);
2273 }
2274}
2275
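/*
 * The reference/release pair above backs the per-ops unit id used for
 * si_inode; a sketch of the pairing as this file itself uses it:
 *
 *	In devfs_new_cdev():
 *		dev->si_inode = makeudev(devfs_reference_ops(ops), minor);
 *
 *	In devfs_cdev_terminate():
 *		devfs_release_ops(dev->si_ops);
 *
 * The first reference allocates an id from the ops_id bitmap (a warning
 * is logged when the id space is exhausted) and the last release hands
 * the id back.
 */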
2276void
2277devfs_config(void *arg)
2278{
2279 devfs_msg_t msg;
2280
2281 msg = devfs_msg_get();
2282
2283 kprintf("devfs_config: sync'ing up\n");
2284 msg = devfs_msg_send_sync(DEVFS_SYNC, msg);
2285 devfs_msg_put(msg);
2286}
2287
2288/*
2289 * Called on init of devfs; creates the objcaches and
2290 * spawns off the devfs core thread. Also initializes
2291 * locks.
2292 */
2293static void
2294devfs_init(void)
2295{
2296 devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_init() called\n");
2297 /* Create objcaches for nodes, msgs and devs */
2298 devfs_node_cache = objcache_create("devfs-node-cache", 0, 0,
2299 NULL, NULL, NULL,
2300 objcache_malloc_alloc,
2301 objcache_malloc_free,
2302 &devfs_node_malloc_args );
2303
2304 devfs_msg_cache = objcache_create("devfs-msg-cache", 0, 0,
2305 NULL, NULL, NULL,
2306 objcache_malloc_alloc,
2307 objcache_malloc_free,
2308 &devfs_msg_malloc_args );
2309
2310 devfs_dev_cache = objcache_create("devfs-dev-cache", 0, 0,
2311 NULL, NULL, NULL,
2312 objcache_malloc_alloc,
2313 objcache_malloc_free,
2314 &devfs_dev_malloc_args );
2315
7cbab9da 2316 devfs_clone_bitmap_init(&DEVFS_CLONE_BITMAP(ops_id));
7cbab9da 2317
2318 /* Initialize the reply-only port which acts as a message drain */
2319 lwkt_initport_replyonly(&devfs_dispose_port, devfs_msg_autofree_reply);
2320
2321 /* Initialize *THE* devfs lock */
2322 lockinit(&devfs_lock, "devfs_core lock", 0, 0);
2323
2324
2325 lwkt_create(devfs_msg_core, /*args*/NULL, &td_core, NULL,
2326 0, 0, "devfs_msg_core");
2327
2328 tsleep(td_core/*devfs_id*/, 0, "devfsc", 0);
2329
2330 devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_init finished\n");
2331}
2332
2333/*
2334 * Called on unload of devfs; takes care of destroying the core
2335 * and the objcaches. Also removes aliases that are no longer needed.
2336 */
2337static void
2338devfs_uninit(void)
2339{
2340 devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_uninit() called\n");
2341
2342 devfs_msg_send(DEVFS_TERMINATE_CORE, NULL);
2343
2344 tsleep(td_core/*devfs_id*/, 0, "devfsc", 0);
2345 tsleep(td_core/*devfs_id*/, 0, "devfsc", 10000);
2346
2347 devfs_clone_bitmap_uninit(&DEVFS_CLONE_BITMAP(ops_id));
2348
2349 /* Destroy the objcaches */
2350 objcache_destroy(devfs_msg_cache);
2351 objcache_destroy(devfs_node_cache);
2352 objcache_destroy(devfs_dev_cache);
2353
2354 devfs_alias_reap();
2355}
2356
2357/*
 2358 * This is a sysctl handler to assist userland devname(3) in
 2359 * finding the device name for a given udev.
2360 */
2361static int
2362devfs_sysctl_devname_helper(SYSCTL_HANDLER_ARGS)
2363{
2364 udev_t udev;
2365 cdev_t found;
2366 int error;
2367
2368
2369 if ((error = SYSCTL_IN(req, &udev, sizeof(udev_t))))
2370 return (error);
2371
2372 devfs_debug(DEVFS_DEBUG_DEBUG, "devfs sysctl, received udev: %d\n", udev);
2373
2374 if (udev == NOUDEV)
2375 return(EINVAL);
2376
2377 if ((found = devfs_find_device_by_udev(udev)) == NULL)
2378 return(ENOENT);
2379
2380 return(SYSCTL_OUT(req, found->si_name, strlen(found->si_name) + 1));
2381}
2382
2383
2384SYSCTL_PROC(_kern, OID_AUTO, devname, CTLTYPE_OPAQUE|CTLFLAG_RW|CTLFLAG_ANYBODY,
2385 NULL, 0, devfs_sysctl_devname_helper, "", "helper for devname(3)");
2386
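/*
 * A userland sketch of how this handler can be driven (error handling
 * elided; obtaining the udev from stat(2)'s st_rdev and the buffer
 * size are assumptions):
 *
 *	char buf[256];
 *	size_t len = sizeof(buf);
 *	udev_t udev = st.st_rdev;
 *
 *	if (sysctlbyname("kern.devname", buf, &len, &udev,
 *			 sizeof(udev)) == 0)
 *		printf("udev %d is %s\n", (int)udev, buf);
 */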
2387static SYSCTL_NODE(_vfs, OID_AUTO, devfs, CTLFLAG_RW, 0, "devfs");
2388TUNABLE_INT("vfs.devfs.debug", &devfs_debug_enable);
2389SYSCTL_INT(_vfs_devfs, OID_AUTO, debug, CTLFLAG_RW, &devfs_debug_enable,
2390 0, "Enable DevFS debugging");
21864bc5 2391
2392SYSINIT(vfs_devfs_register, SI_SUB_PRE_DRIVERS, SI_ORDER_FIRST,
2393 devfs_init, NULL);
2394SYSUNINIT(vfs_devfs_register, SI_SUB_PRE_DRIVERS, SI_ORDER_ANY,
2395 devfs_uninit, NULL);