devfs - remove obsolete KKASSERT
[dragonfly.git] / sys / vfs / devfs / devfs_core.c
1/*
2 * Copyright (c) 2009 The DragonFly Project. All rights reserved.
3 *
4 * This code is derived from software contributed to The DragonFly Project
5 * by Alex Hornung <ahornung@gmail.com>
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
16 * distribution.
17 * 3. Neither the name of The DragonFly Project nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific, prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 */
34#include <sys/param.h>
35#include <sys/systm.h>
36#include <sys/kernel.h>
37#include <sys/mount.h>
38#include <sys/vnode.h>
39#include <sys/types.h>
40#include <sys/lock.h>
41#include <sys/msgport.h>
42#include <sys/msgport2.h>
43#include <sys/spinlock2.h>
44#include <sys/sysctl.h>
45#include <sys/ucred.h>
46#include <sys/param.h>
47#include <sys/sysref2.h>
7cbab9da 48#include <sys/systm.h>
49#include <sys/devfs.h>
50#include <sys/devfs_rules.h>
115f9a72 51#include <sys/hotplug.h>
52
53MALLOC_DEFINE(M_DEVFS, "devfs", "Device File System (devfs) allocations");
7cbab9da 54DEVFS_DECLARE_CLONE_BITMAP(ops_id);
55/*
56 * SYSREF Integration - reference counting, allocation,
57 * sysid and syslink integration.
58 */
59static void devfs_cdev_terminate(cdev_t dev);
60static void devfs_cdev_lock(cdev_t dev);
61static void devfs_cdev_unlock(cdev_t dev);
62static struct sysref_class cdev_sysref_class = {
63 .name = "cdev",
64 .mtype = M_DEVFS,
65 .proto = SYSREF_PROTO_DEV,
66 .offset = offsetof(struct cdev, si_sysref),
67 .objsize = sizeof(struct cdev),
68 .mag_capacity = 32,
69 .flags = 0,
70 .ops = {
71 .terminate = (sysref_terminate_func_t)devfs_cdev_terminate,
72 .lock = (sysref_lock_func_t)devfs_cdev_lock,
73 .unlock = (sysref_unlock_func_t)devfs_cdev_unlock
74 }
75};
76
77static struct objcache *devfs_node_cache;
78static struct objcache *devfs_msg_cache;
79static struct objcache *devfs_dev_cache;
80
81static struct objcache_malloc_args devfs_node_malloc_args = {
82 sizeof(struct devfs_node), M_DEVFS };
83struct objcache_malloc_args devfs_msg_malloc_args = {
84 sizeof(struct devfs_msg), M_DEVFS };
85struct objcache_malloc_args devfs_dev_malloc_args = {
86 sizeof(struct cdev), M_DEVFS };
87
88static struct devfs_dev_head devfs_dev_list =
89 TAILQ_HEAD_INITIALIZER(devfs_dev_list);
90static struct devfs_mnt_head devfs_mnt_list =
91 TAILQ_HEAD_INITIALIZER(devfs_mnt_list);
92static struct devfs_chandler_head devfs_chandler_list =
93 TAILQ_HEAD_INITIALIZER(devfs_chandler_list);
94static struct devfs_alias_head devfs_alias_list =
95 TAILQ_HEAD_INITIALIZER(devfs_alias_list);
96static struct devfs_dev_ops_head devfs_dev_ops_list =
97 TAILQ_HEAD_INITIALIZER(devfs_dev_ops_list);
98
99struct lock devfs_lock;
100static struct lwkt_port devfs_dispose_port;
101static struct lwkt_port devfs_msg_port;
102static struct thread *td_core;
21864bc5 103
21864bc5 104static struct spinlock ino_lock;
105static ino_t d_ino;
106static int devfs_debug_enable;
107static int devfs_run;
108
109static ino_t devfs_fetch_ino(void);
110static int devfs_create_all_dev_worker(struct devfs_node *);
111static int devfs_create_dev_worker(cdev_t, uid_t, gid_t, int);
112static int devfs_destroy_dev_worker(cdev_t);
113static int devfs_destroy_subnames_worker(char *);
114static int devfs_destroy_dev_by_ops_worker(struct dev_ops *, int);
115static int devfs_propagate_dev(cdev_t, int);
ca8d7677 116static int devfs_unlink_dev(cdev_t dev);
d0fe8596 117static void devfs_msg_exec(devfs_msg_t msg);
21864bc5 118
119static int devfs_chandler_add_worker(const char *, d_clone_t *);
120static int devfs_chandler_del_worker(const char *);
121
122static void devfs_msg_autofree_reply(lwkt_port_t, lwkt_msg_t);
123static void devfs_msg_core(void *);
124
125static int devfs_find_device_by_name_worker(devfs_msg_t);
126static int devfs_find_device_by_udev_worker(devfs_msg_t);
127
128static int devfs_apply_reset_rules_caller(char *, int);
129
130static int devfs_scan_callback_worker(devfs_scan_t *);
131
132static struct devfs_node *devfs_resolve_or_create_dir(struct devfs_node *,
133 char *, size_t, int);
134
135static int devfs_make_alias_worker(struct devfs_alias *);
136static int devfs_alias_remove(cdev_t);
137static int devfs_alias_reap(void);
138static int devfs_alias_propagate(struct devfs_alias *);
139static int devfs_alias_apply(struct devfs_node *, struct devfs_alias *);
140static int devfs_alias_check_create(struct devfs_node *);
141
142static int devfs_clr_subnames_flag_worker(char *, uint32_t);
143static int devfs_destroy_subnames_without_flag_worker(char *, uint32_t);
144
145static void *devfs_reaperp_callback(struct devfs_node *, void *);
146static void *devfs_gc_dirs_callback(struct devfs_node *, void *);
147static void *devfs_gc_links_callback(struct devfs_node *, struct devfs_node *);
148static void *
149devfs_inode_to_vnode_worker_callback(struct devfs_node *, ino_t *);
150
151/* hotplug */
152void (*devfs_node_added)(struct hotplug_device*) = NULL;
153void (*devfs_node_removed)(struct hotplug_device*) = NULL;
154
21864bc5 155/*
156 * devfs_debug() is a SYSCTL and TUNABLE controlled debug output function
157 * using kvprintf
158 */
159int
160devfs_debug(int level, char *fmt, ...)
161{
162 __va_list ap;
163
164 __va_start(ap, fmt);
165 if (level <= devfs_debug_enable)
166 kvprintf(fmt, ap);
167 __va_end(ap);
168
169 return 0;
170}
171
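/*
 * Illustrative sketch only (not part of the upstream source): a caller can
 * emit level-gated debug output through devfs_debug(), e.g. using the
 * DEVFS_DEBUG_* levels from sys/devfs.h:
 *
 *	devfs_debug(DEVFS_DEBUG_DEBUG, "created node for %s\n", dev->si_name);
 *
 * The message is only printed when devfs_debug_enable is at least as high
 * as the level passed in.
 */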
172/*
173 * devfs_allocp() allocates a new devfs node with the specified
174 * parameters. The node is also automatically linked into the topology
175 * if a parent is specified. It also applies the relevant rules and
176 * aliases to the new node.
177 */
178struct devfs_node *
179devfs_allocp(devfs_nodetype devfsnodetype, char *name,
180 struct devfs_node *parent, struct mount *mp, cdev_t dev)
181{
182 struct devfs_node *node = NULL;
183 size_t namlen = strlen(name);
184
185 node = objcache_get(devfs_node_cache, M_WAITOK);
186 bzero(node, sizeof(*node));
187
71f27d2d 188 atomic_add_long(&(DEVFS_MNTDATA(mp)->leak_count), 1);
21864bc5 189
ca8d7677 190 node->d_dev = NULL;
191 node->nchildren = 1;
192 node->mp = mp;
193 node->d_dir.d_ino = devfs_fetch_ino();
21864bc5 194
195 /*
196 * Cookie jar for children. Leave 0 and 1 for '.' and '..' entries
197 * respectively.
198 */
199 node->cookie_jar = 2;
200
201 /*
202 * Access Control members
203 */
204 node->mode = DEVFS_DEFAULT_MODE;
205 node->uid = DEVFS_DEFAULT_UID;
206 node->gid = DEVFS_DEFAULT_GID;
21864bc5 207
208 switch (devfsnodetype) {
209 case Proot:
210 /*
211 * Ensure that we don't recycle the root vnode by marking it as
212 * linked into the topology.
213 */
894bbb25 214 node->flags |= DEVFS_NODE_LINKED;
215 case Pdir:
216 TAILQ_INIT(DEVFS_DENODE_HEAD(node));
217 node->d_dir.d_type = DT_DIR;
218 node->nchildren = 2;
219 break;
220
221 case Plink:
222 node->d_dir.d_type = DT_LNK;
223 break;
224
225 case Preg:
226 node->d_dir.d_type = DT_REG;
227 break;
228
229 case Pdev:
230 if (dev != NULL) {
231 node->d_dir.d_type = DT_CHR;
232 node->d_dev = dev;
21864bc5 233
234 node->mode = dev->si_perms;
235 node->uid = dev->si_uid;
236 node->gid = dev->si_gid;
237
238 devfs_alias_check_create(node);
239 }
240 break;
241
242 default:
243 panic("devfs_allocp: unknown node type");
244 }
245
246 node->v_node = NULL;
247 node->node_type = devfsnodetype;
248
bc185c5a 249 /* Initialize the dirent structure of each devfs vnode */
21864bc5 250 node->d_dir.d_namlen = namlen;
ca8d7677 251 node->d_dir.d_name = kmalloc(namlen+1, M_DEVFS, M_WAITOK);
252 memcpy(node->d_dir.d_name, name, namlen);
253 node->d_dir.d_name[namlen] = '\0';
254
255 /* Initialize the parent node element */
256 node->parent = parent;
257
258 /* Apply rules */
66abefa5 259 devfs_rule_check_apply(node, NULL);
21864bc5 260
bc185c5a 261 /* Initialize *time members */
262 nanotime(&node->atime);
263 node->mtime = node->ctime = node->atime;
264
265 /*
266 * Associate with parent as last step, clean out namecache
267 * reference.
268 */
21864bc5 269 if ((parent != NULL) &&
ca8d7677 270 ((parent->node_type == Proot) || (parent->node_type == Pdir))) {
271 parent->nchildren++;
272 node->cookie = parent->cookie_jar++;
273 node->flags |= DEVFS_NODE_LINKED;
ca8d7677 274 TAILQ_INSERT_TAIL(DEVFS_DENODE_HEAD(parent), node, link);
21864bc5 275
276 /* This forces negative namecache lookups to clear */
277 ++mp->mnt_namecache_gen;
278 }
21864bc5 279
280 ++DEVFS_MNTDATA(mp)->file_count;
281
282 return node;
283}
284
285/*
286 * devfs_allocv() allocates a new vnode based on a devfs node.
287 */
288int
289devfs_allocv(struct vnode **vpp, struct devfs_node *node)
290{
291 struct vnode *vp;
292 int error = 0;
293
294 KKASSERT(node);
295
296try_again:
297 while ((vp = node->v_node) != NULL) {
298 error = vget(vp, LK_EXCLUSIVE);
299 if (error != ENOENT) {
300 *vpp = vp;
301 goto out;
302 }
303 }
21864bc5 304
305 if ((error = getnewvnode(VT_DEVFS, node->mp, vpp, 0, 0)) != 0)
306 goto out;
307
308 vp = *vpp;
309
310 if (node->v_node != NULL) {
311 vp->v_type = VBAD;
312 vx_put(vp);
313 goto try_again;
314 }
315
316 vp->v_data = node;
317 node->v_node = vp;
318
319 switch (node->node_type) {
320 case Proot:
321 vsetflags(vp, VROOT);
322 /* fall through */
323 case Pdir:
324 vp->v_type = VDIR;
325 break;
326
327 case Plink:
328 vp->v_type = VLNK;
329 break;
330
331 case Preg:
332 vp->v_type = VREG;
333 break;
334
335 case Pdev:
336 vp->v_type = VCHR;
337 KKASSERT(node->d_dev);
338
339 vp->v_uminor = node->d_dev->si_uminor;
340 vp->v_umajor = 0;
341
342 v_associate_rdev(vp, node->d_dev);
343 vp->v_ops = &node->mp->mnt_vn_spec_ops;
344 break;
345
346 default:
347 panic("devfs_allocv: unknown node type");
348 }
349
350out:
351 return error;
352}
353
354/*
355 * devfs_allocvp allocates both a devfs node (with the given settings) and a vnode
356 * based on the newly created devfs node.
357 */
358int
359devfs_allocvp(struct mount *mp, struct vnode **vpp, devfs_nodetype devfsnodetype,
bc185c5a 360 char *name, struct devfs_node *parent, cdev_t dev)
361{
362 struct devfs_node *node;
363
21864bc5 364 node = devfs_allocp(devfsnodetype, name, parent, mp, dev);
bc185c5a 365
366 if (node != NULL)
367 devfs_allocv(vpp, node);
368 else
369 *vpp = NULL;
370
371 return 0;
372}
373
374/*
375 * Destroy the devfs_node. The node must be unlinked from the topology.
376 *
377 * This function will also destroy any vnode association with the node
378 * and device.
379 *
380 * The cdev_t itself remains intact.
381 */
382int
383devfs_freep(struct devfs_node *node)
384{
385 struct vnode *vp;
386
21864bc5 387 KKASSERT(node);
388 KKASSERT(((node->flags & DEVFS_NODE_LINKED) == 0) ||
389 (node->node_type == Proot));
390 KKASSERT((node->flags & DEVFS_DESTROYED) == 0);
21864bc5 391
71f27d2d 392 atomic_subtract_long(&(DEVFS_MNTDATA(node->mp)->leak_count), 1);
393 if (node->symlink_name) {
394 kfree(node->symlink_name, M_DEVFS);
395 node->symlink_name = NULL;
396 }
397
398 /*
399 * Remove the node from the orphan list if it is still on it.
400 */
401 if (node->flags & DEVFS_ORPHANED)
402 devfs_tracer_del_orphan(node);
403
404 /*
405 * Disassociate the vnode from the node. This also prevents the
406 * vnode's reclaim code from double-freeing the node.
407 *
408 * The vget is needed to safely modify the vp. It also serves
409 * to cycle the refs and terminate the vnode if it happens to
410 * be inactive, otherwise namecache references may not get cleared.
ca8d7677 411 */
412 while ((vp = node->v_node) != NULL) {
413 if (vget(vp, LK_EXCLUSIVE | LK_RETRY) != 0)
414 break;
9b823501 415 v_release_rdev(vp);
416 vp->v_data = NULL;
417 node->v_node = NULL;
71f27d2d 418 cache_inval_vp(vp, CINV_DESTROY);
e23485a5 419 vput(vp);
ca8d7677 420 }
4062d050 421 if (node->d_dir.d_name) {
ca8d7677 422 kfree(node->d_dir.d_name, M_DEVFS);
423 node->d_dir.d_name = NULL;
424 }
425 node->flags |= DEVFS_DESTROYED;
426
427 --DEVFS_MNTDATA(node->mp)->file_count;
428
429 objcache_put(devfs_node_cache, node);
430
431 return 0;
432}
433
434/*
435 * Unlink the devfs node from the topology and add it to the orphan list.
436 * The node will later be destroyed by freep.
437 *
438 * Any vnode association, including the v_rdev and v_data, remains intact
439 * until the freep.
440 */
441int
442devfs_unlinkp(struct devfs_node *node)
443{
444 struct devfs_node *parent;
115f9a72 445 struct hotplug_device *hpdev;
446 KKASSERT(node);
447
448 /*
449 * Add the node to the orphan list, so it is referenced somewhere
450 * and we don't leak it.
451 */
21864bc5 452 devfs_tracer_add_orphan(node);
bc185c5a 453
454 parent = node->parent;
455
456 /*
457 * If the parent is known we can unlink the node out of the topology
458 */
459 if (parent) {
460 TAILQ_REMOVE(DEVFS_DENODE_HEAD(parent), node, link);
461 parent->nchildren--;
462 KKASSERT((parent->nchildren >= 0));
463 node->flags &= ~DEVFS_NODE_LINKED;
464 }
465 /* hotplug handler */
466 if(devfs_node_removed) {
467 hpdev = kmalloc(sizeof(struct hotplug_device), M_TEMP, M_WAITOK);
468 hpdev->dev = node->d_dev;
469 if(hpdev->dev)
470 hpdev->name = node->d_dev->si_name;
471 devfs_node_removed(hpdev);
472 kfree(hpdev, M_TEMP);
473 }
21864bc5 474 node->parent = NULL;
475 return 0;
476}
477
478void *
479devfs_iterate_topology(struct devfs_node *node,
480 devfs_iterate_callback_t *callback, void *arg1)
481{
482 struct devfs_node *node1, *node2;
66abefa5 483 void *ret = NULL;
21864bc5 484
21864bc5 485 if ((node->node_type == Proot) || (node->node_type == Pdir)) {
21864bc5 486 if (node->nchildren > 2) {
ca8d7677 487 TAILQ_FOREACH_MUTABLE(node1, DEVFS_DENODE_HEAD(node),
bc185c5a 488 link, node2) {
489 if ((ret = devfs_iterate_topology(node1, callback, arg1)))
490 return ret;
491 }
492 }
493 }
21864bc5 494
495 ret = callback(node, arg1);
496 return ret;
497}
498
499/*
500 * devfs_reaperp() is a recursive function that iterates through the
501 * entire topology, unlinking and freeing all devfs nodes.
21864bc5 502 */
503static void *
504devfs_reaperp_callback(struct devfs_node *node, void *unused)
21864bc5 505{
21864bc5 506 devfs_unlinkp(node);
507 devfs_freep(node);
508
66abefa5 509 return NULL;
510}
511
512static void *
513devfs_gc_dirs_callback(struct devfs_node *node, void *unused)
21864bc5 514{
66abefa5 515 if (node->node_type == Pdir) {
21864bc5 516 if (node->nchildren == 2) {
517 devfs_unlinkp(node);
518 devfs_freep(node);
519 }
520 }
521
522 return NULL;
523}
524
525static void *
526devfs_gc_links_callback(struct devfs_node *node, struct devfs_node *target)
527{
528 if ((node->node_type == Plink) && (node->link_target == target)) {
529 devfs_unlinkp(node);
530 devfs_freep(node);
531 }
532
533 return NULL;
534}
535
536/*
537 * devfs_gc() is the devfs garbage collector. It takes care of unlinking
538 * and freeing a node, but also removes empty directories and links that
539 * link via the devfs auto-link mechanism to the node being deleted.
21864bc5 540 */
541int
542devfs_gc(struct devfs_node *node)
21864bc5 543{
66abefa5 544 struct devfs_node *root_node = DEVFS_MNTDATA(node->mp)->root_node;
21864bc5 545
546 if (node->nlinks > 0)
547 devfs_iterate_topology(root_node,
548 (devfs_iterate_callback_t *)devfs_gc_links_callback, node);
21864bc5 549
550 devfs_unlinkp(node);
551 devfs_iterate_topology(root_node,
552 (devfs_iterate_callback_t *)devfs_gc_dirs_callback, NULL);
553
554 devfs_freep(node);
21864bc5 555
66abefa5 556 return 0;
557}
558
559/*
560 * devfs_create_dev() is the asynchronous entry point for device creation.
561 * It just sends a message with the relevant details to the devfs core.
562 *
563 * This function will reference the passed device. The reference is owned
564 * by devfs and represents all of the device's node associations.
565 */
566int
567devfs_create_dev(cdev_t dev, uid_t uid, gid_t gid, int perms)
568{
ca8d7677 569 reference_dev(dev);
570 devfs_msg_send_dev(DEVFS_DEVICE_CREATE, dev, uid, gid, perms);
571
572 return 0;
573}
574
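/*
 * Hypothetical usage sketch (names are illustrative, not taken from a real
 * driver): a driver that already holds a cdev would typically hand it to
 * devfs on attach and revoke it again on detach:
 *
 *	devfs_create_dev(dev, UID_ROOT, GID_WHEEL, 0600);
 *	...
 *	devfs_destroy_dev(dev);
 *
 * devfs_create_dev() takes its own reference on the cdev, as noted above.
 */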
575/*
576 * devfs_destroy_dev() is the asynchronous entry point for device destruction.
577 * It just sends a message with the relevant details to the devfs core.
578 */
579int
580devfs_destroy_dev(cdev_t dev)
581{
582 devfs_msg_send_dev(DEVFS_DEVICE_DESTROY, dev, 0, 0, 0);
583 return 0;
584}
585
586/*
587 * devfs_mount_add() is the synchronous entry point for adding a new devfs
588 * mount. It sends a synchronous message with the relevant details to the
589 * devfs core.
590 */
591int
592devfs_mount_add(struct devfs_mnt_data *mnt)
593{
594 devfs_msg_t msg;
595
596 msg = devfs_msg_get();
ca8d7677 597 msg->mdv_mnt = mnt;
598 msg = devfs_msg_send_sync(DEVFS_MOUNT_ADD, msg);
599 devfs_msg_put(msg);
600
601 return 0;
602}
603
604/*
605 * devfs_mount_del() is the synchronous entry point for removing a devfs mount.
606 * It sends a synchronous message with the relevant details to the devfs core.
607 */
608int
609devfs_mount_del(struct devfs_mnt_data *mnt)
610{
611 devfs_msg_t msg;
612
613 msg = devfs_msg_get();
ca8d7677 614 msg->mdv_mnt = mnt;
615 msg = devfs_msg_send_sync(DEVFS_MOUNT_DEL, msg);
616 devfs_msg_put(msg);
617
618 return 0;
619}
620
621/*
622 * devfs_destroy_subnames() is the synchronous entry point for device
623 * destruction by subname. It just sends a message with the relevant details to
624 * the devfs core.
625 */
626int
627devfs_destroy_subnames(char *name)
628{
629 devfs_msg_t msg;
630
631 msg = devfs_msg_get();
632 msg->mdv_load = name;
633 msg = devfs_msg_send_sync(DEVFS_DESTROY_SUBNAMES, msg);
634 devfs_msg_put(msg);
635 return 0;
636}
637
638int
639devfs_clr_subnames_flag(char *name, uint32_t flag)
640{
641 devfs_msg_t msg;
642
643 msg = devfs_msg_get();
644 msg->mdv_flags.name = name;
645 msg->mdv_flags.flag = flag;
646 msg = devfs_msg_send_sync(DEVFS_CLR_SUBNAMES_FLAG, msg);
647 devfs_msg_put(msg);
648
649 return 0;
650}
651
652int
653devfs_destroy_subnames_without_flag(char *name, uint32_t flag)
654{
655 devfs_msg_t msg;
656
657 msg = devfs_msg_get();
658 msg->mdv_flags.name = name;
659 msg->mdv_flags.flag = flag;
660 msg = devfs_msg_send_sync(DEVFS_DESTROY_SUBNAMES_WO_FLAG, msg);
661 devfs_msg_put(msg);
662
663 return 0;
664}
665
666/*
667 * devfs_create_all_dev is the asynchronous entry point to trigger device
668 * node creation. It just sends a message with the relevant details to
669 * the devfs core.
670 */
671int
672devfs_create_all_dev(struct devfs_node *root)
673{
674 devfs_msg_send_generic(DEVFS_CREATE_ALL_DEV, root);
675 return 0;
676}
677
678/*
679 * devfs_destroy_dev_by_ops is the asynchronous entry point to destroy all
680 * devices with a specific set of dev_ops and minor. It just sends a
681 * message with the relevant details to the devfs core.
682 */
683int
684devfs_destroy_dev_by_ops(struct dev_ops *ops, int minor)
685{
686 devfs_msg_send_ops(DEVFS_DESTROY_DEV_BY_OPS, ops, minor);
687 return 0;
688}
689
690/*
691 * devfs_clone_handler_add is the synchronous entry point to add a new
692 * clone handler. It just sends a message with the relevant details to
693 * the devfs core.
694 */
695int
07dfa375 696devfs_clone_handler_add(const char *name, d_clone_t *nhandler)
21864bc5 697{
698 devfs_msg_t msg;
699
700 msg = devfs_msg_get();
d0fe8596 701 msg->mdv_chandler.name = name;
702 msg->mdv_chandler.nhandler = nhandler;
703 msg = devfs_msg_send_sync(DEVFS_CHANDLER_ADD, msg);
704 devfs_msg_put(msg);
705 return 0;
706}
707
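/*
 * Illustrative sketch only: a cloning driver would register its handler at
 * attach time and remove it again on unload (mydev and mydev_clone are
 * hypothetical names for a d_clone_t compatible handler):
 *
 *	devfs_clone_handler_add("mydev", mydev_clone);
 *	...
 *	devfs_clone_handler_del("mydev");
 */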
708/*
709 * devfs_clone_handler_del is the synchronous entry point to remove a
710 * clone handler. It just sends a message with the relevant details to
711 * the devfs core.
712 */
713int
07dfa375 714devfs_clone_handler_del(const char *name)
21864bc5 715{
716 devfs_msg_t msg;
717
718 msg = devfs_msg_get();
d0fe8596 719 msg->mdv_chandler.name = name;
720 msg->mdv_chandler.nhandler = NULL;
721 msg = devfs_msg_send_sync(DEVFS_CHANDLER_DEL, msg);
722 devfs_msg_put(msg);
723 return 0;
724}
725
726/*
727 * devfs_find_device_by_name is the synchronous entry point to find a
728 * device given its name. It sends a synchronous message with the
729 * relevant details to the devfs core and returns the answer.
730 */
731cdev_t
732devfs_find_device_by_name(const char *fmt, ...)
733{
734 cdev_t found = NULL;
735 devfs_msg_t msg;
da655383 736 char *target;
MD
738
739 if (fmt == NULL)
740 return NULL;
741
21864bc5 742 __va_start(ap, fmt);
da655383 743 kvasnrprintf(&target, PATH_MAX, 10, fmt, ap);
744 __va_end(ap);
745
21864bc5 746 msg = devfs_msg_get();
ca8d7677 747 msg->mdv_name = target;
21864bc5 748 msg = devfs_msg_send_sync(DEVFS_FIND_DEVICE_BY_NAME, msg);
ca8d7677 749 found = msg->mdv_cdev;
21864bc5 750 devfs_msg_put(msg);
da655383 751 kvasfree(&target);
21864bc5 752
753 return found;
754}
755
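/*
 * Illustrative sketch only: the lookup takes a printf-style name, so a
 * hypothetical caller could resolve a unit-numbered device like this
 * ("mydev%d" and the ENOENT handling are purely illustrative):
 *
 *	cdev_t found = devfs_find_device_by_name("mydev%d", unit);
 *	if (found == NULL)
 *		return (ENOENT);
 */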
756/*
757 * devfs_find_device_by_udev is the synchronous entry point to find a
758 * device given its udev number. It sends a synchronous message with
759 * the relevant details to the devfs core and returns the answer.
760 */
761cdev_t
762devfs_find_device_by_udev(udev_t udev)
763{
764 cdev_t found = NULL;
765 devfs_msg_t msg;
766
767 msg = devfs_msg_get();
ca8d7677 768 msg->mdv_udev = udev;
21864bc5 769 msg = devfs_msg_send_sync(DEVFS_FIND_DEVICE_BY_UDEV, msg);
ca8d7677 770 found = msg->mdv_cdev;
771 devfs_msg_put(msg);
772
773 devfs_debug(DEVFS_DEBUG_DEBUG,
774 "devfs_find_device_by_udev found? %s -end:3-\n",
775 ((found) ? found->si_name:"NO"));
776 return found;
777}
778
779struct vnode *
780devfs_inode_to_vnode(struct mount *mp, ino_t target)
781{
782 struct vnode *vp = NULL;
783 devfs_msg_t msg;
784
785 if (mp == NULL)
786 return NULL;
787
788 msg = devfs_msg_get();
789 msg->mdv_ino.mp = mp;
790 msg->mdv_ino.ino = target;
791 msg = devfs_msg_send_sync(DEVFS_INODE_TO_VNODE, msg);
792 vp = msg->mdv_ino.vp;
793 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
794 devfs_msg_put(msg);
795
796 return vp;
797}
798
21864bc5 799/*
800 * devfs_make_alias is the asynchronous entry point to register an alias
801 * for a device. It just sends a message with the relevant details to the
802 * devfs core.
803 */
804int
07dfa375 805devfs_make_alias(const char *name, cdev_t dev_target)
21864bc5 806{
ca8d7677 807 struct devfs_alias *alias;
808 size_t len;
809
810 len = strlen(name);
811
812 alias = kmalloc(sizeof(struct devfs_alias), M_DEVFS, M_WAITOK);
07dfa375 813 alias->name = kstrdup(name, M_DEVFS);
5298e788 814 alias->namlen = len;
815 alias->dev_target = dev_target;
816
817 devfs_msg_send_generic(DEVFS_MAKE_ALIAS, alias);
818 return 0;
819}
820
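/*
 * Illustrative sketch only: an alias adds an extra name for an existing
 * cdev, here a hypothetical "cdrom" alias for an already-created cd device:
 *
 *	devfs_make_alias("cdrom", cd_dev);
 */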
821/*
822 * devfs_apply_rules is the asynchronous entry point to trigger application
823 * of all rules. It just sends a message with the relevant details to the
824 * devfs core.
825 */
826int
827devfs_apply_rules(char *mntto)
828{
829 char *new_name;
21864bc5 830
07dfa375 831 new_name = kstrdup(mntto, M_DEVFS);
21864bc5 832 devfs_msg_send_name(DEVFS_APPLY_RULES, new_name);
bc185c5a 833
834 return 0;
835}
836
837/*
838 * devfs_reset_rules is the asynchronous entry point to trigger reset of all
839 * rules. It just sends a message with the relevant details to the devfs core.
840 */
841int
842devfs_reset_rules(char *mntto)
843{
844 char *new_name;
21864bc5 845
07dfa375 846 new_name = kstrdup(mntto, M_DEVFS);
21864bc5 847 devfs_msg_send_name(DEVFS_RESET_RULES, new_name);
bc185c5a 848
849 return 0;
850}
851
852
853/*
854 * devfs_scan_callback is the synchronous entry point to call a callback
855 * on all cdevs.
856 * It just sends a message with the relevant details to the devfs core.
857 */
858int
859devfs_scan_callback(devfs_scan_t *callback)
860{
861 devfs_msg_t msg;
862
863 KKASSERT(sizeof(callback) == sizeof(void *));
864
865 msg = devfs_msg_get();
ca8d7677 866 msg->mdv_load = callback;
867 msg = devfs_msg_send_sync(DEVFS_SCAN_CALLBACK, msg);
868 devfs_msg_put(msg);
869
870 return 0;
871}
872
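/*
 * Illustrative sketch only: the callback is invoked once for every cdev on
 * the devfs device list (mydev_count and mydev_count_cb are hypothetical):
 *
 *	static int mydev_count;
 *
 *	static void
 *	mydev_count_cb(cdev_t dev)
 *	{
 *		++mydev_count;
 *	}
 *
 *	devfs_scan_callback(mydev_count_cb);
 */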
873
874/*
875 * Acts as a message drain. Any message that is replied to here gets destroyed
876 * and the memory freed.
877 */
878static void
879devfs_msg_autofree_reply(lwkt_port_t port, lwkt_msg_t msg)
880{
881 devfs_msg_put((devfs_msg_t)msg);
882}
883
884/*
885 * devfs_msg_get allocates a new devfs msg and returns it.
886 */
887devfs_msg_t
b815579b 888devfs_msg_get(void)
889{
890 return objcache_get(devfs_msg_cache, M_WAITOK);
891}
892
893/*
894 * devfs_msg_put deallocates a given devfs msg.
895 */
896int
897devfs_msg_put(devfs_msg_t msg)
898{
899 objcache_put(devfs_msg_cache, msg);
900 return 0;
901}
902
903/*
904 * devfs_msg_send is the generic asynchronous message sending facility
905 * for devfs. By default the reply port is the automatic disposal port.
906 *
907 * If the current thread is the devfs_msg_port thread we execute the
908 * operation synchronously.
21864bc5 909 */
d0fe8596 910void
911devfs_msg_send(uint32_t cmd, devfs_msg_t devfs_msg)
912{
913 lwkt_port_t port = &devfs_msg_port;
914
d0fe8596 915 lwkt_initmsg(&devfs_msg->hdr, &devfs_dispose_port, 0);
21864bc5 916
d0fe8596 917 devfs_msg->hdr.u.ms_result = cmd;
21864bc5 918
919 if (port->mpu_td == curthread) {
920 devfs_msg_exec(devfs_msg);
921 lwkt_replymsg(&devfs_msg->hdr, 0);
922 } else {
923 lwkt_sendmsg(port, (lwkt_msg_t)devfs_msg);
924 }
925}
926
927/*
928 * devfs_msg_send_sync is the generic synchronous message sending
929 * facility for devfs. It initializes a local reply port and waits
930 * for the core's answer. This answer is then returned.
931 */
932devfs_msg_t
933devfs_msg_send_sync(uint32_t cmd, devfs_msg_t devfs_msg)
934{
935 struct lwkt_port rep_port;
936 devfs_msg_t msg_incoming;
937 lwkt_port_t port = &devfs_msg_port;
938
939 lwkt_initport_thread(&rep_port, curthread);
d0fe8596 940 lwkt_initmsg(&devfs_msg->hdr, &rep_port, 0);
21864bc5 941
d0fe8596 942 devfs_msg->hdr.u.ms_result = cmd;
21864bc5 943
d0fe8596 944 lwkt_sendmsg(port, (lwkt_msg_t)devfs_msg);
945 msg_incoming = lwkt_waitport(&rep_port, 0);
946
947 return msg_incoming;
948}
949
950/*
951 * sends a message with a generic argument.
952 */
d0fe8596 953void
954devfs_msg_send_generic(uint32_t cmd, void *load)
955{
d0fe8596 956 devfs_msg_t devfs_msg = devfs_msg_get();
21864bc5 957
958 devfs_msg->mdv_load = load;
959 devfs_msg_send(cmd, devfs_msg);
960}
961
962/*
963 * sends a message with a name argument.
964 */
d0fe8596 965void
966devfs_msg_send_name(uint32_t cmd, char *name)
967{
d0fe8596 968 devfs_msg_t devfs_msg = devfs_msg_get();
21864bc5 969
970 devfs_msg->mdv_name = name;
971 devfs_msg_send(cmd, devfs_msg);
972}
973
974/*
975 * sends a message with a mount argument.
976 */
d0fe8596 977void
978devfs_msg_send_mount(uint32_t cmd, struct devfs_mnt_data *mnt)
979{
d0fe8596 980 devfs_msg_t devfs_msg = devfs_msg_get();
21864bc5 981
982 devfs_msg->mdv_mnt = mnt;
983 devfs_msg_send(cmd, devfs_msg);
984}
985
986/*
987 * sends a message with an ops argument.
988 */
d0fe8596 989void
990devfs_msg_send_ops(uint32_t cmd, struct dev_ops *ops, int minor)
991{
d0fe8596 992 devfs_msg_t devfs_msg = devfs_msg_get();
21864bc5 993
994 devfs_msg->mdv_ops.ops = ops;
995 devfs_msg->mdv_ops.minor = minor;
996 devfs_msg_send(cmd, devfs_msg);
997}
998
999/*
1000 * sends a message with a clone handler argument.
1001 */
d0fe8596 1002void
1003devfs_msg_send_chandler(uint32_t cmd, char *name, d_clone_t handler)
1004{
d0fe8596 1005 devfs_msg_t devfs_msg = devfs_msg_get();
21864bc5 1006
1007 devfs_msg->mdv_chandler.name = name;
1008 devfs_msg->mdv_chandler.nhandler = handler;
1009 devfs_msg_send(cmd, devfs_msg);
1010}
1011
1012/*
1013 * sends a message with a device argument.
1014 */
d0fe8596 1015void
1016devfs_msg_send_dev(uint32_t cmd, cdev_t dev, uid_t uid, gid_t gid, int perms)
1017{
1018 devfs_msg_t devfs_msg = devfs_msg_get();
1019
1020 devfs_msg->mdv_dev.dev = dev;
1021 devfs_msg->mdv_dev.uid = uid;
1022 devfs_msg->mdv_dev.gid = gid;
1023 devfs_msg->mdv_dev.perms = perms;
21864bc5 1024
d0fe8596 1025 devfs_msg_send(cmd, devfs_msg);
1026}
1027
1028/*
1029 * sends a message with a link argument.
1030 */
d0fe8596 1031void
1032devfs_msg_send_link(uint32_t cmd, char *name, char *target, struct mount *mp)
1033{
1034 devfs_msg_t devfs_msg = devfs_msg_get();
1035
1036 devfs_msg->mdv_link.name = name;
1037 devfs_msg->mdv_link.target = target;
1038 devfs_msg->mdv_link.mp = mp;
d0fe8596 1039 devfs_msg_send(cmd, devfs_msg);
1040}
1041
1042/*
1043 * devfs_msg_core is the main devfs thread. It handles all incoming messages
1044 * and calls the relevant worker functions. By using messages it's assured
1045 * that events occur in the correct order.
1046 */
1047static void
1048devfs_msg_core(void *arg)
1049{
ca8d7677 1050 devfs_msg_t msg;
21864bc5 1051
d0fe8596 1052 devfs_run = 1;
21864bc5 1053 lwkt_initport_thread(&devfs_msg_port, curthread);
bc185c5a 1054 wakeup(td_core);
21864bc5 1055
d0fe8596 1056 while (devfs_run) {
ca8d7677 1057 msg = (devfs_msg_t)lwkt_waitport(&devfs_msg_port, 0);
bc185c5a 1058 devfs_debug(DEVFS_DEBUG_DEBUG,
1059 "devfs_msg_core, new msg: %x\n",
1060 (unsigned int)msg->hdr.u.ms_result);
1061 devfs_msg_exec(msg);
1062 lwkt_replymsg(&msg->hdr, 0);
1063 }
1064 wakeup(td_core);
1065 lwkt_exit();
1066}
21864bc5 1067
1068static void
1069devfs_msg_exec(devfs_msg_t msg)
1070{
1071 struct devfs_mnt_data *mnt;
1072 struct devfs_node *node;
1073 cdev_t dev;
fa7e6f37 1074
1075 /*
1076 * Acquire the devfs lock to ensure safety of all called functions
1077 */
1078 lockmgr(&devfs_lock, LK_EXCLUSIVE);
1079
1080 switch (msg->hdr.u.ms_result) {
1081 case DEVFS_DEVICE_CREATE:
1082 dev = msg->mdv_dev.dev;
1083 devfs_create_dev_worker(dev,
1084 msg->mdv_dev.uid,
1085 msg->mdv_dev.gid,
1086 msg->mdv_dev.perms);
1087 break;
1088 case DEVFS_DEVICE_DESTROY:
1089 dev = msg->mdv_dev.dev;
1090 devfs_destroy_dev_worker(dev);
1091 break;
1092 case DEVFS_DESTROY_SUBNAMES:
1093 devfs_destroy_subnames_worker(msg->mdv_load);
1094 break;
1095 case DEVFS_DESTROY_DEV_BY_OPS:
1096 devfs_destroy_dev_by_ops_worker(msg->mdv_ops.ops,
1097 msg->mdv_ops.minor);
1098 break;
1099 case DEVFS_CREATE_ALL_DEV:
1100 node = (struct devfs_node *)msg->mdv_load;
1101 devfs_create_all_dev_worker(node);
1102 break;
1103 case DEVFS_MOUNT_ADD:
1104 mnt = msg->mdv_mnt;
1105 TAILQ_INSERT_TAIL(&devfs_mnt_list, mnt, link);
1106 devfs_create_all_dev_worker(mnt->root_node);
1107 break;
1108 case DEVFS_MOUNT_DEL:
1109 mnt = msg->mdv_mnt;
1110 TAILQ_REMOVE(&devfs_mnt_list, mnt, link);
1111 devfs_iterate_topology(mnt->root_node, devfs_reaperp_callback,
1112 NULL);
1113 if (mnt->leak_count) {
1114 devfs_debug(DEVFS_DEBUG_SHOW,
71f27d2d 1115 "Leaked %ld devfs_node elements!\n",
d0fe8596 1116 mnt->leak_count);
ca8d7677 1117 }
1118 break;
1119 case DEVFS_CHANDLER_ADD:
1120 devfs_chandler_add_worker(msg->mdv_chandler.name,
1121 msg->mdv_chandler.nhandler);
1122 break;
1123 case DEVFS_CHANDLER_DEL:
1124 devfs_chandler_del_worker(msg->mdv_chandler.name);
1125 break;
1126 case DEVFS_FIND_DEVICE_BY_NAME:
1127 devfs_find_device_by_name_worker(msg);
1128 break;
1129 case DEVFS_FIND_DEVICE_BY_UDEV:
1130 devfs_find_device_by_udev_worker(msg);
1131 break;
1132 case DEVFS_MAKE_ALIAS:
1133 devfs_make_alias_worker((struct devfs_alias *)msg->mdv_load);
1134 break;
1135 case DEVFS_APPLY_RULES:
1136 devfs_apply_reset_rules_caller(msg->mdv_name, 1);
1137 break;
1138 case DEVFS_RESET_RULES:
1139 devfs_apply_reset_rules_caller(msg->mdv_name, 0);
1140 break;
1141 case DEVFS_SCAN_CALLBACK:
1142 devfs_scan_callback_worker((devfs_scan_t *)msg->mdv_load);
1143 break;
1144 case DEVFS_CLR_SUBNAMES_FLAG:
1145 devfs_clr_subnames_flag_worker(msg->mdv_flags.name,
1146 msg->mdv_flags.flag);
1147 break;
1148 case DEVFS_DESTROY_SUBNAMES_WO_FLAG:
1149 devfs_destroy_subnames_without_flag_worker(msg->mdv_flags.name,
1150 msg->mdv_flags.flag);
1151 break;
1152 case DEVFS_INODE_TO_VNODE:
1153 msg->mdv_ino.vp = devfs_iterate_topology(
1154 DEVFS_MNTDATA(msg->mdv_ino.mp)->root_node,
1155 (devfs_iterate_callback_t *)devfs_inode_to_vnode_worker_callback,
1156 &msg->mdv_ino.ino);
1157 break;
1158 case DEVFS_TERMINATE_CORE:
1159 devfs_run = 0;
1160 break;
1161 case DEVFS_SYNC:
1162 break;
1163 default:
1164 devfs_debug(DEVFS_DEBUG_WARNING,
1165 "devfs_msg_core: unknown message "
1166 "received at core\n");
1167 break;
ca8d7677 1168 }
d0fe8596 1169 lockmgr(&devfs_lock, LK_RELEASE);
1170}
1171
1172/*
1173 * Worker function to insert a new dev into the dev list and initialize its
1174 * permissions. It also calls devfs_propagate_dev which in turn propagates
1175 * the change to all mount points.
1176 *
1177 * The passed dev is already referenced. This reference is eaten by this
1178 * function and represents the dev's linkage into devfs_dev_list.
1179 */
1180static int
1181devfs_create_dev_worker(cdev_t dev, uid_t uid, gid_t gid, int perms)
1182{
1183 KKASSERT(dev);
1184
1185 dev->si_uid = uid;
1186 dev->si_gid = gid;
1187 dev->si_perms = perms;
1188
1189 devfs_link_dev(dev);
1190 devfs_propagate_dev(dev, 1);
1191
1192 return 0;
1193}
1194
1195/*
1196 * Worker function to delete a dev from the dev list and free the cdev.
1197 * It also calls devfs_propagate_dev which in turn propagates the change
1198 * to all mount points.
1199 */
1200static int
1201devfs_destroy_dev_worker(cdev_t dev)
1202{
1203 int error;
1204
1205 KKASSERT(dev);
1206 KKASSERT((lockstatus(&devfs_lock, curthread)) == LK_EXCLUSIVE);
1207
ca8d7677 1208 error = devfs_unlink_dev(dev);
21864bc5 1209 devfs_propagate_dev(dev, 0);
1210 if (error == 0)
1211 release_dev(dev); /* link ref */
1212 release_dev(dev);
1213 release_dev(dev);
21864bc5 1214
1215 return 0;
1216}
1217
1218/*
1219 * Worker function to destroy all devices with a certain basename.
1220 * Calls devfs_destroy_dev_worker for the actual destruction.
1221 */
1222static int
1223devfs_destroy_subnames_worker(char *name)
1224{
1225 cdev_t dev, dev1;
1226 size_t len = strlen(name);
1227
ca8d7677 1228 TAILQ_FOREACH_MUTABLE(dev, &devfs_dev_list, link, dev1) {
1229 if ((!strncmp(dev->si_name, name, len)) &&
1230 (dev->si_name[len] != '\0')) {
1231 devfs_destroy_dev_worker(dev);
21864bc5 1232 }
1233 }
1234 return 0;
1235}
1236
1237static int
1238devfs_clr_subnames_flag_worker(char *name, uint32_t flag)
1239{
1240 cdev_t dev, dev1;
1241 size_t len = strlen(name);
1242
1243 TAILQ_FOREACH_MUTABLE(dev, &devfs_dev_list, link, dev1) {
1244 if ((!strncmp(dev->si_name, name, len)) &&
1245 (dev->si_name[len] != '\0')) {
1246 dev->si_flags &= ~flag;
1247 }
1248 }
1249
1250 return 0;
1251}
1252
1253static int
1254devfs_destroy_subnames_without_flag_worker(char *name, uint32_t flag)
1255{
1256 cdev_t dev, dev1;
1257 size_t len = strlen(name);
1258
1259 TAILQ_FOREACH_MUTABLE(dev, &devfs_dev_list, link, dev1) {
1260 if ((!strncmp(dev->si_name, name, len)) &&
1261 (dev->si_name[len] != '\0')) {
1262 if (!(dev->si_flags & flag)) {
1263 devfs_destroy_dev_worker(dev);
1264 }
1265 }
1266 }
1267
1268 return 0;
1269}
1270
1271/*
1272 * Worker function that creates all device nodes on top of a devfs
1273 * root node.
1274 */
1275static int
1276devfs_create_all_dev_worker(struct devfs_node *root)
1277{
1278 cdev_t dev;
1279
1280 KKASSERT(root);
21864bc5 1281
d0fe8596 1282 TAILQ_FOREACH(dev, &devfs_dev_list, link) {
21864bc5 1283 devfs_create_device_node(root, dev, NULL, NULL);
d0fe8596 1284 }
bc185c5a 1285
1286 return 0;
1287}
1288
1289/*
1290 * Worker function that destroys all devices that match a specific
1291 * dev_ops and/or minor. If minor is less than 0, it is not matched
1292 * against. It also propagates all changes.
1293 */
1294static int
1295devfs_destroy_dev_by_ops_worker(struct dev_ops *ops, int minor)
1296{
1297 cdev_t dev, dev1;
1298
1299 KKASSERT(ops);
1300
1301 TAILQ_FOREACH_MUTABLE(dev, &devfs_dev_list, link, dev1) {
1302 if (dev->si_ops != ops)
1303 continue;
1304 if ((minor < 0) || (dev->si_uminor == minor)) {
ca8d7677 1305 devfs_destroy_dev_worker(dev);
21864bc5 1306 }
ca8d7677 1307 }
bc185c5a 1308
1309 return 0;
1310}
1311
1312/*
1313 * Worker function that registers a new clone handler in devfs.
1314 */
1315static int
07dfa375 1316devfs_chandler_add_worker(const char *name, d_clone_t *nhandler)
21864bc5
MD
1317{
1318 struct devfs_clone_handler *chandler = NULL;
1319 u_char len = strlen(name);
1320
ca8d7677 1321 if (len == 0)
1322 return 1;
1323
ca8d7677 1324 TAILQ_FOREACH(chandler, &devfs_chandler_list, link) {
1325 if (chandler->namlen != len)
1326 continue;
1327
1328 if (!memcmp(chandler->name, name, len)) {
1329 /* Clonable basename already exists */
1330 return 1;
1331 }
1332 }
1333
ca8d7677 1334 chandler = kmalloc(sizeof(*chandler), M_DEVFS, M_WAITOK | M_ZERO);
07dfa375 1335 chandler->name = kstrdup(name, M_DEVFS);
1336 chandler->namlen = len;
1337 chandler->nhandler = nhandler;
1338
1339 TAILQ_INSERT_TAIL(&devfs_chandler_list, chandler, link);
1340 return 0;
1341}
1342
1343/*
1344 * Worker function that removes a given clone handler from the
1345 * clone handler list.
1346 */
1347static int
07dfa375 1348devfs_chandler_del_worker(const char *name)
1349{
1350 struct devfs_clone_handler *chandler, *chandler2;
1351 u_char len = strlen(name);
1352
ca8d7677 1353 if (len == 0)
1354 return 1;
1355
1356 TAILQ_FOREACH_MUTABLE(chandler, &devfs_chandler_list, link, chandler2) {
1357 if (chandler->namlen != len)
1358 continue;
1359 if (memcmp(chandler->name, name, len))
1360 continue;
bc185c5a 1361
ca8d7677 1362 TAILQ_REMOVE(&devfs_chandler_list, chandler, link);
5298e788 1363 kfree(chandler->name, M_DEVFS);
ca8d7677 1364 kfree(chandler, M_DEVFS);
5298e788 1365 break;
1366 }
1367
1368 return 0;
1369}
1370
1371/*
1372 * Worker function that finds a given device name and changes
1373 * the message received accordingly so that when replied to,
1374 * the answer is returned to the caller.
1375 */
1376static int
1377devfs_find_device_by_name_worker(devfs_msg_t devfs_msg)
1378{
1379 struct devfs_alias *alias;
1380 cdev_t dev;
21864bc5 1381 cdev_t found = NULL;
21864bc5 1382
1383 TAILQ_FOREACH(dev, &devfs_dev_list, link) {
1384 if (strcmp(devfs_msg->mdv_name, dev->si_name) == 0) {
1385 found = dev;
1386 break;
1387 }
ca8d7677 1388 }
1389 if (found == NULL) {
1390 TAILQ_FOREACH(alias, &devfs_alias_list, link) {
1391 if (strcmp(devfs_msg->mdv_name, alias->name) == 0) {
1392 found = alias->dev_target;
1393 break;
1394 }
1395 }
1396 }
ca8d7677 1397 devfs_msg->mdv_cdev = found;
1398
1399 return 0;
1400}
1401
1402/*
1403 * Worker function that finds a given device udev and changes
1404 * the message received accordingly so that when replied to,
1405 * the answer is returned to the caller.
1406 */
1407static int
1408devfs_find_device_by_udev_worker(devfs_msg_t devfs_msg)
1409{
1410 cdev_t dev, dev1;
1411 cdev_t found = NULL;
21864bc5 1412
1413 TAILQ_FOREACH_MUTABLE(dev, &devfs_dev_list, link, dev1) {
1414 if (((udev_t)dev->si_inode) == devfs_msg->mdv_udev) {
21864bc5
MD
1415 found = dev;
1416 break;
1417 }
1418 }
1419 devfs_msg->mdv_cdev = found;
1420
1421 return 0;
1422}
1423
1424/*
1425 * Worker function that inserts a given alias into the
1426 * alias list, and propagates the alias to all mount
1427 * points.
1428 */
1429static int
1430devfs_make_alias_worker(struct devfs_alias *alias)
1431{
1432 struct devfs_alias *alias2;
1433 size_t len = strlen(alias->name);
1434 int found = 0;
1435
1436 TAILQ_FOREACH(alias2, &devfs_alias_list, link) {
1437 if (len != alias2->namlen)
1438 continue;
1439
1440 if (!memcmp(alias->name, alias2->name, len)) {
1441 found = 1;
1442 break;
1443 }
1444 }
1445
1446 if (!found) {
1447 /*
1448 * The alias doesn't exist yet, so we add it to the alias list
1449 */
1450 TAILQ_INSERT_TAIL(&devfs_alias_list, alias, link);
1451 devfs_alias_propagate(alias);
1452 } else {
5298e788 1453 devfs_debug(DEVFS_DEBUG_WARNING,
1454 "Warning: duplicate devfs_make_alias for %s\n",
1455 alias->name);
5298e788 1456 kfree(alias->name, M_DEVFS);
1457 kfree(alias, M_DEVFS);
1458 }
1459
1460 return 0;
1461}
1462
1463/*
1464 * Function that removes and frees all aliases.
1465 */
1466static int
1467devfs_alias_reap(void)
1468{
1469 struct devfs_alias *alias, *alias2;
1470
1471 TAILQ_FOREACH_MUTABLE(alias, &devfs_alias_list, link, alias2) {
1472 TAILQ_REMOVE(&devfs_alias_list, alias, link);
1473 kfree(alias, M_DEVFS);
1474 }
1475 return 0;
1476}
1477
1478/*
1479 * Function that removes an alias matching a specific cdev and frees
1480 * it accordingly.
1481 */
1482static int
1483devfs_alias_remove(cdev_t dev)
1484{
1485 struct devfs_alias *alias, *alias2;
1486
1487 TAILQ_FOREACH_MUTABLE(alias, &devfs_alias_list, link, alias2) {
1488 if (alias->dev_target == dev) {
1489 TAILQ_REMOVE(&devfs_alias_list, alias, link);
1490 kfree(alias, M_DEVFS);
1491 }
1492 }
1493 return 0;
1494}
1495
1496/*
1497 * This function propagates a new alias to all mount points.
1498 */
1499static int
1500devfs_alias_propagate(struct devfs_alias *alias)
1501{
1502 struct devfs_mnt_data *mnt;
1503
1504 TAILQ_FOREACH(mnt, &devfs_mnt_list, link) {
1505 devfs_alias_apply(mnt->root_node, alias);
1506 }
1507 return 0;
1508}
1509
1510/*
1511 * This function is a recursive function iterating through
1512 * all device nodes in the topology and, if applicable,
1513 * creating the relevant alias for a device node.
1514 */
1515static int
1516devfs_alias_apply(struct devfs_node *node, struct devfs_alias *alias)
1517{
1518 struct devfs_node *node1, *node2;
1519
1520 KKASSERT(alias != NULL);
1521
1522 if ((node->node_type == Proot) || (node->node_type == Pdir)) {
21864bc5 1523 if (node->nchildren > 2) {
ca8d7677 1524 TAILQ_FOREACH_MUTABLE(node1, DEVFS_DENODE_HEAD(node), link, node2) {
1525 devfs_alias_apply(node1, alias);
1526 }
1527 }
1528 } else {
1529 if (node->d_dev == alias->dev_target)
1cb12919 1530 devfs_alias_create(alias->name, node, 0);
1531 }
1532 return 0;
1533}
1534
1535/*
1536 * This function checks whether any alias is applicable
1537 * to the given node. If so, the alias is created.
1538 */
1539static int
1540devfs_alias_check_create(struct devfs_node *node)
1541{
1542 struct devfs_alias *alias;
1543
1544 TAILQ_FOREACH(alias, &devfs_alias_list, link) {
1545 if (node->d_dev == alias->dev_target)
1cb12919 1546 devfs_alias_create(alias->name, node, 0);
1547 }
1548 return 0;
1549}
1550
1551/*
1552 * This function creates an alias with a given name
1553 * linking to a given devfs node. It also increments
1554 * the link count on the target node.
1555 */
1556int
1cb12919 1557devfs_alias_create(char *name_orig, struct devfs_node *target, int rule_based)
1558{
1559 struct mount *mp = target->mp;
1560 struct devfs_node *parent = DEVFS_MNTDATA(mp)->root_node;
1561 struct devfs_node *linknode;
115f9a72 1562 struct hotplug_device *hpdev;
21864bc5 1563 char *create_path = NULL;
1564 char *name;
1565 char *name_buf;
1566 int result = 0;
21864bc5 1567
1568 KKASSERT((lockstatus(&devfs_lock, curthread)) == LK_EXCLUSIVE);
1569
da655383 1570 name_buf = kmalloc(PATH_MAX, M_TEMP, M_WAITOK);
1571 devfs_resolve_name_path(name_orig, name_buf, &create_path, &name);
1572
1573 if (create_path)
1574 parent = devfs_resolve_or_create_path(parent, create_path, 1);
1575
1576
1577 if (devfs_find_device_node_by_name(parent, name)) {
bc185c5a 1578 devfs_debug(DEVFS_DEBUG_WARNING,
1579 "Node already exists: %s "
1580 "(devfs_make_alias_worker)!\n",
1581 name);
1582 result = 1;
1583 goto done;
1584 }
1585
21864bc5 1586 linknode = devfs_allocp(Plink, name, parent, mp, NULL);
1587 if (linknode == NULL) {
1588 result = 1;
1589 goto done;
1590 }
1591
1592 linknode->link_target = target;
1593 target->nlinks++;
21864bc5 1594
1595 if (rule_based)
1596 linknode->flags |= DEVFS_RULE_CREATED;
1597
da655383 1598done:
1599 /* hotplug handler */
1600 if(devfs_node_added) {
1601 hpdev = kmalloc(sizeof(struct hotplug_device), M_TEMP, M_WAITOK);
1602 hpdev->dev = target->d_dev;
1603 hpdev->name = name_orig;
1604 devfs_node_added(hpdev);
1605 kfree(hpdev, M_TEMP);
1606 }
1607 kfree(name_buf, M_TEMP);
1608 return (result);
1609}
1610
1611/*
1612 * This function is called by the core and handles mount point
1613 * strings. It either calls the relevant worker (devfs_apply_
1614 * reset_rules_worker) on all mountpoints or only a specific
1615 * one.
1616 */
1617static int
1618devfs_apply_reset_rules_caller(char *mountto, int apply)
1619{
21864bc5 1620 struct devfs_mnt_data *mnt;
21864bc5 1621
bc185c5a 1622 if (mountto[0] == '*') {
21864bc5 1623 TAILQ_FOREACH(mnt, &devfs_mnt_list, link) {
1624 devfs_iterate_topology(mnt->root_node,
1625 (apply)?(devfs_rule_check_apply):(devfs_rule_reset_node),
1626 NULL);
1627 }
1628 } else {
1629 TAILQ_FOREACH(mnt, &devfs_mnt_list, link) {
9cf39e57 1630 if (!strcmp(mnt->mp->mnt_stat.f_mntonname, mountto)) {
1631 devfs_iterate_topology(mnt->root_node,
1632 (apply)?(devfs_rule_check_apply):(devfs_rule_reset_node),
1633 NULL);
1634 break;
1635 }
1636 }
1637 }
1638
1639 kfree(mountto, M_DEVFS);
1640 return 0;
1641}
1642
1643/*
1644 * This function calls a given callback function for
1645 * every dev node in the devfs dev list.
1646 */
1647static int
1648devfs_scan_callback_worker(devfs_scan_t *callback)
1649{
1650 cdev_t dev, dev1;
1651
d0fe8596 1652 TAILQ_FOREACH_MUTABLE(dev, &devfs_dev_list, link, dev1) {
21864bc5 1653 callback(dev);
d0fe8596 1654 }
21864bc5 1655
1656 return 0;
1657}
1658
1659/*
1660 * This function tries to resolve a given directory, or if not
1661 * found and creation requested, creates the given directory.
1662 */
1663static struct devfs_node *
1664devfs_resolve_or_create_dir(struct devfs_node *parent, char *dir_name,
1665 size_t name_len, int create)
1666{
1667 struct devfs_node *node, *found = NULL;
1668
1669 TAILQ_FOREACH(node, DEVFS_DENODE_HEAD(parent), link) {
1670 if (name_len != node->d_dir.d_namlen)
1671 continue;
1672
1673 if (!memcmp(dir_name, node->d_dir.d_name, name_len)) {
1674 found = node;
1675 break;
1676 }
1677 }
1678
1679 if ((found == NULL) && (create)) {
1680 found = devfs_allocp(Pdir, dir_name, parent, parent->mp, NULL);
1681 }
1682
1683 return found;
1684}
1685
1686/*
1687 * This function tries to resolve a complete path. If creation is requested,
1688 * if a given part of the path cannot be resolved (because it doesn't exist),
1689 * it is created.
1690 */
1691struct devfs_node *
1692devfs_resolve_or_create_path(struct devfs_node *parent, char *path, int create)
1693{
1694 struct devfs_node *node = parent;
da655383 1695 char *buf;
1696 size_t idx = 0;
1697
1698 if (path == NULL)
1699 return parent;
1700
da655383 1701 buf = kmalloc(PATH_MAX, M_TEMP, M_WAITOK);
21864bc5 1702
da655383 1703 while (*path && idx < PATH_MAX - 1) {
1704 if (*path != '/') {
1705 buf[idx++] = *path;
1706 } else {
1707 buf[idx] = '\0';
1708 node = devfs_resolve_or_create_dir(node, buf, idx, create);
1709 if (node == NULL) {
1710 kfree(buf, M_TEMP);
21864bc5 1711 return NULL;
da655383 1712 }
1713 idx = 0;
1714 }
da655383 1715 ++path;
1716 }
1717 buf[idx] = '\0';
1718 node = devfs_resolve_or_create_dir(node, buf, idx, create);
1719 kfree (buf, M_TEMP);
1720 return (node);
1721}
1722
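/*
 * Illustrative sketch only: resolving (and, with create != 0, creating) a
 * nested directory below a mount's root node, here a hypothetical
 * "bus/usb" hierarchy:
 *
 *	struct devfs_node *dir;
 *
 *	dir = devfs_resolve_or_create_path(root, "bus/usb", 1);
 */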
1723/*
1724 * Takes a full path and strips it into a directory path and a name.
1725 * For a/b/c/foo, it returns foo in namep and a/b/c in pathp. It
1726 * requires a working buffer large enough to hold the whole
1727 * fullpath.
1728 */
1729int
1730devfs_resolve_name_path(char *fullpath, char *buf, char **pathp, char **namep)
1731{
1732 char *name = NULL;
1733 char *path = NULL;
1734 size_t len = strlen(fullpath) + 1;
1735 int i;
1736
1737 KKASSERT((fullpath != NULL) && (buf != NULL));
1738 KKASSERT((pathp != NULL) && (namep != NULL));
1739
1740 memcpy(buf, fullpath, len);
1741
1742 for (i = len-1; i>= 0; i--) {
1743 if (buf[i] == '/') {
1744 buf[i] = '\0';
1745 name = &(buf[i+1]);
1746 path = buf;
1747 break;
1748 }
1749 }
1750
1751 *pathp = path;
1752
1753 if (name) {
1754 *namep = name;
1755 } else {
1756 *namep = buf;
1757 }
1758
1759 return 0;
1760}
1761
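/*
 * Illustrative sketch only, following the a/b/c/foo example above: the
 * caller supplies a scratch buffer that survives as backing store for the
 * returned pointers:
 *
 *	char full[] = "a/b/c/foo";
 *	char buf[PATH_MAX];
 *	char *dirpath, *leaf;
 *
 *	devfs_resolve_name_path(full, buf, &dirpath, &leaf);
 *	dirpath now points at "a/b/c", leaf at "foo"
 */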
1762/*
ca8d7677 1763 * This function creates a new devfs node for a given device. It can
1764 * handle a complete path as device name, and accordingly creates
1765 * the path and the final device node.
1766 *
1767 * The reference count on the passed dev remains unchanged.
1768 */
1769struct devfs_node *
1770devfs_create_device_node(struct devfs_node *root, cdev_t dev,
1771 char *dev_name, char *path_fmt, ...)
1772{
1773 struct devfs_node *parent, *node = NULL;
115f9a72 1774 struct hotplug_device *hpdev;
21864bc5 1775 char *path = NULL;
1776 char *name;
1777 char *name_buf;
1778 __va_list ap;
1779 int i, found;
1780 char *create_path = NULL;
1781 char *names = "pqrsPQRS";
1782
da655383 1783 name_buf = kmalloc(PATH_MAX, M_TEMP, M_WAITOK);
21864bc5 1784
da655383 1785 if (path_fmt != NULL) {
21864bc5 1786 __va_start(ap, path_fmt);
da655383 1787 kvasnrprintf(&path, PATH_MAX, 10, path_fmt, ap);
1788 __va_end(ap);
1789 }
1790
1791 parent = devfs_resolve_or_create_path(root, path, 1);
1792 KKASSERT(parent);
1793
1794 devfs_resolve_name_path(
1795 ((dev_name == NULL) && (dev))?(dev->si_name):(dev_name),
1796 name_buf, &create_path, &name);
1797
1798 if (create_path)
1799 parent = devfs_resolve_or_create_path(parent, create_path, 1);
1800
1801
1802 if (devfs_find_device_node_by_name(parent, name)) {
bc185c5a 1803 devfs_debug(DEVFS_DEBUG_WARNING, "devfs_create_device_node: "
894bbb25 1804 "DEVICE %s ALREADY EXISTS!!! Ignoring creation request.\n", name);
1805 goto out;
1806 }
bc185c5a 1807
21864bc5 1808 node = devfs_allocp(Pdev, name, parent, parent->mp, dev);
07dfa375 1809 nanotime(&parent->mtime);
0182b316 1810
1811 /*
1812 * Ugly unix98 pty magic, to hide pty master (ptm) devices and their
1813 * directory
1814 */
1815 if ((dev) && (strlen(dev->si_name) >= 4) &&
1816 (!memcmp(dev->si_name, "ptm/", 4))) {
1817 node->parent->flags |= DEVFS_HIDDEN;
1818 node->flags |= DEVFS_HIDDEN;
21864bc5 1819 }
1820
1821 /*
1822 * Ugly pty magic, to tag pty devices as such and hide them if needed.
1823 */
1824 if ((strlen(name) >= 3) && (!memcmp(name, "pty", 3)))
1825 node->flags |= (DEVFS_PTY | DEVFS_INVISIBLE);
1826
1827 if ((strlen(name) >= 3) && (!memcmp(name, "tty", 3))) {
1828 found = 0;
1829 for (i = 0; i < strlen(names); i++) {
1830 if (name[3] == names[i]) {
1831 found = 1;
1832 break;
1833 }
1834 }
1835 if (found)
1836 node->flags |= (DEVFS_PTY | DEVFS_INVISIBLE);
1837 }
1838 /* hotplug handler */
1839 if(devfs_node_added) {
1840 hpdev = kmalloc(sizeof(struct hotplug_device), M_TEMP, M_WAITOK);
1841 hpdev->dev = node->d_dev;
1842 hpdev->name = node->d_dev->si_name;
1843 devfs_node_added(hpdev);
1844 kfree(hpdev, M_TEMP);
1845 }
1846
1847out:
1848 kfree(name_buf, M_TEMP);
1849 kvasfree(&path);
1850 return node;
1851}
1852
1853/*
1854 * This function finds a given device node in the topology with a given
1855 * cdev.
1856 */
1857void *
1858devfs_find_device_node_callback(struct devfs_node *node, cdev_t target)
21864bc5 1859{
1860 if ((node->node_type == Pdev) && (node->d_dev == target)) {
1861 return node;
21864bc5 1862 }
1863
1864 return NULL;
1865}
1866
1867/*
66abefa5 1868 * This function finds a device node in the given parent directory by its
1869 * name and returns it.
1870 */
1871struct devfs_node *
1872devfs_find_device_node_by_name(struct devfs_node *parent, char *target)
1873{
1874 struct devfs_node *node, *found = NULL;
1875 size_t len = strlen(target);
1876
1877 TAILQ_FOREACH(node, DEVFS_DENODE_HEAD(parent), link) {
1878 if (len != node->d_dir.d_namlen)
1879 continue;
1880
1881 if (!memcmp(node->d_dir.d_name, target, len)) {
1882 found = node;
1883 break;
1884 }
1885 }
1886
1887 return found;
1888}
1889
1890static void *
1891devfs_inode_to_vnode_worker_callback(struct devfs_node *node, ino_t *inop)
fa7e6f37 1892{
1893 struct vnode *vp = NULL;
1894 ino_t target = *inop;
bc185c5a 1895
1896 if (node->d_dir.d_ino == target) {
1897 if (node->v_node) {
1898 vp = node->v_node;
1899 vget(vp, LK_EXCLUSIVE | LK_RETRY);
1900 vn_unlock(vp);
1901 } else {
1902 devfs_allocv(&vp, node);
1903 vn_unlock(vp);
1904 }
1905 }
1906
66abefa5 1907 return vp;
1908}
1909
21864bc5 1910/*
1911 * This function takes a cdev and removes its devfs node in the
1912 * given topology. The cdev remains intact.
1913 */
1914int
1915devfs_destroy_device_node(struct devfs_node *root, cdev_t target)
1916{
1917 struct devfs_node *node, *parent;
1918 char *name;
1919 char *name_buf;
1920 char *create_path = NULL;
1921
1922 KKASSERT(target);
1923
1924 name_buf = kmalloc(PATH_MAX, M_TEMP, M_WAITOK);
1925 ksnprintf(name_buf, PATH_MAX, "%s", target->si_name);
1926
1927 devfs_resolve_name_path(target->si_name, name_buf, &create_path, &name);
1928
1929 if (create_path)
1930 parent = devfs_resolve_or_create_path(root, create_path, 0);
1931 else
1932 parent = root;
bc185c5a 1933
1934 if (parent == NULL) {
1935 kfree(name_buf, M_TEMP);
21864bc5 1936 return 1;
ab3436e7 1937 }
bc185c5a 1938
21864bc5 1939 node = devfs_find_device_node_by_name(parent, name);
bc185c5a 1940
1941 if (node) {
1942 nanotime(&node->parent->mtime);
21864bc5 1943 devfs_gc(node);
07dfa375 1944 }
115f9a72 1945
da655383 1946 kfree(name_buf, M_TEMP);
1947
1948 return 0;
1949}
1950
1951/*
1952 * Just set perms and ownership for the given node; the flags argument is currently ignored.
1953 */
1954int
1955devfs_set_perms(struct devfs_node *node, uid_t uid, gid_t gid,
1956 u_short mode, u_long flags)
21864bc5 1957{
1958 node->mode = mode;
1959 node->uid = uid;
1960 node->gid = gid;
1961
1962 return 0;
1963}
1964
1965/*
1966 * Propagates a device attach/detach to all mount
1967 * points. Also takes care of automatic alias removal
1968 * for a deleted cdev.
1969 */
1970static int
1971devfs_propagate_dev(cdev_t dev, int attach)
1972{
1973 struct devfs_mnt_data *mnt;
1974
21864bc5 1975 TAILQ_FOREACH(mnt, &devfs_mnt_list, link) {
1976 if (attach) {
1977 /* Device is being attached */
1978 devfs_create_device_node(mnt->root_node, dev,
1979 NULL, NULL );
1980 } else {
1981 /* Device is being detached */
1982 devfs_alias_remove(dev);
1983 devfs_destroy_device_node(mnt->root_node, dev);
1984 }
1985 }
1986 return 0;
1987}
1988
1989/*
1990 * devfs_clone looks up the clone handler registered for the given name
1991 * and, if one exists, asks it to create a new device.  The new device
1992 * is returned on success; NULL is returned if no handler matched or the
1993 * handler failed.
1994 */
1995cdev_t
1996devfs_clone(cdev_t dev, const char *name, size_t len, int mode,
bc185c5a 1997 struct ucred *cred)
21864bc5 1998{
07dfa375 1999 int error;
2000 struct devfs_clone_handler *chandler;
2001 struct dev_clone_args ap;
2002
d0fe8596 2003 TAILQ_FOREACH(chandler, &devfs_chandler_list, link) {
2004 if (chandler->namlen != len)
2005 continue;
2006 if ((!memcmp(chandler->name, name, len)) && (chandler->nhandler)) {
2007 lockmgr(&devfs_lock, LK_RELEASE);
2008 devfs_config();
2009 lockmgr(&devfs_lock, LK_EXCLUSIVE);
2010
2011 ap.a_head.a_dev = dev;
2012 ap.a_dev = NULL;
2013 ap.a_name = name;
2014 ap.a_namelen = len;
2015 ap.a_mode = mode;
2016 ap.a_cred = cred;
2017 error = (chandler->nhandler)(&ap);
2018 if (error)
2019 continue;
21864bc5 2020
07dfa375 2021 return ap.a_dev;
2022 }
2023 }
2024
07dfa375 2025 return NULL;
2026}
2027
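/*
 * Illustrative sketch, not compiled: the shape of a clone handler as it
 * is invoked by devfs_clone() above.  The handler receives the requested
 * name in ap->a_name/ap->a_namelen, is expected to store the resulting
 * device in ap->a_dev and return 0, or return an error so devfs_clone()
 * keeps scanning the handler list.  example_clone() and
 * example_lookup_unit_dev() are hypothetical names.
 */
#if 0
static int
example_clone(struct dev_clone_args *ap)
{
	cdev_t dev;

	/* e.g. derive a unit number from the trailing digits of the name */
	dev = example_lookup_unit_dev(ap->a_name, ap->a_namelen,
				      ap->a_mode, ap->a_cred);
	if (dev == NULL)
		return (ENODEV);

	ap->a_dev = dev;
	return (0);
}
#endif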
2028
2029/*
2030 * Registers a new orphan in the orphan list.
2031 */
2032void
2033devfs_tracer_add_orphan(struct devfs_node *node)
2034{
2035 struct devfs_orphan *orphan;
2036
2037 KKASSERT(node);
2038 orphan = kmalloc(sizeof(struct devfs_orphan), M_DEVFS, M_WAITOK);
2039 orphan->node = node;
2040
2041 KKASSERT((node->flags & DEVFS_ORPHANED) == 0);
2042 node->flags |= DEVFS_ORPHANED;
2043 TAILQ_INSERT_TAIL(DEVFS_ORPHANLIST(node->mp), orphan, link);
2044}
2045
2046/*
2047 * Removes an orphan from the orphan list.
2048 */
2049void
2050devfs_tracer_del_orphan(struct devfs_node *node)
2051{
2052 struct devfs_orphan *orphan;
2053
2054 KKASSERT(node);
2055
2056 TAILQ_FOREACH(orphan, DEVFS_ORPHANLIST(node->mp), link) {
2057 if (orphan->node == node) {
ca8d7677 2058 node->flags &= ~DEVFS_ORPHANED;
2059 TAILQ_REMOVE(DEVFS_ORPHANLIST(node->mp), orphan, link);
2060 kfree(orphan, M_DEVFS);
2061 break;
2062 }
2063 }
2064}
2065
2066/*
2067 * Counts the orphans in the orphan list and, if cleanup
2068 * is specified, also frees each orphan and removes it from
2069 * the list.
2070 */
2071size_t
2072devfs_tracer_orphan_count(struct mount *mp, int cleanup)
2073{
2074 struct devfs_orphan *orphan, *orphan2;
2075 size_t count = 0;
2076
2077 TAILQ_FOREACH_MUTABLE(orphan, DEVFS_ORPHANLIST(mp), link, orphan2) {
2078 count++;
2079 /*
2080 * If we are instructed to clean up, we do so.
2081 */
21864bc5 2082 if (cleanup) {
21864bc5 2083 TAILQ_REMOVE(DEVFS_ORPHANLIST(mp), orphan, link);
2084 orphan->node->flags &= ~DEVFS_ORPHANED;
2085 devfs_freep(orphan->node);
2086 kfree(orphan, M_DEVFS);
2087 }
2088 }
2089
2090 return count;
2091}
2092
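/*
 * Illustrative sketch, not compiled: a typical unmount-time use of the
 * orphan tracer, counting and freeing whatever nodes were left flagged
 * DEVFS_ORPHANED on the mount.  example_reap_orphans() is a hypothetical
 * name.
 */
#if 0
static void
example_reap_orphans(struct mount *mp)
{
	size_t leaked;

	leaked = devfs_tracer_orphan_count(mp, 1);	/* count and free */
	if (leaked) {
		devfs_debug(DEVFS_DEBUG_WARNING,
			    "devfs: reaped %d orphaned node(s)\n",
			    (int)leaked);
	}
}
#endif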
2093/*
2094 * Fetch a new ino_t by incrementing the global d_ino counter
2095 * while holding its spinlock.
2096 */
2097static ino_t
2098devfs_fetch_ino(void)
2099{
2100 ino_t ret;
2101
2102 spin_lock_wr(&ino_lock);
2103 ret = d_ino++;
2104 spin_unlock_wr(&ino_lock);
2105
2106 return ret;
2107}
2108
2109/*
2110 * Allocates a new cdev and initializes its most basic
2111 * fields.
2112 */
2113cdev_t
8f960aa9 2114devfs_new_cdev(struct dev_ops *ops, int minor, struct dev_ops *bops)
21864bc5 2115{
21864bc5 2116 cdev_t dev = sysref_alloc(&cdev_sysref_class);
da655383 2117
2118 sysref_activate(&dev->si_sysref);
2119 reference_dev(dev);
da655383 2120 bzero(dev, offsetof(struct cdev, si_sysref));
2121
2122 dev->si_uid = 0;
2123 dev->si_gid = 0;
2124 dev->si_perms = 0;
2125 dev->si_drv1 = NULL;
2126 dev->si_drv2 = NULL;
2127 dev->si_lastread = 0; /* time_second */
2128 dev->si_lastwrite = 0; /* time_second */
2129
2130 dev->si_ops = ops;
894bbb25 2131 dev->si_flags = 0;
2132 dev->si_umajor = 0;
2133 dev->si_uminor = minor;
8f960aa9 2134 dev->si_bops = bops;
2135 /* If there is a backing device, we reference its ops */
2136 dev->si_inode = makeudev(
8f960aa9 2137 devfs_reference_ops((bops)?(bops):(ops)),
47ae500f 2138 minor );
2139
2140 return dev;
2141}
2142
2143static void
2144devfs_cdev_terminate(cdev_t dev)
2145{
2146 int locked = 0;
2147
2148	/* Check if it is locked already. If not, we acquire the devfs lock */
2149	if (lockstatus(&devfs_lock, curthread) != LK_EXCLUSIVE) {
2150 lockmgr(&devfs_lock, LK_EXCLUSIVE);
2151 locked = 1;
2152 }
2153
2154 /* Propagate destruction, just in case */
2155 devfs_propagate_dev(dev, 0);
2156
2157 /* If we acquired the lock, we also get rid of it */
2158 if (locked)
2159 lockmgr(&devfs_lock, LK_RELEASE);
2160
47ae500f 2161 /* If there is a backing device, we release the backing device's ops */
8f960aa9 2162 devfs_release_ops((dev->si_bops)?(dev->si_bops):(dev->si_ops));
7cbab9da 2163
2164 /* Finally destroy the device */
2165 sysref_put(&dev->si_sysref);
2166}
2167
2168/*
2169 * Dummy lock/unlock handlers for now (individual per-device locks for MPSAFE)
2170 */
2171static void
2172devfs_cdev_lock(cdev_t dev)
2173{
2174}
2175
2176static void
2177devfs_cdev_unlock(cdev_t dev)
2178{
2179}
2180
2181/*
2182 * Links a given cdev into the dev list.
2183 */
2184int
2185devfs_link_dev(cdev_t dev)
2186{
ca8d7677 2187 KKASSERT((dev->si_flags & SI_DEVFS_LINKED) == 0);
2188 dev->si_flags |= SI_DEVFS_LINKED;
2189 TAILQ_INSERT_TAIL(&devfs_dev_list, dev, link);
2190
2191 return 0;
2192}
2193
2194/*
2195 * Removes a given cdev from the dev list. The caller is responsible for
2196 * releasing the reference on the device associated with the linkage.
2197 *
2198 * Returns EALREADY if the dev has already been unlinked.
21864bc5 2199 */
ca8d7677 2200static int
2201devfs_unlink_dev(cdev_t dev)
2202{
2203 if ((dev->si_flags & SI_DEVFS_LINKED)) {
2204 TAILQ_REMOVE(&devfs_dev_list, dev, link);
2205 dev->si_flags &= ~SI_DEVFS_LINKED;
ca8d7677 2206 return (0);
21864bc5 2207 }
ca8d7677 2208 return (EALREADY);
2209}
2210
2211int
2212devfs_node_is_accessible(struct devfs_node *node)
2213{
2214 if ((node) && (!(node->flags & DEVFS_HIDDEN)))
2215 return 1;
2216 else
2217 return 0;
2218}
2219
2220int
2221devfs_reference_ops(struct dev_ops *ops)
2222{
2223 int unit;
2224 struct devfs_dev_ops *found = NULL;
2225 struct devfs_dev_ops *devops;
7cbab9da 2226
2227 TAILQ_FOREACH(devops, &devfs_dev_ops_list, link) {
2228 if (devops->ops == ops) {
2229 found = devops;
2230 break;
2231 }
2232 }
2233
2234 if (!found) {
2235 found = kmalloc(sizeof(struct devfs_dev_ops), M_DEVFS, M_WAITOK);
2236 found->ops = ops;
2237 found->ref_count = 0;
2238 TAILQ_INSERT_TAIL(&devfs_dev_ops_list, found, link);
2239 }
2240
2241 KKASSERT(found);
2242
2243 if (found->ref_count == 0) {
2244 found->id = devfs_clone_bitmap_get(&DEVFS_CLONE_BITMAP(ops_id), 255);
2245 if (found->id == -1) {
7cbab9da 2246 /* Ran out of unique ids */
2247 devfs_debug(DEVFS_DEBUG_WARNING,
2248 "devfs_reference_ops: WARNING: ran out of unique ids\n");
2249 }
2250 }
2251 unit = found->id;
2252 ++found->ref_count;
2253
2254 return unit;
2255}
2256
2257void
2258devfs_release_ops(struct dev_ops *ops)
2259{
2260 struct devfs_dev_ops *found = NULL;
2261 struct devfs_dev_ops *devops;
2262
2263 TAILQ_FOREACH(devops, &devfs_dev_ops_list, link) {
2264 if (devops->ops == ops) {
2265 found = devops;
2266 break;
2267 }
2268 }
2269
2270 KKASSERT(found);
2271
2272 --found->ref_count;
7cbab9da 2273
2274 if (found->ref_count == 0) {
2275 TAILQ_REMOVE(&devfs_dev_ops_list, found, link);
2276 devfs_clone_bitmap_put(&DEVFS_CLONE_BITMAP(ops_id), found->id);
2277 kfree(found, M_DEVFS);
2278 }
2279}
2280
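/*
 * Illustrative sketch, not compiled: devfs_reference_ops() hands out a
 * dynamically allocated id (from the ops_id clone bitmap) per struct
 * dev_ops, and devfs_new_cdev() feeds that id into makeudev() to build
 * si_inode; devfs_cdev_terminate() drops the reference again via
 * devfs_release_ops().  Any other user must balance the calls the same
 * way, or the id is never returned to the bitmap.  example_ops_inode()
 * is a hypothetical name.
 */
#if 0
static ino_t
example_ops_inode(struct dev_ops *ops, int minor)
{
	ino_t inode;

	/* The first reference allocates the unique per-ops id */
	inode = makeudev(devfs_reference_ops(ops), minor);

	/* The matching release must happen when the device goes away */
	devfs_release_ops(ops);

	return inode;
}
#endif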
21864bc5 2281void
d0fe8596 2282devfs_config(void)
2283{
2284 devfs_msg_t msg;
2285
2286 msg = devfs_msg_get();
2287 msg = devfs_msg_send_sync(DEVFS_SYNC, msg);
2288 devfs_msg_put(msg);
2289}
2290
2291/*
2292 * Called on init of devfs; creates the objcaches and
2293 * spawns off the devfs core thread. Also initializes
2294 * locks.
2295 */
2296static void
2297devfs_init(void)
2298{
2299 devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_init() called\n");
2300 /* Create objcaches for nodes, msgs and devs */
2301 devfs_node_cache = objcache_create("devfs-node-cache", 0, 0,
2302 NULL, NULL, NULL,
2303 objcache_malloc_alloc,
2304 objcache_malloc_free,
2305 &devfs_node_malloc_args );
2306
2307 devfs_msg_cache = objcache_create("devfs-msg-cache", 0, 0,
2308 NULL, NULL, NULL,
2309 objcache_malloc_alloc,
2310 objcache_malloc_free,
2311 &devfs_msg_malloc_args );
2312
2313 devfs_dev_cache = objcache_create("devfs-dev-cache", 0, 0,
2314 NULL, NULL, NULL,
2315 objcache_malloc_alloc,
2316 objcache_malloc_free,
2317 &devfs_dev_malloc_args );
21864bc5 2318
7cbab9da 2319 devfs_clone_bitmap_init(&DEVFS_CLONE_BITMAP(ops_id));
7cbab9da 2320
2321 /* Initialize the reply-only port which acts as a message drain */
2322 lwkt_initport_replyonly(&devfs_dispose_port, devfs_msg_autofree_reply);
2323
2324 /* Initialize *THE* devfs lock */
2325 lockinit(&devfs_lock, "devfs_core lock", 0, 0);
2326
2327
2328 lwkt_create(devfs_msg_core, /*args*/NULL, &td_core, NULL,
2329 0, 0, "devfs_msg_core");
2330
2331 tsleep(td_core/*devfs_id*/, 0, "devfsc", 0);
2332
2333 devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_init finished\n");
2334}
2335
2336/*
2337 * Called on unload of devfs; takes care of destroying the core
2338 * and the objcaches. Also removes aliases that are no longer needed.
2339 */
2340static void
2341devfs_uninit(void)
2342{
2343 devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_uninit() called\n");
2344
2345 devfs_msg_send(DEVFS_TERMINATE_CORE, NULL);
2346
2347 tsleep(td_core/*devfs_id*/, 0, "devfsc", 0);
2348 tsleep(td_core/*devfs_id*/, 0, "devfsc", 10000);
2349
2350 devfs_clone_bitmap_uninit(&DEVFS_CLONE_BITMAP(ops_id));
2351
2352 /* Destroy the objcaches */
2353 objcache_destroy(devfs_msg_cache);
2354 objcache_destroy(devfs_node_cache);
2355 objcache_destroy(devfs_dev_cache);
2356
2357 devfs_alias_reap();
2358}
2359
2360/*
2361 * This is a sysctl handler to assist userland devname(3) in
2362 * finding the device name for a given udev.
2363 */
2364static int
2365devfs_sysctl_devname_helper(SYSCTL_HANDLER_ARGS)
2366{
2367 udev_t udev;
2368 cdev_t found;
2369 int error;
2370
2371
2372 if ((error = SYSCTL_IN(req, &udev, sizeof(udev_t))))
2373 return (error);
2374
2375 devfs_debug(DEVFS_DEBUG_DEBUG, "devfs sysctl, received udev: %d\n", udev);
2376
2377 if (udev == NOUDEV)
2378 return(EINVAL);
2379
2380 if ((found = devfs_find_device_by_udev(udev)) == NULL)
2381 return(ENOENT);
2382
2383 return(SYSCTL_OUT(req, found->si_name, strlen(found->si_name) + 1));
2384}
2385
2386
2387SYSCTL_PROC(_kern, OID_AUTO, devname, CTLTYPE_OPAQUE|CTLFLAG_RW|CTLFLAG_ANYBODY,
2388 NULL, 0, devfs_sysctl_devname_helper, "", "helper for devname(3)");
2389
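/*
 * Illustrative sketch, not compiled here (userland code): how the
 * kern.devname sysctl registered above is consumed.  The udev is passed
 * as the "new" value and the device name comes back as the "old" value,
 * which is what devname(3) relies on.  Deriving the udev directly from
 * st_rdev is a simplification assumed for this example.
 */
#if 0
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(int argc, char **argv)
{
	struct stat st;
	char name[256];
	size_t len = sizeof(name);
	udev_t udev;

	if (argc < 2 || stat(argv[1], &st) != 0)
		return (1);
	udev = st.st_rdev;

	if (sysctlbyname("kern.devname", name, &len, &udev, sizeof(udev)) == 0)
		printf("%s -> /dev/%s\n", argv[1], name);
	return (0);
}
#endif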
3a1032a6 2390SYSCTL_NODE(_vfs, OID_AUTO, devfs, CTLFLAG_RW, 0, "devfs");
21864bc5 2391TUNABLE_INT("vfs.devfs.debug", &devfs_debug_enable);
2392SYSCTL_INT(_vfs_devfs, OID_AUTO, debug, CTLFLAG_RW, &devfs_debug_enable,
2393 0, "Enable DevFS debugging");
21864bc5 2394
2395SYSINIT(vfs_devfs_register, SI_SUB_PRE_DRIVERS, SI_ORDER_FIRST,
2396 devfs_init, NULL);
2397SYSUNINIT(vfs_devfs_register, SI_SUB_PRE_DRIVERS, SI_ORDER_ANY,
2398 devfs_uninit, NULL);